My goal is to recognise shapes after training a classifier, similar to what is done in Chapter 12 of the OpenIMAJ tutorial (http://openimaj.org/tutorial/classification101.html). Chapter 12 uses the Caltech101 class, which does not help me, because I want to train the classifier on my own set of images. Based on Chapter 12 I wrote this working code:
package com.mycompany.video.analytics;
import de.bwaldvogel.liblinear.SolverType;
import org.openimaj.data.DataSource;
import org.openimaj.data.dataset.Dataset;
import org.openimaj.data.dataset.GroupedDataset;
import org.openimaj.data.dataset.ListDataset;
import org.openimaj.data.dataset.VFSGroupDataset;
import org.openimaj.experiment.dataset.sampling.GroupSampler;
import org.openimaj.experiment.dataset.sampling.GroupedUniformRandomisedSampler;
import org.openimaj.experiment.dataset.split.GroupedRandomSplitter;
import org.openimaj.experiment.evaluation.classification.ClassificationEvaluator;
import org.openimaj.experiment.evaluation.classification.ClassificationResult;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMResult;
import org.openimaj.feature.DoubleFV;
import org.openimaj.feature.FeatureExtractor;
import org.openimaj.feature.SparseIntFV;
import org.openimaj.feature.local.data.LocalFeatureListDataSource;
import org.openimaj.feature.local.list.LocalFeatureList;
import org.openimaj.image.FImage;
import org.openimaj.image.ImageUtilities;
import org.openimaj.image.feature.dense.gradient.dsift.ByteDSIFTKeypoint;
import org.openimaj.image.feature.dense.gradient.dsift.DenseSIFT;
import org.openimaj.image.feature.dense.gradient.dsift.PyramidDenseSIFT;
import org.openimaj.image.feature.local.aggregate.BagOfVisualWords;
import org.openimaj.image.feature.local.aggregate.BlockSpatialAggregator;
import org.openimaj.ml.annotation.linear.LiblinearAnnotator;
import org.openimaj.ml.clustering.ByteCentroidsResult;
import org.openimaj.ml.clustering.assignment.HardAssigner;
import org.openimaj.ml.clustering.kmeans.ByteKMeans;
import org.openimaj.util.pair.IntFloatPair;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
* Created by yschondorf on 5/29/2018.
*/
public class Chapter12Generic {
private static String IMAGES_PATH = "C:\\Development\\Video Analytics\\tpImages";
public static void main(String[] args) {
try {
VFSGroupDataset<FImage> allData = new VFSGroupDataset<FImage>(
IMAGES_PATH,
ImageUtilities.FIMAGE_READER);
GroupedDataset<String, ListDataset<FImage>, FImage> data =
GroupSampler.sample(allData, 1, false);
GroupedRandomSplitter<String, FImage> splits =
new GroupedRandomSplitter<String, FImage>(data, 15, 0, 15); // 15 training, 0 validation, 15 testing
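// Dense SIFT sampled on a 5-pixel grid with 7-pixel bins, applied at a single
// pyramid window size of 7 (the values used in the tutorial, if I read it correctly)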
DenseSIFT denseSIFT = new DenseSIFT(5, 7);
PyramidDenseSIFT<FImage> pyramidDenseSIFT = new PyramidDenseSIFT<FImage>(denseSIFT, 6f, 7);
GroupedDataset<String, ListDataset<FImage>, FImage> sample =
GroupedUniformRandomisedSampler.sample(splits.getTrainingDataset(), 15);
HardAssigner<byte[], float[], IntFloatPair> assigner = trainQuantiser(sample, pyramidDenseSIFT);
FeatureExtractor<DoubleFV, FImage> extractor = new PHOWExtractor(pyramidDenseSIFT, assigner);
//
// Now we’re ready to construct and train a classifier
//
LiblinearAnnotator<FImage, String> trainer = new LiblinearAnnotator<FImage, String>(
extractor, LiblinearAnnotator.Mode.MULTICLASS, SolverType.L2R_L2LOSS_SVC, 1.0, 0.00001);
Date start = new Date();
System.out.println("Classifier training: start");
trainer.train(splits.getTrainingDataset());
System.out.println("Classifier training: end");
Date end = new Date();
long durationSec = (end.getTime() - start.getTime()) / 1000;
System.out.println("Classifier training duration: " + durationSec + " seconds");
final GroupedDataset<String, ListDataset<FImage>, FImage> testDataSet = splits.getTestDataset();
ClassificationEvaluator<CMResult<String>, String, FImage> eval =
new ClassificationEvaluator<CMResult<String>, String, FImage>(
trainer, testDataSet, new CMAnalyser<FImage, String>(CMAnalyser.Strategy.SINGLE));
start = new Date();
System.out.println("Classifier evaluation: start");
Map<FImage, ClassificationResult<String>> guesses = eval.evaluate();
System.out.println("Classifier evaluation - tp: end");
end = new Date();
durationSec = (end.getTime() - start.getTime()) / 1000;
System.out.println("Classifier evaluation duration: " + durationSec + " seconds");
CMResult<String> result = eval.analyse(guesses);
System.out.println("Result - tp: " + result);
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Extracts dense SIFT features from every image in the given sample and clusters them
* into 300 visual words using approximate K-Means (a KD-tree ensemble). Note that the
* cap of 10000 is applied to the list of per-image feature lists, not to the
* individual features themselves.
*
* @param sample the images to build the visual-word vocabulary from
* @param pyramidDenseSIFT the pyramid dense SIFT feature extractor
* @return a HardAssigner that assigns a SIFT feature to its nearest visual word
*/
static HardAssigner<byte[], float[], IntFloatPair> trainQuantiser(
Dataset<FImage> sample,
PyramidDenseSIFT<FImage> pyramidDenseSIFT)
{
System.out.println("trainQuantiser: start");
Date start = new Date();
List<LocalFeatureList<ByteDSIFTKeypoint>> allKeys = new ArrayList<LocalFeatureList<ByteDSIFTKeypoint>>();
int i = 0;
int total = sample.numInstances();
for (FImage rec : sample) {
i++;
System.out.println(String.format("Analysing image %d out of %d", i, total));
FImage img = rec.getImage();
pyramidDenseSIFT.analyseImage(img);
allKeys.add(pyramidDenseSIFT.getByteKeypoints(0.005f));
}
final int numberOfDenseSiftFeaturesToExtract = 10000;
final int numberOfClassesInCluster = 300;
if (allKeys.size() > numberOfDenseSiftFeaturesToExtract)
allKeys = allKeys.subList(0, numberOfDenseSiftFeaturesToExtract);
ByteKMeans km = ByteKMeans.createKDTreeEnsemble(numberOfClassesInCluster);
DataSource<byte[]> dataSource = new LocalFeatureListDataSource<ByteDSIFTKeypoint, byte[]>(allKeys);
System.out.println(String.format(
"Clustering %d image features into %d classes...",
numberOfDenseSiftFeaturesToExtract, numberOfClassesInCluster));
ByteCentroidsResult result = km.cluster(dataSource);
Date end = new Date();
System.out.println("trainQuantiser: end");
System.out.println("trainQuantiser duration: " + (end.getTime() - start.getTime())/1000 + " seconds");
return result.defaultHardAssigner();
}
static class PHOWExtractor implements FeatureExtractor<DoubleFV, FImage> {
PyramidDenseSIFT<FImage> pdsift;
HardAssigner<byte[], float[], IntFloatPair> assigner;
public PHOWExtractor(PyramidDenseSIFT<FImage> pdsift, HardAssigner<byte[], float[], IntFloatPair> assigner)
{
this.pdsift = pdsift;
this.assigner = assigner;
}
public DoubleFV extractFeature(FImage object) {
FImage image = object.getImage();
pdsift.analyseImage(image);
BagOfVisualWords<byte[]> bovw = new BagOfVisualWords<byte[]>(assigner);
BlockSpatialAggregator<byte[], SparseIntFV> spatial = new BlockSpatialAggregator<byte[], SparseIntFV>(
bovw, 2, 2);
return spatial.aggregate(pdsift.getByteKeypoints(0.015f), image.getBounds()).normaliseFV();
}
}
}
This code works and produces the following output:
trainQuantiser: start
Analysing image 1 out of 15
Analysing image 2 out of 15
Analysing image 3 out of 15
Analysing image 4 out of 15
Analysing image 5 out of 15
Analysing image 6 out of 15
Analysing image 7 out of 15
Analysing image 8 out of 15
Analysing image 9 out of 15
Analysing image 10 out of 15
Analysing image 11 out of 15
Analysing image 12 out of 15
Analysing image 13 out of 15
Analysing image 14 out of 15
Analysing image 15 out of 15
Clustering 10000 image features into 300 classes...
trainQuantiser: end
trainQuantiser duration: 243 seconds
Classifier training: start
iter 1 act 6.283e-01 pre 6.283e-01 delta 1.096e+00 f 1.500e+01 |g| 1.146e+00 CG 1
iter 2 act 2.779e-05 pre 2.779e-05 delta 1.096e+00 f 1.437e+01 |g| 7.555e-03 CG 1
iter 3 act 2.175e-09 pre 2.175e-09 delta 1.096e+00 f 1.437e+01 |g| 6.702e-05 CG 1
iter 4 act 6.626e-13 pre 6.598e-13 delta 1.096e+00 f 1.437e+01 |g| 1.164e-06 CG 1
Classifier training: end
Classifier training duration: 28 seconds
Classifier evaluation: start
Classifier evaluation - tp: end
Classifier evaluation duration: 57 seconds
Result - tp: Accuracy: 1.000
Error Rate: 0.000
I don't know where to go from here. What I really want is not to evaluate the classifier's accuracy, as Chapter 12 does, but to use the trained classifier to decide whether a new image contains the shape I am interested in. I have not found any documentation or example that shows how to do this. Any help would be much appreciated.
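From skimming the LiblinearAnnotator javadoc, my best guess is that classifying a single new image looks roughly like the sketch below, but I have not been able to verify this anywhere (the path is just a placeholder):

FImage query = ImageUtilities.readF(new File("C:\\path\\to\\newImage.jpg")); // placeholder path
// if I read the javadoc correctly, classify() returns per-class confidences...
ClassificationResult<String> result = trainer.classify(query);
// ...and annotate() returns the candidate labels with scores
List<ScoredAnnotation<String>> annotations = trainer.annotate(query);
System.out.println(annotations);

Is that the correct approach?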
Apart from the tutorial I have not found any substantial documentation. Can anyone point me to it? In the meantime I am just guessing. I cannot use the testDataset from the splits, because training the classifier and using it need to stay separate steps: I want to train the classifier once (it takes a long time, many minutes), save the result (i.e. serialize the trainer object above to disk and deserialize it on later runs), and then classify new images. When I added code to do this and ran it against a testDataset of new images, I got a NullPointerException. The exception is unrelated to deserialization, because I also get it when the trainer file does not yet exist on disk. The new code:
package com.mycompany.video.analytics;
import de.bwaldvogel.liblinear.SolverType;
import org.openimaj.data.DataSource;
import org.openimaj.data.dataset.Dataset;
import org.openimaj.data.dataset.GroupedDataset;
import org.openimaj.data.dataset.ListDataset;
import org.openimaj.data.dataset.VFSGroupDataset;
import org.openimaj.experiment.dataset.sampling.GroupSampler;
import org.openimaj.experiment.dataset.sampling.GroupedUniformRandomisedSampler;
import org.openimaj.experiment.dataset.split.GroupedRandomSplitter;
import org.openimaj.experiment.evaluation.classification.ClassificationEvaluator;
import org.openimaj.experiment.evaluation.classification.ClassificationResult;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMResult;
import org.openimaj.feature.DoubleFV;
import org.openimaj.feature.FeatureExtractor;
import org.openimaj.feature.SparseIntFV;
import org.openimaj.feature.local.data.LocalFeatureListDataSource;
import org.openimaj.feature.local.list.LocalFeatureList;
import org.openimaj.image.FImage;
import org.openimaj.image.ImageUtilities;
import org.openimaj.image.feature.dense.gradient.dsift.ByteDSIFTKeypoint;
import org.openimaj.image.feature.dense.gradient.dsift.DenseSIFT;
import org.openimaj.image.feature.dense.gradient.dsift.PyramidDenseSIFT;
import org.openimaj.image.feature.local.aggregate.BagOfVisualWords;
import org.openimaj.image.feature.local.aggregate.BlockSpatialAggregator;
import org.openimaj.io.IOUtils;
import org.openimaj.ml.annotation.linear.LiblinearAnnotator;
import org.openimaj.ml.clustering.ByteCentroidsResult;
import org.openimaj.ml.clustering.assignment.HardAssigner;
import org.openimaj.ml.clustering.kmeans.ByteKMeans;
import org.openimaj.util.pair.IntFloatPair;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
public class Chapter12Generic {
private static String IMAGES_PATH = "C:\\Development\\Video Analytics\\tpImages";
private static String TEST_IMAGES_PATH = "C:\\Development\\Video Analytics\\testImages";
private static String TRAINER_DATA_FILE_PATH = "C:\\Development\\Video Analytics\\out\\trainer.dat";
public static void main(String[] args) throws Exception {
LiblinearAnnotator<FImage, String> trainer = null;
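// Load a previously trained classifier from disk if one exists;
// otherwise train one from scratch and serialize it for later runs.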
File inputDataFile = new File(TRAINER_DATA_FILE_PATH);
if (inputDataFile.isFile()) {
trainer = IOUtils.readFromFile(inputDataFile);
} else {
VFSGroupDataset<FImage> allData = new VFSGroupDataset<FImage>(
IMAGES_PATH,
ImageUtilities.FIMAGE_READER);
GroupedDataset<String, ListDataset<FImage>, FImage> data =
GroupSampler.sample(allData, 1, false);
GroupedRandomSplitter<String, FImage> splits =
new GroupedRandomSplitter<String, FImage>(data, 15, 0, 15); // 15 training, 0 validation, 15 testing
DenseSIFT denseSIFT = new DenseSIFT(5, 7);
PyramidDenseSIFT<FImage> pyramidDenseSIFT = new PyramidDenseSIFT<FImage>(denseSIFT, 6f, 7);
GroupedDataset<String, ListDataset<FImage>, FImage> sample =
GroupedUniformRandomisedSampler.sample(splits.getTrainingDataset(), 15);
HardAssigner<byte[], float[], IntFloatPair> assigner = trainQuantiser(sample, pyramidDenseSIFT);
FeatureExtractor<DoubleFV, FImage> extractor = new PHOWExtractor(pyramidDenseSIFT, assigner);
//
// Now we’re ready to construct and train a classifier
//
trainer = new LiblinearAnnotator<FImage, String>(
extractor, LiblinearAnnotator.Mode.MULTICLASS, SolverType.L2R_L2LOSS_SVC, 1.0, 0.00001);
Date start = new Date();
System.out.println("Classifier training: start");
trainer.train(splits.getTrainingDataset());
IOUtils.writeToFile(trainer, inputDataFile);
System.out.println("Classifier training: end");
Date end = new Date();
long durationSec = (end.getTime() - start.getTime()) / 1000;
System.out.println("Classifier training duration: " + durationSec + " seconds");
}
// final GroupedDataset<String, ListDataset<FImage>, FImage> testDataSet = splits.getTestDataset();
VFSGroupDataset<FImage> testDataSet = new VFSGroupDataset<FImage>(
TEST_IMAGES_PATH,
ImageUtilities.FIMAGE_READER);
ClassificationEvaluator<CMResult<String>, String, FImage> eval =
new ClassificationEvaluator<CMResult<String>, String, FImage>(
trainer, testDataSet, new CMAnalyser<FImage, String>(CMAnalyser.Strategy.SINGLE));
Date start = new Date();
System.out.println("Classifier evaluation: start");
Map<FImage, ClassificationResult<String>> guesses = eval.evaluate();
System.out.println("Classifier evaluation - tp: end");
Date end = new Date();
long durationSec = (end.getTime() - start.getTime()) / 1000;
System.out.println("Classifier evaluation duration: " + durationSec + " seconds");
CMResult<String> result = eval.analyse(guesses);
System.out.println("Result - tp: " + result);
}
/**
* Extracts dense SIFT features from every image in the given sample and clusters them
* into 300 visual words using approximate K-Means (a KD-tree ensemble). Note that the
* cap of 10000 is applied to the list of per-image feature lists, not to the
* individual features themselves.
*
* @param sample the images to build the visual-word vocabulary from
* @param pyramidDenseSIFT the pyramid dense SIFT feature extractor
* @return a HardAssigner that assigns a SIFT feature to its nearest visual word
*/
static HardAssigner<byte[], float[], IntFloatPair> trainQuantiser(
Dataset<FImage> sample,
PyramidDenseSIFT<FImage> pyramidDenseSIFT)
{
System.out.println("trainQuantiser: start");
Date start = new Date();
List<LocalFeatureList<ByteDSIFTKeypoint>> allKeys = new ArrayList<LocalFeatureList<ByteDSIFTKeypoint>>();
int i = 0;
int total = sample.numInstances();
for (FImage rec : sample) {
i++;
System.out.println(String.format("Analysing image %d out of %d", i, total));
FImage img = rec.getImage();
pyramidDenseSIFT.analyseImage(img);
allKeys.add(pyramidDenseSIFT.getByteKeypoints(0.005f));
}
final int numberOfDenseSiftFeaturesToExtract = 10000;
final int numberOfClassesInCluster = 300;
if (allKeys.size() > numberOfDenseSiftFeaturesToExtract)
allKeys = allKeys.subList(0, numberOfDenseSiftFeaturesToExtract);
ByteKMeans km = ByteKMeans.createKDTreeEnsemble(numberOfClassesInCluster);
DataSource<byte[]> dataSource = new LocalFeatureListDataSource<ByteDSIFTKeypoint, byte[]>(allKeys);
System.out.println(String.format(
"Clustering %d image features into %d classes...",
numberOfDenseSiftFeaturesToExtract, numberOfClassesInCluster));
ByteCentroidsResult result = km.cluster(dataSource);
Date end = new Date();
System.out.println("trainQuantiser: end");
System.out.println("trainQuantiser duration: " + (end.getTime() - start.getTime())/1000 + " seconds");
return result.defaultHardAssigner();
}
static class PHOWExtractor implements FeatureExtractor<DoubleFV, FImage> {
PyramidDenseSIFT<FImage> pdsift;
HardAssigner<byte[], float[], IntFloatPair> assigner;
public PHOWExtractor(PyramidDenseSIFT<FImage> pdsift, HardAssigner<byte[], float[], IntFloatPair> assigner)
{
this.pdsift = pdsift;
this.assigner = assigner;
}
public DoubleFV extractFeature(FImage object) {
FImage image = object.getImage();
pdsift.analyseImage(image);
BagOfVisualWords<byte[]> bovw = new BagOfVisualWords<byte[]>(assigner);
BlockSpatialAggregator<byte[], SparseIntFV> spatial = new BlockSpatialAggregator<byte[], SparseIntFV>(
bovw, 2, 2);
return spatial.aggregate(pdsift.getByteKeypoints(0.015f), image.getBounds()).normaliseFV();
}
}
}
The exception:
Exception in thread "main" java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:497)
at com.intellij.rt.execution.CommandLineWrapper.main(CommandLineWrapper.java:130)
Caused by: java.lang.NullPointerException
at org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser$Strategy$1.add(CMAnalyser.java:80)
at org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser.analyse(CMAnalyser.java:172)
at org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser.analyse(CMAnalyser.java:57)
at org.openimaj.experiment.evaluation.classification.ClassificationEvaluator.analyse(ClassificationEvaluator.java:190)
at com.mycompany.video.analytics.Chapter12Generic.main(Chapter12Generic.java:113)
The exception is thrown by this call:
CMResult<String> result = eval.analyse(guesses);
Any idea how to fix this?
Here is version 3 of the code, based on @jon's answer. The problem now is that it classifies false images (images that do not contain the shape) as true:
public class Chapter12Generic_v3 {
// contains an accordion folder with images from caltech101
private static String TRAINING_IMAGES_PATH = "C:\\Development\\Video Analytics\\images";
// contains 1 airplane image from caltech101
private static String TEST_IMAGE = "C:\\Development\\Video Analytics\\testImages\\falseImages\\image_0001.jpg";
private static String TRAINER_DATA_FILE_PATH = "C:\\Development\\Video Analytics\\out\\trainer.dat";
public static void main(String[] args) throws Exception {
LiblinearAnnotator<FImage, String> trainer = null;
File inputDataFile = new File(TRAINER_DATA_FILE_PATH);
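// Reuse the serialized classifier if it exists; training from scratch takes many minutes.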
if (inputDataFile.isFile()) {
trainer = IOUtils.readFromFile(inputDataFile);
} else {
VFSGroupDataset<FImage> allData = new VFSGroupDataset<FImage>(
TRAINING_IMAGES_PATH,
ImageUtilities.FIMAGE_READER);
GroupedDataset<String, ListDataset<FImage>, FImage> data =
GroupSampler.sample(allData, 1, false);
GroupedRandomSplitter<String, FImage> splits =
new GroupedRandomSplitter<String, FImage>(data, 15, 0, 15); // 15 training, 0 validation, 15 testing
DenseSIFT denseSIFT = new DenseSIFT(5, 7);
PyramidDenseSIFT<FImage> pyramidDenseSIFT = new PyramidDenseSIFT<FImage>(denseSIFT, 6f, 7);
GroupedDataset<String, ListDataset<FImage>, FImage> sample =
GroupedUniformRandomisedSampler.sample(splits.getTrainingDataset(), 15);
HardAssigner<byte[], float[], IntFloatPair> assigner = trainQuantiser(sample, pyramidDenseSIFT);
FeatureExtractor<DoubleFV, FImage> extractor = new PHOWExtractor(pyramidDenseSIFT, assigner);
//
// Now we’re ready to construct and train a classifier
//
trainer = new LiblinearAnnotator<FImage, String>(
extractor, LiblinearAnnotator.Mode.MULTICLASS, SolverType.L2R_L2LOSS_SVC, 1.0, 0.00001);
Date start = new Date();
System.out.println("Classifier training: start");
trainer.train(splits.getTrainingDataset());
IOUtils.writeToFile(trainer, inputDataFile);
System.out.println("Classifier training: end");
Date end = new Date();
long durationSec = (end.getTime() - start.getTime()) / 1000;
System.out.println("Classifier training duration: " + durationSec + " seconds");
}
FImage query = ImageUtilities.readF(new File(TEST_IMAGE));
final List<ScoredAnnotation<String>> scoredAnnotations = trainer.annotate(query);
final ClassificationResult<String> classificationResult = trainer.classify(query);
System.out.println("scoredAnnotations: " + scoredAnnotations);
System.out.println("classificationResult: " + classificationResult);
}
/**
* Extracts dense SIFT features from every image in the given sample and clusters them
* into 300 visual words using approximate K-Means (a KD-tree ensemble). Note that the
* cap of 10000 is applied to the list of per-image feature lists, not to the
* individual features themselves.
*
* @param sample the images to build the visual-word vocabulary from
* @param pyramidDenseSIFT the pyramid dense SIFT feature extractor
* @return a HardAssigner that assigns a SIFT feature to its nearest visual word
*/
static HardAssigner<byte[], float[], IntFloatPair> trainQuantiser(
Dataset<FImage> sample,
PyramidDenseSIFT<FImage> pyramidDenseSIFT)
{
System.out.println("trainQuantiser: start");
Date start = new Date();
List<LocalFeatureList<ByteDSIFTKeypoint>> allKeys = new ArrayList<LocalFeatureList<ByteDSIFTKeypoint>>();
int i = 0;
int total = sample.numInstances();
for (FImage rec : sample) {
i++;
System.out.println(String.format("Analysing image %d out of %d", i, total));
FImage img = rec.getImage();
pyramidDenseSIFT.analyseImage(img);
allKeys.add(pyramidDenseSIFT.getByteKeypoints(0.005f));
}
final int numberOfDenseSiftFeaturesToExtract = 10000;
final int numberOfClassesInCluster = 300;
if (allKeys.size() > numberOfDenseSiftFeaturesToExtract)
allKeys = allKeys.subList(0, numberOfDenseSiftFeaturesToExtract);
ByteKMeans km = ByteKMeans.createKDTreeEnsemble(numberOfClassesInCluster);
DataSource<byte[]> dataSource = new LocalFeatureListDataSource<ByteDSIFTKeypoint, byte[]>(allKeys);
System.out.println(String.format(
"Clustering %d image features into %d classes...",
numberOfDenseSiftFeaturesToExtract, numberOfClassesInCluster));
ByteCentroidsResult result = km.cluster(dataSource);
Date end = new Date();
System.out.println("trainQuantiser: end");
System.out.println("trainQuantiser duration: " + (end.getTime() - start.getTime())/1000 + " seconds");
return result.defaultHardAssigner();
}
static class PHOWExtractor implements FeatureExtractor<DoubleFV, FImage> {
PyramidDenseSIFT<FImage> pdsift;
HardAssigner<byte[], float[], IntFloatPair> assigner;
public PHOWExtractor(PyramidDenseSIFT<FImage> pdsift, HardAssigner<byte[], float[], IntFloatPair> assigner)
{
this.pdsift = pdsift;
this.assigner = assigner;
}
public DoubleFV extractFeature(FImage object) {
FImage image = object.getImage();
pdsift.analyseImage(image);
BagOfVisualWords<byte[]> bovw = new BagOfVisualWords<byte[]>(assigner);
BlockSpatialAggregator<byte[], SparseIntFV> spatial = new BlockSpatialAggregator<byte[], SparseIntFV>(
bovw, 2, 2);
return spatial.aggregate(pdsift.getByteKeypoints(0.015f), image.getBounds()).normaliseFV();
}
}
}
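To dig into why the false image comes back as true, I am also printing the confidence of each annotation. This is an unverified sketch based on my reading of the ScoredAnnotation and ClassificationResult javadocs (I am assuming annotation and confidence are public fields of ScoredAnnotation and that ClassificationResult has getConfidence(class)):

// print each candidate label with its confidence (fields assumed public per the javadoc)
for (ScoredAnnotation<String> ann : scoredAnnotations) {
System.out.println(ann.annotation + " -> confidence " + ann.confidence);
}
// confidence of the single class the classifier was trained on ("accordion" is my folder name)
System.out.println("accordion: " + classificationResult.getConfidence("accordion"));

I also wonder whether the root problem is that I train on a single group only (GroupSampler.sample(allData, 1, false)), so the classifier has never seen a negative class and may have no way to answer anything other than "accordion".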