我正在处理一些固定长度的时间序列数据。此数据的格式为 headers #0-->time series trace#0-->header#1-->time series trace #1 -->..->header#P-->time series trace #P。通过使用一些旨在处理此类数据的 API,我终于能够将数据转换为形状为 [P, length of time series] 的 INDArray。如果我的目标是从数据中去除噪音,我应该使用什么作为 DNN?到目前为止,我一直在使用堆叠去噪自动编码器,但不确定我实现它的方式是否正确。
// Load all n2 traces (each n1 samples long) into a 2-D matrix:
// rows = sample index (0..n1-1), columns = trace index (0..n2-1).
// NOTE(review): DL4J treats ROWS as examples and COLUMNS as features, so with
// this [n1, n2] layout each "example" is one time slice across all traces.
// If the goal is to denoise whole traces, the matrix should be [n2, n1]
// (one trace per row) with the network input width n1, not n2 — confirm intent.
INDArray sinput = Nd4j.zeros(n1, n2); // rows = samples, columns = trace numbers
float[] data = new float[n1];         // reusable read buffer, one trace per iteration
for (int j = 0; j < n2; j++) {        // for each trace
    // NOTE(review): the read() return value is ignored — if this API can return
    // short reads, the tail of `data` keeps stale values; verify its contract.
    input.read(data);
    // Bulk-copy the whole trace into column j in one native call instead of
    // n1 individual putScalar calls (removes the per-element debug println,
    // which dominated runtime for large n1*n2).
    sinput.putColumn(j, Nd4j.create(data));
}
// Stacked denoising autoencoder: symmetric encoder (layers 0-3) / decoder
// (layers 4-7) funnel, plus an identity output layer trained with squared
// (MSE) loss so the net learns to reproduce its own input.
Activation act = Activation.RELU;
LearningRatePolicy lrPol = LearningRatePolicy.None;
MultiLayerConfiguration configuration = new NeuralNetConfiguration.Builder()
    .seed(13)                 // fixed seed for reproducible weight init
    .iterations(20000)
    .learningRate(0.003)
    .learningRateDecayPolicy(lrPol)
    .lrPolicyDecayRate(1e-5)
    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
    .regularization(true)     // must be enabled for l1/l2 below to take effect
    .l1(1e-5)
    .l2(1e-3)
    .list()
    // Encoder: only the first layer corrupts raw input during pretraining.
    .layer(0, new AutoEncoder.Builder().nIn(n2).nOut(FIRST_HIDDEN_LAYER_WIDTH).activation(act).corruptionLevel(0.3).weightInit(WeightInit.XAVIER).build())
    .layer(1, new AutoEncoder.Builder().nIn(FIRST_HIDDEN_LAYER_WIDTH).nOut(SECOND_HIDDEN_LAYER_WIDTH).activation(act).weightInit(WeightInit.XAVIER).build())
    .layer(2, new AutoEncoder.Builder().nIn(SECOND_HIDDEN_LAYER_WIDTH).nOut(THIRD_HIDDEN_LAYER_WIDTH).activation(act).weightInit(WeightInit.XAVIER).build())
    .layer(3, new AutoEncoder.Builder().nIn(THIRD_HIDDEN_LAYER_WIDTH).nOut(FOURTH_HIDDEN_LAYER_WIDTH).activation(act).weightInit(WeightInit.XAVIER).build())
    // Decoder: mirrors the encoder back up to the input width.
    // NOTE(review): corruption on decoder layers is unusual for an SDA —
    // typically only encoder inputs are corrupted; confirm this is intended.
    .layer(4, new AutoEncoder.Builder().nIn(FOURTH_HIDDEN_LAYER_WIDTH).nOut(THIRD_HIDDEN_LAYER_WIDTH).activation(act).weightInit(WeightInit.XAVIER).corruptionLevel(0.3).build())
    .layer(5, new AutoEncoder.Builder().nIn(THIRD_HIDDEN_LAYER_WIDTH).nOut(SECOND_HIDDEN_LAYER_WIDTH).activation(act).weightInit(WeightInit.XAVIER).corruptionLevel(0.3).build())
    .layer(6, new AutoEncoder.Builder().nIn(SECOND_HIDDEN_LAYER_WIDTH).nOut(FIRST_HIDDEN_LAYER_WIDTH).activation(act).weightInit(WeightInit.XAVIER).corruptionLevel(0.3).build())
    .layer(7, new AutoEncoder.Builder().nIn(FIRST_HIDDEN_LAYER_WIDTH).nOut(n2).activation(act).weightInit(WeightInit.XAVIER).corruptionLevel(0.3).build())
    // IDENTITY activation so reconstructed amplitudes can be negative;
    // SQUARED_LOSS is the standard reconstruction (MSE) objective.
    .layer(8, new OutputLayer.Builder(LossFunctions.LossFunction.SQUARED_LOSS).activation(Activation.IDENTITY).nIn(n2).nOut(n2).build())
    // FIX: was pretrain(false), which skips unsupervised layerwise pretraining
    // entirely — and pretraining is the ONLY phase where an AutoEncoder layer's
    // corruptionLevel is applied. With pretrain(false) every corruptionLevel
    // above was dead config and no denoising ever happened. pretrain(true) +
    // backprop(true) gives the standard SDA flow: greedy layerwise denoising
    // pretraining, then supervised (here input->input) fine-tuning.
    .pretrain(true).backprop(true)
    .build();
// Build, train, and run the autoencoder. For denoising, features == labels.
MultiLayerNetwork network = new MultiLayerNetwork(configuration);
network.init(); // FIX: init() was called twice in the original; once is enough
// Autoencoder training pair: the target is the (clean) input itself.
DataSet dataSet = new DataSet(sinput, sinput);
network.fit(dataSet); // FIX: was re-wrapping features in a redundant new DataSet
System.out.println("Layer 0 has " + network.getLayer(0).numParams() + " and Params: ");
System.out.println(network.getLayer(0).params());
System.out.println();
// FIX: the original referenced `testDataSet`, whose declaration was commented
// out (compile error). Reuse the training features here; swap in a held-out
// test set when one is available.
// FIX: also dropped `network.reconstruct(outputFromNet, 1)` — reconstruct()
// expects the activation fed INTO a layer, not the network's final output;
// for a full autoencoder the denoised signal is simply network.output(...).
INDArray outputFromNet = network.output(dataSet.getFeatureMatrix());