
Commit

Merge remote-tracking branch 'upstream/master'
kottmann committed Oct 26, 2016
2 parents be6b4a9 + 7b0246f · commit 3859e84
Showing 64 changed files with 1,104 additions and 599 deletions.
VERSION: 2 changes (1 addition, 1 deletion)
@@ -1 +1 @@
- 0.4-rc3.9-SNAPSHOT
+ 0.6.1-SNAPSHOT
deeplearning4j-core/pom.xml: 8 changes (0 additions, 8 deletions)
@@ -30,7 +30,6 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
- <version>2.18.1</version>
<configuration>
<argLine>-Ddtype=double</argLine>
</configuration>
@@ -62,11 +61,6 @@
<version>${nd4j.version}</version>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.apache.commons</groupId>
- <artifactId>commons-math3</artifactId>
- <version>${commonsmath.version}</version>
- </dependency>
</dependencies>
</dependencyManagement>

@@ -81,7 +75,6 @@
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
- <version>${logback.version}</version>
<scope>test</scope>
</dependency>

@@ -95,7 +88,6 @@
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
- <version>${commonsmath.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
@@ -92,7 +92,7 @@ public static void writeImageToPpm(int[][] image, String ppmFileName) throws IOE
public MnistManager(String imagesFile, String labelsFile, boolean train) throws IOException {
if (imagesFile != null) {
images = new MnistImageFile(imagesFile, "r");
- if(train) imagesArr = new MnistImageFile(imagesFile, "r").readImagesUnsafe(MnistDataFetcher.NUM_EXAMPLES);
+ if(train) imagesArr = images.readImagesUnsafe(MnistDataFetcher.NUM_EXAMPLES);
else imagesArr = images.readImagesUnsafe(MnistDataFetcher.NUM_EXAMPLES_TEST);
}
if (labelsFile != null) {
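The MnistManager hunk above is a small resource fix: when loading the training set, the constructor now reads from the MnistImageFile it has just opened instead of opening the images file a second time. Condensed to the relevant lines, the resulting logic is roughly (a sketch, not a further change):

// Open the image file once and reuse the handle for both branches.
images = new MnistImageFile(imagesFile, "r");
imagesArr = train
        ? images.readImagesUnsafe(MnistDataFetcher.NUM_EXAMPLES)        // full training set
        : images.readImagesUnsafe(MnistDataFetcher.NUM_EXAMPLES_TEST);  // test set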
@@ -104,7 +104,7 @@ public void testCifarDataSetIteratorReset() {

MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
- net.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(1)));
+ net.setListeners(new ScoreIterationListener(1));

MultipleEpochsIterator ds = new MultipleEpochsIterator(epochs, new CifarDataSetIterator(10,20, new int[]{20,20,1}));
net.fit(ds);
@@ -652,14 +652,16 @@ public void testEvaluationWithMetaData() throws Exception {

List<Prediction> errors = e.getPredictionErrors(); //*** New - get list of prediction errors from evaluation ***
List<RecordMetaData> metaForErrors = new ArrayList<>();
- for(Prediction p : errors) metaForErrors.add(p.getRecordMetaData());
+ for(Prediction p : errors){
+ metaForErrors.add((RecordMetaData)p.getRecordMetaData());
+ }
DataSet ds = rrdsi.loadFromMetaData(metaForErrors); //*** New - dynamically load a subset of the data, just for prediction errors ***
INDArray output = net.output(ds.getFeatures());

int count = 0;
for(Prediction t : errors){
System.out.println(t
+ "\t\tRaw Data: " + csv.loadFromMetaData(t.getRecordMetaData()).getRecord() //*** New - load subset of data from MetaData object (usually batched for efficiency) ***
+ "\t\tRaw Data: " + csv.loadFromMetaData((RecordMetaData)t.getRecordMetaData()).getRecord() //*** New - load subset of data from MetaData object (usually batched for efficiency) ***
+ "\tNormalized: " + ds.getFeatureMatrix().getRow(count) + "\tLabels: " + ds.getLabels().getRow(count)
+ "\tNetwork predictions: " + output.getRow(count));
count++;
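For readers skimming the evaluation hunk above, the meta-data workflow it exercises reads roughly as follows. This is a condensed sketch assembled from the lines shown; eval, iterator, and net are stand-ins for the Evaluation, dataset iterator, and network the test already has:

// Re-examine only the misclassified examples using their record meta-data.
List<Prediction> errors = eval.getPredictionErrors();     // predictions that were wrong
List<RecordMetaData> meta = new ArrayList<>();
for (Prediction p : errors) {
    meta.add((RecordMetaData) p.getRecordMetaData());      // explicit cast, as in the updated test
}
DataSet subset = iterator.loadFromMetaData(meta);          // reload just those records
INDArray output = net.output(subset.getFeatures());       // re-score the error rows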
@@ -5,6 +5,7 @@
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
+ import org.deeplearning4j.nn.conf.distribution.NormalDistribution;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
@@ -239,6 +240,7 @@ public void testCnnWithSubsampling(){
.regularization(false)
.learningRate(1.0)
.updater(Updater.SGD)
+ .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0,1))
.list()
.layer(0, new ConvolutionLayer.Builder(kernel, stride, padding)
.nIn(inputDepth).nOut(3)
@@ -299,6 +301,7 @@ public void testCnnWithSubsamplingV2(){
.regularization(false)
.learningRate(1.0)
.updater(Updater.SGD)
+ .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0,1))
.list()
.layer(0, new ConvolutionLayer.Builder(kernel, stride, padding)
.nIn(inputDepth).nOut(3)
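The two subsampling tests above now pin the weight initialisation to an explicit N(0, 1) normal distribution rather than relying on a default. A minimal configuration fragment showing the pattern, using only builder calls that appear elsewhere in this diff (layer sizes are illustrative):

// Explicit, reproducible weight initialisation for gradient checks.
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .regularization(false)
        .learningRate(1.0)
        .updater(Updater.SGD)
        .weightInit(WeightInit.DISTRIBUTION)
        .dist(new NormalDistribution(0, 1))    // initial weights drawn from N(0, 1)
        .list()
        .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation("tanh").build())
        .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .activation("softmax").nIn(3).nOut(3).build())
        .pretrain(false).backprop(true)
        .build();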
@@ -616,13 +616,13 @@ public void testGradientCnnFfRnn() {
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.updater(Updater.NONE)
.seed(12345)
+ .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0,1))
.list()
.layer(0, new ConvolutionLayer.Builder(5, 5)
.nIn(3)
.nOut(5)
.stride(1, 1)
.activation("tanh")
- .weightInit(WeightInit.XAVIER)
.build()) //Out: (10-5)/1+1 = 6 -> 6x6x5
.layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
.kernelSize(2, 2)
@@ -631,13 +631,11 @@
.layer(2, new DenseLayer.Builder()
.nIn(5 * 5 * 5)
.nOut(4)
- .weightInit(WeightInit.XAVIER)
.activation("tanh")
.build())
.layer(3, new GravesLSTM.Builder()
.nIn(4)
.nOut(3)
- .weightInit(WeightInit.XAVIER)
.activation("tanh")
.build())
.layer(4, new RnnOutputLayer.Builder()
@@ -797,7 +795,7 @@ public void testAutoEncoder() {
.l2(l2).l1(l1)
.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
.seed(12345L)
- .weightInit(WeightInit.XAVIER)
+ .weightInit(WeightInit.DISTRIBUTION).dist(new NormalDistribution(0,1))
.updater(Updater.SGD)
.list()
.layer(0, new AutoEncoder.Builder()
@@ -470,17 +470,17 @@ public void testPreTraining(){
.nIn(4).nOut(3)
.weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(0, 1))
.activation("tanh")
.lossFunction(LossFunctions.LossFunction.RMSE_XENT).build(), "in")
.lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build(), "in")
.addLayer("layer1", new RBM.Builder(RBM.HiddenUnit.GAUSSIAN, RBM.VisibleUnit.GAUSSIAN)
.nIn(4).nOut(3)
.weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(0, 1))
.activation("tanh")
.lossFunction(LossFunctions.LossFunction.RMSE_XENT).build(), "in")
.lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build(), "in")
.addLayer("layer2", new RBM.Builder(RBM.HiddenUnit.GAUSSIAN, RBM.VisibleUnit.GAUSSIAN)
.nIn(3).nOut(3)
.weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(0, 1))
.activation("tanh")
.lossFunction(LossFunctions.LossFunction.RMSE_XENT).build(),"layer1")
.lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build(),"layer1")
.addLayer("out", new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
.nIn(3+3).nOut(3)
.weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(0, 1))
@@ -80,7 +80,7 @@ public void testLfw() throws Exception {
.layer(new org.deeplearning4j.nn.conf.layers.RBM.Builder(org.deeplearning4j.nn.conf.layers.RBM.HiddenUnit.RECTIFIED, org.deeplearning4j.nn.conf.layers.RBM.VisibleUnit.GAUSSIAN)
.nIn(d.numInputs()).nOut(nOut)
.weightInit(WeightInit.VI)
- .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
+ .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE)
.build())
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.learningRate(1e-3f)
@@ -105,7 +105,7 @@ public void testIrisGaussianHidden() {
.layer(new org.deeplearning4j.nn.conf.layers.RBM.Builder(
org.deeplearning4j.nn.conf.layers.RBM.HiddenUnit.GAUSSIAN, org.deeplearning4j.nn.conf.layers.RBM.VisibleUnit.GAUSSIAN)
.nIn(d.numInputs()).nOut(3)
- .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
+ .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
.build();

int numParams = conf.getLayer().initializer().numParams(conf,true);
@@ -127,7 +127,7 @@ public void testIris() {
.learningRate(1e-1f)
.layer(new org.deeplearning4j.nn.conf.layers.RBM.Builder(org.deeplearning4j.nn.conf.layers.RBM.HiddenUnit.RECTIFIED, org.deeplearning4j.nn.conf.layers.RBM.VisibleUnit.GAUSSIAN)
.nIn(d.numInputs()).nOut(3)
- .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
+ .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
.build();

int numParams = conf.getLayer().initializer().numParams(conf,true);
@@ -157,7 +157,7 @@ public void testBasic() {
.learningRate(1e-1f)
.layer(new org.deeplearning4j.nn.conf.layers.RBM.Builder()
.nIn(6).nOut(4)
- .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
+ .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
.build();

int numParams = conf.getLayer().initializer().numParams(conf,true);
@@ -208,7 +208,7 @@ public void testSetGetParams() {
.learningRate(1e-1f)
.layer(new org.deeplearning4j.nn.conf.layers.RBM.Builder()
.nIn(6).nOut(4)
- .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
+ .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
.build();

int numParams = conf.getLayer().initializer().numParams(conf,true);
@@ -241,7 +241,7 @@ public void testCg() {
.learningRate(1e-1f)
.layer(new org.deeplearning4j.nn.conf.layers.RBM.Builder()
.nIn(6).nOut(4)
- .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
+ .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
.build();

int numParams = conf.getLayer().initializer().numParams(conf,true);
@@ -276,7 +276,7 @@ public void testGradient() {
.learningRate(1e-1f)
.layer(new org.deeplearning4j.nn.conf.layers.RBM.Builder()
.nIn(6).nOut(4)
- .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
+ .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
.build();

int numParams = conf.getLayer().initializer().numParams(conf,true);
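All of the RBM tests above switch the reconstruction loss from RMSE_XENT to KL_DIVERGENCE; the rest of each configuration is unchanged. In condensed form, the single-layer setup they build looks like this (values taken from the tests themselves):

// One RBM layer, now using KL divergence as the reconstruction loss.
NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder()
        .learningRate(1e-1f)
        .layer(new org.deeplearning4j.nn.conf.layers.RBM.Builder()
                .nIn(6).nOut(4)
                .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE)
                .build())
        .build();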
@@ -184,7 +184,7 @@ public void testDbn() throws Exception {
.nIn(4).nOut(3)
.weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(0, 1))
.activation("tanh")
- .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
+ .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
.layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
.nIn(3).nOut(3)
.weightInit(WeightInit.DISTRIBUTION).dist(new UniformDistribution(0, 1))
@@ -655,6 +655,7 @@ public void testPredict() throws Exception{
.layer(0, new DenseLayer.Builder().nIn(400).nOut(50).activation("relu").build())
.layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation("softmax").nIn(50).nOut(10).build())
.pretrain(false).backprop(true)
+ .setInputType(InputType.convolutional(20,20,1))
.build();

MultiLayerNetwork net = new MultiLayerNetwork(conf);
@@ -698,6 +699,7 @@ public void testOutput() throws Exception{
.layer(0, new DenseLayer.Builder().nIn(400).nOut(50).activation("relu").build())
.layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation("softmax").nIn(50).nOut(10).build())
.pretrain(false).backprop(true)
+ .setInputType(InputType.convolutional(20,20,1))
.build();

MultiLayerNetwork net = new MultiLayerNetwork(conf);
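Finally, testPredict and testOutput now declare the input shape explicitly via setInputType. A trimmed version of that configuration, built only from lines visible above, would look like this:

// Declaring 20x20 single-channel input lets DL4J set up the input
// preprocessing for the first dense layer automatically.
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .list()
        .layer(0, new DenseLayer.Builder().nIn(400).nOut(50).activation("relu").build())
        .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .activation("softmax").nIn(50).nOut(10).build())
        .pretrain(false).backprop(true)
        .setInputType(InputType.convolutional(20, 20, 1))   // height, width, channels
        .build();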

