Example #1
        public static void testLearningOneLayer()
        {
            // Input is a 5x5 single-channel image; the convolution kernel is 3x3
            Dimension2D inputDimension = new Dimension2D(5, 5);
            Dimension2D convolutionKernel = new Dimension2D(3, 3);

            ConvolutionalNetwork convolutionNet = (new ConvolutionalNetwork.Builder())
                    .withInputLayer(5, 5, 1)
                    .withConvolutionLayer(3, 3, 2)
                    .withFullConnectedLayer(2)
                    .build();


            // CREATE DATA SET

            DataSet dataSet = new DataSet(25, 2);

            // 5x5 training images: a horizontal line (label {1, 0}) and a vertical line (label {0, 1})
            dataSet.addRow(new double[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, new double[] { 1, 0 });
            dataSet.addRow(new double[] { 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0 }, new double[] { 0, 1 });

            // TRAIN NETWORK

            convolutionNet.LearningRule.MaxError = 0.00001;
            convolutionNet.learn(dataSet);

            Console.WriteLine("Done training!");

            // Retrieve the two learned feature maps from the convolutional layer (layer index 1)
            FeatureMapLayer featureMap1 = ((FeatureMapsLayer)convolutionNet.getLayerAt(1)).getFeatureMap(0);
            FeatureMapLayer featureMap2 = ((FeatureMapsLayer)convolutionNet.getLayerAt(1)).getFeatureMap(1);

            //        WeightVisualiser visualiser1 = new WeightVisualiser(featureMap1, convolutionKernel);
            //        visualiser1.displayWeights();
            //
            //        WeightVisualiser visualiser2 = new WeightVisualiser(featureMap2, convolutionKernel);
            //        visualiser2.displayWeights();


            // CREATE TEST SET

            DataSet testSet = new DataSet(25, 2);

            // Two thick horizontal-line patterns (expected {1, 0}) and a thick vertical-line pattern (expected {0, 1})
            testSet.addRow(new double[] { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, new double[] { 1, 0 });
            testSet.addRow(new double[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, new double[] { 1, 0 });
            testSet.addRow(new double[] { 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0 }, new double[] { 0, 1 });
        }
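
Note: the test set above is built but never evaluated before the method returns. A minimal sketch of the missing last step, assuming the same Evaluation helper used in Example #2 is available (a hypothetical addition, placed before the closing brace of testLearningOneLayer):

            // Hypothetical follow-up: evaluate the trained network on the test rows (mirrors Example #2)
            Evaluation.runFullEvaluation(convolutionNet, testSet);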
Example #2
        /// <param name="args"> Command line parameters used to initialize the convolutional network:
        ///             [0] - maximum number of epochs during learning
        ///             [1] - learning error stop condition
        ///             [2] - learning rate used during the learning process
        ///             [3] - number of feature maps in the 1st convolutional layer
        ///             [4] - number of feature maps in the 2nd convolutional layer
        ///             [5] - number of feature maps in the 3rd convolutional layer </param>
        public static void Main(string[] args)
        {
            try
            {
                int    maxIter      = 10000;         // Integer.parseInt(args[0]);
                double maxError     = 0.01;          //Double.parseDouble(args[1]);
                double learningRate = 0.2;           //  Double.parseDouble(args[2]);

                int layer1 = Convert.ToInt32(args[3]);
                int layer2 = Convert.ToInt32(args[4]);
                int layer3 = Convert.ToInt32(args[5]);

                LOG.info("{}-{}-{}", layer1, layer2, layer3);

                DataSet trainSet = MNISTDataSet.createFromFile(MNISTDataSet.TRAIN_LABEL_NAME, MNISTDataSet.TRAIN_IMAGE_NAME, 100);
                DataSet testSet  = MNISTDataSet.createFromFile(MNISTDataSet.TEST_LABEL_NAME, MNISTDataSet.TEST_IMAGE_NAME, 10000);

                Dimension2D inputDimension    = new Dimension2D(32, 32);
                Dimension2D convolutionKernel = new Dimension2D(5, 5);
                Dimension2D poolingKernel     = new Dimension2D(2, 2);

                ConvolutionalNetwork convolutionNetwork = (new ConvolutionalNetwork.Builder())
                        .withInputLayer(32, 32, 1)
                        .withConvolutionLayer(5, 5, layer1)
                        .withPoolingLayer(2, 2)
                        .withConvolutionLayer(5, 5, layer2)
                        .withPoolingLayer(2, 2)
                        .withConvolutionLayer(5, 5, layer3)
                        .withFullConnectedLayer(10)
                        .build();

                ConvolutionalBackpropagation backPropagation = new ConvolutionalBackpropagation();
                backPropagation.LearningRate  = learningRate;
                backPropagation.MaxError      = maxError;
                backPropagation.MaxIterations = maxIter;
                backPropagation.addListener(new LearningListener(convolutionNetwork, testSet));
                backPropagation.ErrorFunction = new MeanSquaredError();

                convolutionNetwork.LearningRule = backPropagation;
                convolutionNetwork.learn(trainSet);

                Evaluation.runFullEvaluation(convolutionNetwork, testSet);
            }
            catch (IOException e)
            {
                Console.WriteLine(e.ToString());
                Console.Write(e.StackTrace);
            }
        }
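
Usage note: the program expects the six positional arguments described in the doc comment above, so an invocation might look like "MnistConvolutionDemo 10000 0.01 0.2 4 8 12" (program name and values are placeholders, not from the original source). With the code as shown, only the last three arguments are actually read; the epoch limit, error threshold and learning rate are hardcoded, with the corresponding parse calls left commented out.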