public void MulticlassTest1()
        {
            Accord.Math.Tools.SetupGenerator(0);
            Neuron.RandGenerator = new ThreadSafeRandom(0);


            int numberOfInputs = 3;
            int numberOfClasses = 4;
            int hiddenNeurons = 5;

            double[][] input = 
            {
                new double[] { -1, -1, -1 }, // 0
                new double[] { -1,  1, -1 }, // 1
                new double[] {  1, -1, -1 }, // 1
                new double[] {  1,  1, -1 }, // 0
                new double[] { -1, -1,  1 }, // 2
                new double[] { -1,  1,  1 }, // 3
                new double[] {  1, -1,  1 }, // 3
                new double[] {  1,  1,  1 }  // 2
            };

            int[] labels =
            {
                0,
                1,
                1,
                0,
                2,
                3,
                3,
                2,
            };

            double[][] outputs = Accord.Statistics.Tools
                .Expand(labels, numberOfClasses, -1, 1);
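            // Expand turns each class label into a bipolar indicator vector;
            // e.g. with 4 classes, label 2 becomes { -1, -1, +1, -1 }.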

            var function = new BipolarSigmoidFunction(2);
            var network = new ActivationNetwork(function,
                numberOfInputs, hiddenNeurons, numberOfClasses);

            new NguyenWidrow(network).Randomize();
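            // (Nguyen-Widrow initialization spreads the neurons' active regions
            // roughly evenly over the input space, which tends to speed up training.)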

            var teacher = new LevenbergMarquardtLearning(network);

            double error = Double.PositiveInfinity;
            for (int i = 0; i < 10; i++)
                error = teacher.RunEpoch(input, outputs);

            for (int i = 0; i < input.Length; i++)
            {
                int answer;
                double[] output = network.Compute(input[i]);
                double response = output.Max(out answer);

                int expected = labels[i];
                Assert.AreEqual(expected, answer);
            }
        }
Example #2
        //---------------------------------------------


        #region Public Methods
        public NetworkContainer CreateNetworkContainer()
        {
            NetworkContainer neuralNetwork = null;

            int[] hiddenLayers = new int[cbHiddenLayerNumber.SelectedIndex];
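            // The selected combo-box index doubles as the number of hidden layers;
            // the fall-through switch below fills in each layer's size from the
            // corresponding numeric control.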

            switch (hiddenLayers.Length)
            {
            case 0:
                break;

            case 1:
                hiddenLayers[0] = (int)this.nHidden1.Value;
                break;

            case 2:
                hiddenLayers[1] = (int)this.nHidden2.Value;
                goto case 1;

            case 3:
                hiddenLayers[2] = (int)this.nHidden3.Value;
                goto case 2;

            case 4:
                hiddenLayers[3] = (int)this.nHidden4.Value;
                goto case 3;

            default:
                break;
            }

            IActivationFunction activationFunction = null;

            if (this.rbBipolarSigmoid.Checked)
            {
                activationFunction = new BipolarSigmoidFunction((double)numSigmoidAlpha.Value);
            }
            else if (this.rbSigmoid.Checked)
            {
                activationFunction = new SigmoidFunction((double)numSigmoidAlpha.Value);
            }
            else if (this.rbThreshold.Checked)
            {
                activationFunction = new ThresholdFunction();
            }

            neuralNetwork = new NetworkContainer(
                tbNetworkName.Text,
                m_networkSchema,
                activationFunction,
                hiddenLayers);


            //         neuralNetwork.Schema.DataRanges.ActivationFunctionRange = new AForge.DoubleRange((double)numRangeLow.Value, (double)numRangeHigh.Value);


            return(neuralNetwork);
        }
        public ResilientBackpropagationLearningNeuralNetwork(double learningRate, double sigmoidAlphaValue, int inputCount, params int[] neuronCounts)
            : this(learningRate, sigmoidAlphaValue)
        {
            _layerCounts = new[] { inputCount }.Concat(neuronCounts).ToArray();
            var activationFunction = new BipolarSigmoidFunction(_sigmoidAlphaValue);

            _network = new ActivationNetwork(activationFunction, inputCount, neuronCounts);
            _network.Randomize();
        }
        public EvolutionaryLearningNeuralNetwork(int populationSize, double sigmoidAlphaValue, int inputCount, int neuronCounts)
            : this(populationSize, sigmoidAlphaValue)
        {
            _layerCounts = new[] { inputCount, neuronCounts };
            var activationFunction = new BipolarSigmoidFunction(_sigmoidAlphaValue);

            _network = new ActivationNetwork(activationFunction, inputCount, neuronCounts);
            _network.Randomize();
        }
        public PerceptronLearningNeuralNetwork(double learningRate, double sigmoidAlphaValue, int inputCount, int neuronCounts)
            : this(learningRate, sigmoidAlphaValue)
        {
            _layerCounts = new[] { inputCount, neuronCounts };
            var activationFunction = new BipolarSigmoidFunction(_sigmoidAlphaValue);

            _network = new ActivationNetwork(activationFunction, inputCount, neuronCounts);
            _network.Randomize();
        }
Example #6
        public void TestName()
        {
            IActivationFunction function = new BipolarSigmoidFunction();
            var network = new ActivationNetwork(function,
                                                inputsCount: 2, neuronsCount: new[] { 2, 1 });

            var layer   = network.Layers[0] as ActivationLayer;
            var teacher = new PerceptronLearning(network);

            teacher.LearningRate = 0.1;

            var input = new double[4][];

            input[0] = new[] { 0d, 0d };
            input[1] = new[] { 0d, 1d };
            input[2] = new[] { 1d, 0d };
            input[3] = new[] { 1d, 1d };

            var output = new double[4][];

            output[0] = new[] { 0d };
            output[1] = new[] { 0d };
            output[2] = new[] { 0d };
            output[3] = new[] { 1d };



            // Iterate until the stop criterion is met
            double error = double.PositiveInfinity;
            double previous;

            do
            {
                previous = error;

                // Compute one learning iteration
                error = teacher.RunEpoch(input, output);
            } while (Math.Abs(previous - error) > 0.01); // stop once the epoch error stabilizes

            int[] answers = input.Apply(network.Compute).GetColumn(0).Apply(System.Math.Sign);

            //https://github.com/accord-net/framework/blob/development/Samples/Neuro/Perceptron/Applications/OneLayerPerceptron.cs
        }
Example #7
        private static void network(double[][] inputs, int[] outputs)
        {
            // Since we would like to learn binary outputs in the form
            // [-1,+1], we can use a bipolar sigmoid activation function
            IActivationFunction function = new BipolarSigmoidFunction();
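            // (The bipolar sigmoid is f(x) = 2 / (1 + e^(-alpha*x)) - 1,
            // so its outputs range over (-1, +1).)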

            // In our problem, we have 2 inputs (x, y pairs), and we will
            // be creating a network with 5 hidden neurons and 1 output:
            //
            var network = new ActivationNetwork(function,
                                                inputsCount: 2, neuronsCount: new[] { 5, 1 });

            // Create a Levenberg-Marquardt algorithm
            var teacher = new LevenbergMarquardtLearning(network)
            {
                UseRegularization = true
            };


            // Because the network is expecting multiple outputs,
            // we have to convert our single variable into arrays
            //
            var y = outputs.ToDouble().ToJagged();
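            // e.g. the labels { -1, +1 } become { { -1.0 }, { +1.0 } }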

            // Iterate until stop criteria is met
            double error = double.PositiveInfinity;
            double previous;

            do
            {
                previous = error;

                // Compute one learning iteration
                error = teacher.RunEpoch(inputs, y);
            } while (Math.Abs(previous - error) > 1e-10 * previous);


            // Classify the samples using the model
            int[] answers = inputs.Apply(network.Compute).GetColumn(0).Apply(System.Math.Sign);

            // Plot the results
            ScatterplotBox.Show("Expected results", inputs, outputs);
            ScatterplotBox.Show("Network results", inputs, answers)
            .Hold();
        }
        public void Learn()
        {
            const int hiddenNeurons   = 5;
            var       numberOfInputs  = GetControllerOutputProperties().Length;
            var       numberOfClasses = Enum.GetNames(typeof(OutputClass)).Length;

            var outputs = Accord.Statistics.Tools.Expand(GetOutputs(), numberOfClasses, -1, 1);
            var inputs  = GetLearnInputs();

            var activationFunction = new BipolarSigmoidFunction(2);

            NeuralNetwork = new ActivationNetwork(activationFunction, numberOfInputs, hiddenNeurons, numberOfClasses);

            new NguyenWidrow(NeuralNetwork).Randomize();

            var teacher = new LevenbergMarquardtLearning(NeuralNetwork);

            for (var i = 0; i < 10; i++)
            {
                teacher.RunEpoch(inputs, outputs);
            }
        }
        private static void BuildNNModel(double[][] trainInput, int[] trainOutput, double[][] testInput, int[] testOutput)
        {
            double[][] outputs = Accord.Math.Jagged.OneHot(trainOutput);

            var function = new BipolarSigmoidFunction(2);
            var network  = new ActivationNetwork(
                function,   // 91 inputs, 20 hidden neurons, 10 output classes
                91,
                20,
                10
                );

            var teacher = new LevenbergMarquardtLearning(network);

            Console.WriteLine("\n-- Training Neural Network");
            int    numEpoch = 10;
            double error    = Double.PositiveInfinity;

            for (int i = 0; i < numEpoch; i++)
            {
                error = teacher.RunEpoch(trainInput, outputs);
                Console.WriteLine("* Epoch {0} - error: {1:0.0000}", i + 1, error);
            }
            Console.WriteLine("");

            List <int> inSamplePredsList = new List <int>();

            for (int i = 0; i < trainInput.Length; i++)
            {
                double[] output = network.Compute(trainInput[i]);
                int      pred   = output.ToList().IndexOf(output.Max());
                inSamplePredsList.Add(pred);
            }

            List <int> outSamplePredsList = new List <int>();

            for (int i = 0; i < testInput.Length; i++)
            {
                double[] output = network.Compute(testInput[i]);
                int      pred   = output.ToList().IndexOf(output.Max());
                outSamplePredsList.Add(pred);
            }

            int[] inSamplePreds  = inSamplePredsList.ToArray();
            int[] outSamplePreds = outSamplePredsList.ToArray();

            // Accuracy
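            // (ZeroOneLoss.Loss returns the fraction of misclassified samples,
            // so 1 - loss is the classification accuracy.)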
            double inSampleAccuracy  = 1 - new ZeroOneLoss(trainOutput).Loss(inSamplePreds);
            double outSampleAccuracy = 1 - new ZeroOneLoss(testOutput).Loss(outSamplePreds);

            Console.WriteLine("* In-Sample Accuracy: {0:0.0000}", inSampleAccuracy);
            Console.WriteLine("* Out-of-Sample Accuracy: {0:0.0000}", outSampleAccuracy);

            // Build confusion matrix
            int[][] confMatrix = BuildConfusionMatrix(
                testOutput, outSamplePreds, 10
                );
            System.IO.File.WriteAllLines(
                Path.Combine(
                    @"\\Mac\Home\Documents\c-sharp-machine-learning\ch.8\input-data",
                    "nn-conf-matrix.csv"
                    ),
                confMatrix.Select(x => String.Join(",", x))
                );

            // Precision Recall
            PrintPrecisionRecall(confMatrix);
            DrawROCCurve(testOutput, outSamplePreds, 10, "NN");
        }
        public void MulticlassTest1()
        {
            Accord.Math.Tools.SetupGenerator(0);

            // Suppose we would like to teach a network to classify
            // the following input vectors into 3 possible classes:
            //
            double[][] inputs =
            {
                new double[] { 0, 1, 1, 0 }, // 0
                new double[] { 0, 1, 0, 0 }, // 0
                new double[] { 0, 0, 1, 0 }, // 0
                new double[] { 0, 1, 1, 0 }, // 0
                new double[] { 0, 1, 0, 0 }, // 0
                new double[] { 1, 0, 0, 0 }, // 1
                new double[] { 1, 0, 0, 0 }, // 1
                new double[] { 1, 0, 0, 1 }, // 1
                new double[] { 0, 0, 0, 1 }, // 1
                new double[] { 0, 0, 0, 1 }, // 1
                new double[] { 1, 1, 1, 1 }, // 2
                new double[] { 1, 0, 1, 1 }, // 2
                new double[] { 1, 1, 0, 1 }, // 2
                new double[] { 0, 1, 1, 1 }, // 2
                new double[] { 1, 1, 1, 1 }, // 2
            };

            int[] classes =
            {
                0, 0, 0, 0, 0,
                1, 1, 1, 1, 1,
                2, 2, 2, 2, 2,
            };

            // First we have to convert this problem into a form the neural
            // network can handle. The first step is to expand the classes into
            // indicator vectors, where a 1 at a given position signifies that
            // the sample belongs to that class.
            //
            double[][] outputs = Accord.Statistics.Tools.Expand(classes, -1, +1);

            // Create an activation function for the net
            var function = new BipolarSigmoidFunction();

            // Create an activation network with the function and
            //  4 inputs, 5 hidden neurons and 3 possible outputs:
            var network = new ActivationNetwork(function, 4, 5, 3);

            // Randomly initialize the network
            new NguyenWidrow(network).Randomize();

            // Teach the network using parallel Rprop:
            var teacher = new ParallelResilientBackpropagationLearning(network);

            double error = 1.0;
            while (error > 1e-5)
                error = teacher.RunEpoch(inputs, outputs);


            // Checks if the network has learned
            for (int i = 0; i < inputs.Length; i++)
            {
                double[] answer = network.Compute(inputs[i]);

                int expected = classes[i];
                int actual; answer.Max(out actual);

                Assert.AreEqual(expected, actual);
            }
        }
Example #11
        static void TestAnn2()
        {
            // Here we will be creating a neural network to process 3-valued input
            // vectors and classify them into 4 possible classes. We will be using
            // a single hidden layer with 5 hidden neurons to accomplish this task.
            //
            int numberOfInputs  = 3;
            int numberOfClasses = 4;
            int hiddenNeurons   = 5;

            // Those are the input vectors and their expected class labels
            // that we expect our network to learn.
            //
            double[][] input =
            {
                new double[] { -1, -1, -1 }, // 0
                new double[] { -1,  1, -1 }, // 1
                new double[] {  1, -1, -1 }, // 1
                new double[] {  1,  1, -1 }, // 0
                new double[] { -1, -1,  1 }, // 2
                new double[] { -1,  1,  1 }, // 3
                new double[] {  1, -1,  1 }, // 3
                new double[] {  1,  1,  1 } // 2
            };

            int[] labels = { 0, 1, 1, 0, 2, 3, 3, 2 };

            // In order to perform multi-class classification, we have to select a
            // decision strategy so that neural network outputs can be interpreted
            // as labels. For this, we expand our 4 possible class labels into
            // 4-dimensional output vectors where the dimension corresponding to
            // the label contains +1 and every other dimension contains -1.

            double[][] outputs = Accord.Statistics.Tools.Expand(labels, numberOfClasses, -1, 1);
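            // For these labels, the first rows of 'outputs' are:
            //   { +1, -1, -1, -1 }   // label 0
            //   { -1, +1, -1, -1 }   // label 1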

            // Next we can proceed to create our network
            var function = new BipolarSigmoidFunction(2);
            var network  = new ActivationNetwork(function, numberOfInputs, hiddenNeurons, numberOfClasses);

            // Heuristically randomize the network
            new NguyenWidrow(network).Randomize();

            // Create the learning algorithm
            var teacher = new LevenbergMarquardtLearning(network);
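            // (Levenberg-Marquardt blends gradient descent with Gauss-Newton
            // steps, which usually converges in far fewer epochs than plain
            // backpropagation on small networks like this one.)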

            // Teach the network for 10 iterations:
            double error = Double.PositiveInfinity;

            for (int i = 0; i < 10; i++)
            {
                error = teacher.RunEpoch(input, outputs);
            }

            // At this point, the network should be able to
            // perfectly classify the training input points.

            for (int i = 0; i < input.Length; i++)
            {
                int      answer;
                double[] output   = network.Compute(input[i]);
                double   response = output.Max(out answer);

                int expected = labels[i];

                // at this point, the variables 'answer' and
                // 'expected' should contain the same value.
            }
        }
Example #12
        public void MulticlassTest1()
        {
            Accord.Math.Tools.SetupGenerator(0);
            // Neuron.RandGenerator = new ThreadSafeRandom(0);


            int numberOfInputs  = 3;
            int numberOfClasses = 4;
            int hiddenNeurons   = 5;

            double[][] input =
            {
                new double[] { -1, -1, -1 }, // 0
                new double[] { -1,  1, -1 }, // 1
                new double[] {  1, -1, -1 }, // 1
                new double[] {  1,  1, -1 }, // 0
                new double[] { -1, -1,  1 }, // 2
                new double[] { -1,  1,  1 }, // 3
                new double[] {  1, -1,  1 }, // 3
                new double[] {  1,  1,  1 }  // 2
            };

            int[] labels =
            {
                0,
                1,
                1,
                0,
                2,
                3,
                3,
                2,
            };

            double[][] outputs = Accord.Statistics.Tools
                                 .Expand(labels, numberOfClasses, -1, 1);

            var function = new BipolarSigmoidFunction(2);
            var network  = new ActivationNetwork(function,
                                                 numberOfInputs, hiddenNeurons, numberOfClasses);

            new NguyenWidrow(network).Randomize();

            var teacher = new LevenbergMarquardtLearning(network);

            double error = Double.PositiveInfinity;

            for (int i = 0; i < 10; i++)
            {
                error = teacher.RunEpoch(input, outputs);
            }

            for (int i = 0; i < input.Length; i++)
            {
                int      answer;
                double[] output   = network.Compute(input[i]);
                double   response = output.Max(out answer);

                int expected = labels[i];
                Assert.AreEqual(expected, answer);
            }
        }
        public void MulticlassTest1()
        {
            Accord.Math.Tools.SetupGenerator(0);

            // Suppose we would like to teach a network to classify
            // the following input vectors into 3 possible classes:
            //
            double[][] inputs =
            {
                new double[] { 0, 1, 1, 0 }, // 0
                new double[] { 0, 1, 0, 0 }, // 0
                new double[] { 0, 0, 1, 0 }, // 0
                new double[] { 0, 1, 1, 0 }, // 0
                new double[] { 0, 1, 0, 0 }, // 0
                new double[] { 1, 0, 0, 0 }, // 1
                new double[] { 1, 0, 0, 0 }, // 1
                new double[] { 1, 0, 0, 1 }, // 1
                new double[] { 0, 0, 0, 1 }, // 1
                new double[] { 0, 0, 0, 1 }, // 1
                new double[] { 1, 1, 1, 1 }, // 2
                new double[] { 1, 0, 1, 1 }, // 2
                new double[] { 1, 1, 0, 1 }, // 2
                new double[] { 0, 1, 1, 1 }, // 2
                new double[] { 1, 1, 1, 1 }, // 2
            };

            int[] classes =
            {
                0, 0, 0, 0, 0,
                1, 1, 1, 1, 1,
                2, 2, 2, 2, 2,
            };

            // First we have to convert this problem into a form the neural
            // network can handle. The first step is to expand the classes into
            // indicator vectors, where a 1 at a given position signifies that
            // the sample belongs to that class.
            //
            double[][] outputs = Statistics.Tools.Expand(classes, -1, +1);

            // Create an activation function for the net
            var function = new BipolarSigmoidFunction();

            // Create an activation network with the function and
            //  4 inputs, 5 hidden neurons and 3 possible outputs:
            var network = new ActivationNetwork(function, 4, 5, 3);

            // Randomly initialize the network
            new NguyenWidrow(network).Randomize();

            // Teach the network using parallel Rprop:
            var teacher = new ParallelResilientBackpropagationLearning(network);

            double error = 1.0;

            while (error > 1e-5)
            {
                error = teacher.RunEpoch(inputs, outputs);
            }


            // Checks if the network has learned
            for (int i = 0; i < inputs.Length; i++)
            {
                double[] answer = network.Compute(inputs[i]);

                int expected = classes[i];
                int actual; answer.Max(out actual);

                Assert.AreEqual(expected, actual);
            }
        }
Example #14
        public void Train(IForecastingDataSets datasets)
        {
            NeedToStop = false;
            if (ModelStartRunning != null)
            {
                ModelStartRunning(this, new ComponentRunEventArgs(datasets));
            }
            IActivationFunction actFunc = null;
            double sigmoidAlphaValue    = mAnnModelParameter.SigmoidAlphaValue;

            if (mAnnModelParameter.HiddenActivationFunction == fann_activationfunc_enum.FANN_SIGMOID_SYMMETRIC)
            {
                actFunc = new BipolarSigmoidFunction(sigmoidAlphaValue);
            }
            else if (mAnnModelParameter.HiddenActivationFunction == fann_activationfunc_enum.FANN_SIGMOID)
            {
                actFunc = new SigmoidFunction(sigmoidAlphaValue);
            }
            else if (mAnnModelParameter.HiddenActivationFunction == fann_activationfunc_enum.FANN_THRESHOLD)
            {
                actFunc = new ThresholdFunction();
            }
            else
            {
                actFunc = new BipolarSigmoidFunction(sigmoidAlphaValue);
            }

            mAnnModelParameter.InputNeuronCount  = datasets.InputData[0].Length;
            mAnnModelParameter.OutputNeuronCount = datasets.OutputData[0].Length;
            int inputsCount  = mAnnModelParameter.InputNeuronCount;
            int outputsCount = mAnnModelParameter.OutputNeuronCount;

            // mAnnModelParameter.HiddenNeuronsCount = new int[1];
            //      mAnnModelParameter.HiddenNeuronsCount[0] = datasets.InputData[0].Length * 2 + 1;
            mAnnModelParameter.HiddenCount = 1;

            int[] neuronsCount = new int[mAnnModelParameter.HiddenNeuronsCount.Length + 1];
            for (int i = 0; i < mAnnModelParameter.HiddenNeuronsCount.Length; i++)
            {
                neuronsCount[i] = mAnnModelParameter.HiddenNeuronsCount[i];
            }
            neuronsCount[mAnnModelParameter.HiddenNeuronsCount.Length] = outputsCount;

            mNetwork = new ActivationNetwork(actFunc, inputsCount, neuronsCount);
            BackPropagationLearning teacher = new BackPropagationLearning(mNetwork);
            ActivationLayer         layer   = mNetwork[0];

            teacher.LearningRate = mAnnModelParameter.LearningRate;
            teacher.Momentum     = mAnnModelParameter.LearningMomentum;

            List <double> arError      = new List <double>();
            int           solutionSize = datasets.InputData.Length;

            datasets.ForecastedData = new double[solutionSize][];
            int iteration = 1;

            while (!mNeedToStop)
            {
                double error = teacher.RunEpoch(datasets.InputData, datasets.OutputData);
                arError.Add(error);

                double learningError   = 0.0;
                double predictionError = 0.0;

                for (int i = 0, n = solutionSize; i < n; i++)
                {
                    datasets.ForecastedData[i] = (double[])mNetwork.Compute(datasets.InputData[i]).Clone();

                    if (i >= n - mAnnModelParameter.MaximumWindowSize)
                    {
                        predictionError += Math.Abs(datasets.OutputData[i][0] - datasets.ForecastedData[i][0]);
                    }
                    else
                    {
                        learningError += Math.Abs(datasets.OutputData[i][0] - datasets.ForecastedData[i][0]);
                    }
                }
                if (iteration >= mAnnModelParameter.Iterations)
                {
                    NeedToStop = true;
                }
                if (learningError <= mAnnModelParameter.DesiredError)
                {
                    NeedToStop = true;
                }
                if (ModelRunningEpoch != null)
                {
                    ModelRunningEpoch(this, new AnnModelRunEpochEventArgs(iteration, error));
                }
                iteration++;
            }

            LayerWeightCollection = new LayerWeight[mNetwork.LayersCount];

            LayerWeightCollection[0].Weight      = new double[layer.NeuronsCount][];
            LayerWeightCollection[0].ThreashHold = new double[layer.NeuronsCount][];
            for (int i = 0; i < layer.NeuronsCount; i++)
            {
                LayerWeightCollection[0].Weight[i]      = new double[layer.InputsCount];
                LayerWeightCollection[0].ThreashHold[i] = new double[layer.InputsCount];
                for (int j = 0; j < layer.InputsCount; j++)
                {
                    LayerWeightCollection[0].Weight[i][j]      = layer[i][j];
                    LayerWeightCollection[0].ThreashHold[i][j] = layer[i][j];
                }
            }

            layer = mNetwork[1];
            LayerWeightCollection[1].Weight      = new double[layer.NeuronsCount][];
            LayerWeightCollection[1].ThreashHold = new double[layer.NeuronsCount][];
            for (int i = 0; i < layer.NeuronsCount; i++)
            {
                LayerWeightCollection[1].Weight[i]      = new double[layer.InputsCount];
                LayerWeightCollection[1].ThreashHold[i] = new double[layer.InputsCount];
                for (int j = 0; j < layer.InputsCount; j++)
                {
                    LayerWeightCollection[1].Weight[i][j]      = layer[i][j];
                    LayerWeightCollection[1].ThreashHold[i][j] = layer[i][j];
                }
            }

            if (ModelFinishRunning != null)
            {
                ModelFinishRunning(this, new ComponentRunEventArgs(datasets));
            }
        }
Example #15
        static void Main(string[] args)
        {
            // Read the Excel worksheet into a DataTable
            DataTable table = new ExcelReader("examples.xls").GetWorksheet("Classification - Yin Yang");

            // Convert the DataTable to input and output vectors
            double[][] inputs  = table.ToJagged <double>("X", "Y");
            int[]      outputs = table.Columns["G"].ToArray <int>();


            // Since we would like to learn binary outputs in the form
            // [-1,+1], we can use a bipolar sigmoid activation function
            IActivationFunction function = new BipolarSigmoidFunction();

            Console.Write("Enter network name: ");
            string networkName = Console.ReadLine();

            Network network = null;

            if (File.Exists(networkName))
            {
                network = Network.Load(networkName);
            }
            else
            {
                network = new ActivationNetwork(function,
                                                inputsCount: 2, neuronsCount: new[] { 10, 5, 1 });
            }


            // Create a Levenberg-Marquardt algorithm
            var teacher = new LevenbergMarquardtLearning((ActivationNetwork)network)
            {
                UseRegularization = true
            };

            // Because the network is expecting multiple outputs,
            // we have to convert our single variable into arrays
            double[][] y = outputs.ToDouble().ToJagged();

            // Iterate until stop criteria is met
            double error = double.PositiveInfinity;
            double previous;

            Dictionary <int, double> epochError = new Dictionary <int, double>();

            int currentEpoch = 1;

            do
            {
                previous = error;

                // Compute one learning iteration
                error = teacher.RunEpoch(inputs, y);
                epochError.Add(currentEpoch++, error);
            } while (Math.Abs(previous - error) > 0.001);

            network.Save(networkName);


            // Classify the samples using the model
            double[] decimalAnswers = inputs.Apply(network.Compute).GetColumn(0); // can be used as probability.
            int[]    answers        = decimalAnswers.Apply(Math.Sign);

            // Plot the results
            ScatterplotBox.Show("Expected results", inputs, outputs);
            ScatterplotBox.Show("Network results", inputs, answers);

            PlotError("Training error", epochError);
        }
Example #16
        static void Main()
        {
            Console.WriteLine("Создание нейронной сети...");

            DataItem data = DataItemFactory.GetNumericData();

            int inputCount  = data.Input.First().Length;
            var neuronCount = new[] { 10 };
            IActivationFunction function = new BipolarSigmoidFunction();
            double learningRate          = 0.1;
            double momentum = 0;

            int    maxEpochNumber = 1000000;
            double minErrorChange = 0.000001;
            double minError       = 0.001;

            var network = MultilayerNeuralNetwork.CreateNetwork(neuronCount, inputCount, function);

            network.Randomize();

            Console.WriteLine("Создание нейронной сети завершено.");

            var teacher = new PerceptronLearning(network)
            {
                LearningRate = learningRate
            };

            int    epochNumber = 1;
            double lastError   = double.MaxValue;
            double error;
            double errorChange;

            Console.WriteLine("Start learning...");

            do
            {
                DateTime dtStart = DateTime.Now;
                error = teacher.RunEpoch(data.Input, data.Output) / data.Input.Length;
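                // RunEpoch returns the summed error over the epoch, so dividing
                // by the number of samples gives the mean error per sample.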
                Console.WriteLine(
                    $"Epoch #{epochNumber} finished; " +
                    $"current error is {error}; " +
                    $"it takes: {(DateTime.Now - dtStart).Duration()}");

                errorChange = Math.Abs(lastError - error);
                lastError   = error;
                epochNumber++;
            } while (epochNumber < maxEpochNumber &&
                     error > minError &&
                     errorChange > minErrorChange);

            for (int i = 0; i < data.Input.Length; i++)
            {
                double[] outputs = network.Compute(data.Input[i]);
                string   strIn   = "";
                foreach (var expected in data.Output[i])
                {
                    strIn += $"{expected},";
                }
                string strOut = "";
                foreach (var output in outputs)
                {
                    strOut += $"{Math.Abs(output):0.00},";
                }

                Console.WriteLine($"{i}. Expected {strIn} Actual {strOut}");
            }
        }
Example #17
        public bool Train()
        {
            if (type == ClassifierType.SVM)
            {
                if (trainData != null)
                {
                    float[,] mat = trainData.Data;
                    double[][] inputs = new double[trainData.Rows][];
                    for (int i = 0; i < trainData.Rows; i++)
                    {
                        // Pad to at least two features by repeating the last column
                        int numFeatures = Math.Max(trainData.Cols, 2);
                        inputs[i] = new double[numFeatures];
                        for (int j = 0; j < numFeatures; j++)
                        {
                            inputs[i][j] = mat[i, Math.Min(trainData.Cols - 1, j)];
                        }
                    }
                    int[] outputs = new int[trainLabels.Rows];
                    for (int i = 0; i < trainLabels.Rows; i++)
                    {
                        outputs[i] = trainLabels[i, 0];
                    }

                    int numClasses = nameForID.Count;

                    if (numClasses <= 1)
                    {
                        return(true);
                    }

                    IKernel kernel;
                    switch (kernelType)
                    {
                    case KernelType.Linear:
                        kernel = new Linear();
                        break;

                    case KernelType.Poly:
                        kernel = new Polynomial(3);
                        break;

                    case KernelType.Rbf:
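                        // With few samples a fixed sigma is safer; otherwise let
                        // Gaussian.Estimate pick one from pairwise distances in
                        // a subsample of the data.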
                        if (inputs.Length < 20)
                        {
                            kernel = new Gaussian(0.1);
                        }
                        else
                        {
                            kernel = Gaussian.Estimate(inputs, inputs.Length / 4);
                        }
                        break;

                    case KernelType.Chi2:
                        kernel = new ChiSquare();
                        break;

                    default:
                        kernel = inputs[0].Length > 20 ? (IKernel)(new ChiSquare()) : (IKernel)(Gaussian.Estimate(inputs, inputs.Length / 4));
                        break;
                    }
                    svm = new MulticlassSupportVectorMachine(inputs: inputs[0].Length, kernel: kernel, classes: numClasses);
                    var teacher = new MulticlassSupportVectorLearning(svm, inputs, outputs)
                    {
                        Algorithm = (machine, classInputs, classOutputs, i, j) => new SequentialMinimalOptimization(machine, classInputs, classOutputs)
                        {
                            Tolerance = 1e-6,
                            UseComplexityHeuristic = true
                            //Tolerance = 0.001,
                            //Complexity = 1,
                            //CacheSize = 200
                        }
                    };
                    try
                    {
                        double error = teacher.Run();
                    }
                    catch (Exception ex) { Debug.WriteLine("Error training SVM: " + ex.Message); }

                    teacher = new MulticlassSupportVectorLearning(svm, inputs, outputs)
                    {
                        Algorithm = (machine, classInputs, classOutputs, i, j) => new ProbabilisticOutputCalibration(machine, classInputs, classOutputs)
                    };
                    try
                    {
                        double error = teacher.Run();
                    }
                    catch (Exception ex) { Debug.WriteLine("Error calibrating SVM: " + ex.Message); }

                    return(true);
                }
                return(false);
            }
            else if (type == ClassifierType.NeuralNet)
            {
                float[,] mat = trainData.Data;
                double[][] inputs      = new double[trainData.Rows][];
                List <int> randomOrder = new List <int>(trainData.Rows);
                for (int i = 0; i < trainData.Rows; i++)
                {
                    randomOrder.Add(i);
                }
                randomOrder.Shuffle();
                for (int i = 0; i < trainData.Rows; i++)
                {
                    inputs[randomOrder[i]] = new double[trainData.Cols];
                    for (int j = 0; j < trainData.Cols; j++)
                    {
                        inputs[randomOrder[i]][j] = mat[i, j];
                    }
                }
                int[] classes = new int[trainLabels.Rows];
                for (int i = 0; i < trainLabels.Rows; i++)
                {
                    classes[randomOrder[i]] = trainLabels[i, 0];
                }

                // First we have to convert this problem into a form the neural
                // network can handle. The first step is to expand the classes into
                // indicator vectors, where a 1 at a given position signifies that
                // the sample belongs to that class.
                //
                double[][] outputs = Accord.Statistics.Tools.Expand(classes, -1, +1);

                // Create an activation function for the net
                var function = new BipolarSigmoidFunction();

                // Create an activation network with the function and
                //  N inputs, (M+N)/2 hidden neurons and M possible outputs:
                int N = inputs[0].Length;
                int M = nameForID.Count;
                network = new ActivationNetwork(function, N, (M + N) / 2, M);

                // Randomly initialize the network
                new NguyenWidrow(network).Randomize();

                // Teach the network using parallel Rprop:
                var teacher = new ParallelResilientBackpropagationLearning(network);

                double error = 1.0;
                int    iter  = 0;
                while (error > 1e-7 && iter < 100)
                {
                    //for (int iter = 0; iter < 10000 && error > 1e-5; iter++)
                    error = teacher.RunEpoch(inputs, outputs);
                    iter++;
                }

                return(true);
            }
            else if (type == ClassifierType.KMeans)
            {
                List <double[]> rows = new List <double[]>();
                float[,] data = trainData.Data;
                for (int i = 0; i < trainData.Rows; i++)
                {
                    double[] row = new double[trainData.Cols];
                    for (int j = 0; j < trainData.Cols; j++)
                    {
                        row[j] = data[i, j];
                    }
                    rows.Add(row);
                }
                double[][] points = rows.ToArray();

                kmeans = new Accord.MachineLearning.KMeans(nameForID.Count * 2);
                int[] labels = kmeans.Compute(points);
                clusterClasses = new Dictionary <int, Dictionary <int, int> >();
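                // Build a per-cluster histogram of true labels so that each
                // cluster can later be mapped to its majority class.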
                for (int i = 0; i < labels.Length; i++)
                {
                    if (!clusterClasses.ContainsKey(labels[i]))
                    {
                        clusterClasses.Add(labels[i], new Dictionary <int, int>());
                    }
                    int label = trainLabels.Data[i, 0];
                    if (!clusterClasses[labels[i]].ContainsKey(label))
                    {
                        clusterClasses[labels[i]].Add(label, 0);
                    }
                    clusterClasses[labels[i]][label]++;
                }

                return(true);
            }
            else
            {
                return(true);
            }
        }