/*
         *
         * TODO: Testing.
         * TODO: It doesn't work that well for some reason
         *
         */
        public static NeuralNetwork RandomChangeTraining(NeuralNetwork network, float[][] examples, float[][] target, float lossTarget, float maxRandomChangeAmount = 1f)
        {
            bool   done      = false;
            double bestLoss  = Double.PositiveInfinity;
            float  maxChange = maxRandomChangeAmount;
            int    iteration = 0;

            NeuralNetwork bestNetwork = network;

            while (!done)
            {
                NeuralNetwork newNetwork = SimilarNetwork(bestNetwork, maxChange);
                double        loss       = 0d;

                // For each example
                for (int i = 0; i < examples.Length; i++)
                {
                    float[] feature    = examples[i];
                    float[] label      = target[i];
                    float[] prediction = newNetwork.Predict(feature);

                    // For each output value
                    for (int j = 0; j < label.Length; j++)
                    {
                        double predictionValue = prediction[j];
                        double labelValue      = label[j];
                        loss += Math.Pow(labelValue - predictionValue, 2d);
                        //Console.WriteLine(predictionValue);
                        //Console.WriteLine(labelValue);
                    }
                }
                // Average loss/sum of squares over all examples
                loss = loss / examples.Length;

                Console.WriteLine("L " + loss.ToString());

                if (loss < 0.005d)
                {
                    done = true;
                }

                if (loss > bestLoss)
                {
                    Console.WriteLine("BAD NETWORK");
                }

                if (loss < bestLoss)
                {
                    bestNetwork = newNetwork;
                    bestLoss    = loss;
                    Console.WriteLine("===================== (" + iteration.ToString() + ") Best Loss: " + bestLoss.ToString());
                    Console.WriteLine("W" + newNetwork.layers[0].weights[0, 0].ToString());
                    Console.WriteLine("B" + newNetwork.layers[0].weights[1, 0].ToString());
                }

                iteration += 1;
            }

            return(bestNetwork);
        }
        /*
         * One backpropagation step: measures the average signed error of every
         * output node over the whole training set, propagates that error back
         * through the layers as per-edge "nudges", and returns a fresh network
         * built from nudged copies of every layer. The input network is not
         * modified.
         *
         * TODO: Testing.
         * TODO: Verify knowledge of backpropagation.
         */
        public static NeuralNetwork BackpropagationTraining(NeuralNetwork network, float[][] examples, float[][] target, float learningRateMultiplier = 1f)
        {
            int exampleCount = examples.Length;
            int outputCount  = network.OutputSize;
            int layerCount   = network.NumFunctionalLayers;

            // errorsByLayer[layer][node]; nudgesByLayer[layer][inputNode, outputNode].
            // Edges for index `layer` run from layer-1 into layer.
            float[][]  errorsByLayer = new float[layerCount][];
            float[][,] nudgesByLayer = new float[layerCount][, ];

            // OUTPUT NODE ERROR: average of (target - prediction) per output
            // node across all examples; a too-large prediction gives a
            // negative error.
            float[] meanOutputError = new float[network.OutputSize];
            for (int example = 0; example < exampleCount; example++)
            {
                float[] predicted = network.Predict(examples[example]);
                for (int node = 0; node < outputCount; node++)
                {
                    meanOutputError[node] += target[example][node] - predicted[node];
                }
            }
            for (int node = 0; node < outputCount; node++)
            {
                meanOutputError[node] /= exampleCount;
            }
            errorsByLayer[layerCount - 1] = meanOutputError;

            // Per-edge nudges for the output layer, derived from its errors.
            nudgesByLayer[layerCount - 1] = network.layers[layerCount - 1].GetDesiredNudges(meanOutputError);

            // BACKPROPAGATION: walk the hidden layers from back to front,
            // deriving each layer's node errors from the nudges already
            // computed for the layer above, then its own edge nudges.
            for (int layer = layerCount - 2; layer >= 0; layer--)
            {
                Layer current = network.layers[layer];
                Layer above   = network.layers[layer + 1];

                errorsByLayer[layer] = above.GetInputNodeErrors(nudgesByLayer[layer + 1]);
                nudgesByLayer[layer] = current.GetDesiredNudges(errorsByLayer[layer]);
            }
            Console.WriteLine("Output error tot: " + string.Join(" ", meanOutputError));

            // Apply the accumulated nudges, scaled by the learning rate, to
            // build the layers of the returned network.
            Layer[] updatedLayers = new Layer[layerCount];
            for (int layer = 0; layer < layerCount; layer++)
            {
                updatedLayers[layer] = Layer.NudgedLayer(network.layers[layer], nudgesByLayer[layer], learningRateMultiplier);
            }

            return(new NeuralNetwork(updatedLayers));
        }
        // Example 3 (separate pasted snippet — note it targets a different NeuralNetwork API than the methods above)
        // Trains a small network on the 4-input truth table below, then prints
        // (rounded prediction - rounded expected) for every row, followed by a
        // blocking Console.Read so the window stays open.
        static void Main()
        {
            var expectedOutputs = new double[] { 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1 };
            var trainingInputs  = new double[, ]
            {
                { 0, 0, 0, 0 },
                { 0, 0, 0, 1 },
                { 0, 0, 1, 0 },
                { 0, 0, 1, 1 },
                { 0, 1, 0, 0 },
                { 0, 1, 0, 1 },
                { 0, 1, 1, 0 },
                { 0, 1, 1, 1 },
                { 1, 0, 0, 0 },
                { 1, 0, 0, 1 },
                { 1, 0, 1, 0 },
                { 1, 0, 1, 1 },
                { 1, 1, 0, 0 },
                { 1, 1, 0, 1 },
                { 1, 1, 1, 0 },
                { 1, 1, 1, 1 }
            };

            // 4 inputs, 1 output; remaining topology arguments taken as-is
            // from the original example.
            var topology      = new Topology(4, 1, 0.1, 2);
            var neuralNetwork = new NeuralNetwork(topology);
            var difference    = neuralNetwork.Learn(expectedOutputs, trainingInputs, 10000);

            // Predict each row and print the rounded error in input order.
            for (int row = 0; row < expectedOutputs.Length; row++)
            {
                var inputRow  = NeuralNetwork.GetRow(trainingInputs, row);
                var predicted = neuralNetwork.Predict(inputRow).Output;

                var expected = Math.Round(expectedOutputs[row], 2);
                var actual   = Math.Round(predicted, 2);
                Console.WriteLine(actual - expected);
            }

            Console.Read();
        }