コード例 #1
0
ファイル: Program.cs プロジェクト: freyyy/ThirdYearProject
        static void Main(string[] args)
        {
            // XOR truth table used as the training set.
            double[][] inputs = { new double[] { 0, 0 }, new double[] { 0, 1 }, new double[] { 1, 0 }, new double[] { 1, 1 } };
            double[] targets = { 0, 1, 1, 0 };

            // 2-2-1 sigmoid network trained with backpropagation, learning rate 0.25.
            ActivationFunction activation = new SigmoidFunction();
            Network network = new Network(new Layer[]
            {
                new Layer(2, 2, activation),
                new Layer(1, 2, activation)
            });
            LearningStrategy learning = new BackpropagationLearning(network, 0.25);

            double error = 1;
            int epoch = 0;
            // Train until the epoch error drops below the threshold.
            while (error > 0.01)
            {
                error = learning.RunEpoch(inputs, targets);
                epoch++;
                Console.WriteLine("Iteration {0} error: {1}", epoch, error);
            }

            Console.WriteLine("Training complete after {0} epochs using the Backpropagation training regime.", epoch);
            Console.WriteLine("Testing");
            // Evaluate the trained network on every input pattern.
            foreach (double[] pattern in inputs)
            {
                network.Update(pattern);
                Console.WriteLine("{0}", network.Output[0]);
            }
        }
コード例 #2
0
        public void SparseAutoencoderComputeDeltas_ReturnsDeltas()
        {
            // Arrange: a 2-2-1 sigmoid network with fixed weights and biases.
            ActivationFunction activation = new SigmoidFunction();
            Layer hidden = new Layer(2, 2, activation);
            Layer output = new Layer(1, 2, activation);
            Network network = new Network(new Layer[] { hidden, output });
            SparseAutoencoderLearning learner = new SparseAutoencoderLearning(network);
            double[][] input = new double[][] { new double[] { 0.5, 0.6 } };
            double[][] target = new double[][] { new double[] { 1 } };
            // Hand-computed per-layer deltas for the fixed weights below.
            double[][] expected = new double[][]
            {
                new double[] { -0.0108698887658827, -0.0105735765912387 },
                new double[] { -0.0875168367272141 }
            };

            hidden[0][0] = 0.1;
            hidden[0][1] = 0.2;
            hidden[1][0] = 0.3;
            hidden[1][1] = 0.4;
            output[0][0] = 0.5;
            output[0][1] = 0.5;
            hidden[0].Bias = -0.01;
            hidden[1].Bias = -0.02;
            output[0].Bias = -0.05;

            // Act: deltas are derived from the cached activations of example 0.
            learner.UpdateCachedActivations(input);
            double[][] actual = learner.ComputeDeltas(0, target[0]);

            Assert.AreEqual(expected[0][0], actual[0][0], 0.0001, "Invalid deltas");
            Assert.AreEqual(expected[0][1], actual[0][1], 0.0001, "Invalid deltas");
            Assert.AreEqual(expected[1][0], actual[1][0], 0.0001, "Invalid deltas");
        }
コード例 #3
0
ファイル: Program.cs プロジェクト: freyyy/ThirdYearProject
        static void Main(string[] args)
        {
            // OR truth table used as the training set for a single neuron.
            double[][] inputs = { new double[] { 0, 0 }, new double[] { 0, 1 }, new double[] { 1, 0 }, new double[] { 1, 1 } };
            double[] targets = { 0, 1, 1, 1 };

            // Single 2-input sigmoid neuron trained with the delta rule, learning rate 0.75.
            ActivationFunction function = new SigmoidFunction();
            Neuron neuron = new Neuron(2, function);
            LearningStrategy learning = new DeltaRuleLearning(neuron, 0.75);

            double error = 1;
            int epoch = 0;
            // Train until the epoch error drops below the threshold.
            while (error > 0.01)
            {
                error = learning.RunEpoch(inputs, targets);
                epoch++;
                Console.WriteLine("Iteration {0} error: {1}", epoch, error);
            }

            Console.WriteLine("Training complete after {0} epochs using the Delta Rule learning regime.", epoch);
            Console.WriteLine("Testing");
            // Evaluate the trained neuron on every input pattern.
            foreach (double[] pattern in inputs)
            {
                neuron.Update(pattern);
                Console.WriteLine("{0}", neuron.Output);
            }
        }
コード例 #4
0
        public void SigmoidOutput_ReturnsValueOfSigmoid()
        {
            ActivationFunction sigmoid = new SigmoidFunction();
            // Reference values of 1 / (1 + e^-x) for each raw input.
            double[] inputs = { -3, 0, 2 };
            double[] expected = { 0.0474258731776, 0.5, 0.880797077978 };

            for (int i = 0; i < inputs.Length; i++)
            {
                Assert.AreEqual(expected[i], sigmoid.Output(inputs[i]), 0.0001, "Invalid sigmoid output");
            }
        }
コード例 #5
0
        public void SigmoidDerivativeOutput_ReturnsSigmoidDerivativeGivenSigmoidOutput()
        {
            ActivationFunction sigmoid = new SigmoidFunction();
            // Inputs here are already sigmoid outputs y; expected values are y * (1 - y).
            double[] outputs = { 0.0474258731776, 0.5, 0.880797077978 };
            double[] expected = { 0.04517665973, 0.25, 0.1049935854 };

            for (int i = 0; i < outputs.Length; i++)
            {
                Assert.AreEqual(expected[i], sigmoid.OutputDerivative(outputs[i]), 0.0001, "Invalid sigmoid derivative output");
            }
        }
コード例 #6
0
ファイル: NeuronTest.cs プロジェクト: freyyy/ThirdYearProject
        public void NeuronActivation_ReturnsNeuronActivation()
        {
            // A 3-input sigmoid neuron with hand-picked weights and bias.
            ActivationFunction activation = new SigmoidFunction();
            Neuron neuron = new Neuron(3, activation);
            double[] stimulus = new double[] { 0.3, 0.2, 0.1 };
            // 0.3*0.1 + 0.2*0.2 + 0.1*0.3 - 0.05 = 0.05 (weighted sum before the sigmoid).
            double expected = 0.05;

            double[] weights = { 0.1, 0.2, 0.3 };
            for (int i = 0; i < weights.Length; i++)
            {
                neuron[i] = weights[i];
            }
            neuron.Bias = -0.05;

            double actual = neuron.Activation(stimulus);

            Assert.AreEqual(expected, actual, 0.0001, "Invalid neuron activation");
        }
コード例 #7
0
        public void SigmoidDerivative_ReturnsSigmoidDerivative()
        {
            ActivationFunction sigmoid = new SigmoidFunction();
            // Derivative of the sigmoid evaluated directly at each raw input x.
            double[] inputs = { -3, 0, 2 };
            double[] expected = { 0.04517665973, 0.25, 0.1049935854 };

            for (int i = 0; i < inputs.Length; i++)
            {
                Assert.AreEqual(expected[i], sigmoid.Derivative(inputs[i]), 0.0001, "Invalid sigmoid derivative output");
            }
        }
コード例 #8
0
ファイル: NeuronTest.cs プロジェクト: freyyy/ThirdYearProject
        public void NeuronUpdate_UpdatesOutputAndReturnsValue()
        {
            // A 3-input sigmoid neuron with hand-picked weights and bias.
            ActivationFunction activation = new SigmoidFunction();
            Neuron neuron = new Neuron(3, activation);
            double[] stimulus = new double[] { 0.3, 0.2, 0.1 };
            // sigmoid(0.3*0.1 + 0.2*0.2 + 0.1*0.3 - 0.05) = sigmoid(0.05)
            double expected = 0.512497396484;

            double[] weights = { 0.1, 0.2, 0.3 };
            for (int i = 0; i < weights.Length; i++)
            {
                neuron[i] = weights[i];
            }
            neuron.Bias = -0.05;

            // Update must both return the new output and expose it via Output.
            double returned = neuron.Update(stimulus);
            double exposed = neuron.Output;

            Assert.AreEqual(expected, returned, 0.0001, "Invalid neuron output");
            Assert.AreEqual(expected, exposed, 0.0001, "Invalid neuron output");
        }
コード例 #9
0
        // Verifies that construction sizes the activation cache correctly for both
        // the default batch size (1) and an explicitly supplied one (32).
        // Fixes: misspelled assertion message "Inalid batch size" -> "Invalid batch size";
        // the duplicated verification logic is extracted into a private helper.
        public void SparseAutoencoderConstructor_InitialisesCache()
        {
            ActivationFunction sigmoidFunction = new SigmoidFunction();
            Layer[] layers = new Layer[] { new Layer(4, 5, sigmoidFunction), new Layer(3, 4, sigmoidFunction) };
            Network network = new Network(layers);

            // Default constructor: cache sized for a single example.
            SparseAutoencoderLearning sparseAutoencoder = new SparseAutoencoderLearning(network);
            AssertCacheMatchesNetwork(network, sparseAutoencoder, 1);

            // Explicit batch size: cache sized for 32 examples.
            sparseAutoencoder = new SparseAutoencoderLearning(network, 32);
            AssertCacheMatchesNetwork(network, sparseAutoencoder, 32);
        }

        // Asserts the cache holds one activation slot per batch item, per layer, per neuron.
        private static void AssertCacheMatchesNetwork(Network network, SparseAutoencoderLearning sparseAutoencoder, int expectedBatchSize)
        {
            int batchSize = sparseAutoencoder.BatchSize;
            double[][][] cachedActivations = sparseAutoencoder.CachedActivations;

            Assert.AreEqual(expectedBatchSize, batchSize, 0, "Invalid batch size");
            Assert.AreEqual(batchSize, cachedActivations.Length, 0, "Invalid activations cache size");

            for (int i = 0; i < batchSize; i++)
            {
                Assert.AreEqual(network.LayerCount, cachedActivations[i].Length);

                for (int j = 0; j < network.LayerCount; j++)
                {
                    Assert.AreEqual(network[j].NeuronCount, cachedActivations[i][j].Length);
                }
            }
        }
コード例 #10
0
ファイル: LayerTest.cs プロジェクト: freyyy/ThirdYearProject
        public void LayerUpdate_UpdatesAllNeurons()
        {
            // A layer of two 2-input sigmoid neurons with fixed weights and biases.
            ActivationFunction activation = new SigmoidFunction();
            Layer layer = new Layer(2, 2, activation);
            double[] stimulus = new double[] { 0.5, 0.6 };
            // Hand-computed sigmoid activations for the weights and biases below.
            double[] expected = new double[] { 0.539914884556, 0.591458978433 };

            layer[0][0] = 0.1;
            layer[0][1] = 0.2;
            layer[1][0] = 0.3;
            layer[1][1] = 0.4;
            layer[0].Bias = -0.01;
            layer[1].Bias = -0.02;

            // Update must both return the activations and expose them via Output.
            double[] returned = layer.Update(stimulus);
            double[] exposed = layer.Output;

            for (int i = 0; i < expected.Length; i++)
            {
                Assert.AreEqual(expected[i], returned[i], 0.0001, "Invalid layer output");
                Assert.AreEqual(expected[i], exposed[i], 0.0001, "Invalid layer output");
            }
        }
コード例 #11
0
        public void NetworkUpdate_UpdatesAllLayers()
        {
            // A 2-2-1 sigmoid network with fixed weights and biases.
            ActivationFunction activation = new SigmoidFunction();
            Layer hidden = new Layer(2, 2, activation);
            Layer output = new Layer(1, 2, activation);
            Network network = new Network(new Layer[] { hidden, output });
            double[] stimulus = new double[] { 0.5, 0.6 };
            // Hand-computed forward-pass result for the weights below.
            double[] expected = new double[] { 0.626138674824 };

            hidden[0][0] = 0.1;
            hidden[0][1] = 0.2;
            hidden[1][0] = 0.3;
            hidden[1][1] = 0.4;
            output[0][0] = 0.5;
            output[0][1] = 0.5;
            hidden[0].Bias = -0.01;
            hidden[1].Bias = -0.02;
            output[0].Bias = -0.05;

            // Update must both return the result and expose it via Output.
            double[] returned = network.Update(stimulus);
            double[] exposed = network.Output;

            Assert.AreEqual(expected[0], returned[0], 0.0001, "Invalid network output");
            Assert.AreEqual(expected[0], exposed[0], 0.0001, "Invalid network output");
        }
コード例 #12
0
        // Gradient check for batch learning: compares the analytic partial
        // derivatives from ComputeBatchPartialDerivatives against a numerical
        // central-difference estimate (J(w+h) - J(w-h)) / (2h) with h = 0.0001,
        // where J is the mean half-squared output error over the three-example batch.
        public void SparseAutoencoder_BatchGradientChecking()
        {
            ActivationFunction sigmoidFunction = new SigmoidFunction();
            Layer[] layers = new Layer[] { new Layer(2, 2, sigmoidFunction), new Layer(1, 2, sigmoidFunction) };
            Network network = new Network(layers);
            // Batch size 3 matches the three training examples below.
            SparseAutoencoderLearning sparseAutoencoder = new SparseAutoencoderLearning(network, 3);
            double[][] input = new double[][] { new double[] { 0.5, 0.6 }, new double[] { 0.1, 0.2 }, new double[] { 0.3, 0.3 } };
            double[][] target = new double[][] { new double[] { 1 }, new double[] { 0 }, new double[] { 0.5 } };
            // gradientWeights[i][j][k]: numerical dJ/dw for weight k of neuron j in layer i.
            double[][][] gradientWeights = new double[][][]
            {
                new double[][]
                {
                    new double[] { 0, 0 },
                    new double[] { 0, 0 }
                },
                new double[][]
                {
                    new double[] { 0, 0 }
                }
            };
            // gradientBias[i][j]: numerical dJ/db for the bias of neuron j in layer i.
            double[][] gradientBias = new double[][]
            {
                new double[] { 0, 0 },
                new double[] { 0 }
            };
            double tmp;

            // Fixed, non-random parameters so the check is deterministic.
            layers[0][0][0] = 0.1;
            layers[0][0][1] = 0.2;
            layers[0][1][0] = 0.3;
            layers[0][1][1] = 0.4;
            layers[1][0][0] = 0.5;
            layers[1][0][1] = 0.5;
            layers[0][0].Bias = 0.01;
            layers[0][1].Bias = 0.02;
            layers[1][0].Bias = 0.05;

            for (int i = 0; i < network.LayerCount; i++)
            {
                for (int j = 0; j < network[i].NeuronCount; j++)
                {
                    for (int k = 0; k < network[i][j].InputCount; k++)
                    {
                        // Perturb the weight upwards and accumulate J(w + h) over the batch.
                        tmp = network[i][j][k];
                        network[i][j][k] = tmp + 0.0001;

                        for (int l = 0; l < input.Length; l++)
                        {
                            gradientWeights[i][j][k] += ((double)1 / input.Length) * 0.5 * Math.Pow(network.Update(input[l])[0] - target[l][0], 2);
                        }

                        // Perturb downwards and subtract J(w - h).
                        network[i][j][k] = tmp - 0.0001;

                        for (int l = 0; l < input.Length; l++)
                        {
                            gradientWeights[i][j][k] -= ((double)1 / input.Length) * 0.5 * Math.Pow(network.Update(input[l])[0] - target[l][0], 2);
                        }

                        // Restore the weight and divide by 2h to finish the central difference.
                        network[i][j][k] = tmp;
                        gradientWeights[i][j][k] = gradientWeights[i][j][k] / 0.0002;
                    }

                    // Same central-difference estimate for the neuron's bias.
                    tmp = network[i][j].Bias;
                    network[i][j].Bias = tmp + 0.0001;

                    for(int l = 0; l < input.Length; l++)
                    {
                        gradientBias[i][j] += ((double)1 / input.Length) * 0.5 * Math.Pow(network.Update(input[l])[0] - target[l][0], 2);
                    }

                    network[i][j].Bias = tmp - 0.0001;

                    for (int l = 0; l < input.Length; l++)
                    {
                        gradientBias[i][j] -= ((double)1 / input.Length) * 0.5 * Math.Pow(network.Update(input[l])[0] - target[l][0], 2);
                    }

                    network[i][j].Bias = tmp;
                    gradientBias[i][j] = gradientBias[i][j] / 0.0002;
                }
            }

            // Analytic derivatives are scaled by 1/batch below because the numerical J
            // averages over the batch — presumably the learner returns summed (not
            // averaged) derivatives; TODO confirm against ComputeBatchPartialDerivatives.
            Tuple<double[][][], double[][]> result = sparseAutoencoder.ComputeBatchPartialDerivatives(input, target);
            double[][][] partialDerivativesWeights = result.Item1;
            double[][] partialDerivativesBias = result.Item2;

            Assert.AreEqual(gradientWeights[0][0][0], ((double)1 / input.Length) * partialDerivativesWeights[0][0][0], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradientWeights[0][0][1], ((double)1 / input.Length) * partialDerivativesWeights[0][0][1], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradientWeights[0][1][0], ((double)1 / input.Length) * partialDerivativesWeights[0][1][0], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradientWeights[0][1][1], ((double)1 / input.Length) * partialDerivativesWeights[0][1][1], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradientWeights[1][0][0], ((double)1 / input.Length) * partialDerivativesWeights[1][0][0], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradientWeights[1][0][1], ((double)1 / input.Length) * partialDerivativesWeights[1][0][1], 0.0001, "Gradient checking failed");

            Assert.AreEqual(gradientBias[0][0], ((double)1 / input.Length) * partialDerivativesBias[0][0], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradientBias[0][1], ((double)1 / input.Length) * partialDerivativesBias[0][1], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradientBias[1][0], ((double)1 / input.Length) * partialDerivativesBias[1][0], 0.0001, "Gradient checking failed");
        }
コード例 #13
0
        public void SparseAutoencoderOutputLayerDeltas_ReturnsDeltas()
        {
            // A 2-2-1 sigmoid network with fixed weights and biases.
            ActivationFunction activation = new SigmoidFunction();
            Layer hidden = new Layer(2, 2, activation);
            Layer output = new Layer(1, 2, activation);
            Network network = new Network(new Layer[] { hidden, output });
            SparseAutoencoderLearning learner = new SparseAutoencoderLearning(network);
            double[] stimulus = new double[] { 0.5, 0.6 };
            double[] target = new double[] { 1 };
            // Hand-computed output-layer delta for the weights below.
            double[] expected = new double[] { -0.0875168367272141 };

            hidden[0][0] = 0.1;
            hidden[0][1] = 0.2;
            hidden[1][0] = 0.3;
            hidden[1][1] = 0.4;
            output[0][0] = 0.5;
            output[0][1] = 0.5;
            hidden[0].Bias = -0.01;
            hidden[1].Bias = -0.02;
            output[0].Bias = -0.05;

            // A forward pass must run first so the deltas see current activations.
            network.Update(stimulus);
            double[] actual = learner.OutputLayerDeltas(target);

            Assert.AreEqual(expected[0], actual[0], 0.0001, "Invalid output layer delta");
        }
コード例 #14
0
        // Gradient check for a single example: compares the analytic partial
        // derivatives from ComputePartialDerivatives against a numerical
        // central-difference estimate of the half-squared output error,
        // 0.5 * (J(w+h) - J(w-h)) / (2h) with h = 0.0001.
        public void SparseAutoencoder_GradientChecking()
        {
            ActivationFunction sigmoidFunction = new SigmoidFunction();
            Layer[] layers = new Layer[] { new Layer(2, 2, sigmoidFunction), new Layer(1, 2, sigmoidFunction) };
            Network network = new Network(layers);
            SparseAutoencoderLearning sparseAutoencoder = new SparseAutoencoderLearning(network);
            double[][] input = new double[][] { new double[] { 0.5, 0.6 } };
            double[][] target = new double[][] { new double[] { 1 } };
            // gradient[i][j][k]: numerical dJ/dw for weight k of neuron j in layer i.
            double[][][] gradient = new double[][][]
            {
                new double[][]
                {
                    new double[] { 0, 0 },
                    new double[] { 0, 0 }
                },
                new double[][]
                {
                    new double[] { 0, 0 }
                }
            };

            // Fixed, non-random parameters so the check is deterministic.
            layers[0][0][0] = 0.1;
            layers[0][0][1] = 0.2;
            layers[0][1][0] = 0.3;
            layers[0][1][1] = 0.4;
            layers[1][0][0] = 0.5;
            layers[1][0][1] = 0.5;
            layers[0][0].Bias = 0.01;
            layers[0][1].Bias = 0.02;
            layers[1][0].Bias = 0.05;

            for (int i = 0; i < network.LayerCount; i++)
            {
                for (int j = 0; j < network[i].NeuronCount; j++)
                {
                    for (int k = 0; k < network[i][j].InputCount; k++)
                    {
                        // Perturb the weight in both directions, restore it, then apply
                        // the 0.5 error factor and divide by 2h for the central difference.
                        double tmp = network[i][j][k];
                        network[i][j][k] = tmp + 0.0001;
                        gradient[i][j][k] += Math.Pow(network.Update(input[0])[0] - target[0][0], 2);
                        network[i][j][k] = tmp - 0.0001;
                        gradient[i][j][k] -= Math.Pow(network.Update(input[0])[0] - target[0][0], 2);
                        network[i][j][k] = tmp;
                        gradient[i][j][k]  = 0.5 * gradient[i][j][k] / 0.0002;
                    }
                }
            }

            // Analytic derivatives for example 0, from freshly cached activations.
            sparseAutoencoder.UpdateCachedActivations(input);
            double[][] deltas = sparseAutoencoder.ComputeDeltas(0, target[0]);
            double[][][] partialDerivatives = sparseAutoencoder.ComputePartialDerivatives(0, deltas, input[0]);

            Assert.AreEqual(gradient[0][0][0], partialDerivatives[0][0][0], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradient[0][0][1], partialDerivatives[0][0][1], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradient[0][1][0], partialDerivatives[0][1][0], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradient[0][1][1], partialDerivatives[0][1][1], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradient[1][0][0], partialDerivatives[1][0][0], 0.0001, "Gradient checking failed");
            Assert.AreEqual(gradient[1][0][1], partialDerivatives[1][0][1], 0.0001, "Gradient checking failed");
        }
コード例 #15
0
        public void SparseAutoencoderUpdateCachedActivations_UpdatesCache()
        {
            // A 2-2-1 sigmoid network with fixed weights and biases.
            ActivationFunction activation = new SigmoidFunction();
            Layer hidden = new Layer(2, 2, activation);
            Layer output = new Layer(1, 2, activation);
            Network network = new Network(new Layer[] { hidden, output });
            SparseAutoencoderLearning learner = new SparseAutoencoderLearning(network);
            double[][] input = new double[][] { new double[] { 0.5, 0.6 } };
            // expected[i][j][k]: activation of neuron k in layer j for batch item i.
            double[][][] expected = new double[][][]
            {
                new double[][]
                {
                    new double[] { 0.539914884556, 0.591458978433 },
                    new double[] { 0.626138674824 }
                },
                new double[][]
                {
                    new double[] { 0.547357618143, 0.608259030747 },
                    new double[] { 0.628971793540 }
                }
            };

            hidden[0][0] = 0.1;
            hidden[0][1] = 0.2;
            hidden[1][0] = 0.3;
            hidden[1][1] = 0.4;
            output[0][0] = 0.5;
            output[0][1] = 0.5;
            hidden[0].Bias = -0.01;
            hidden[1].Bias = -0.02;
            output[0].Bias = -0.05;

            // Batch of one: only the first cache slot is populated.
            double[][][] cache = learner.UpdateCachedActivations(input);

            Assert.AreEqual(expected[0][0][0], cache[0][0][0], 0.0001, "Invalid cached activation value");
            Assert.AreEqual(expected[0][0][1], cache[0][0][1], 0.0001, "Invalid cached activation value");
            Assert.AreEqual(expected[0][1][0], cache[0][1][0], 0.0001, "Invalid cached activation value");

            // Batch of two: both cache slots must hold their example's activations.
            input = new double[][] { new double[] { 0.5, 0.6 }, new double[] { 0.6, 0.7 } };
            learner = new SparseAutoencoderLearning(network, 2);

            cache = learner.UpdateCachedActivations(input);

            Assert.AreEqual(expected[0][0][0], cache[0][0][0], 0.0001, "Invalid cached activation value");
            Assert.AreEqual(expected[0][0][1], cache[0][0][1], 0.0001, "Invalid cached activation value");
            Assert.AreEqual(expected[0][1][0], cache[0][1][0], 0.0001, "Invalid cached activation value");
            Assert.AreEqual(expected[1][0][0], cache[1][0][0], 0.0001, "Invalid cached activation value");
            Assert.AreEqual(expected[1][0][1], cache[1][0][1], 0.0001, "Invalid cached activation value");
            Assert.AreEqual(expected[1][1][0], cache[1][1][0], 0.0001, "Invalid cached activation value");
        }
コード例 #16
0
        public void SparseAutoencoderUpdateCachedActivations_ThrowsArgument()
        {
            // Learner configured for batches of two but fed a batch of one;
            // the call below is expected to throw an argument exception.
            ActivationFunction activation = new SigmoidFunction();
            Layer hidden = new Layer(2, 2, activation);
            Layer output = new Layer(1, 2, activation);
            Network network = new Network(new Layer[] { hidden, output });
            SparseAutoencoderLearning learner = new SparseAutoencoderLearning(network, 2);
            double[][] input = new double[][] { new double[] { 0.5, 0.6 } };

            hidden[0][0] = 0.1;
            hidden[0][1] = 0.2;
            hidden[1][0] = 0.3;
            hidden[1][1] = 0.4;
            output[0][0] = 0.5;
            output[0][1] = 0.5;
            hidden[0].Bias = 0.01;
            hidden[1].Bias = 0.02;
            output[0].Bias = 0.05;

            learner.UpdateCachedActivations(input);
        }
コード例 #17
0
        public void SparseAutoencoderPartialDerivatives_ReturnsDerivatives()
        {
            // A 2-2-1 sigmoid network with fixed weights and biases.
            ActivationFunction activation = new SigmoidFunction();
            Layer hidden = new Layer(2, 2, activation);
            Layer output = new Layer(1, 2, activation);
            Network network = new Network(new Layer[] { hidden, output });
            SparseAutoencoderLearning learner = new SparseAutoencoderLearning(network);
            double[][] input = new double[][] { new double[] { 0.5, 0.6 } };
            double[][] target = new double[][] { new double[] { 1 } };
            // expected[i][j][k]: hand-computed derivative for weight k of neuron j in layer i.
            double[][][] expected = new double[][][]
            {
                new double[][]
                {
                    new double[] { -0.0054349443829414, -0.0065219332595296 },
                    new double[] { -0.0052867882956194, -0.0063441459547432 }
                },
                new double[][]
                {
                    new double[] { -0.0472516427982801, -0.0517626188463657 }
                }
            };

            hidden[0][0] = 0.1;
            hidden[0][1] = 0.2;
            hidden[1][0] = 0.3;
            hidden[1][1] = 0.4;
            output[0][0] = 0.5;
            output[0][1] = 0.5;
            hidden[0].Bias = -0.01;
            hidden[1].Bias = -0.02;
            output[0].Bias = -0.05;

            // Derivatives follow from the cached activations and the deltas of example 0.
            learner.UpdateCachedActivations(input);
            double[][] deltas = learner.ComputeDeltas(0, target[0]);
            double[][][] actual = learner.ComputePartialDerivatives(0, deltas, input[0]);

            Assert.AreEqual(expected[0][0][0], actual[0][0][0], 0.0001, "Invalid partial derivative");
            Assert.AreEqual(expected[0][0][1], actual[0][0][1], 0.0001, "Invalid partial derivative");
            Assert.AreEqual(expected[0][1][0], actual[0][1][0], 0.0001, "Invalid partial derivative");
            Assert.AreEqual(expected[0][1][1], actual[0][1][1], 0.0001, "Invalid partial derivative");
            Assert.AreEqual(expected[1][0][0], actual[1][0][0], 0.0001, "Invalid partial derivative");
            Assert.AreEqual(expected[1][0][1], actual[1][0][1], 0.0001, "Invalid partial derivative");
        }