// A chain constructed around an existing component should come back non-null.
public void CanMakeWithContent()
{
    Layer seedLayer = new Layer(new NeuralNet.WeightsMatrix(new double[,] { { 1 } }));

    NetComponentChain chain = new NetComponentChain(seedLayer);

    Assert.IsNotNull(chain);
}
// Running a one-neuron linear chain on the zero vector should return the input unchanged.
public void CanRunWithZeroInput()
{
    Layer linear = Layer.CreateLinearLayer(new NeuralNet.WeightsMatrix(new double[,] { { 1 } }));
    NetComponentChain chain = new NetComponentChain(linear);
    NeuralNet.NetworkVector zeroInput = new NeuralNet.NetworkVector(new double[] { 0 });

    NetworkVector output = chain.Run(zeroInput);

    Assert.AreEqual(zeroInput, output);
}
// A component added via AddTrainable must show up in both the full enumeration
// and the trainable-only enumeration.
public void CanAddTrainable()
{
    WeightedCombiner combiner = new WeightedCombiner(new NeuralNet.WeightsMatrix(1, 1));
    NetComponentChain chain = new NetComponentChain();

    chain.AddTrainable(combiner);

    List<NetComponent> everyComponent = new List<NetComponent>(chain.ForwardEnumeration);
    List<NetComponent> trainableOnly = new List<NetComponent>(chain.ForwardTrainableComponentsEnumeration);
    Assert.AreEqual(1, chain.NumberOfComponents);
    Assert.IsTrue(everyComponent.Contains(combiner));
    Assert.IsTrue(trainableOnly.Contains(combiner));
}
// A component added via AddFixed must appear in the full enumeration
// but be excluded from the trainable-only enumeration.
public void CanAddFixed()
{
    NeuralFunction activation = new NeuralFunction(1);
    NetComponentChain chain = new NetComponentChain();

    chain.AddFixed(activation);

    List<NetComponent> everyComponent = new List<NetComponent>(chain.ForwardEnumeration);
    List<NetComponent> trainableOnly = new List<NetComponent>(chain.ForwardTrainableComponentsEnumeration);
    Assert.AreEqual(1, chain.NumberOfComponents);
    Assert.IsTrue(everyComponent.Contains(activation));
    Assert.IsFalse(trainableOnly.Contains(activation));
}
// Run must reject an input vector whose dimension does not match the chain's input size.
public void CannotRunWithInputOfWrongSize()
{
    Layer linear = Layer.CreateLinearLayer(new NeuralNet.WeightsMatrix(new double[,] { { 1 } }));
    NetComponentChain chain = new NetComponentChain(linear);
    NeuralNet.NetworkVector oversizedInput = new NeuralNet.NetworkVector(new double[] { 0, 0 });

    try
    {
        chain.Run(oversizedInput);
        Assert.Fail("Run should throw an ArgumentException for input of the wrong size, but did not.");
    }
    catch (ArgumentException)
    {
        // Expected: mismatched input size is rejected.
    }
}
// AddTrainable must reject a layer whose input size does not match the chain's current output size.
public void CannotAddLayerOfWrongSize()
{
    Layer layer1 = Layer.CreateLinearLayer(new NeuralNet.WeightsMatrix(new double[,] { { 1 } }));
    Layer layer2 = Layer.CreateLogisticLayer(new NeuralNet.WeightsMatrix(new double[,] { { 1, 2 } }));
    NetComponentChain layerlist = new NetComponentChain(layer1);

    try
    {
        layerlist.AddTrainable(layer2);
        // Fixed garbled failure message ("should throw and ArgumentException if when trying...").
        Assert.Fail("AddTrainable should throw an ArgumentException when trying to add a layer of the wrong size, but did not.");
    }
    catch (ArgumentException)
    {
        // Expected: size mismatch is rejected.
    }
}
// A two-layer chain (2x2 followed by 1x2 linear) fed the zero vector should produce zero.
public void CanRunTwoLayersWithZeroInput()
{
    Layer firstLayer = new Layer(new NeuralNet.WeightsMatrix(new double[,] { { 1, 1 }, { 1, 1 } }));
    Layer secondLayer = Layer.CreateLinearLayer(new NeuralNet.WeightsMatrix(new double[,] { { 1, 1 } }));
    NetComponentChain chain = new NetComponentChain(firstLayer);
    chain.AddFixed(secondLayer);

    NetworkVector output = chain.Run(new NeuralNet.NetworkVector(new double[] { 0, 0 }));

    NeuralNet.NetworkVector expected = new NeuralNet.NetworkVector(new double[] { 0 });
    Assert.AreEqual(expected, output);
}
// With all-ones weights, input (1,0,0) flows to (1,1) after the first layer
// and sums to 2 at the single output neuron.
public void CanRunTwoLayerNetWithOneInput()
{
    Layer firstLayer = new Layer(new NeuralNet.WeightsMatrix(new double[,] { { 1, 1, 1 }, { 1, 1, 1 } }));
    Layer secondLayer = Layer.CreateLinearLayer(new NeuralNet.WeightsMatrix(new double[,] { { 1, 1 } }));
    NetComponentChain net = new NetComponentChain();
    net.AddFixed(firstLayer);
    net.AddTrainable(secondLayer);

    NetworkVector actual = net.Run(new NeuralNet.NetworkVector(new double[] { 1, 0, 0 }));

    NeuralNet.NetworkVector expected = new NeuralNet.NetworkVector(new double[] { 2 });
    Assert.AreEqual(expected, actual);
}
// After a forward pass, a unit output gradient should propagate back through the
// all-ones weights to an input gradient of (2, 2, 2).
public void CanBackPropagateTwoLayerNetGradient1()
{
    Layer firstLayer = new Layer(new NeuralNet.WeightsMatrix(new double[,] { { 1, 1, 1 }, { 1, 1, 1 } }));
    Layer secondLayer = Layer.CreateLinearLayer(new NeuralNet.WeightsMatrix(new double[,] { { 1, 1 } }));
    NetComponentChain net = new NetComponentChain();
    net.AddFixed(firstLayer);
    net.AddTrainable(secondLayer);

    net.Run(new NeuralNet.NetworkVector(new double[] { 1, 0, 0 }));

    NeuralNet.NetworkVector unitGradient = new NeuralNet.NetworkVector(new double[] { 1 });
    NeuralNet.NetworkVector expected = new NeuralNet.NetworkVector(new double[] { 2, 2, 2 });
    Assert.AreEqual(expected, net.InputGradient(unitGradient));
}
// One gradient-descent pass over a four-example batch on a 3-2-1 linear chain
// (all weights initialized to 1) must land on these hand-computed weights and biases.
public void TrainBatch_SmallChain_CorrectOnePass()
{
    const int inputCount = 3;
    const int hiddenCount = 2;
    const int outputCount = 1;

    // Both layers start with every weight equal to 1.
    double[,] hiddenWeights = new double[hiddenCount, inputCount];
    for (int row = 0; row < hiddenCount; row++)
    {
        for (int col = 0; col < inputCount; col++)
        {
            hiddenWeights[row, col] = 1;
        }
    }

    double[,] topWeights = new double[outputCount, hiddenCount];
    for (int row = 0; row < outputCount; row++)
    {
        for (int col = 0; col < hiddenCount; col++)
        {
            topWeights[row, col] = 1;
        }
    }

    Layer hiddenLayer = Layer.CreateLinearLayer(new WeightsMatrix(hiddenWeights), new NetworkVector(hiddenCount));
    Layer topLayer = Layer.CreateLinearLayer(new WeightsMatrix(topWeights), new NetworkVector(outputCount));
    NetComponentChain net = new NetComponentChain();
    net.AddTrainable(hiddenLayer);
    net.AddTrainable(topLayer);

    TrainingCollection batch = new TrainingCollection
    {
        new VectorPair(new NetworkVector(new double[] { 0, 0, 0 }), new NetworkVector(new double[] { 1 })),
        new VectorPair(new NetworkVector(new double[] { 1, 0, 0 }), new NetworkVector(new double[] { 0 })),
        new VectorPair(new NetworkVector(new double[] { 0, 1, 0 }), new NetworkVector(new double[] { 0 })),
        new VectorPair(new NetworkVector(new double[] { 1, 1, 0 }), new NetworkVector(new double[] { 1 }))
    };

    Trainer trainer = new Trainer(net, new SquaredError(), new GradientDescent());
    trainer.Train(batch);

    // Expected values were worked out by hand for a single pass of plain gradient descent.
    WeightsMatrix expectedHiddenWeights = new WeightsMatrix(new double[,] { { -4, -4, 1 }, { -4, -4, 1 } });
    NetworkVector expectedHiddenBiases = new NetworkVector(new double[] { -6, -6 });
    WeightsMatrix expectedTopWeights = new WeightsMatrix(new double[,] { { -9, -9 } });
    NetworkVector expectedTopBiases = new NetworkVector(new double[] { -6 });
    Assert.AreEqual(expectedHiddenWeights, hiddenLayer.Weights);
    Assert.AreEqual(expectedHiddenBiases, hiddenLayer.Biases);
    Assert.AreEqual(expectedTopWeights, topLayer.Weights);
    Assert.AreEqual(expectedTopBiases, topLayer.Biases);
}
// The default constructor should yield a usable, non-null empty chain.
public void CanMake()
{
    NetComponentChain emptyChain = new NetComponentChain();

    Assert.IsNotNull(emptyChain);
}