private void MnistDemo()
{
    // Load data
    this.training = MnistReader.Load(@"..\..\Mnist\train-labels.idx1-ubyte", @"..\..\Mnist\train-images.idx3-ubyte");
    this.testing = MnistReader.Load(@"..\..\Mnist\t10k-labels.idx1-ubyte", @"..\..\Mnist\t10k-images.idx3-ubyte");

    if (this.training.Count == 0 || this.testing.Count == 0)
    {
        Console.WriteLine("Missing Mnist training/testing files.");
        Console.ReadKey();
        return;
    }

    // Create network
    this.net = new Net();
    this.net.AddLayer(new InputLayer(24, 24, 1));
    this.net.AddLayer(new ConvLayer(5, 5, 8) { Stride = 1, Pad = 2, Activation = Activation.Relu });
    this.net.AddLayer(new PoolLayer(2, 2) { Stride = 2 });
    this.net.AddLayer(new ConvLayer(5, 5, 16) { Stride = 1, Pad = 2, Activation = Activation.Relu });
    this.net.AddLayer(new PoolLayer(3, 3) { Stride = 3 });
    this.net.AddLayer(new SoftmaxLayer(10));

    this.trainer = new Trainer(this.net)
    {
        BatchSize = 20,
        L2Decay = 0.001,
        TrainingMethod = Trainer.Method.Adadelta
    };

    do
    {
        var sample = this.SampleTrainingInstance();
        this.Step(sample);
    } while (!Console.KeyAvailable);
}
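The loop above calls SampleTrainingInstance and Step, which are not shown here. A minimal sketch of what they might look like follows; the Item type and its Volume/Label members are assumptions, not the library's actual API, and the 24x24 input layer suggests the real demo crops the 28x28 MNIST images (a random crop would also give cheap augmentation):

// Hypothetical helpers for the training loop above. The Item type and its
// Volume/Label members are assumptions; adapt them to whatever
// MnistReader.Load actually returns.
private readonly Random random = new Random();

private Item SampleTrainingInstance()
{
    // Draw a random labelled digit from the training set.
    return this.training[this.random.Next(this.training.Count)];
}

private void Step(Item sample)
{
    // Train on one sample and report the running loss.
    this.trainer.Train(sample.Volume, sample.Label);
    Console.WriteLine("Loss: " + this.trainer.Loss);
}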
private static void Main(string[] args)
{
    // specifies a 2-layer neural network with one hidden layer of 20 neurons
    var net = new Net();

    // input layer declares the size of the input. here: 2-D data
    // ConvNetJS works on 3-dimensional volumes (width, height, depth), but if you're not dealing
    // with images then the first two dimensions (width, height) will always be kept at size 1
    net.AddLayer(new InputLayer(1, 1, 2));

    // declare 20 neurons, followed by ReLU (rectified linear unit non-linearity)
    net.AddLayer(new FullyConnLayer(20, Activation.Relu));

    // declare the linear classifier on top of the previous hidden layer
    net.AddLayer(new SoftmaxLayer(10));

    // forward a random data point through the network
    var x = new Volume(new[] { 0.3, -0.5 });
    var prob = net.Forward(x);

    // prob is a Volume. Volumes have a property Weights that stores the raw data,
    // and WeightGradients that stores gradients
    Console.WriteLine("probability that x is class 0: " + prob.Weights[0]); // prints e.g. 0.50101

    var trainer = new Trainer(net) { LearningRate = 0.01, L2Decay = 0.001 };
    trainer.Train(x, 0); // train the network, specifying that x is class zero

    var prob2 = net.Forward(x);
    Console.WriteLine("probability that x is class 0: " + prob2.Weights[0]);
    // now prints 0.50374, slightly higher than the previous 0.50101: the network's
    // weights have been adjusted by the Trainer to give a higher probability to
    // the class we trained the network with (zero)
}
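A single Train call only nudges the weights; repeating it drives the class-0 probability toward 1. A quick continuation that could go at the end of Main, using only the calls already shown above:

// keep training on the same example; the class-0 probability keeps rising
for (var i = 0; i < 100; i++)
{
    trainer.Train(x, 0);
}

var prob3 = net.Forward(x);
Console.WriteLine("probability that x is class 0: " + prob3.Weights[0]); // much closer to 1.0 than before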
private static void Regression1DDemo()
{
    var net = new Net();
    net.AddLayer(new InputLayer(1, 1, 1));
    net.AddLayer(new FullyConnLayer(20, Activation.Relu));
    net.AddLayer(new FullyConnLayer(20, Activation.Sigmoid));
    net.AddLayer(new RegressionLayer(1));

    var trainer = new Trainer(net)
    {
        LearningRate = 0.01,
        Momentum = 0.0,
        BatchSize = 1,
        L2Decay = 0.001
    };

    // Function we want to learn
    double[] x = { 0.0, 0.5, 1.0 };
    double[] y = { 0.0, 0.1, 0.2 };
    var n = x.Length;

    // Training
    do
    {
        RegressionUpdate(n, x, trainer, y);
    } while (!Console.KeyAvailable);

    // Testing
    var netx = new Volume(1, 1, 1);
    for (var ix = 0; ix < n; ix++)
    {
        netx.Weights = new[] { x[ix] };
        var result = net.Forward(netx);
        Console.WriteLine("f(" + x[ix] + ") = " + result.Weights[0]);
    }
}
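Beyond the three training points, the learned function can be probed anywhere on the interval. A small sketch that could be appended to the testing section, reusing only the Volume and Forward calls from the demo:

// sample the learned function on a grid over [0, 1]
var probe = new Volume(1, 1, 1);
for (var i = 0; i <= 10; i++)
{
    var xi = i / 10.0;
    probe.Weights = new[] { xi };
    var output = net.Forward(probe);
    Console.WriteLine("f(" + xi + ") = " + output.Weights[0]);
}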
private static void Classify2DDemo()
{
    var net = new Net();
    net.AddLayer(new InputLayer(1, 1, 2));
    net.AddLayer(new FullyConnLayer(6, Activation.Tanh));
    net.AddLayer(new FullyConnLayer(2, Activation.Tanh));
    net.AddLayer(new SoftmaxLayer(2));

    var trainer = new Trainer(net)
    {
        LearningRate = 0.01,
        Momentum = 0.0,
        BatchSize = 10,
        L2Decay = 0.001
    };

    // Data
    var data = new List<double[]>();
    var labels = new List<int>();
    data.Add(new[] { -0.4326, 1.1909 }); labels.Add(1);
    data.Add(new[] { 3.0, 4.0 }); labels.Add(1);
    data.Add(new[] { 0.1253, -0.0376 }); labels.Add(1);
    data.Add(new[] { 0.2877, 0.3273 }); labels.Add(1);
    data.Add(new[] { -1.1465, 0.1746 }); labels.Add(1);
    data.Add(new[] { 1.8133, 1.0139 }); labels.Add(0);
    data.Add(new[] { 2.7258, 1.0668 }); labels.Add(0);
    data.Add(new[] { 1.4117, 0.5593 }); labels.Add(0);
    data.Add(new[] { 4.1832, 0.3044 }); labels.Add(0);
    data.Add(new[] { 1.8636, 0.1677 }); labels.Add(0);
    data.Add(new[] { 0.5, 3.2 }); labels.Add(1);
    data.Add(new[] { 0.8, 3.2 }); labels.Add(1);
    data.Add(new[] { 1.0, -2.2 }); labels.Add(1);
    var n = labels.Count;

    // Training
    do
    {
        Classify2DUpdate(n, data, trainer, labels);
    } while (!Console.KeyAvailable);

    // Testing: the volume depth (2) must match the input layer
    var netx = new Volume(1, 1, 2);
    for (var ix = 0; ix < n; ix++)
    {
        netx.Weights = data[ix];
        net.Forward(netx);
        var c = net.GetPrediction();
        var accurate = c == labels[ix];
        Console.WriteLine("sample " + ix + ": predicted " + c + (accurate ? " (correct)" : " (wrong)"));
    }
}
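Per-sample checks are useful for debugging, but an aggregate accuracy figure is a quicker health check. A sketch that could replace the testing loop above, again using only Forward and GetPrediction from the demo:

// count correct predictions over the whole data set
var correct = 0;
var test = new Volume(1, 1, 2);
for (var ix = 0; ix < n; ix++)
{
    test.Weights = data[ix];
    net.Forward(test);
    if (net.GetPrediction() == labels[ix])
    {
        correct++;
    }
}
Console.WriteLine("Accuracy: " + correct + "/" + n);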
private static void Classify2DUpdate(int n, List<double[]> data, Trainer trainer, List<int> labels)
{
    // The volume depth (2) must match the 2-D input layer
    var netx = new Volume(1, 1, 2);
    var avloss = 0.0;

    for (var iters = 0; iters < 50; iters++)
    {
        for (var ix = 0; ix < n; ix++)
        {
            netx.Weights = data[ix];
            trainer.Train(netx, labels[ix]);
            avloss += trainer.Loss;
        }
    }

    avloss /= n * 50.0;
    Console.WriteLine("Loss: " + avloss);
}
private static void RegressionUpdate(int n, double[] x, Trainer trainer, double[] y)
{
    var netx = new Volume(1, 1, 1);
    var avloss = 0.0;

    for (var iters = 0; iters < 50; iters++)
    {
        for (var ix = 0; ix < n; ix++)
        {
            netx.Weights = new[] { x[ix] };
            trainer.Train(netx, y[ix]);
            avloss += trainer.Loss;
        }
    }

    avloss /= n * 50.0;
    Console.WriteLine("Loss: " + avloss);
}