Example No. 1
        private static void Main(string[] args)
        {
            // specifies a 2-layer neural network with one hidden layer of 20 neurons
            var net = new Net();

            // input layer declares size of input. here: 2-D data
            // ConvNetSharp works on 3-Dimensional volumes (width, height, depth), but if you're not dealing with images
            // then the first two dimensions (width, height) will always be kept at size 1
            net.AddLayer(new InputLayer(1, 1, 2));

            // declare 20 neurons, followed by ReLU (rectified linear unit non-linearity)
            net.AddLayer(new FullyConnLayer(20, Activation.Relu));

            // declare a softmax classifier over 10 classes on top of the previous hidden layer
            net.AddLayer(new SoftmaxLayer(10));

            // forward a random data point through the network
            var x = new Volume(new[] {0.3, -0.5});

            var prob = net.Forward(x);

            // prob is a Volume. Volumes have a property Weights that stores the raw data, and WeightGradients that stores gradients
            Console.WriteLine("probability that x is class 0: " + prob.Weights[0]); // prints e.g. 0.50101

            var trainer = new Trainer(net) { LearningRate = 0.01, L2Decay = 0.001 };
            trainer.Train(x, 0); // train the network, specifying that x is class zero

            var prob2 = net.Forward(x);
            Console.WriteLine("probability that x is class 0: " + prob2.Weights[0]);
            // now prints 0.50374, slightly higher than the previous 0.50101: the network's
            // weights have been adjusted by the Trainer to give a higher probability to
            // the class we trained the network with (zero)
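
            // optionally, read back the predicted class: GetPrediction() returns the index of the
            // highest-probability class (the same helper is used in the classification demo below)
            var predicted = net.GetPrediction();
            Console.WriteLine("predicted class: " + predicted);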
        }
Example No. 2
        private static void Regression1DDemo()
        {
            var net = new Net();
            net.AddLayer(new InputLayer(1, 1, 1));
            net.AddLayer(new FullyConnLayer(20, Activation.Relu));
            net.AddLayer(new FullyConnLayer(20, Activation.Sigmoid));
            net.AddLayer(new RegressionLayer(1));

            var trainer = new Trainer(net) { LearningRate = 0.01, Momentum = 0.0, BatchSize = 1, L2Decay = 0.001 };

            // Function we want to learn
            double[] x = { 0.0, 0.5, 1.0 };
            double[] y = { 0.0, 0.1, 0.2 };
            var n = x.Length;

            // Training
            do
            {
                RegressionUpdate(n, x, trainer, y);
            } while (!Console.KeyAvailable);

            // Testing
            var netx = new Volume(1, 1, 1);
            for (var ix = 0; ix < n; ix++)
            {
                netx.Weights = new[] { x[ix] };
                var result = net.Forward(netx); // result.Weights[0] holds the network's estimate of y at x[ix]
            }
        }
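A hypothetical sketch of the RegressionUpdate helper called above (it is not part of this listing); it assumes the Trainer exposes a Train(Volume, double[]) overload for regression targets, mirroring the Train(Volume, int) call used for classification in Example No. 1:

        // Hypothetical helper, not in the original listing: one pass over the data set,
        // taking an SGD step towards y[ix] for each input x[ix].
        private static void RegressionUpdate(int n, double[] x, Trainer trainer, double[] y)
        {
            var netx = new Volume(1, 1, 1);
            for (var ix = 0; ix < n; ix++)
            {
                netx.Weights = new[] { x[ix] };
                trainer.Train(netx, new[] { y[ix] }); // assumed regression overload of Train
            }
        }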
Example No. 3
        private static void Classify2DDemo()
        {
            var net = new Net();
            net.AddLayer(new InputLayer(1, 1, 2));
            net.AddLayer(new FullyConnLayer(6, Activation.Tanh));
            net.AddLayer(new FullyConnLayer(2, Activation.Tanh));
            net.AddLayer(new SoftmaxLayer(2));

            var trainer = new Trainer(net) { LearningRate = 0.01, Momentum = 0.0, BatchSize = 10, L2Decay = 0.001 };

            // Data
            var data = new List<double[]>();
            var labels = new List<int>();
            data.Add(new[] { -0.4326, 1.1909 });
            labels.Add(1);
            data.Add(new[] { 3.0, 4.0 });
            labels.Add(1);
            data.Add(new[] { 0.1253, -0.0376 });
            labels.Add(1);
            data.Add(new[] { 0.2877, 0.3273 });
            labels.Add(1);
            data.Add(new[] { -1.1465, 0.1746 });
            labels.Add(1);
            data.Add(new[] { 1.8133, 1.0139 });
            labels.Add(0);
            data.Add(new[] { 2.7258, 1.0668 });
            labels.Add(0);
            data.Add(new[] { 1.4117, 0.5593 });
            labels.Add(0);
            data.Add(new[] { 4.1832, 0.3044 });
            labels.Add(0);
            data.Add(new[] { 1.8636, 0.1677 });
            labels.Add(0);
            data.Add(new[] { 0.5, 3.2 });
            labels.Add(1);
            data.Add(new[] { 0.8, 3.2 });
            labels.Add(1);
            data.Add(new[] { 1.0, -2.2 });
            labels.Add(1);
            var n = labels.Count;

            // Training
            do
            {
                Classify2DUpdate(n, data, trainer, labels);
            } while (!Console.KeyAvailable);

            // Testing
            var netx = new Volume(1, 1, 2); // one 2-D test point, matching the input layer
            for (var ix = 0; ix < n; ix++)
            {
                netx.Weights = data[ix];
                var result = net.Forward(netx);
                var c = net.GetPrediction();
                bool accurate = c == labels[ix]; // true when the predicted class matches the label
            }
        }
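A hypothetical sketch of the Classify2DUpdate helper called above (not part of this listing); it uses the Train(Volume, int) classification call shown in Example No. 1:

        // Hypothetical helper, not in the original listing: one pass over the labelled points,
        // taking an SGD step towards the correct class for each point.
        private static void Classify2DUpdate(int n, List<double[]> data, Trainer trainer, List<int> labels)
        {
            var netx = new Volume(1, 1, 2);
            for (var ix = 0; ix < n; ix++)
            {
                netx.Weights = data[ix];
                trainer.Train(netx, labels[ix]);
            }
        }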
Example No. 4
        void Start()
        {
            for (var i = 0; i < pixels.GetLength(0); ++i)
            {
                for (var j = 0; j < pixels.GetLength(1); ++j)
                {
                    pixels[i, j] = Instantiate(pixel, new Vector3(i, j, 0), Quaternion.identity) as GameObject;
                }
            }

            net     = new Net();
            trainer = new Trainer(net);

            net.AddLayer(new InputLayer(1, 1, 2));

            net.AddLayer(new FullyConnLayer(2, Activation.Sigmoid));

            net.AddLayer(new FullyConnLayer(1, Activation.Sigmoid));

            net.AddLayer(new RegressionLayer(1));
        }
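The listing only shows Start(); below is a hedged sketch of what a per-frame Update() might look like with this setup, assuming the pixel/pixels/net/trainer fields declared by the class, a Train(Volume, double[]) regression overload as in Example No. 2, and that the pixel grid is meant to visualise the network's output over the unit square (an XOR-style target is used purely as an illustration):

        void Update()
        {
            // Hypothetical per-frame step, not in the original listing.
            // Train one epoch on four XOR-style points, then colour each pixel by the
            // network's output at its normalised (x, y) coordinate.
            double[][] inputs  = { new[] { 0.0, 0.0 }, new[] { 0.0, 1.0 }, new[] { 1.0, 0.0 }, new[] { 1.0, 1.0 } };
            double[]   targets = { 0.0, 1.0, 1.0, 0.0 };

            var v = new Volume(1, 1, 2);
            for (var k = 0; k < inputs.Length; k++)
            {
                v.Weights = inputs[k];
                trainer.Train(v, new[] { targets[k] }); // assumed regression overload of Train
            }

            var w = (double)pixels.GetLength(0);
            var h = (double)pixels.GetLength(1);
            for (var i = 0; i < pixels.GetLength(0); ++i)
            {
                for (var j = 0; j < pixels.GetLength(1); ++j)
                {
                    v.Weights = new[] { i / w, j / h };
                    var value = (float)net.Forward(v).Weights[0];
                    pixels[i, j].GetComponent<Renderer>().material.color = new Color(value, value, value);
                }
            }
        }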
Example No. 5
        // Use this for initialization
        public QLearning()
        {
            exp  = new Experience[experienceSize];
            expi = 0;
            expn = 0;
            t    = 0;
            r0   = -99f;

            // specifies a 2-layer neural network with one hidden ReLU layer
            net = new Net();

            // input layer declares size of input. here: numStates-dimensional data
            // ConvNetSharp works on 3-Dimensional volumes (width, height, depth), but if you're not dealing with images
            // then the first two dimensions (width, height) will always be kept at size 1
            net.AddLayer(new InputLayer(1, 1, numStates));

            // declare the hidden neurons, followed by ReLU (rectified linear unit non-linearity)
            net.AddLayer(new FullyConnLayer(hiddenNeurons - 10, Activation.Relu));

            //snet.AddLayer(new FullyConnLayer(hiddenNeurons/4, Activation.Relu));

            // declare the regression layer that outputs one Q-value per action, on top of the previous hidden layer
            net.AddLayer(new RegressionLayer(numActions));

            Debug.Log("Network initialized");


            // specifies a 2-layer classification network with one hidden layer
            netClassify = new Net();

            // input layer declares size of input. here: 2-D data
            // ConvNetSharp works on 3-Dimensional volumes (width, height, depth), but if you're not dealing with images
            // then the first two dimensions (width, height) will always be kept at size 1
            netClassify.AddLayer(new InputLayer(1, 1, 2));

            // declare 4 hidden neurons, followed by ReLU (rectified linear unit non-linearity)
            netClassify.AddLayer(new FullyConnLayer(4, Activation.Relu));

            //snet.AddLayer(new FullyConnLayer(hiddenNeurons/4, Activation.Relu));

            // declare a 2-class softmax classifier on top of the previous hidden layer
            netClassify.AddLayer(new SoftmaxLayer(2));

            Debug.Log("Network Classify initialized");

            /*
             * List<double> list = new List<double>();
             *
             * list = netToList(net);
             *
             * outputList(list, "agent1");
             *
             *
             * ListToNet(net, list);
             *
             * List<double> list2 = new List<double>();
             *
             * list2 = netToList(net);
             *
             * list2[1] = 0.5f;
             *
             * outputList(list2, "agent2");
             *
             */



            //double[] weights = { 0.3, -0.5, 0.1, 0.9, 0.6 };



            // forward a random data point through the network
            //var x = new Volume(weights);

            //var prob = net.Forward(x);

            // prob is a Volume. Volumes have a property Weights that stores the raw data, and WeightGradients that stores gradients
            //Debug.Log("probability that x is class 0: " + prob.Weights[0]); // prints e.g. 0.50101

            trainer = new SgdTrainer(net)
            {
                LearningRate = 0.01, L2Decay = 0.001, Momentum = 0.0, BatchSize = 5
            };

            //trainer.Train(x, 0); // train the network, specifying that x is class zero

            // Volume prob2 = net.Forward(x);

            //Debug.Log("probability that x is class 0: " + prob2.Weights[0]);
            // now prints 0.50374, slightly higher than the previous 0.50101: the network's
            // weights have been adjusted by the Trainer to give a higher probability to
            // the class we trained the network with (zero)

            e = new Entropy();

            q = new Quartiles();

            double[] arr = new double[8] {
                5, 6, 7, 2, 1, 8, 4, 3
            };

            double[] ascOrderedArray = (from i in arr orderby i ascending select i).ToArray();

            Debug.Log(q.umidmean(ascOrderedArray));

            Debug.Log(q.lmidmean(ascOrderedArray));
        }
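A hypothetical action-selection helper to illustrate how the value network above would typically be used (it is not part of the original class); the field names net, numStates and numActions are the ones initialised in the constructor:

        // Hypothetical helper, not in the original class: forwards the current state through
        // the value network and returns the action with the highest predicted Q-value.
        private int GreedyAction(double[] state)
        {
            var s = new Volume(1, 1, numStates);
            s.Weights = state;

            var qValues = net.Forward(s); // one output per action from the regression layer

            var best = 0;
            for (var a = 1; a < numActions; a++)
            {
                if (qValues.Weights[a] > qValues.Weights[best])
                {
                    best = a;
                }
            }

            return best;
        }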