Example #1
        // uses mismatched layer sizes to check that the algorithms stay within index bounds
        public static void TestSizes()
        {
            var nn = new SimpleNeuralNet();

            nn.Create(2, 3, 5, 1, 4, 3);
            var output = nn.FeedForward(new Vector(1, 2));

            nn.Backpropagate(0.1f, output);
        }
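For reference, a small sketch of the shape bookkeeping this test exercises. The per-layer layout (W[k] is sizes[k + 1] x sizes[k], b[k] has sizes[k + 1] entries) is inferred from Test1 below and is an assumption about SimpleNeuralNet, not a documented API; the method name PrintExpectedShapes is made up here.

        // lists the weight and bias shapes that Create(2, 3, 5, 1, 4, 3) should
        // produce, assuming W[k] maps layer k to layer k + 1 (layout as in Test1)
        public static void PrintExpectedShapes()
        {
            var sizes = new[] { 2, 3, 5, 1, 4, 3 };
            for (var k = 0; k + 1 < sizes.Length; ++k)
            {
                // e.g. W[0]: 3 x 2, b[0]: 3   ...   W[4]: 3 x 4, b[4]: 3
                Console.WriteLine($"W[{k}]: {sizes[k + 1]} x {sizes[k]}, b[{k}]: {sizes[k + 1]}");
            }
        }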
Example #2
        // test example from https://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/
        public static void Test1()
        {
            var nn = new SimpleNeuralNet();

            nn.Create(2, 2, 2);
            nn.W[0][0, 0] = 0.15f;
            nn.W[0][0, 1] = 0.20f;
            nn.W[0][1, 0] = 0.25f;
            nn.W[0][1, 1] = 0.30f;
            nn.b[0][0]    = 0.35f;
            nn.b[0][1]    = 0.35f;

            nn.W[1][0, 0] = 0.40f;
            nn.W[1][0, 1] = 0.45f;
            nn.W[1][1, 0] = 0.50f;
            nn.W[1][1, 1] = 0.55f;
            nn.b[1][0]    = 0.60f;
            nn.b[1][1]    = 0.60f;

            nn.f[1]  = Vectorize(Logistic);
            nn.df[1] = Vectorize(dLogistic);
            nn.f[2]  = Vectorize(Logistic);
            nn.df[2] = Vectorize(dLogistic);

            var input = new Vector(0.05f, 0.10f);
            // should output ~ 0.75136507, 0.772928465
            var output = nn.FeedForward(input);

            // desired output
            var t1 = new Vector(0.01f, 0.99f);

            // error vec
            var evec = output - t1;

            // should be ~0.298371109
            var error = 0.5 * evec.LengthSquared();

            var learning = 0.50f;

            nn.Backpropagate(learning, evec);

            output = nn.FeedForward(input);

            // error vec
            evec = output - t1;

            // should be ~0.291027924 - TODO - the reference example did not update the bias vectors!
            error = 0.5 * evec.LengthSquared();
        }
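The constants quoted in Test1 can be re-derived by hand. The sketch below redoes the forward pass and error arithmetic of the mattmazur.com example with plain Math.Exp, independent of SimpleNeuralNet; the method name Test1ByHand and the local Sigmoid helper are made up here for illustration.

        // hand computation of the forward pass and total error for the
        // mattmazur.com example above; re-derives the constants quoted in
        // Test1 and is not part of SimpleNeuralNet
        public static void Test1ByHand()
        {
            float Sigmoid(float x) => 1.0f / (1.0f + (float)Math.Exp(-x));

            // hidden layer: h = logistic(W[0] * input + b[0])
            var h1 = Sigmoid(0.15f * 0.05f + 0.20f * 0.10f + 0.35f); // ~0.593269992
            var h2 = Sigmoid(0.25f * 0.05f + 0.30f * 0.10f + 0.35f); // ~0.596884378

            // output layer: o = logistic(W[1] * h + b[1])
            var o1 = Sigmoid(0.40f * h1 + 0.45f * h2 + 0.60f); // ~0.75136507
            var o2 = Sigmoid(0.50f * h1 + 0.55f * h2 + 0.60f); // ~0.772928465

            // total error E = 0.5 * ((o1 - t1)^2 + (o2 - t2)^2), ~0.298371109
            var error = 0.5f * ((o1 - 0.01f) * (o1 - 0.01f) + (o2 - 0.99f) * (o2 - 0.99f));

            Console.WriteLine($"{o1} {o2} {error}");
        }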
        public void Train(
            // the net to train
            SimpleNeuralNet neuralNet,
            // the data set (training and validation)
            DataSet dataSet,
            // source of randomness
            Random rand,
            // where to put notifications
            Action <Result> resultAction = null,
            // number of epochs to do (complete passes through data)
            int epochs = 100,
            // number to do per mini batch
            int miniBatchSize = 100,
            // learning rate
            float learningRate = 0.1f
            )
        {
            lockedInterlocked       = 0;
            this.neuralNet          = neuralNet;
            this.dataSet            = dataSet;
            this.rand               = rand;
            this.state.maxEpochs    = epochs;
            this.state.trainingSize = dataSet.TrainingSet.Count;
            this.state.testSize     = dataSet.TestSet.Count;
            this.miniBatchSize      = miniBatchSize;
            this.learningRate       = learningRate;
            this.resultAction       = resultAction;
            this.state.message      = "";

            state.curEpoch = state.trainingProcessed = state.testProcessed = 0;

            shuffled = new List <int>();
            for (var i = 0; i < state.trainingSize; ++i)
            {
                shuffled.Add(i);
            }

            int startMs = Environment.TickCount, endMs = 0;

            // number of mini batches, rounded up so a final partial batch is included
            state.numBatches = (state.trainingSize + miniBatchSize - 1) / miniBatchSize;

            for (var epoch = 0; epoch < state.maxEpochs; epoch++)
            {
                Shuffle(rand, shuffled);

                state.curEpoch          = epoch + 1;
                state.trainingProcessed = 0;
                state.testProcessed     = 0;

                // do training via SGD for one epoch
                var totalTrainingPassed = 0;
                for (var minibatch = 0; minibatch < state.numBatches; ++minibatch)
                {
                    if (lockedInterlocked != 0)
                    {
                        // nonzero flag signals a stop request from another thread
                        // todo
                        return;
                    }

                    state.batchIndex = minibatch + 1;
                    var batchStart = minibatch * miniBatchSize;
                    var batchSize  = Math.Min(miniBatchSize, state.trainingSize - batchStart);
                    totalTrainingPassed += ProcessBatch(
                        i => dataSet.TrainingSet[shuffled[i + batchStart]],
                        batchSize,
                        true
                        );
                    state.trainingProcessed += batchSize;
                    state.trainingError      = (float)totalTrainingPassed / state.trainingProcessed;
                    endMs           = Environment.TickCount;
                    state.elapsedMs = endMs - startMs;
                    (state.boundsName, state.boundsValue) = neuralNet.Bounds();
                    resultAction?.Invoke(state);
                }

                // check against test data
                var totalTestPassed = ProcessBatch(
                    i => dataSet.TestSet[i],
                    state.testSize,
                    false
                    );
                state.testError          = (float)totalTestPassed / state.testSize;
                state.testProcessed      = state.testSize;
                endMs           = Environment.TickCount;
                state.elapsedMs = endMs - startMs;
                (state.boundsName, state.boundsValue) = neuralNet.Bounds();
                resultAction?.Invoke(state);
            }
        }
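A hedged usage sketch for Train. The enclosing class name Trainer and the way the DataSet instance is obtained are assumptions; only the parameter list and the Result fields written inside Train above (curEpoch, trainingError, testError, elapsedMs) come from this code.

        // minimal usage sketch: train a net and print progress from the
        // resultAction callback, which Train invokes once per mini batch
        // and once per epoch after the test pass
        public static void TrainExample(SimpleNeuralNet nn, DataSet dataSet)
        {
            var trainer = new Trainer(); // assumed name of the class holding Train

            trainer.Train(
                nn,
                dataSet,
                new Random(1234),
                r => Console.WriteLine(
                    $"epoch {r.curEpoch}: train {r.trainingError:F4}, test {r.testError:F4}, {r.elapsedMs} ms"),
                epochs: 30,
                miniBatchSize: 20,
                learningRate: 0.1f
                );
        }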