Nguyen-Widrow weight initialization.

The Nguyen-Widrow initialization algorithm chooses initial weight values so that the active region of each neuron in the layer is distributed approximately evenly across the layer's input space.

The values contain a degree of randomness, so they are not the same each time this function is called.
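For a quick start, the sketch below shows the typical call sequence. It assumes the Accord.Neuro ActivationNetwork, BipolarSigmoidFunction and NguyenWidrow types used in the examples that follow; the layer sizes and sigmoid alpha are illustrative values, not part of the API.

        // create a small network: two inputs, five hidden neurons, one output
        ActivationNetwork network = new ActivationNetwork(
            new BipolarSigmoidFunction(2.0), 2, 5, 1);

        // spread the neurons' active regions roughly evenly over the input space
        NguyenWidrow initializer = new NguyenWidrow(network);
        initializer.Randomize();     // randomize all layers at once
        // initializer.Randomize(0); // or only the layer at a given index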

Example #1
        // Worker thread
        void SearchSolution()
        {
            // number of learning samples
            int samples = data.GetLength(0);
            // data transformation factor
            double yFactor = 1.7 / chart.RangeY.Length;
            double yMin = chart.RangeY.Min;
            double xFactor = 2.0 / chart.RangeX.Length;
            double xMin = chart.RangeX.Min;

            // prepare learning data
            double[][] input = new double[samples][];
            double[][] output = new double[samples][];

            for (int i = 0; i < samples; i++)
            {
                input[i] = new double[1];
                output[i] = new double[1];

                // set input
                input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0;
                // set output
                output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85;
            }

            // create multi-layer neural network
            ActivationNetwork network = new ActivationNetwork(
                new BipolarSigmoidFunction(sigmoidAlphaValue),
                1, neuronsInFirstLayer, 1);

            if (useNguyenWidrow)
            {
                NguyenWidrow initializer = new NguyenWidrow(network);
                initializer.Randomize();
            }

            // create teacher
            var teacher = new ParallelResilientBackpropagationLearning(network);

            // iterations
            int iteration = 1;

            // solution array
            double[,] solution = new double[50, 2];
            double[] networkInput = new double[1];

            // calculate X values to be used with solution function
            for (int j = 0; j < 50; j++)
            {
                solution[j, 0] = chart.RangeX.Min + (double)j * chart.RangeX.Length / 49;
            }

            // loop
            while (!needToStop)
            {
                // run epoch of learning procedure
                double error = teacher.RunEpoch(input, output) / samples;

                // calculate solution
                for (int j = 0; j < 50; j++)
                {
                    networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
                    solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
                }
                chart.UpdateDataSeries("solution", solution);
                // calculate error
                double learningError = 0.0;
                for (int j = 0, k = data.GetLength(0); j < k; j++)
                {
                    networkInput[0] = input[j][0];
                    learningError += Math.Abs(data[j, 1] - ((network.Compute(networkInput)[0] + 0.85) / yFactor + yMin));
                }

                // set current iteration's info
                SetText(currentIterationBox, iteration.ToString());
                SetText(currentErrorBox, learningError.ToString("F3"));

                // increase current iteration
                iteration++;

                // check if we need to stop
                if ((iterations != 0) && (iteration > iterations))
                    break;
            }


            // enable settings controls
            EnableControls(true);
        }
Example #2
        // Worker thread
        void SearchSolution()
        {
            // number of learning samples
            int samples = sourceMatrix.GetLength(0);

            // prepare learning data
            double[][] inputs = sourceMatrix.Submatrix(null, 0, 1).ToArray();
            double[][] outputs = sourceMatrix.GetColumn(2).Transpose().ToArray();

            // create multi-layer neural network
            ann = new ActivationNetwork(new GaussianFunction(sigmoidAlphaValue),
                //new BipolarSigmoidFunction(sigmoidAlphaValue),
                2, neuronsInFirstLayer, 1);

            if (useNguyenWidrow)
            {
                if (useSameWeights)
                    Accord.Math.Tools.SetupGenerator(0);

                NguyenWidrow initializer = new NguyenWidrow(ann);
                initializer.Randomize();
            }

            // create teacher
            LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(ann, useRegularization);

            // set learning rate
            teacher.LearningRate = learningRate;

            // iterations
            iteration = 1;

            var ranges = Matrix.Range(sourceMatrix, 0);
            double[][] map = Matrix.Mesh(ranges[0], ranges[1], 1, 1);
            var sw = Stopwatch.StartNew();

            // loop
            while (!needToStop)
            {
                // run epoch of learning procedure
                error = teacher.RunEpoch(inputs, outputs) / samples;

                //var result = map.Apply(ann.Compute).GetColumn(0).Apply(Math.Sign);
                var result = map.Apply(ann.Compute).GetColumn(0).Apply(d =>
                {
                    if (d > 0.66) return 2;
                    else if (d < 0.33)
                        return 0;
                    return 1;
                });

                var graph = map.ToMatrix().InsertColumn(result.ToDouble());

                CreateScatterplot(zedGraphControl2, graph);

                // increase current iteration
                iteration++;

                elapsed = sw.Elapsed;

                updateStatus();

                // check if we need to stop
                if ((iterations != 0) && (iteration > iterations))
                    break;
            }

            sw.Stop();

            // enable settings controls
            EnableControls(true);
        }
Example #3
        public void configuraRede()
        {
            // if there is a 0 in the neuron list, remove it
            neurons.Remove(0);

            // the number of hidden layers equals the number of entries in the neuron list
            int camadaIntermediaria = neurons.Count;

            int[] camadas = new int[camadaIntermediaria + 1];
            for (int i = 0; i < camadaIntermediaria; i++)
            {
                camadas[i] = neurons[i];
            }
            camadas[camadaIntermediaria] = predictionSize;

            network = new ActivationNetwork(new BipolarSigmoidFunction(2.0), windowSize + predictionSize * 6, camadas);

            // randomize the weights of each layer
            NguyenWidrow weightRandom = new NguyenWidrow(network);
            for (int i = 0; i < network.Layers.Length; i++)
                weightRandom.Randomize(i);
        }