Example #1
        public void PositionNeurons(BackpropagationTrainingParameters trainingParameters, List<RadialNeuron> radialNeurons)
        {
            trainingParameters.Validate();

            var inputPoints = trainingParameters.InputPoints;

            // Draw a distinct random input-point index for every radial neuron
            // (sampling without replacement, via rejection).
            var randomPositions = new List<int>();

            for (int i = 0; i < radialNeurons.Count; i++)
            {
                int randomPosition;
                do
                {
                    randomPosition = (int)RandomNumberProvider.GetRandomNumber(0, inputPoints.Count);
                } while (randomPositions.Contains(randomPosition));
                randomPositions.Add(randomPosition);
            }

            // Copy the coordinates of each sampled input point into the
            // corresponding radial neuron's center.
            int radialIndex = 0;

            foreach (var index in randomPositions)
            {
                for (int i = 0; i < inputPoints[index].Input.Count; i++)
                {
                    radialNeurons[radialIndex].Position[i] = inputPoints[index].Input[i];
                }
                radialIndex++;
            }
        }
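This variant places each radial neuron directly on a randomly chosen training point. The do/while loop keeps the sampled indices distinct, which implicitly assumes radialNeurons.Count <= inputPoints.Count; with more neurons than training points the rejection loop would never terminate.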
Example #2
        public void PositionNeurons(BackpropagationTrainingParameters trainingParameters, List<RadialNeuron> radialNeurons)
        {
            if (!(trainingParameters is KMeansBackpropagationTrainingParameters kMeansParameters))
            {
                throw new ArgumentException("k-means positioner was passed non-k-means training parameters");
            }

            var kMeansTrainingParameters = new IAD_zad2.Model.Parameters.KMeansTrainingParameters()
            {
                TrainingData = trainingParameters.InputPoints.Select(set => set.Input).ToList(),
                Epochs       = kMeansParameters.KMeansEpochs
            };

            var    inputPoints = trainingParameters.InputPoints;
            double minPosition = inputPoints.Min(elem => elem.Input.Min());
            double maxPosition = inputPoints.Max(elem => elem.Input.Max());
            int    dimensions  = inputPoints.First().Input.Count;

            // Train the KMeansNetwork to distribute the RBF centers evenly
            // over the input data.
            KMeansNetwork mapNetwork = new KMeansNetwork(
                radialNeurons.Count,
                new NeuronRandomRectangularInitializer(minPosition, maxPosition, dimensions),
                new IAD_zad2.Utilities.Distance.EuclideanDistance());

            mapNetwork.Train(kMeansTrainingParameters);

            // Copy the trained cluster centers into the radial neurons.
            for (int i = 0; i < radialNeurons.Count; i++)
            {
                radialNeurons[i].Position = mapNetwork.Neurons[i].CurrentWeights.Clone();
            }
        }
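Seeding the RBF centers with k-means is a common alternative to random sampling: the cluster centroids follow the density of the training data, so the radial basis functions cover the input space more evenly than randomly picked points. The NeuronRandomRectangularInitializer only supplies starting positions for the k-means run, bounded by the data's minimum and maximum coordinates.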
Example #3
        private void TrainButton_Click(object sender, RoutedEventArgs e)
        {
            var learningRate = double.Parse(LearningRateInput.Text, CultureInfo.InvariantCulture);
            var epochs       = int.Parse(EpochsCount.Text);
            var momentum     = double.Parse(MomentumInput.Text, CultureInfo.InvariantCulture);
            var minWeight    = double.Parse(MinInitialWeight.Text, CultureInfo.InvariantCulture);
            var maxWeight    = double.Parse(MaxInitialWeight.Text, CultureInfo.InvariantCulture);
            var trainingPath = TrainingSetPath.Text;
            var kMeansEpochs = int.Parse(EpochsCount.Text); // reuses the same textbox as the backpropagation epoch count

            bool[] chosenInputs = new bool[] {
                Input1Check.IsChecked.Value,
                Input2Check.IsChecked.Value,
                Input3Check.IsChecked.Value,
                Input4Check.IsChecked.Value
            };

            BackpropagationTrainingParameters parameters;

            DataGetter dg           = new DataGetter();
            var        trainingData = new List<TrainingSet>();
            var        testingData  = new List<TrainingSet>();

            switch (taskSelected)
            {
                case TaskSelection.APPROX:
                    trainingData = dg.GetTrainingDataWithOneOutput(trainingPath, inputCount);
                    testingData  = dg.GetTrainingDataWithOneOutput(TestingSetPath, inputCount);
                    parameters   = new BackpropagationTrainingParameters(learningRate, epochs, momentum, minWeight, maxWeight, trainingData);
                    Network.Train(parameters, testingData);

                    ApproximationExample(trainingData, testingData);
                    break;

                case TaskSelection.CLASS:
                    trainingData = dg.GetTrainingDataWithChosenInputs(trainingPath, chosenInputs);
                    testingData  = dg.GetTrainingDataWithChosenInputs(TestingSetPath, chosenInputs);
                    // Expand each scalar class label into a per-class output vector
                    // (classToVector is not shown in this listing).
                    testingData = testingData.Select(set => {
                        set.DesiredOutput = classToVector(set.DesiredOutput.At(0));
                        return set;
                    }).ToList();
                    trainingData = trainingData.Select(set => {
                        set.DesiredOutput = classToVector(set.DesiredOutput.At(0));
                        return set;
                    }).ToList();
                    parameters = new KMeansBackpropagationTrainingParameters(learningRate, epochs, momentum, minWeight, maxWeight, trainingData, kMeansEpochs);
                    Network.Train(parameters, testingData);

                    ClassificationExample(trainingData, testingData);
                    break;

                default:
                    throw new InvalidOperationException("unhandled TaskSelection value");
            }
        }
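classToVector itself does not appear in this listing. A minimal sketch of what such a label-to-vector mapping typically looks like, assuming MathNet.Numerics vectors (the .At(0) call above suggests that library) and classes labeled 1..classCount; the actual helper may differ:

        using MathNet.Numerics.LinearAlgebra;

        // Hypothetical one-hot encoder: class label k (1-based) becomes a vector
        // with 1.0 at index k-1 and 0.0 elsewhere. classCount is an assumption.
        private Vector<double> classToVector(double classLabel, int classCount = 3)
        {
            int k = (int)classLabel - 1; // assumes labels run 1..classCount
            return Vector<double>.Build.Dense(classCount, i => i == k ? 1.0 : 0.0);
        }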
Example #4
        private void initLayers(BackpropagationTrainingParameters parameters)
        {
            // Place the RBF centers, then derive each radial neuron's width from them.
            _neuronPositioner.PositionNeurons(parameters, RadialLayer);
            _widthCalculator.CalculateWidths(RadialLayer);

            // Give every linear output neuron one random weight per radial neuron.
            foreach (var linear in LinearLayer)
            {
                linear.InitNeuron(RadialLayer.Count, parameters.MinWeightValue, parameters.MaxWeightValue);
            }
        }
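The order here matters: the width calculator runs after positioning because widths are derived from the freshly placed centers (presumably from the spacing between them; the calculator is not shown in this listing), and each linear neuron needs exactly one weight per radial neuron, drawn uniformly from [MinWeightValue, MaxWeightValue].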
Example #5
        public void Train(BackpropagationTrainingParameters parameters, List<TrainingSet> testingPoints)
        {
            MeanSquaredErrors.Clear();
            TestMeanSquaredErrors.Clear();
            initLayers(parameters);

            var epochs       = parameters.Epochs;
            var learningRate = parameters.LearningRate;
            var momentum     = parameters.Momentum;

            for (int i = 0; i < epochs; i++)
            {
                // Reset the radial outputs cached during the previous epoch.
                for (int j = 0; j < RadialLayer.Count; j++)
                {
                    RadialLayer[j].Outputs.Clear();
                }

                double meanSquaredErrorAggregate = 0; // sum of squared errors over this epoch

                // Measure the test error before this epoch's weight updates.
                double testMeanSquaredErrorAggregate = 0;
                foreach (var tpoint in testingPoints)
                {
                    testMeanSquaredErrorAggregate += (ProcessInput(tpoint.Input) - tpoint.DesiredOutput).Sum(error => error * error);
                }
                TestMeanSquaredErrors.Add(Math.Sqrt(testMeanSquaredErrorAggregate));

                foreach (var point in parameters.InputPoints)
                {
                    var networkOutput = ProcessInput(point.Input);
                    var deltas        = (networkOutput - point.DesiredOutput); // the derivative of the linear activation is 1

                    meanSquaredErrorAggregate += deltas.Sum(delta => delta * delta); // accumulate SUM[delta_j^2]; the square root is taken after the epoch

                    for (int j = 0; j < LinearLayer.Count; j++)
                    {
                        // Update each weight from the corresponding radial neuron's output.
                        for (int k = 0; k < RadialLayer.Count; k++)
                        {
                            var instantDelta = (deltas[j] * RadialLayer[k].Outputs.Last()) * learningRate;                  // LR * deltaM * xN

                            LinearLayer[j].WeightsDeltas[k] = -instantDelta + (LinearLayer[j].WeightsDeltas[k] * momentum); // gradient step plus momentum
                            LinearLayer[j].Weights[k]       = LinearLayer[j].Weights[k] + LinearLayer[j].WeightsDeltas[k];  // update weights (-LR * error derivative)
                        }
                        // Bias update: same rule with a constant input of 1.
                        LinearLayer[j].BiasWeightDelta = -(deltas[j] * learningRate) + (LinearLayer[j].BiasWeightDelta * momentum);
                        LinearLayer[j].BiasWeight      = LinearLayer[j].BiasWeight + LinearLayer[j].BiasWeightDelta;
                    }
                }
                MeanSquaredErrors.Add(Math.Sqrt(meanSquaredErrorAggregate)); // root of the epoch's summed squared errors
            }
        }
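The inner loops implement the delta rule with momentum for the linear output layer: Δw_jk(t) = -η · δ_j · y_k + μ · Δw_jk(t-1), where δ_j is the j-th output error, y_k the k-th radial neuron's output, η the learning rate, and μ the momentum. Only the linear weights are trained here; the radial centers and widths stay fixed after initLayers. Note also that, despite their names, MeanSquaredErrors and TestMeanSquaredErrors store the root of the summed squared errors per epoch rather than a mean.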