Example #1
 /// <summary>
 /// Deserialization constructor.
 /// </summary>
 /// <param name="info">Serialization store holding the saved field values.</param>
 /// <param name="ctxt">Streaming context (not used here).</param>
 public NeuralNetworkInfo(SerializationInfo info, StreamingContext ctxt)
 {
     //Get the values from info and assign them to the appropriate properties
     m_DomainName = (string)info.GetValue("Domain", typeof(string));
     m_NetworkId  = (Guid)info.GetValue("NetworkID", typeof(Guid));
     m_Type       = (string)info.GetValue("Type", typeof(string));
     m_UserName   = (string)info.GetValue("UserName", typeof(string));
     m_UserId     = (Guid)info.GetValue("UserID", typeof(Guid));
     m_SketchName = (string)info.GetValue("SketchName", typeof(string));
     m_SketchId   = (Guid)info.GetValue("SketchID", typeof(Guid));
     m_DateTime   = (DateTime)info.GetValue("DateTime", typeof(DateTime));
     m_NetworkTrainingErrorValues = (Dictionary <int, double>)info.GetValue("TrainingError", typeof(Dictionary <int, double>));
     m_Teacher            = (string)info.GetValue("Teacher", typeof(string));
     m_NumTrainingEpochs  = (int)info.GetValue("NumTrainingEpochs", typeof(int));
     m_LearningRate       = (double)info.GetValue("LearningRate", typeof(double));
     m_Momentum           = (double)info.GetValue("Momentum", typeof(double));
     m_NumInputs          = (int)info.GetValue("NumInputs", typeof(int));
     m_Layers             = (int[])info.GetValue("Layers", typeof(int[]));
     m_ActivationFunction = (IActivationFunction)info.GetValue("ActivationFunction", typeof(IActivationFunction));
     m_LearningMethod     = (ISupervisedLearning)info.GetValue("LearningMethod", typeof(ISupervisedLearning));
     try
     {
         m_FeatureNames    = (List <string>)info.GetValue("FeatureNames", typeof(List <string>));
         m_OutputTypeNames = (List <string>)info.GetValue("OutputClassNames", typeof(List <string>));
     }
     catch (Exception e)
     {
         Console.WriteLine("You're using an old network that doesn't have some variables saved...but it's probably okay.");
         Console.WriteLine(e.Message);
     }
 }
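
A type deserialized through this constructor would normally implement ISerializable and provide the matching GetObjectData. Below is a minimal counterpart sketch, assuming only the field/key pairs visible above; it is not taken from the original source.

 public void GetObjectData(SerializationInfo info, StreamingContext ctxt)
 {
     // Mirror of the constructor above: store each field under the key it reads back.
     info.AddValue("Domain", m_DomainName);
     info.AddValue("NetworkID", m_NetworkId);
     info.AddValue("Type", m_Type);
     info.AddValue("UserName", m_UserName);
     info.AddValue("UserID", m_UserId);
     info.AddValue("SketchName", m_SketchName);
     info.AddValue("SketchID", m_SketchId);
     info.AddValue("DateTime", m_DateTime);
     info.AddValue("TrainingError", m_NetworkTrainingErrorValues);
     info.AddValue("Teacher", m_Teacher);
     info.AddValue("NumTrainingEpochs", m_NumTrainingEpochs);
     info.AddValue("LearningRate", m_LearningRate);
     info.AddValue("Momentum", m_Momentum);
     info.AddValue("NumInputs", m_NumInputs);
     info.AddValue("Layers", m_Layers);
     info.AddValue("ActivationFunction", m_ActivationFunction);
     info.AddValue("LearningMethod", m_LearningMethod);
     info.AddValue("FeatureNames", m_FeatureNames);
     info.AddValue("OutputClassNames", m_OutputTypeNames);
 }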
        public MusicAnalyzer(List <string> contexts)
        {
            // _storedSongs = new double[MaxSongsAtOnce][];
            // _storedTypes = new int[MaxSongsAtOnce][];
            _allContexts = contexts;
            // _currentIndex = 0;

            // Earlier sizing experiment (kept for reference):
            // _network = new ActivationNetwork(new SigmoidFunction(), InputsSize, (int) ((InputsSize * (2.0/3.0)) + contexts.Count), (int)((InputsSize * (1.0 / 3.0)) + contexts.Count), contexts.Count);

            _network = new ActivationNetwork(new SigmoidFunction(), InputsSize, contexts.Count, contexts.Count);
            // _network = new ActivationNetwork(new SigmoidFunction(), InputsSize, contexts.Count * 3, contexts.Count, contexts.Count);
            _network.Randomize();

            _learning = new BackPropagationLearning(_network)
            {
                LearningRate = 0.2,
                Momentum     = 0.5
            };


            _standard = new Standardizer();

            // _learning = new ParallelResilientBackpropagationLearning(_network);

            // learning.RunEpoch()

            //_teacher = new MultilabelSupportVectorLearning<Linear>()
            //{
            //    Learner = (p) => new SequentialMinimalOptimization<Linear>()
            //    {
            //        Complexity = 100.0
            //    }
            //};
        }
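
For orientation, here is a hedged sketch of how the fields initialized above would typically be driven. The method name, argument shapes, and stopping threshold are assumptions; RunEpoch itself is the standard AForge entry point on BackPropagationLearning.

        // Hypothetical training loop (not part of the original class): inputs are
        // standardized feature vectors, outputs are one-hot vectors over _allContexts.
        private void TrainSketch(double[][] inputs, double[][] outputs, int maxEpochs)
        {
            for (int epoch = 0; epoch < maxEpochs; epoch++)
            {
                // One backpropagation pass over the whole set; returns the summed squared error.
                double error = _learning.RunEpoch(inputs, outputs);
                if (error < 0.01) break; // assumed stopping threshold
            }
        }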
        /// <summary>
        /// Initializes the member network and the member teacher.
        /// </summary>
        /// <param name="network">Activation network to train.</param>
        /// <param name="info">Network info carrying the teacher name and learning parameters.</param>
        private void SetUpNetwork(ActivationNetwork network, NeuralNetworkInfo info)
        {
            string teacher = info.Teacher;

            m_Network = network;

            if (teacher == "BackPropagation")
            {
                BackPropagationLearning learner = new BackPropagationLearning(m_Network);
                learner.LearningRate = info.LearningRate;
                learner.Momentum     = info.Momentum;
                m_Teacher            = learner;
            }
            else if (teacher == "Perceptron")
            {
                PerceptronLearning learner = new PerceptronLearning(m_Network);
                learner.LearningRate = info.LearningRate;
                m_Teacher            = learner;
            }
            else if (teacher == "DeltaRule")
            {
                DeltaRuleLearning learner = new DeltaRuleLearning(m_Network);
                learner.LearningRate = info.LearningRate;
                m_Teacher            = learner;
            }
            else
            {
                BackPropagationLearning learner = new BackPropagationLearning(m_Network);
                learner.LearningRate = info.LearningRate;
                learner.Momentum     = info.Momentum;
                m_Teacher            = learner;
            }
        }
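
The chain above repeats the backpropagation branch for the default case. An equivalent sketch using a switch with object initializers, same behavior and types:

            switch (teacher)
            {
                case "Perceptron":
                    m_Teacher = new PerceptronLearning(m_Network) { LearningRate = info.LearningRate };
                    break;
                case "DeltaRule":
                    m_Teacher = new DeltaRuleLearning(m_Network) { LearningRate = info.LearningRate };
                    break;
                default: // "BackPropagation" and anything unrecognized
                    m_Teacher = new BackPropagationLearning(m_Network)
                    {
                        LearningRate = info.LearningRate,
                        Momentum     = info.Momentum
                    };
                    break;
            }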
Example #4
        private void TrainNetworkS(TrainSet trainSet, ISupervisedLearning teacher)
        {
            double[] sides;     // filled from the environment each sample
            double[] direction; // unit vector derived from the decision's direction
            double[] input  = new double[5];
            double[] output = new double[4];

            double error = 10;    // holds the last sample's error from teacher.Run
            int    epoch = 10000; // fixed epoch budget

            while (epoch-- > 0)
            {
                for (int s = 0; s < trainSet.Situation.Count; s++)
                {
                    sides     = SimplifyEnvironment(trainSet.Situation[s].Environment);
                    direction = VectorFromDirection(trainSet.Decision[s].Direction);

                    // INPUT
                    input[0] = trainSet.Situation[s].ColonyPopulation;
                    input[1] = sides[0]; // UP
                    input[2] = sides[1]; // RIGHT
                    input[3] = sides[2]; // DOWN
                    input[4] = sides[3]; // LEFT

                    // OUTPUT
                    output[0] = trainSet.Decision[s].IsLeaving ? 1 : -1;
                    output[1] = trainSet.Decision[s].PopulationToMove;
                    output[2] = direction[0]; // X
                    output[3] = direction[1]; // Y

                    error = teacher.Run(input, output);
                }
                Debug.Print(error.ToString()); // last sample's error only; see the epoch-level sketch below
            }
        }
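
Because `error` only ever holds the last sample's result, the printed value is a noisy proxy for epoch progress. ISupervisedLearning also exposes RunEpoch, which trains on whole arrays and returns a per-epoch error. A sketch, with the array-building helpers left as named assumptions:

        // Sketch: batch the samples once, then let RunEpoch report a per-epoch error.
        // BuildInputs/BuildOutputs are hypothetical helpers applying the same
        // SimplifyEnvironment/VectorFromDirection mapping as the loop above.
        private void TrainNetworkEpochs(TrainSet trainSet, ISupervisedLearning teacher, int epochs)
        {
            double[][] inputs  = BuildInputs(trainSet);  // hypothetical helper
            double[][] outputs = BuildOutputs(trainSet); // hypothetical helper

            for (int epoch = 0; epoch < epochs; epoch++)
            {
                double epochError = teacher.RunEpoch(inputs, outputs);
                Debug.Print(epochError.ToString());
            }
        }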
 /// <summary>
 /// Deserialization constructor.
 /// </summary>
 /// <param name="info">Serialization store holding the saved field values.</param>
 /// <param name="ctxt">Streaming context (not used here).</param>
 public NeuralNetwork(SerializationInfo info, StreamingContext ctxt)
 {
     //Get the values from info and assign them to the appropriate properties
     m_Info    = (NeuralNetworkInfo)info.GetValue("NeuralNetworkInfo", typeof(NeuralNetworkInfo));
     m_Network = (ActivationNetwork)info.GetValue("Network", typeof(ActivationNetwork));
     m_Teacher = (ISupervisedLearning)info.GetValue("Teacher", typeof(ISupervisedLearning));
     m_Trained = (bool)info.GetValue("Trained", typeof(bool));
 }
    public void InitNetwork()
    {
        // network has 3 inputs, a config-driven number of hidden nodes, and 7 outputs
        this.network = new ActivationNetwork(new BipolarSigmoidFunction(ALPHA),
                                             3, Config.Instance.node["num_hidden_nodes"].AsInt, 7);

        this.teacher = new ParallelResilientBackpropagationLearning(this.network);
    }
 /// <summary>
 /// Initializes a new instance of the <see cref="T:DriverTracker.Domain.PickupPrediction"/> class.
 /// </summary>
 /// <param name="locationClustering">Location clustering service.</param>
 /// <param name="legRepository">Leg repository.</param>
 /// <param name="geocodingDbSync">Geocoding database synchronization service.</param>
 /// <param name="logisticRegressionAnalysis">Object that can perform logistic regression analysis.</param>
 public PickupPrediction(ILocationClustering locationClustering, ILegRepository legRepository,
                         IGeocodingDbSync geocodingDbSync,
                         ISupervisedLearning <LogisticRegression, double[], double> logisticRegressionAnalysis)
 {
     _locationClustering         = locationClustering;
     _legRepository              = legRepository;
     _geocodingDbSync            = geocodingDbSync;
     _logisticRegressionAnalysis = logisticRegressionAnalysis;
 }
Example #9
        private void createAlgorithms()
        {
            if (layerCount == 1)
            {
                algorithm = configure(network.Machines[layerIndex], layerIndex);
            }
            else
            {
                var machines = new RestrictedBoltzmannMachine[layerCount];
                for (int i = 0; i < machines.Length; i++)
                {
                    machines[i] = network.Machines[i + layerIndex];
                }
                int inputsCount = machines[0].InputsCount;

                algorithm = configure(new DeepBeliefNetwork(inputsCount, machines), layerIndex);
            }
        }
Example #10
 public BackpropagationTrainer(NeuralNetwork network, int type, double learningRate)
 {
     if (type == resilient)
     {
         teacher = new ResilientBackpropagationLearning(network.ActivationNetwork)
         {
             LearningRate = learningRate
         };
     }
     //TODO: find a way to implement "learningRate" with PRBL network!
     else if (type == parallelResilient)
     {
         teacher = new ParallelResilientBackpropagationLearning(network.ActivationNetwork);
     }
     else
     {
         teacher = new BackPropagationLearning(network.ActivationNetwork)
         {
             LearningRate = learningRate
         };
     }
 }
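
To show how the configured `teacher` would then be exercised, here is a hypothetical epoch loop over AForge's RunEpoch; nothing below exists in the original trainer.

 // Hypothetical training method (assumed, not in the original class).
 public double Train(double[][] inputs, double[][] outputs, int maxEpochs, double targetError)
 {
     double error = double.MaxValue;
     for (int i = 0; i < maxEpochs && error > targetError; i++)
     {
         // RunEpoch trains on the whole set and returns the summed error.
         error = teacher.RunEpoch(inputs, outputs);
     }
     return error;
 }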
Example #11
 public NeuralNetworkInfo(NeuralNetworkInfo info)
 {
     m_ActivationFunction         = info.m_ActivationFunction;
     m_DomainName                 = info.m_DomainName;
     m_FeatureNames               = info.m_FeatureNames;
     m_Layers                     = info.m_Layers;
     m_LearningMethod             = info.m_LearningMethod;
     m_LearningRate               = info.m_LearningRate;
     m_Momentum                   = info.m_Momentum;
     m_NetworkId                  = Guid.NewGuid();
     m_NetworkTrainingErrorValues = new Dictionary <int, double>();
     m_NumInputs                  = info.m_NumInputs;
     m_NumTrainingEpochs          = info.m_NumTrainingEpochs;
     m_OutputTypeNames            = info.m_OutputTypeNames;
     m_SketchId                   = info.m_SketchId;
     m_SketchName                 = info.m_SketchName;
     m_Teacher                    = info.m_Teacher;
     m_Type     = info.m_Type;
     m_UserId   = info.m_UserId;
     m_UserName = info.m_UserName;
 }
        private void Train(IEnumerable <string> confusionSet, string[] features, List <RoughSample> trainingData)
        {
            var cloudClassifiers = new Dictionary <string, ISupervisedLearning[]>(confusionSet.Count());

            foreach (var word in confusionSet)
            {
                cloudClassifiers[word] = new ISupervisedLearning[1]
                {
                    new Winnow.Winnow(featuresCount: features.Length, threshold: 1, promotion: 1.5, demotion: 0.5, initialWeight: 1)
                };
            }

            Parallel.ForEach(trainingData, sample =>
            {
                var positive = new Sample
                {
                    Class    = true,
                    Features = CreateFeaturesVector(sample.Features, features)
                };

                Sample negative = positive.ToggleClass();

                foreach (var cloud in cloudClassifiers)
                {
                    var example = cloud.Key == sample.Word ? positive : negative;

                    foreach (var classifier in cloud.Value)
                    {
                        lock (_lock) // one global lock serializes all training; per-classifier locks would restore parallelism
                        {
                            classifier.Train(example);
                        }
                    }
                }
            });

            _comparators.Add(new Comparator(cloudClassifiers, features));

            Console.WriteLine("Training done for " + confusionSet.Aggregate((a, b) => a + "," + b) + " " + DateTime.Now);
        }
        public DiscreteNeuralNetworkByChord(List<NGram<Chord>[]> bad, List<NGram<Chord>[]> okay, List<NGram<Chord>[]> good, IActivationFunction function)
        {
            bad.NullCheck();
            okay.NullCheck();
            good.NullCheck();

            bad.Any().AssertTrue();
            okay.Any().AssertTrue();
            good.Any().AssertTrue();

            List<Tuple<double[], double[]>> input = new List<Tuple<double[], double[]>>(bad.Count + okay.Count + good.Count);

            input.AddRange(
                bad.Select(x => new Tuple<double[], double[]>(
                    x.SelectMany(y => y.SelectMany(p => ConvertChordIntoTrainingInput(p))).ToArray(),
                    Enumerable.Repeat<double>(DiscreteNeuralNetworkByChord.BADWEIGHT, bad.Count).ToArray())));

            input.AddRange(
                okay.Select(x => new Tuple<double[], double[]>(
                    x.SelectMany(y => y.SelectMany(p => ConvertChordIntoTrainingInput(p))).ToArray(),
                    Enumerable.Repeat<double>(OkayWeight, okay.Count).ToArray())));

            input.AddRange(
                good.Select(x => new Tuple<double[], double[]>(
                    x.SelectMany(y => y.SelectMany(p => ConvertChordIntoTrainingInput(p))).ToArray(),
                    Enumerable.Repeat<double>(DiscreteNeuralNetworkByChord.GOODWEIGHT, good.Count).ToArray())));

            this.Max = input.Max(x => x.Item1.Max());
            int minIndex = input.Min(x => x.Item1.Length);

            // Inputs are truncated to the shortest example and scaled by Max; the targets
            // are truncated with the same count, so this assumes each target vector is at
            // least minIndex long.
            var normalized = input.Select(item => Tuple.Create(item.Item1.Take(minIndex).Select(x => x / this.Max).ToArray(), item.Item2.Take(minIndex).ToArray())).ToArray();

            this.trainingData = normalized.ToArray();

            this.ActivationNetwork = new ActivationNetwork(function, this.trainingData.Max(y => y.Item1.Length), (HiddenLayerSize == 0) ? 23 : HiddenLayerSize, 1);
            this.LearningMethod = new ResilientBackpropagationLearning(this.ActivationNetwork);
            this.ActivationNetwork.Randomize();
        }
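
A hedged sketch of how the stored trainingData and LearningMethod would typically be consumed; the epoch budget and error threshold are assumptions, RunEpoch is AForge's standard batch call.

        // Hypothetical training entry point: unzip the stored tuples and run epochs.
        public void TrainSketch(int maxEpochs, double targetError)
        {
            double[][] inputs  = this.trainingData.Select(t => t.Item1).ToArray();
            double[][] outputs = this.trainingData.Select(t => t.Item2).ToArray();

            for (int i = 0; i < maxEpochs; i++)
            {
                // ResilientBackpropagationLearning.RunEpoch returns the summed error.
                if (this.LearningMethod.RunEpoch(inputs, outputs) < targetError)
                {
                    break;
                }
            }
        }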
Example #15
        void Process(int learnIterations, double desiredError,
                ActivationNetwork net, ISupervisedLearning learning,
                double[][] input, double[][] desiredOutput,
                ErrorCalculator errorCalculator)
        {
            for (int i = 0; i < learnIterations; i++) {
                double[][] result = Compute(net, input);
                double error = errorCalculator.CalculateEpoch(desiredOutput, result);
                Console.WriteLine("{0}\t{1}", i, error);

                if (error < desiredError) break;
                learning.RunEpoch(input, desiredOutput);
            }
        }
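
ErrorCalculator is project-specific and not shown. A plausible stand-in that computes the mean squared error over an epoch, offered purely as an assumption about its contract:

        // Hypothetical stand-in for the project's ErrorCalculator.
        class ErrorCalculator
        {
            public double CalculateEpoch(double[][] desired, double[][] actual)
            {
                double sum = 0;
                int count = 0;
                for (int i = 0; i < desired.Length; i++)
                {
                    for (int j = 0; j < desired[i].Length; j++)
                    {
                        double d = desired[i][j] - actual[i][j];
                        sum += d * d;
                        count++;
                    }
                }
                return sum / count; // mean squared error across all outputs
            }
        }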
            public void Learn()
            {
                try
                {
                    //if (object.Equals(networkStruct, null)) { networkStruct = GetLayersStruct(LayersStruct); }

                    if (Equals(networkStruct, null))
                    {
                        return;
                    }

                    if (ANN_InputsCount == -1 || ANN_OuputsCount == -1)
                    {
                        return;
                    }

                    if (Equals(ActiveFunction_Params, null) || ActiveFunction_Params.Length < 1)
                    {
                        throw new Exception("No activation function parameters are specified!!!");
                    }


                    if (Equals(LearningAlgorithm_Params, null))
                    {
                        throw new Exception("No learning algorithm parameters are specified !!!");
                    }

                    // create neural network
                    // Network = new ActivationNetwork(new SigmoidFunction(1),mInputsCount, networkStruct);
                    //Network = new ActivationNetwork(new BipolarSigmoidFunction(2), mInputsCount, networkStruct);
                    //2  :  two inputs in the network
                    // 2  : two neurons in the first layer
                    // 1  : one neuron in the second layer

                    switch (ActivationFunction)
                    {
                    case ActivationFunctionEnum.LinearFunction:

                        Network = new ActivationNetwork(new LinearFunction(ActiveFunction_Params[0]), ANN_InputsCount, networkStruct);
                        break;

                    case ActivationFunctionEnum.SigmoidFunction:
                        Network = new ActivationNetwork(new SigmoidFunction(ActiveFunction_Params[0]), ANN_InputsCount, networkStruct);
                        break;

                    case ActivationFunctionEnum.BipolarSigmoidFunction:
                        Network = new ActivationNetwork(new BipolarSigmoidFunction(ActiveFunction_Params[0]), ANN_InputsCount, networkStruct);
                        break;

                    default:
                        Network = new ActivationNetwork(new SigmoidFunction(ActiveFunction_Params[0]), ANN_InputsCount, networkStruct);
                        break;
                    }

                    // create teacher
                    ISupervisedLearning teacher = null;
                    //LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(Network);
                    //BackPropagationLearning teacher = new BackPropagationLearning(Network);
                    // EvolutionaryLearning teacher = new EvolutionaryLearning(Network, 25);

                    switch (LearningAlgorithm)
                    {
                    case LearningAlgorithmEnum.BackPropagationLearning:
                        if (LearningAlgorithm_Params.Length < 2)
                        {
                            throw new Exception("Not enough learning algorithm parameters are specified!!!");
                        }

                        teacher = new BackPropagationLearning(Network)
                        {
                            LearningRate = LearningAlgorithm_Params[0],
                            Momentum     = LearningAlgorithm_Params[1]
                        };
                        break;

                    case LearningAlgorithmEnum.LevenbergMarquardtLearning:
                        if (LearningAlgorithm_Params.Length < 2)
                        {
                            throw new Exception("Not enough learning algorithm parameters are specified!!!");
                        }

                        teacher = new LevenbergMarquardtLearning(Network)
                        {
                            LearningRate      = LearningAlgorithm_Params[0],
                            Adjustment        = LearningAlgorithm_Params[1],
                            UseRegularization = false
                        };
                        break;

                    case LearningAlgorithmEnum.BayesianLevenbergMarquardtLearning:
                        throw new NotImplementedException("The implementation is not finished yet.");

                        if (LearningAlgorithm_Params.Length < 4)
                        {
                            throw new Exception("No activation function parameterss are specified !!!");
                        }

                        teacher = new LevenbergMarquardtLearning(Network);
                        var teacherBLM = (LevenbergMarquardtLearning)teacher;
                        teacherBLM.UseRegularization = true;
                        teacherBLM.LearningRate      = LearningAlgorithm_Params[0];
                        teacherBLM.Adjustment        = LearningAlgorithm_Params[1];
                        teacherBLM.Alpha             = LearningAlgorithm_Params[2];
                        teacherBLM.Beta = LearningAlgorithm_Params[3];
                        teacher         = teacherBLM;
                        break;

                    case LearningAlgorithmEnum.EvolutionaryLearningGA:
                        if (LearningAlgorithm_Params.Length < 1)
                        {
                            throw new Exception("Not enough learning algorithm parameters are specified!!!");
                        }

                        teacher = new EvolutionaryLearning(Network, (int)LearningAlgorithm_Params[0]);
                        break;

                    case LearningAlgorithmEnum.RGA_Learning:
                        throw new NotImplementedException();
                        // teacher = new RGA_Learning(Network, EOA_PopulationSize, RGA_MutationPhrequency);

                    case LearningAlgorithmEnum.GSA_Learning:
                        throw new NotImplementedException();
                        // teacher = new GSA_Learning(Network, EOA_PopulationSize, MaxIteration, GSA_Go, GSA_Alpha);
                        break;

                    case LearningAlgorithmEnum.GWO_Learning:
                        throw new NotImplementedException();
                        // teacher = new GWO_Learning(Network, EOA_PopulationSize, MaxIteration, GWO_Version, IGWO_uParameter);
                        break;

                    case LearningAlgorithmEnum.HPSOGWO_Learning:
                        throw new NotImplementedException();
                        //teacher = new HPSOGWO_Learning(Network, EOA_PopulationSize, MaxIteration, HPSOGWO_C1, HPSOGWO_C2, HPSOGWO_C3);
                        break;

                    case LearningAlgorithmEnum.mHPSOGWO_Learning:
                        throw new NotImplementedException();
                        //teacher = new HPSOGWO_Learning(Network, EOA_PopulationSize, MaxIteration, HPSOGWO_C1, HPSOGWO_C2, HPSOGWO_C3);
                        break;

                    case LearningAlgorithmEnum.PSOGSA_Learning:
                        if (Equals(LearningAlgorithm_Params, null) || LearningAlgorithm_Params.Length < 6)
                        {
                            throw new Exception("Not enough learning algorithm parameters are specified!!!");
                        }

                        teacher = new PSOGSA_Learning(Network, (int)LearningAlgorithm_Params[0], (int)LearningAlgorithm_Params[1], (int)LearningAlgorithm_Params[2], (int)LearningAlgorithm_Params[3], (int)LearningAlgorithm_Params[4], (int)LearningAlgorithm_Params[5]);
                        break;
                    }

                    bool needToStop = false;
                    IterationCounter = 0;
                    double error = double.NaN;

                    // loop
                    while (!needToStop)
                    {
                        // run epoch of learning procedure
                        error = teacher.RunEpoch(mTraining_Inputs, mTraining_Outputs);

                        IterationCounter += 1;

                        // check error value to see if we need to stop
                        // ...
                        //Console.WriteLine(error);

                        if (error <= mTeachingError || IterationCounter >= MaxIteration)
                        {
                            needToStop = true;
                        }
                    }

                    FinalTeachingErr = error;
                    //----------------------------------
                    switch (LearningAlgorithm)
                    {
                    case LearningAlgorithmEnum.GSA_Learning:
                        throw new NotImplementedException();

                        //GSA_Learning gsaL = (GSA_Learning)teacher;
                        //this.BestChart = gsaL.Best_Chart;
                        //this.BestWeights = gsaL.BestSolution;

                        //Set best weights parameters to the network:
                        //SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.HPSOGWO_Learning:
                        throw new NotImplementedException();

                        //HPSOGWO_Learning hpgwoL = (HPSOGWO_Learning)teacher;
                        //this.BestChart = hpgwoL.Best_Chart;
                        //this.BestWeights = hpgwoL.BestSolution;

                        //Set best weights parameters to the network:
                        //SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.mHPSOGWO_Learning:
                        throw new NotImplementedException();

                        //HPSOGWO_Learning hpsgwoL = (HPSOGWO_Learning)teacher;
                        //this.BestChart = hpsgwoL.Best_Chart;
                        //this.BestWeights = hpsgwoL.BestSolution;

                        //Set best weights parameters to the network:
                        //SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.GWO_Learning:
                        throw new NotImplementedException();

                        //GWO_Learning gwoL = (GWO_Learning)teacher;
                        //this.BestChart = gwoL.Best_Chart;
                        //this.BestWeights = gwoL.BestSolution;

                        //Set best weights parameters to the network:
                        //SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.RGA_Learning:
                        throw new NotImplementedException();
                        //RGA_Learning rgaL = (RGA_Learning)teacher;
                        //this.BestChart = rgaL.Best_Chart;
                        //this.BestWeights = rgaL.BestSolution;

                        //Set best weights parameters to the network:
                        //SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.PSOGSA_Learning:
                        PSOGSA_Learning psogsaL = (PSOGSA_Learning)teacher;
                        BestChart   = psogsaL.Best_Chart;
                        BestWeights = psogsaL.BestSolution;

                        //Set best weights parameters to the network:
                        SetBestWeightsToTheNetwork();
                        break;
                    }
                }
                catch (Exception)
                {
                    throw; // rethrow, preserving the original stack trace
                }
            }
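
A hedged driver sketch for the method above, using only member names visible in it; whether these are publicly settable, and the `trainer` instance itself, are assumptions.

            // Hypothetical configuration before calling Learn():
            trainer.ActivationFunction = ActivationFunctionEnum.SigmoidFunction;
            trainer.ActiveFunction_Params = new double[] { 2.0 };
            trainer.LearningAlgorithm = LearningAlgorithmEnum.BackPropagationLearning;
            trainer.LearningAlgorithm_Params = new double[] { 0.1, 0.0 }; // learning rate, momentum
            trainer.Learn();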
        // Create the network to be trained and its teacher from the topology
        private void createLearn(int[] topology)
        {
            if (this.alphaBox.Text != "")
                activationFunc = new BipolarSigmoidFunction(Double.Parse(this.alphaBox.Text));
            else
                activationFunc = new BipolarSigmoidFunction();

            network = new ActivationNetwork(activationFunc,
                                            colCountData - 1, topology);
            //ActivationLayer layer = network.Layers[0] as ActivationLayer;

            NguyenWidrow initializer = new NguyenWidrow(network);
            initializer.Randomize();
            // create teacher
            GeneticLearning genetic = new GeneticLearning(network, chromosomes);
            teacher = genetic;
        }
Example #18
        public override void DoTraining(DocumentSetCaseCollectionSet trainingSet, classifierTools tools, ILogBuilder logger)
        {
            var state = states.SetState(trainingSet, GetExperimentSufix());


            if (activationFunction == null)
            {
                activationFunction = new BipolarSigmoidFunction(setup.neuralnetwork.alpha);
            }

            var neurons = setup.neuralnetwork.HiddenLayersNeuronCounts.ToList();

            if (neurons.Count > 7)
            {
                throw new aceGeneralException("At current implementation NN with [" + neurons.Count + "] hidden layers is not allowed.", null, this, "Too many hidden layers");
            }

            // Layer sizes: hidden layers first, then the output layer. ActivationNetwork
            // takes them as params int[], which replaces the eight-way switch.
            int[] layerSizes = neurons.Concat(new[] { state.data.NumberOfClasses }).ToArray();
            ActivationNetwork machine = new ActivationNetwork(new BipolarSigmoidFunction(setup.neuralnetwork.alpha), state.data.NumberOfInputs, layerSizes);

            new NguyenWidrow(machine).Randomize();
            state.machine = machine;

            // BackPropagationLearning teacher = new BackPropagationLearning(machine);
            LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(machine);

            teacher.LearningRate = setup.neuralnetwork.learningRate;

            var outputs = Accord.Statistics.Tools.Expand(state.data.outputs, state.data.NumberOfClasses, -1, 1);
            //teacher.Momentum = momentum;
            Int32  itOfSameError        = 0;
            Int32  itOfSameErrorLimit   = setup.neuralnetwork.learningIterationsMax / 10;
            Double errorSignificantSpan = setup.neuralnetwork.errorLowerLimit * setup.neuralnetwork.errorLowerLimit;

            for (int i = 0; i < setup.neuralnetwork.learningIterationsMax; i++)
            {
                double error = teacher.RunEpoch(state.data.inputs, outputs);

                if (Math.Abs(error - state.errorRate) < errorSignificantSpan)
                {
                    itOfSameError++;
                }

                if (itOfSameError > itOfSameErrorLimit)
                {
                    logger.log("Stoping training in [" + i.ToString("D3") + "] because error rate had no significant change [" + errorSignificantSpan.ToString("F8") + "] in last [" + itOfSameError + "] iterations [" + error.ToString("F8") + "]");
                    break;
                }
                if (i % 10 == 0)
                {
                    logger.log("Learning Neural Network [" + i.ToString("D3") + "]  Error rate: " + error.ToString("F5"));
                }
                if (error < state.errorRate)
                {
                    state.errorRate = error;
                }
                if (error < setup.neuralnetwork.errorLowerLimit)
                {
                    break;
                }
            }
            if (teacherRef == null)
            {
                teacherRef = teacher;
            }
            state.SaveState();
        }
Example #19
 private void InitBrain()
 {
     this._activationNetwork = new ActivationNetwork(this._activationFunction, this._inputsCount, this._neuronsCount);
     new NguyenWidrow(this._activationNetwork).Randomize();
     this._teacher = new ParallelResilientBackpropagationLearning(this._activationNetwork);
 }
Example #20
 public OnlineSupervisedLearningProcessor(IMessageSource <SupervisedData> messageSource,
                                          ISupervisedLearning learner) : base(messageSource)
 {
     this.learner = learner;
 }
Example #21
            public void LaunchLearning()
            {
                try
                {
                    if (Equals(mTrainingInputs, null))
                    {
                        return;
                    }
                    if (Equals(mTrainingInputs.Data, null))
                    {
                        return;
                    }

                    if (Equals(mTrainingOutputs_DS1, null))
                    {
                        return;
                    }
                    if (Equals(mTrainingOutputs_DS1.Data, null))
                    {
                        return;
                    }

                    //StandardizeData(TrainingInputs,mActivationFunction);
                    //StandardizeData(TrainingOutputs_DS1, mActivationFunction);

                    StandardizeData(TrainingInputs);
                    StandardizeData(TrainingOutputs_DS1);

                    mTraining_Inputs  = Convert(mTrainingInputs);
                    mTraining_Outputs = Convert(mTrainingOutputs_DS1);

                    mInputsCount = mTrainingInputs.Data[0].List.Count();

                    int[] networkStruct = GetLayersStruct(LayersStruct);
                    if (Equals(networkStruct, null))
                    {
                        return;
                    }

                    // create neural network
                    // Network = new ActivationNetwork(new SigmoidFunction(1),mInputsCount, networkStruct);
                    //Network = new ActivationNetwork(new BipolarSigmoidFunction(2), mInputsCount, networkStruct);
                    //2  :  two inputs in the network
                    // 2  : two neurons in the first layer
                    // 1  : one neuron in the second layer

                    switch (mActivationFunction)
                    {
                    case ActivationFunctionEnum.SigmoidFunction:
                        Network = new ActivationNetwork(new SigmoidFunction(ActiveFunction_Alpha), mInputsCount, networkStruct);
                        break;

                    case ActivationFunctionEnum.BipolarSigmoidFunction:
                        Network = new ActivationNetwork(new BipolarSigmoidFunction(ActiveFunction_Alpha), mInputsCount, networkStruct);
                        break;

                    case ActivationFunctionEnum.RectifiedLinearFunction:
                        Network = new ActivationNetwork(new RectifiedLinearFunction(), mInputsCount, networkStruct);
                        break;

                    default:
                        Network = new ActivationNetwork(new SigmoidFunction(ActiveFunction_Alpha), mInputsCount, networkStruct);
                        break;
                    }

                    // create teacher
                    ISupervisedLearning teacher = null;

                    //LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(Network);
                    //BackPropagationLearning teacher = new BackPropagationLearning(Network);
                    // EvolutionaryLearning teacher = new EvolutionaryLearning(Network, 25);

                    switch (mLearningAlgorithm)
                    {
                    case LearningAlgorithmEnum.BackPropagationLearning:
                        teacher = new BackPropagationLearning(Network);
                        break;

                    case LearningAlgorithmEnum.LevenbergMarquardtLearning:
                        teacher = new LevenbergMarquardtLearning(Network);
                        break;

                    case LearningAlgorithmEnum.EvolutionaryLearningGA:
                        teacher = new EvolutionaryLearning(Network, EOA_PopulationSize);

                        break;

                    case LearningAlgorithmEnum.RGA_Learning:
                        teacher = new RGA_Learning(Network, EOA_PopulationSize, RGA_MutationPhrequency);
                        break;

                    case LearningAlgorithmEnum.GSA_Learning:
                        teacher = new GSA_Learning(Network, EOA_PopulationSize, MaxIteration, GSA_Go, GSA_Alpha);
                        break;

                    case LearningAlgorithmEnum.GWO_Learning:
                        teacher = new GWO_Learning(Network, EOA_PopulationSize, MaxIteration, GWO_Version, IGWO_uParameter);
                        break;

                    case LearningAlgorithmEnum.HPSOGWO_Learning:
                        teacher = new HPSOGWO_Learning(Network, EOA_PopulationSize, MaxIteration, HPSOGWO_C1, HPSOGWO_C2, HPSOGWO_C3);
                        break;

                    case LearningAlgorithmEnum.mHPSOGWO_Learning:
                        teacher = new HPSOGWO_Learning(Network, EOA_PopulationSize, MaxIteration, HPSOGWO_C1, HPSOGWO_C2, HPSOGWO_C3);
                        break;

                    case LearningAlgorithmEnum.PSOGSA_Learning:
                        teacher = new PSOGSA_Learning(Network, EOA_PopulationSize, MaxIteration, PSOGSA_Go, PSOGSA_Alpha, PSOGSA_C1, PSOGSA_C2);
                        break;
                    }

                    bool needToStop = false;
                    IterationCounter = 0;
                    double error = double.NaN;

                    // loop
                    while (!needToStop)
                    {
                        // run epoch of learning procedure
                        error = teacher.RunEpoch(mTraining_Inputs, mTraining_Outputs);

                        IterationCounter += 1;

                        // check error value to see if we need to stop
                        // ...
                        //Console.WriteLine(error);

                        if (error <= mTeachingError)
                        {
                            needToStop = true;
                        }

                        if (IterationCounter >= MaxIteration)
                        {
                            needToStop = true;
                        }
                    }

                    FinalTeachingErr = error;
                    //----------------------------------
                    switch (LearningAlgorithm)
                    {
                    case LearningAlgorithmEnum.GSA_Learning:
                        GSA_Learning gsaL = (GSA_Learning)teacher;
                        this.BestChart   = gsaL.Best_Chart;
                        this.BestWeights = gsaL.BestSolution;

                        //Set best weights parameters to the network:
                        SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.HPSOGWO_Learning:
                        HPSOGWO_Learning hpgwoL = (HPSOGWO_Learning)teacher;
                        this.BestChart   = hpgwoL.Best_Chart;
                        this.BestWeights = hpgwoL.BestSolution;

                        //Set best weights parameters to the network:
                        SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.mHPSOGWO_Learning:
                        HPSOGWO_Learning hpsgwoL = (HPSOGWO_Learning)teacher;
                        this.BestChart   = hpsgwoL.Best_Chart;
                        this.BestWeights = hpsgwoL.BestSolution;

                        //Set best weights parameters to the network:
                        SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.GWO_Learning:
                        GWO_Learning gwoL = (GWO_Learning)teacher;
                        this.BestChart   = gwoL.Best_Chart;
                        this.BestWeights = gwoL.BestSolution;

                        //Set best weights parameters to the network:
                        SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.RGA_Learning:
                        RGA_Learning rgaL = (RGA_Learning)teacher;
                        this.BestChart   = rgaL.Best_Chart;
                        this.BestWeights = rgaL.BestSolution;

                        //Set best weights parameters to the network:
                        SetBestWeightsToTheNetwork();
                        break;

                    case LearningAlgorithmEnum.PSOGSA_Learning:
                        PSOGSA_Learning psogsaL = (PSOGSA_Learning)teacher;
                        BestChart   = psogsaL.Best_Chart;
                        BestWeights = psogsaL.BestSolution;

                        //Set best weights parameters to the network:
                        SetBestWeightsToTheNetwork();
                        break;
                    }
                }
                catch (Exception)
                {
                    throw; // rethrow, preserving the original stack trace
                }
            }
        private static void trainNetwork(ActivationNetwork neuralNet, ISupervisedLearning teacher,
            double[][] input, double[][] output, double[][] crossValidationInput, char[] crossValidationDataLabels)
        {
            //Make the network learn the data
            DefaultLog.Info("Training the neural network . . .");
            double error;

            //TODO: Store the previous NUM_ITERATIONS_EQUAL_IMPLIES_PLATEAU networks so in the event of over-learning, we can return to the best one
            //Use the cross-validation data to notice if the network starts to over-learn the data.
            //Store the previous network (before training) and check if the performance drops on the cross-validation data
            MemoryStream prevNetworkStream = new MemoryStream();
            uint prevNetworkNumMisclassified = uint.MaxValue;
            Queue<uint> prevNetworksNumMisclassified = new Queue<uint>(NUM_ITERATIONS_EQUAL_IMPLIES_PLATEAU);
            //Initialise the queue to be full of uint.MaxValue
            for (int i = 0; i < NUM_ITERATIONS_EQUAL_IMPLIES_PLATEAU; i++)
            {
                prevNetworksNumMisclassified.Enqueue(prevNetworkNumMisclassified);
            }

            int iterNum = 1;
            do
            {
                //Perform an iteration of training (calls teacher.Run() for each item in the array of inputs/outputs provided)
                error = teacher.RunEpoch(input, output);

                //Progress update
                if (iterNum % ITERATIONS_PER_PROGRESS_UPDATE == 0)
                {
                    DefaultLog.Debug(String.Format("Learned for {0} iterations. Error: {1}", iterNum, error));
                }

                //Evaluate this network on the cross-validation data
                NeuralNetworkEvaluator crossValidationEvaluator = new NeuralNetworkEvaluator(neuralNet);
                crossValidationEvaluator.Evaluate(crossValidationInput, crossValidationDataLabels);
                uint networkNumMisclassified = crossValidationEvaluator.ConfusionMatrix.NumMisclassifications;
                DefaultLog.Debug(String.Format("Network misclassified {0} / {1} on the cross-validation data set", networkNumMisclassified,
                    crossValidationEvaluator.ConfusionMatrix.TotalClassifications));

                //Check if we've overlearned the data and performance on the cross-validation data has dropped off
                if (networkNumMisclassified > Stats.Mean(prevNetworksNumMisclassified)) //Use the mean of the number of misclassifications, as the actual number can move around a bit
                {
                    //Cross-Validation performance has dropped, reinstate the previous network & break
                    DefaultLog.Debug(String.Format("Network has started to overlearn the training data on iteration {0}. Using previous classifier.", iterNum));
                    prevNetworkStream.Position = 0; //Set head to start of stream
                    //Note: neuralNet is a by-value parameter, so the caller never sees this
                    //reinstated network (see the sketch after this method)
                    neuralNet = ActivationNetwork.Load(prevNetworkStream) as ActivationNetwork; //Read in the network
                    break;
                }

                //Clear the Memory Stream storing the previous network
                prevNetworkStream.SetLength(0);
                //Store this network & the number of characters it misclassified on the cross-validation data
                neuralNet.Save(prevNetworkStream);

                //This is now the previous network, update the number it misclassified
                prevNetworkNumMisclassified = networkNumMisclassified;
                prevNetworksNumMisclassified.Dequeue();
                prevNetworksNumMisclassified.Enqueue(prevNetworkNumMisclassified);

                //Check if the performance has plateaued
                if (prevNetworksNumMisclassified.Distinct().Count() == 1) //Allow for slight movement in performance here??
                {
                    //Cross-Validation performance has plateaued, use this network as the final one & break
                    DefaultLog.Debug(String.Format("Network performance on cross-validation data has plateaued on iteration {0}.", iterNum));
                    break;
                }

                //Check if we've performed the max number of iterations
                if (iterNum > MAX_LEARNING_ITERATIONS)
                {
                    DefaultLog.Debug(String.Format("Reached the maximum number of learning iterations ({0}), with error {1}", MAX_LEARNING_ITERATIONS, error));
                    break;
                }
                iterNum++;
            }
            while (error > LEARNED_AT_ERROR);

            DefaultLog.Info("Data learned to an error of {0}", error);
        }
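
Because `neuralNet` is a by-value parameter, the rollback inside the loop never reaches the caller. The usual fix is to return the network that should be used; a sketch of just that pattern, under the same types:

        // Sketch: isolate the rollback so the chosen network is returned rather than
        // assigned to a by-value parameter the caller never sees.
        private static ActivationNetwork ReinstateIfOverlearned(ActivationNetwork current,
            MemoryStream snapshot, bool overlearned)
        {
            if (!overlearned)
            {
                return current;
            }
            snapshot.Position = 0; //Read from the start of the stream
            return ActivationNetwork.Load(snapshot) as ActivationNetwork;
        }

trainNetwork would then return this result, and trainNetworksCompeteOnCrossValidation would adopt the returned reference.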
        //Train the network many times with different initial values, evaluate them on the cross-validation data, and select the best one
        private static ActivationNetwork trainNetworksCompeteOnCrossValidation(ActivationNetwork neuralNet, ISupervisedLearning teacher,
            double[][] input, double[][] output, double[][] crossValidationInput, char[] crossValidationDataLabels)
        {
            DefaultLog.Info("Training {0} neural networks & picking the one that performs best on the cross-validation data . . .",
                NUM_NETWORKS_TO_TRAIN_FOR_CROSS_VALIDATION_COMPETITION);

            MemoryStream bestNetworkStream = new MemoryStream();
            uint bestNetworkNumMisclassified = uint.MaxValue;

            for (int i = 0; i < NUM_NETWORKS_TO_TRAIN_FOR_CROSS_VALIDATION_COMPETITION; i++)
            {
                DefaultLog.Info("Training network {0}/{1}", (i + 1), NUM_NETWORKS_TO_TRAIN_FOR_CROSS_VALIDATION_COMPETITION);
                //Train a new network
                neuralNet.Randomize(); //Reset the weights to random values
                trainNetwork(neuralNet, teacher, input, output, crossValidationInput, crossValidationDataLabels);

                //Compare this new networks performance to our current best network
                NeuralNetworkEvaluator evaluator = new NeuralNetworkEvaluator(neuralNet);
                evaluator.Evaluate(crossValidationInput, crossValidationDataLabels);
                uint numMisclassified = evaluator.ConfusionMatrix.NumMisclassifications;

                if (numMisclassified < bestNetworkNumMisclassified)
                {
                    //This network performed better than our current best network, make this the new best

                    //Clear the Memory Stream storing the current best network
                    bestNetworkStream.SetLength(0);
                    //Save the network & update the best numMisclassified
                    neuralNet.Save(bestNetworkStream);
                    bestNetworkNumMisclassified = numMisclassified;
                }
            }

            DefaultLog.Info("Trained all networks and selected the best one");

            //Load up the network that performed best
            bestNetworkStream.Position = 0; //Read from the start of the stream
            ActivationNetwork bestNetwork = ActivationNetwork.Load(bestNetworkStream) as ActivationNetwork;
            return bestNetwork;
        }