public PlanogramOptimizerEncog(Item[] items, SimulationSettings simSettings, UpdateUINonRLMCallback updateUI = null, UpdateStatusCallback updateStatus = null, SimulationCsvLogger logger = null, bool anneal = true)
        {
            updateStatus?.Invoke("Initializing...");

            // Hold on to the collaborators used throughout the optimization run.
            this.simSettings  = simSettings;
            this.logger       = logger;
            this.updateUI     = updateUI;
            this.updateStatus = updateStatus;

            network = CreateNetwork();

            // The score object evaluates candidate planograms during training.
            planogramScore = new PlanogramScore
            {
                SimSettings = simSettings,
                Items       = items,
                UpdateUI    = updateUI,
                Logger      = logger
            };

            if (!anneal)
            {
                // Genetic algorithm: each new genome starts from a reset network.
                train = new MLMethodGeneticAlgorithm(() =>
                {
                    ((IMLResettable)network).Reset();
                    return network;
                }, planogramScore, 500);
            }
            else
            {
                // Simulated annealing; session-based simulations use a single cycle.
                // TODO: consider exposing the cycle count as a user setting.
                int cycles = (simSettings.SimType == SimulationType.Sessions) ? 1 : 10;
                train = new NeuralSimulatedAnnealing(network, planogramScore, 10, 2, cycles);
            }
        }
Example #2
0
        /// <summary>
        /// Train the network, using the specified training algorithm, and send the
        /// output to the console.
        /// </summary>
        /// <param name="train">The training method to use.</param>
        /// <param name="network">The network to train (not referenced directly; training state lives in <paramref name="train"/>).</param>
        /// <param name="trainingSet">The training set (not referenced directly).</param>
        /// <param name="seconds">The number of seconds to train for.</param>
        public static void TrainConsole(IMLTrain train, BasicNetwork network, IMLDataSet trainingSet, double seconds)
        {
            int    epoch = 1;
            double remaining;

            Console.WriteLine(@"Beginning training...");
            // Stopwatch is monotonic and immune to the Environment.TickCount
            // wrap-around (TickCount overflows Int32 after ~24.9 days of uptime).
            var timer = System.Diagnostics.Stopwatch.StartNew();

            do
            {
                train.Iteration();

                double elapsed = timer.Elapsed.TotalSeconds;
                remaining = seconds - elapsed;

                Console.WriteLine(@"Iteration #" + Format.FormatInteger(epoch)
                                  + @" Error:" + Format.FormatPercent(train.Error)
                                  + @" elapsed time = " + Format.FormatTimeSpan((int)elapsed)
                                  + @" time left = "
                                  + Format.FormatTimeSpan((int)remaining));
                epoch++;
            } while (remaining > 0 && !train.TrainingDone);
            train.FinishTraining();
        }
Example #3
0
    void Start()
    {
        // XOR truth table used as the training data.
        double[][] trainingInputs =
        {
            new[] { 0.0, 0.0 },
            new[] { 0.0, 1.0 },
            new[] { 1.0, 0.0 },
            new[] { 1.0, 1.0 }
        };

        double[][] idealOutputs =
        {
            new[] { 0.0 },
            new[] { 1.0 },
            new[] { 1.0 },
            new[] { 0.0 }
        };

        // Assemble the network: input layer, configurable hidden layers, output layer.
        basicNetwork = new BasicNetwork();
        basicNetwork.AddLayer(new BasicLayer(inputLayer));
        foreach (var hiddenSize in hiddenLayer)
        {
            basicNetwork.AddLayer(new BasicLayer(hiddenSize));
        }
        basicNetwork.AddLayer(new BasicLayer(outputLayer));
        basicNetwork.Structure.FinalizeStructure();
        basicNetwork.Reset();

        dataPairs = new BasicMLDataSet(trainingInputs, idealOutputs);

        // Resilient propagation trainer with explicit initial update / max step.
        backpropagation = new ResilientPropagation(basicNetwork, dataPairs, 0.4, 0.12);
    }
Example #4
0
        /// <summary>
        /// Train for up to the given number of minutes, reporting progress to the
        /// console and checkpointing the network and a resumable training
        /// continuation to disk after every iteration. Pressing any key stops
        /// training after the current iteration completes.
        /// </summary>
        /// <param name="train">The training method to use.</param>
        /// <param name="network">The network being trained; snapshotted to <paramref name="networkFile"/> each iteration.</param>
        /// <param name="trainingSet">The training set (not referenced directly; training state lives in <paramref name="train"/>).</param>
        /// <param name="minutes">How many minutes to train for.</param>
        /// <param name="networkFile">Destination file for the network snapshot.</param>
        /// <param name="trainFile">Destination file for the training continuation snapshot.</param>
        public static void MyTrainConsole(IMLTrain train, BasicNetwork network, IMLDataSet trainingSet, int minutes, FileInfo networkFile, FileInfo trainFile)
        {
            int  epoch = 1;
            long remaining;

            Console.WriteLine(@"Beginning training...");
            // NOTE(review): Environment.TickCount wraps after ~24.9 days of machine
            // uptime, which would corrupt the elapsed-time math; acceptable for
            // short console runs but worth confirming.
            long start = Environment.TickCount;

            do
            {
                train.Iteration();
                long current = Environment.TickCount;
                long elapsed = (current - start) / 1000;  // elapsed whole seconds
                remaining = minutes - elapsed / 60;       // integer division: whole minutes left
                Console.WriteLine($@"Iteration #{Format.FormatInteger(epoch)} Error:{Format.FormatPercent(train.Error)} elapsed time = {Format.FormatTimeSpan((int)elapsed)} time left = {Format.FormatTimeSpan((int)remaining * 60)}");
                epoch++;
                // Checkpoint: save the network, pause training to capture a
                // resumable continuation, persist it, then resume from it.
                EncogDirectoryPersistence.SaveObject(networkFile, network);
                TrainingContinuation cont = train.Pause();
                EncogDirectoryPersistence.SaveObject(trainFile, cont);
                train.Resume(cont);
                // Diagnostic dump: average of each continuation array.
                foreach (var x in cont.Contents)
                {
                    Console.WriteLine($"{x.Key}: {((double[])x.Value).Average()}");
                }
            }while (remaining > 0 && !train.TrainingDone && !Console.KeyAvailable);
            Console.WriteLine("Finishing.");
            train.FinishTraining();
        }
Example #5
0
        /// <summary>
        /// Build an ML method and trainer from factory descriptions, train on the
        /// XOR data down to 1% error, evaluate the result, and print the
        /// configuration that was used.
        /// </summary>
        public void Process(String methodName, String methodArchitecture, String trainerName, String trainerArgs,
                            int outputNeurons)
        {
            // Step 1: the machine learning method itself.
            IMLMethod method = new MLMethodFactory().Create(methodName, methodArchitecture, 2, outputNeurons);

            // Step 2: the XOR data set.
            IMLDataSet dataSet = new BasicMLDataSet(XORInput, XORIdeal);

            // Step 3: the trainer.
            IMLTrain train = new MLTrainFactory().Create(method, dataSet, trainerName, trainerArgs);

            // Attach a reset-on-stagnation strategy (500 cycles) when the method
            // supports resetting; Manhattan propagation is excluded.
            if (method is IMLResettable && !(train is ManhattanPropagation))
            {
                train.AddStrategy(new RequiredImprovementStrategy(500));
            }

            // Step 4: train to 1% error and evaluate.
            EncogUtility.TrainToError(train, 0.01);
            method = train.Method;
            EncogUtility.Evaluate((IMLRegression)method, dataSet);

            // Step 5: report what we did.
            Console.WriteLine(@"Machine Learning Type: " + methodName);
            Console.WriteLine(@"Machine Learning Architecture: " + methodArchitecture);

            Console.WriteLine(@"Training Method: " + trainerName);
            Console.WriteLine(@"Training Args: " + trainerArgs);
        }
Example #6
0
        /// <summary>
        ///     Perform the training.
        /// </summary>
        /// <param name="train">The training method.</param>
        /// <param name="method">The ML method.</param>
        /// <param name="trainingSet">The training set.</param>
        private void PerformTraining(IMLTrain train, IMLMethod method,
                                     IMLDataSet trainingSet)
        {
            ValidateNetwork.ValidateMethodToData(method, trainingSet);
            double desiredError = Prop.GetPropertyDouble(
                ScriptProperties.MlTrainTargetError);

            Analyst.ReportTrainingBegin();
            int iterationCap = Analyst.MaxIteration;

            if (train.ImplementationType == TrainingImplementationType.OnePass)
            {
                // One-pass trainers need exactly one iteration.
                train.Iteration();
                Analyst.ReportTraining(train);
            }
            else
            {
                // Iterate until the target error is reached, a stop is requested,
                // training reports completion, or the iteration cap (-1 = none)
                // is hit. Condition order preserved for short-circuit semantics.
                bool keepGoing;
                do
                {
                    train.Iteration();
                    Analyst.ReportTraining(train);
                    keepGoing = (train.Error > desiredError) &&
                                !Analyst.ShouldStopCommand() &&
                                !train.TrainingDone &&
                                ((iterationCap == -1) || (train.IterationNumber < iterationCap));
                } while (keepGoing);
            }
            train.FinishTraining();

            Analyst.ReportTrainingEnd();
        }
        /// <summary>
        /// Perform the training option: build the method and trainer from the
        /// configuration, train to the configured target error, persist the
        /// trained method, and print the configuration used.
        /// </summary>
        public void Train()
        {
            // Step 1: the machine learning method itself.
            IMLMethod method = new MLMethodFactory().Create(Config.MethodType, Config.MethodArchitecture, Config.InputWindow, 1);

            // Step 2: load the training data from the EGB file into memory.
            string trainPath = FileUtil.CombinePath(new FileInfo(_path), Config.FilenameTrain).ToString();
            IMLDataSet dataSet = EncogUtility.LoadEGB2Memory(new FileInfo(trainPath));

            // Step 3: the trainer.
            IMLTrain train = new MLTrainFactory().Create(method, dataSet, Config.TrainType, Config.TrainParams);

            // Attach a reset-on-stagnation strategy (500 cycles) when the method
            // supports resetting; Manhattan propagation is excluded.
            if (method is IMLResettable && !(train is ManhattanPropagation))
            {
                train.AddStrategy(new RequiredImprovementStrategy(500));
            }

            // Step 4: train to the configured error and persist the result.
            EncogUtility.TrainToError(train, Config.TargetError);
            method = train.Method;
            EncogDirectoryPersistence.SaveObject(FileUtil.CombinePath(new FileInfo(_path), Config.MethodName), method);

            // Step 5: report what we did.
            Console.WriteLine(@"Machine Learning Type: " + Config.MethodType);
            Console.WriteLine(@"Machine Learning Architecture: " + Config.MethodArchitecture);

            Console.WriteLine(@"Training Method: " + Config.TrainType);
            Console.WriteLine(@"Training Args: " + Config.TrainParams);
        }
Example #8
0
 /// <summary>
 ///     Forward a training report to every registered analyst listener.
 /// </summary>
 /// <param name="train">The trainer.</param>
 public void ReportTraining(IMLTrain train)
 {
     foreach (IAnalystListener subscriber in _listeners)
     {
         subscriber.ReportTraining(train);
     }
 }
Example #9
0
 /// <inheritdoc />
 public void Init(IMLTrain theTrain)
 {
     _train = theTrain;
     _calc  = (IMLError)theTrain.Method;
     _stop  = false;
     _lastCheck = 0;
     // Record the starting validation error as the baseline to improve on.
     _lastValidationError = _calc.CalculateError(_validationSet);
 }
Example #10
0
 /// <summary>
 /// Initialize this strategy: capture the trainer, require momentum support,
 /// and start with zero momentum.
 /// </summary>
 ///
 /// <param name="train_0">The training algorithm.</param>
 public void Init(IMLTrain train_0)
 {
     _train = train_0;
     // Will throw InvalidCastException if the trainer has no momentum support.
     _setter = (IMomentum)train_0;
     _ready = false;
     _currentMomentum = 0;
     _setter.Momentum = 0.0d;
 }
Example #11
0
 /// <summary>
 /// Initialize this strategy (obfuscated/decompiled variant): store the
 /// trainer, cast it to IMomentum, and zero the momentum state. Mirrors the
 /// non-obfuscated momentum-strategy Init elsewhere in this file.
 /// </summary>
 /// <param name="train_0">The training algorithm; must implement IMomentum
 /// (the cast throws InvalidCastException otherwise).</param>
 public void Init(IMLTrain train_0)
 {
     this._xd87f6a9c53c2ed9f = train_0;             // the trainer
     this._x6947f9fc231e17e8 = (IMomentum) train_0; // momentum setter
     this._x6c7711ed04d2ac90 = false;               // presumably the "ready" flag (cf. the readable variant) — confirm
     this._x6947f9fc231e17e8.Momentum = 0.0;        // start with no momentum
     this._xd02ba004f6c6d639 = 0.0;                 // current momentum value
 }
 /// <summary>
 /// Initialize this strategy. The trainer must be a <c>BasicTrainSOM</c>;
 /// any other trainer type is rejected.
 /// </summary>
 /// <param name="train">The training algorithm; must be a BasicTrainSOM.</param>
 public void Init(IMLTrain train)
 {
     basicTrainSOM = train as BasicTrainSOM;
     if (basicTrainSOM == null)
     {
         // Fixed typo in the error message ("shoud" -> "should").
         throw new ArgumentException(
             String.Format("Argument should be of {0} type.", typeof(BasicTrainSOM)), "train");
     }
 }
Example #13
0
 /// <summary>
 /// Initialize the reset strategy: the trained method must support
 /// IMLResettable so the strategy can reset it.
 /// </summary>
 /// <param name="train">The training algorithm.</param>
 public virtual void Init(IMLTrain train)
 {
     this._xd87f6a9c53c2ed9f = train;
     // Decompiler artifact removed: the original 'while' loop executed at most
     // once because its body threw unconditionally — it is a plain guard clause.
     if (!(train.Method is IMLResettable))
     {
         throw new TrainingError("To use the reset strategy the machine learning method must support MLResettable.");
     }
     this._x1306445c04667cc7 = (IMLResettable) this._xd87f6a9c53c2ed9f.Method;
 }
Example #14
0
        /// <summary>
        /// Train, using the specified training method, display progress to a dialog
        /// box.
        /// </summary>
        /// <param name="train">The training method to use; handed to the dialog, which drives the training.</param>
        /// <param name="network">The network to train (not referenced here; kept for signature compatibility).</param>
        /// <param name="trainingSet">The training set to use (not referenced here).</param>
        public static void TrainDialog(IMLTrain train,
                                       BasicNetwork network, IMLDataSet trainingSet)
        {
            // NOTE(review): the dialog is never disposed; if TrainingDialog is a
            // WinForms Form shown via ShowDialog(), it should be wrapped in a
            // 'using' — confirm the type before changing.
            var dialog = new TrainingDialog {
                Train = train
            };

            dialog.ShowDialog();
        }
 /// <inheritdoc/>
 public void Init(IMLTrain theTrain)
 {
     _train = theTrain;
     _calc  = (IMLError)theTrain.Method;
     // Seed both optima at +infinity so the first measurement always improves.
     _eOpt     = double.PositiveInfinity;
     _stripOpt = double.PositiveInfinity;
     _stop      = false;
     _lastCheck = 0;
 }
 /// <summary>
 /// Write a one-line training progress report (iteration, error, elapsed
 /// time) to standard output.
 /// </summary>
 /// <param name="train">The trainer to report on.</param>
 public void ReportTraining(IMLTrain train)
 {
     var elapsed = _stopwatch.ElapsedMilliseconds / Format.MiliInSec;
     Console.Out.WriteLine("Iteration #"
                           + Format.FormatInteger(train.IterationNumber)
                           + " Error:"
                           + Format.FormatPercent(train.Error)
                           + " elapsed time = "
                           + Format.FormatTimeSpan((int)elapsed));
 }
Example #17
0
        /// <summary>
        /// Build a sliding-window training set from the source series and train
        /// the network with the configured method until <c>_stop</c> is set
        /// externally, invoking <paramref name="trainCallback"/> every
        /// <paramref name="reportEach"/> iterations.
        /// </summary>
        /// <param name="trainCallback">Optional callback receiving (current error, iteration number).</param>
        /// <param name="reportEach">Invoke the callback every this many iterations.</param>
        public void StartTrain(Action <double, int> trainCallback = null, int reportEach = 5)
        {
            // initialize input and output values
            var inputs      = new double[_learnLength][];
            var outputs     = new double[_learnLength][];
            var i0          = TrainStartIndex();
            var sourceArray = GetSource().ToArray();
            var window      = new Queue <double>(sourceArray.Skip(i0).Take(_windowSize).Select(d => d.Val[(byte)_wantedInput]));

            for (var i = 0; i < _learnLength; i++)
            {
                // Take the _windowSize previous values for each of the _learnLength
                // samples, starting from the selected date.
                var innerArray = window.ToArray();
                inputs[i] = NormalizeInput(innerArray);

                // Slide the window forward by one sample.
                window.Dequeue();
                window.Enqueue(sourceArray[i0 + i + _windowSize].Val[(byte)_wantedInput]);

                // NOTE(review): the output slice skips 'i + _windowSize' while the
                // input window is offset by 'i0 + i + _windowSize' — confirm the
                // missing 'i0' here is intentional.
                var ouputArray = sourceArray.Skip(i + _windowSize).Take(_layers.Last()).Select(d => d.Val[(byte)_wantedOutput]).ToArray();
                outputs[i] = NormalizeOutput(ouputArray);
            }

            if (_network != null)
            {
                var      trainingSet = new BasicMLDataSet(inputs, outputs);
                // Pick the trainer implementation from the configured method.
                IMLTrain teacher     =
                    _trainMethod == TrainMethod.Specific ? (IMLTrain) new LevenbergMarquardtTraining(_network, trainingSet) :
                    _trainMethod == TrainMethod.BackProp ? (IMLTrain) new Backpropagation(_network, trainingSet) :
                    _trainMethod == TrainMethod.Resilent ? (IMLTrain) new ResilientPropagation(_network, trainingSet) :
                    _trainMethod == TrainMethod.Genetic ? (IMLTrain) new NeuralGeneticAlgorithm(
                        _network,
                        new Encog.MathUtil.Randomize.NguyenWidrowRandomizer(),
                        new TrainingSetScore(trainingSet),
                        _population,
                        _mutationPercent / 100.0,
                        _matePercent / 100.0) : null;
                if (teacher == null)
                {
                    return;
                }

                // Train until another thread/UI action sets _stop.
                _stop = false;
                for (int i = 1; !_stop; i++)
                {
                    teacher.Iteration();
                    // The genetic trainer evolves a population; adopt the best
                    // organism as the current network after each iteration.
                    if (teacher is NeuralGeneticAlgorithm)
                    {
                        _network = (teacher as NeuralGeneticAlgorithm).Genetic.Population.Best.Organism as BasicNetwork;
                    }
                    if (i % reportEach == 0 && trainCallback != null)
                    {
                        trainCallback(teacher.Error, i);
                    }
                }
            }
        }
Example #18
0
 /// <summary>
 /// Create a hybrid strategy.
 /// </summary>
 ///
 /// <param name="altTrain">The alternate training algorithm.</param>
 /// <param name="minImprovement">The minimum improvement to switch algorithms.</param>
 /// <param name="tolerateMinImprovement"></param>
 /// <param name="alternateCycles"></param>
 public HybridStrategy(IMLTrain altTrain, double minImprovement,
                       int tolerateMinImprovement, int alternateCycles)
 {
     // Strategy starts in the not-ready state with no hybrid switches recorded.
     _ready      = false;
     _lastHybrid = 0;

     _altTrain               = altTrain;
     _minImprovement         = minImprovement;
     _tolerateMinImprovement = tolerateMinImprovement;
     _alternateCycles        = alternateCycles;
 }
Example #19
0
    /// <summary>
    /// Build the network from the UI-configured layer sizes, construct the
    /// training data set, create the trainer, lock the UI inputs (with
    /// defaults), and kick off the training coroutine.
    /// </summary>
    public void CreateNetwork()
    {
        FeedImages();

        // NOTE(review): locals 'a' and 'b' below are defined but never used —
        // the data set is built from 'x' and 'y' (presumably fields populated
        // elsewhere, e.g. by FeedImages). Confirm whether the XOR tables here
        // are dead leftovers or were meant to be passed to BasicMLDataSet.
        double[][] a =
        {
            new[] { 0.0, 0.0 },
            new[] { 0.0, 1.0 },
            new[] { 1.0, 0.0 },
            new[] { 1.0, 1.0 }
        };

        double[][] b =
        {
            new[] { 0.0 },
            new[] { 1.0 },
            new[] { 1.0 },
            new[] { 0.0 }
        };

        // Assemble the network: input layer, configurable hidden layers, output layer.
        basicNetwork = new BasicNetwork();
        basicNetwork.AddLayer(new BasicLayer(inputLayer));
        for (int i = 0; i < hiddenLayer.Length; i++)
        {
            basicNetwork.AddLayer(new BasicLayer(hiddenLayer[i]));
        }
        basicNetwork.AddLayer(new BasicLayer(outputLayer));
        basicNetwork.Structure.FinalizeStructure();
        basicNetwork.Reset();

        dataPairs = new BasicMLDataSet(x, y);

        backpropagation = new ResilientPropagation(basicNetwork, dataPairs);
        //backpropagation = new Backpropagation(basicNetwork, dataPairs, learningRate, momentum);
        //backpropagation.AddStrategy(new ResetStrategy(0.5, 50));

        epoch = 0;

        // Lock the epoch/error inputs while training runs, filling in defaults
        // for any the user left blank.
        maximumEpoch.interactable = false;
        minimumError.interactable = false;

        if (maximumEpoch.text == "")
        {
            maximumEpoch.text = "1000";
        }

        if (minimumError.text == "")
        {
            minimumError.text = "0.01";
        }

        // Run training as a Unity coroutine so the UI stays responsive.
        coroutine = Train();
        StartCoroutine(coroutine);
    }
 /// <summary>
 /// Initialize this strategy: capture the trainer, require learning-rate
 /// support, and seed the learning rate at 1 / (training-set size).
 /// </summary>
 ///
 /// <param name="train">The training algorithm.</param>
 public void Init(IMLTrain train)
 {
     _train  = train;
     _ready  = false;
     _setter = (ILearningRate)train;

     int sampleCount = train.Training.Count;
     _trainingSize        = sampleCount;
     _currentLearningRate = 1.0d / sampleCount;

     EncogLogging.Log(EncogLogging.LevelDebug, "Starting learning rate: "
                      + _currentLearningRate);
     _setter.LearningRate = _currentLearningRate;
 }
 /// <summary>
 /// Initialize this strategy. The trainer must expose a settable learning
 /// rate; the initial rate is the reciprocal of the training-set size.
 /// </summary>
 ///
 /// <param name="train">The training algorithm.</param>
 public void Init(IMLTrain train)
 {
     _train = train;
     _ready = false;
     // Throws InvalidCastException if the trainer has no learning-rate support.
     _setter = (ILearningRate) train;
     _trainingSize = train.Training.Count;
     _currentLearningRate = 1.0d / _trainingSize;
     EncogLogging.Log(EncogLogging.LevelDebug,
                      "Starting learning rate: " + _currentLearningRate);
     _setter.LearningRate = _currentLearningRate;
 }
Example #22
0
 /// <summary>
 /// Create a hybrid strategy (obfuscated/decompiled variant): store the
 /// alternate trainer and the switching thresholds.
 /// </summary>
 /// <param name="altTrain">The alternate training algorithm.</param>
 /// <param name="minImprovement">The minimum improvement to switch algorithms.</param>
 /// <param name="tolerateMinImprovement"></param>
 /// <param name="alternateCycles"></param>
 public HybridStrategy(IMLTrain altTrain, double minImprovement, int tolerateMinImprovement, int alternateCycles)
 {
     // Decompiler artifact removed: the original guard
     // '((((uint) minImprovement) | 2) != 0) && (0 == 0)' is always true
     // (x | 2 can never be zero), so these assignments run unconditionally.
     this._xa45232be281da68a = altTrain;
     this._x6c7711ed04d2ac90 = false;
     this._x123e81b4d1593407 = 0;
     this._x75deb38bfba59a18 = minImprovement;
     this._x671aa26bb37ef7df = tolerateMinImprovement;
     this._x2801df77d59cbd36 = alternateCycles;
 }
Example #23
0
 /// <summary>
 /// Build the 72-216-144-12 sigmoid network (all layers biased), wrap the
 /// input/output data in a training set, and create a resilient-propagation
 /// trainer for it.
 /// </summary>
 public RedNeuronal()
 {
     trainingSet = new BasicMLDataSet(neuralInput, neuralOutput);

     this.network = new BasicNetwork();
     foreach (int neurons in new[] { 72, 216, 144, 12 })
     {
         network.AddLayer(new BasicLayer(new ActivationSigmoid(), true, neurons));
     }
     network.Structure.FinalizeStructure();
     network.Reset();

     train = new ResilientPropagation(network, trainingSet);
 }
Example #24
0
        /// <summary>
        /// Initialize this strategy.
        /// </summary>
        ///
        /// <param name="train">The training algorithm.</param>
        public virtual void Init(IMLTrain train)
        {
            _train = train;

            // This strategy resets the method when improvement stalls, so the
            // method must support resetting.
            var resettable = train.Method as IMLResettable;
            if (resettable == null)
            {
                throw new TrainingError(
                          "To use the required improvement strategy the machine learning method must support MLResettable.");
            }

            _method = resettable;
        }
Example #25
0
        /// <summary>
        /// Construct a cross validation trainer.
        /// </summary>
        ///
        /// <param name="train">The training</param>
        /// <param name="k">The number of folds.</param>
        public CrossValidationKFold(IMLTrain train, int k) : base(train.Method, (FoldedDataSet) train.Training)
        {
            _train = train;
            // Partition the folded data set into k folds.
            Folded.Fold(k);

            _flatNetwork = ((BasicNetwork) train.Method).Structure.Flat;

            // One snapshot of the flat network per fold.
            _networks = new NetworkFold[k];
            for (var fold = 0; fold < k; fold++)
            {
                _networks[fold] = new NetworkFold(_flatNetwork);
            }
        }
        /// <summary>
        ///     Create a trainer.
        /// </summary>
        /// <param name="method">The method to train.</param>
        /// <param name="dataset">The dataset.</param>
        /// <returns>The trainer.</returns>
        private IMLTrain CreateTrainer(IMLMethod method, IMLDataSet dataset)
        {
            // A training type must have been selected before a trainer can be built.
            if (_trainingType == null)
            {
                throw new EncogError(
                          "Please call selectTraining first to choose how to train.");
            }

            return new MLTrainFactory().Create(method, dataset, _trainingType,
                                               _trainingArgs);
        }
Example #27
0
        /// <summary>
        /// Construct a cross validation trainer.
        /// </summary>
        ///
        /// <param name="train">The training</param>
        /// <param name="k">The number of folds.</param>
        public CrossValidationKFold(IMLTrain train, int k) : base(train.Method, (FoldedDataSet)train.Training)
        {
            _train = train;
            Folded.Fold(k);

            // Keep a handle to the flat network and snapshot it once per fold.
            _flatNetwork = ((BasicNetwork)train.Method).Structure.Flat;
            _networks    = new NetworkFold[k];

            int index = 0;
            while (index < _networks.Length)
            {
                _networks[index] = new NetworkFold(_flatNetwork);
                index++;
            }
        }
Example #28
0
        /// <summary>
        /// Constructor that builds the neural network, loads the training and
        /// validation data, and sets up the trainer.
        /// </summary>
        /// <param name="nbPasse">Number of passes (input window multiplier).</param>
        /// <param name="nbHidden">Number of hidden-layer neurons.</param>
        public IndicatorEncog(int nbPasse, int nbHidden)
        {
            // Initialize the configuration values.
            NbPasse  = nbPasse;
            NbHidden = nbHidden;

            // Initialize the network; errors are measured as RMS.
            ErrorCalculation.Mode = ErrorCalculationMode.RMS;

            Reseau = new BasicNetwork();
            Reseau.AddLayer(new BasicLayer(null, true, NbNeuronesEntre * NbPasse));
            Reseau.AddLayer(new BasicLayer(new ActivationTANH(), true, NbHidden));
            //Reseau.AddLayer(new BasicLayer(new ActivationTANH(), true, NbHidden / 2));
            //Reseau.AddLayer(new BasicLayer(new ActivationTANH(), true, NbHidden / 4));
            //Reseau.AddLayer(new BasicLayer(new ActivationStep(0,0,1), false, 1));
            Reseau.AddLayer(new BasicLayer(new ActivationSigmoid(), false, 1));
            Reseau.Structure.FinalizeStructure();
            Reseau.Reset();

            // Initialize the data lists (test = training, validation = hold-out).
            DonneesEntreesTest       = new List <List <double> >();
            DonneesSortiesTest       = new List <List <double> >();
            DonneesEntreesValidation = new List <List <double> >();
            DonneesSortiesValidation = new List <List <double> >();


            //// Retrieve the input and output data for all the currencies.
            Donnees = new Donnees(MonnaieBTC.XRP);
            this.AggregationDonnees(Donnees);

            // Build the training set from the test input/output lists.
            double[][] donneesEntreesTest = this.DonneesEntreesTest.Select(a => a.ToArray()).ToArray();
            double[][] donneesSortiesTest = this.DonneesSortiesTest.Select(a => a.ToArray()).ToArray();
            TrainingSet = new BasicMLDataSet(donneesEntreesTest, donneesSortiesTest);

            // Build the validation set from the validation input/output lists.
            double[][] donneesEntreesValidation = this.DonneesEntreesValidation.Select(a => a.ToArray()).ToArray();
            double[][] donneesSortiesValidation = this.DonneesSortiesValidation.Select(a => a.ToArray()).ToArray();
            ValidationSet = new BasicMLDataSet(donneesEntreesValidation, donneesSortiesValidation);

            // Create the training method.
            //train = new Backpropagation(Reseau, TrainingSet, 0.001, 0.01);
            train = new ResilientPropagation(Reseau, TrainingSet);

            // Initialize the error lists and the best-so-far errors
            // (100 = worst-case starting point).
            ListeErreurEntrainement    = new List <double>();
            ListeErreurValidation      = new List <double>();
            ErreurOptimaleEntrainement = 100;
            ErreurOptimaleValidation   = 100;
        }
Example #29
0
        /// <summary>
        /// Initialize this strategy.
        /// </summary>
        ///
        /// <param name="train">The training algorithm.</param>
        public virtual void Init(IMLTrain train)
        {
            _train = train;
            _ready = false;

            // Greedy keeps an encoded copy of the method's state, so the
            // method must be encodable.
            var encodable = train.Method as IMLEncodable;
            if (encodable == null)
            {
                throw new TrainingError(
                          "To make use of the Greedy strategy the machine learning method must support MLEncodable.");
            }

            _method      = encodable;
            _lastNetwork = new double[_method.EncodedArrayLength()];
        }
Example #30
0
        /// <summary>
        /// Initialize this strategy: require an encodable method and allocate
        /// the buffer used to hold the last-seen encoded state.
        /// </summary>
        ///
        /// <param name="train">The training algorithm.</param>
        public virtual void Init(IMLTrain train)
        {
            if (!(train.Method is IMLEncodable))
            {
                throw new TrainingError(
                    "To make use of the Greedy strategy the machine learning method must support MLEncodable.");
            }

            _train  = train;
            _ready  = false;
            _method = ((IMLEncodable) train.Method);

            _lastNetwork = new double[_method.EncodedArrayLength()];
        }
Example #31
0
        /// <summary>
        /// Assert that the trainer improves its error by at least the required
        /// fraction over ten iterations following one warm-up iteration.
        /// </summary>
        /// <param name="train">The trainer under test.</param>
        /// <param name="requiredImprove">Minimum acceptable fractional improvement.</param>
        public static void TestTraining(IMLTrain train, double requiredImprove)
        {
            train.Iteration();
            double initialError = train.Error;

            int remainingRuns = 10;
            while (remainingRuns-- > 0)
            {
                train.Iteration();
            }

            double finalError = train.Error;
            double improve    = (initialError - finalError) / initialError;

            Assert.IsTrue(improve >= requiredImprove,
                          "Improve rate too low for " + train.GetType().Name +
                          ",Improve=" + improve + ",Needed=" + requiredImprove);
        }
        /// <summary>
        ///     Fit the model using cross validation.
        /// </summary>
        /// <param name="k">The number of folds total.</param>
        /// <param name="foldNum">The current fold.</param>
        /// <param name="fold">The current fold.</param>
        private void FitFold(int k, int foldNum, DataFold fold)
        {
            IMLMethod method = CreateMethod();
            IMLTrain  train  = CreateTrainer(method, fold.Training);

            if (train.ImplementationType == TrainingImplementationType.Iterative)
            {
                // Iterative trainers run until early stopping (driven by the
                // validation set) signals completion.
                var earlyStop = new SimpleEarlyStoppingStrategy(
                    fold.Validation);
                train.AddStrategy(earlyStop);

                var line = new StringBuilder();
                while (!train.TrainingDone)
                {
                    train.Iteration();
                    line.Length = 0;
                    line.Append("Fold #");
                    line.Append(foldNum);
                    line.Append("/");
                    line.Append(k);
                    line.Append(": Iteration #");
                    line.Append(train.IterationNumber);
                    line.Append(", Training Error: ");
                    line.Append(Format.FormatDouble(train.Error, 8));
                    line.Append(", Validation Error: ");
                    line.Append(Format.FormatDouble(earlyStop.ValidationError,
                                                    8));
                    Report.Report(k, foldNum, line.ToString());
                }
                fold.Score  = earlyStop.ValidationError;
                fold.Method = method;
            }
            else if (train.ImplementationType == TrainingImplementationType.OnePass)
            {
                // One-pass trainers: train once, then score on the validation set.
                train.Iteration();
                double validationError = CalculateError(method,
                                                        fold.Validation);
                // Fixed typo in the report string ("Validatoin" -> "Validation").
                // NOTE(review): this reports (k, k, ...) rather than (k, foldNum, ...)
                // as the iterative branch does — confirm whether that is intentional.
                Report.Report(k, k,
                              "Trained, Training Error: " + train.Error
                              + ", Validation Error: " + validationError);
                fold.Score  = validationError;
                fold.Method = method;
            }
            else
            {
                throw new EncogError("Unsupported training type for EncogModel: "
                                     + train.ImplementationType);
            }
        }
Example #33
0
 /// <summary>
 /// Initialize the Greedy strategy (obfuscated/decompiled variant): the
 /// trained method must be encodable; allocate the encoded-state buffer.
 /// </summary>
 /// <param name="train">The training algorithm.</param>
 public virtual void Init(IMLTrain train)
 {
     this._xd87f6a9c53c2ed9f = train;
     this._x6c7711ed04d2ac90 = false;
     // Decompiler artifacts removed: the 'while' around the throw was a plain
     // guard clause, and the 'do { ... } while (2 == 0)' executed exactly once.
     if (!(train.Method is IMLEncodable))
     {
         throw new TrainingError("To make use of the Greedy strategy the machine learning method must support MLEncodable.");
     }
     this._x1306445c04667cc7 = (IMLEncodable) train.Method;
     this._x8ca12f17f1ae2b01 = new double[this._x1306445c04667cc7.EncodedArrayLength()];
 }
Example #34
0
        /// <summary>
        /// Build a small 5-1-1 network and a particle-swarm trainer over a single
        /// hand-made data pair.
        /// </summary>
        public PSO()
        {
            // 5 input neurons feeding two single-neuron layers (hidden + output).
            network = new BasicNetwork();
            network.AddLayer(new BasicLayer(5));
            network.AddLayer(new BasicLayer(1));
            network.AddLayer(new BasicLayer(1));
            network.Structure.FinalizeStructure();
            network.Reset();

            IMLDataSet dataSet = new BasicMLDataSet();

            // NOTE(review): the ideal vector has 5 elements while the output layer
            // has a single neuron — confirm this mismatch is intentional.
            dataSet.Add(new BasicMLData(new double[] { 1.0, 4.0, 3.0, 4.0, 5.0 }), new BasicMLData(new double[] { 2.0, 4.0, 6.0, 8.0, 10 }));

            // Swarm of 5 particles, weights randomized in [0, 10], scored against
            // the training set.
            train = new NeuralPSO(network, new RangeRandomizer(0, 10), new TrainingSetScore(dataSet), 5);
        }
Example #35
0
File: PSO.cs Project: ifzz/QuantSys
        /// <summary>
        /// Construct the PSO demo: a 5-1-1 network trained by particle swarm
        /// optimization against one fixed data pair.
        /// </summary>
        public PSO()
        {
            network = new BasicNetwork();
            network.AddLayer(new BasicLayer(5));
            network.AddLayer(new BasicLayer(1));
            network.AddLayer(new BasicLayer(1));
            network.Structure.FinalizeStructure();
            network.Reset();

            // One hand-made input/ideal pair forms the whole data set.
            var input = new BasicMLData(new double[] { 1.0, 4.0, 3.0, 4.0, 5.0 });
            var ideal = new BasicMLData(new double[] { 2.0, 4.0, 6.0, 8.0, 10 });

            IMLDataSet dataSet = new BasicMLDataSet();
            dataSet.Add(input, ideal);

            // Swarm of 5 particles, weights randomized in [0, 10].
            train = new NeuralPSO(network, new RangeRandomizer(0, 10), new TrainingSetScore(dataSet), 5);
        }
Example #36
0
        /// <summary>
        /// Train to a specific error, using the specified training method, send the
        /// output to the console.
        /// </summary>
        ///
        /// <param name="train">The training method.</param>
        /// <param name="error">The desired error level.</param>
        public static void TrainToError(IMLTrain train, double error)
        {
            Console.Out.WriteLine(@"Beginning training...");

            int iteration = 0;
            do
            {
                train.Iteration();
                iteration++;

                Console.Out.WriteLine(@"Iteration #" + Format.FormatInteger(iteration)
                                      + @" Error:" + Format.FormatPercent(train.Error)
                                      + @" Target Error: " + Format.FormatPercent(error));
            } while ((train.Error > error) && !train.TrainingDone);
            train.FinishTraining();
        }
Example #37
0
 /// <summary>
 /// Create a trainer by delegating to the registered Encog plugins.
 /// </summary>
 ///
 /// <param name="method">The method to train.</param>
 /// <param name="training">The training data.</param>
 /// <param name="type">The type of trainer.</param>
 /// <param name="args">The training args.</param>
 /// <returns>The new training method.</returns>
 /// <exception cref="EncogError">If no plugin recognizes the trainer type.</exception>
 public IMLTrain Create(IMLMethod method,
                        IMLDataSet training, String type, String args)
 {
     // Ask each plugin in turn; the first non-null answer wins.
     foreach (EncogPluginBase plugin in EncogFramework.Instance.Plugins)
     {
         var service = plugin as IEncogPluginService1;
         if (service == null)
         {
             continue;
         }

         IMLTrain created = service.CreateTraining(method, training, type, args);
         if (created != null)
         {
             return created;
         }
     }

     throw new EncogError("Unknown training type: " + type);
 }
Example #38
0
        /// <summary>
        /// Run eleven training iterations (one to establish a baseline error,
        /// then ten more) and assert that the relative error improvement meets
        /// the required threshold.
        /// </summary>
        /// <param name="train">The trainer under test.</param>
        /// <param name="requiredImprove">Minimum acceptable relative improvement.</param>
        public static void TestTraining(IMLTrain train, double requiredImprove)
        {
            train.Iteration();
            double initialError = train.Error;

            for (int pass = 0; pass < 10; pass++)
            {
                train.Iteration();
            }

            double finalError = train.Error;

            // Relative improvement over the baseline error.
            double improve = (initialError - finalError) / initialError;

            Assert.IsTrue(improve >= requiredImprove,
                          "Improve rate too low for " + train.GetType().Name +
                          ",Improve=" + improve + ",Needed=" + requiredImprove);
        }
Example #39
0
            /// <summary>
            /// Capture the genetic-algorithm trainer and build the fixed roster
            /// of twelve adversaries (six vertical, six horizontal), each with
            /// a randomly chosen starting side (+1.0 or -1.0).
            /// </summary>
            /// <param name="train">The trainer; must be MLMethodGeneticAlgorithm.</param>
            public void Init(IMLTrain train)
            {
                Method      = (MLMethodGeneticAlgorithm)train;
                Adversaries = new List<Adversaries>();
                var rand = new RangeRandomizer(-1, 1);

                for (int index = 0; index < 6; index++)
                {
                    double verticalStart = rand.NextDouble() >= 0.5 ? 1.0 : -1.0;
                    Adversaries.Add(new Adversaries
                    {
                        PlayerStart = verticalStart, Player = new PlayerVertical(index)
                    });

                    double horizontalStart = rand.NextDouble() >= 0.5 ? 1.0 : -1.0;
                    Adversaries.Add(new Adversaries
                    {
                        PlayerStart = horizontalStart, Player = new PlayerHorizontal(index)
                    });
                }

                // Keep an independent snapshot of the starting roster.
                BaseAdversaries = Adversaries.ToList();
            }
Example #40
0
        /// <summary>
        /// Console command: load an ML method and a training file (first two
        /// command arguments), prompt for the trainer type/args/target error,
        /// train to that error, then persist the trained method back to its file.
        /// </summary>
        private void TrainCommand()
        {
            String methodFile   = _cmd.Args[0];
            String trainingFile = _cmd.Args[1];

            // Prompt for the training configuration, with defaults.
            String type     = _cmd.PromptString("type", "rprop");
            String args     = _cmd.PromptString("args", "");
            double maxError = _cmd.PromptDouble("maxError", 0.01);

            // NOTE(review): dataSet is never closed/disposed here - confirm
            // BufferedMLDataSet's expected lifetime.
            var dataSet = new BufferedMLDataSet(trainingFile);
            var method  = (IMLMethod)EncogDirectoryPersistence.LoadObject(new FileInfo(methodFile));

            var factory   = new MLTrainFactory();
            IMLTrain train = factory.Create(method, dataSet, type, args);

            _sw.Start();
            EncogUtility.TrainToError(train, maxError);

            Console.WriteLine(@"Saving machine learning method");
            EncogDirectoryPersistence.SaveObject(new FileInfo(methodFile), method);
        }
Example #41
0
 /// <summary>
 /// Initialize this strategy: capture the trainer, compute a starting
 /// learning rate of 1 / (training set size), and apply it.
 /// Decompiler residue removed: the original wrapped this in a
 /// while(true) that ran once, a do/while(false), and constant-condition
 /// branches - all dead control flow with identical behavior.
 /// </summary>
 /// <param name="train">The training algorithm; must implement ILearningRate.</param>
 public void Init(IMLTrain train)
 {
     this._xd87f6a9c53c2ed9f = train;
     this._x6c7711ed04d2ac90 = false;
     this._x6947f9fc231e17e8 = (ILearningRate) train;

     // Starting learning rate is the reciprocal of the training set size.
     this._x985befeef351542c = train.Training.Count;
     this._x6300a707dc67f3a2 = 1.0 / ((double) this._x985befeef351542c);
     EncogLogging.Log(0, "Starting learning rate: " + this._x6300a707dc67f3a2);
     this._x6947f9fc231e17e8.LearningRate = this._x6300a707dc67f3a2;
 }
Example #42
0
 /// <summary>
 /// Construct a k-fold cross-validation trainer wrapping an existing
 /// trainer. The wrapped trainer's data set must be a FoldedDataSet; it
 /// is partitioned into k folds and a network snapshot is kept per fold.
 /// Decompiler residue removed: the original's goto maze with
 /// always-true/always-false guards executes exactly the sequence below.
 /// </summary>
 /// <param name="train">The underlying trainer (trains a BasicNetwork on a FoldedDataSet).</param>
 /// <param name="k">The number of folds.</param>
 public CrossValidationKFold(IMLTrain train, int k)
     : base(train.Method, (FoldedDataSet) train.Training)
 {
     this._xd87f6a9c53c2ed9f = train;
     base.Folded.Fold(k);

     // Snapshot of the flat network shared by all folds.
     this._xef94864849922d07 = ((BasicNetwork) train.Method).Structure.Flat;

     this._x5f6ed0047d99f4b6 = new NetworkFold[k];
     for (int num = 0; num < this._x5f6ed0047d99f4b6.Length; num++)
     {
         this._x5f6ed0047d99f4b6[num] = new NetworkFold(this._xef94864849922d07);
     }
 }
Example #43
0
 /// <summary>
 /// Run the analyst training loop: iterate until the target error is
 /// reached, the analyst requests a stop, the trainer reports it is done,
 /// or the iteration cap is hit (-1 means unlimited). Decompiler residue
 /// removed: the original's goto graph (including a read of the
 /// uninitialized maxIteration inside a constant-true condition) reduces
 /// to the loop below with identical behavior.
 /// </summary>
 /// <param name="xd87f6a9c53c2ed9f">The trainer to run.</param>
 /// <param name="x1306445c04667cc7">The ML method being trained.</param>
 /// <param name="x1c9e132f434262d8">The training data set.</param>
 private void x0d87de1eb44df41c(IMLTrain xd87f6a9c53c2ed9f, IMLMethod x1306445c04667cc7, IMLDataSet x1c9e132f434262d8)
 {
     ValidateNetwork.ValidateMethodToData(x1306445c04667cc7, x1c9e132f434262d8);
     double propertyDouble = base.Prop.GetPropertyDouble("ML:TRAIN_targetError");
     base.Analyst.ReportTrainingBegin();
     int maxIteration = base.Analyst.MaxIteration;

     do
     {
         xd87f6a9c53c2ed9f.Iteration();
         base.Analyst.ReportTraining(xd87f6a9c53c2ed9f);
         if ((xd87f6a9c53c2ed9f.Error <= propertyDouble) || base.Analyst.ShouldStopCommand())
         {
             break;
         }
     }
     while (!xd87f6a9c53c2ed9f.TrainingDone
            && ((maxIteration == -1) || (xd87f6a9c53c2ed9f.IterationNumber < maxIteration)));

     xd87f6a9c53c2ed9f.FinishTraining();
     base.Analyst.ReportTrainingEnd();
 }
Example #44
0
 /// <summary>
 /// Construct a hybrid strategy with the default minimum improvement
 /// and toleration cycles. Delegates to the four-argument constructor,
 /// passing the class-level default constants.
 /// </summary>
 ///
 /// <param name="altTrain">The alternative training strategy.</param>
 public HybridStrategy(IMLTrain altTrain)
     : this(altTrain, DefaultMinImprovement, DefaultTolerateCycles, DefaultAlternateCycles)
 {
 }
 /// <summary>
 /// Initialize this strategy: remember the primary trainer and clear
 /// the stop and ready flags.
 /// </summary>
 ///
 /// <param name="train">The primary training algorithm.</param>
 public virtual void Init(IMLTrain train)
 {
     _shouldStop = false;
     _ready = false;
     _train = train;
 }
Example #46
0
 /// <summary>
 /// Initialize this strategy: mark it as started and record a start time.
 /// </summary>
 /// <param name="train">The training algorithm (not used by this strategy).</param>
 public virtual void Init(IMLTrain train)
 {
     this._xaca68f1d554d41ca = true;
     // NOTE(review): DateTime.Now.Millisecond is only the 0-999 millisecond
     // component, not a timestamp; a sibling implementation in this file uses
     // DateTime.Now.Ticks. Likely a decompilation artifact - confirm intent.
     this._x6befdef5133de63a = DateTime.Now.Millisecond;
 }
Example #47
0
        /// <summary>
        /// Train to a specific error, using the specified training method, send the
        /// output to the console.
        /// </summary>
        ///
        /// <param name="train">The training method.</param>
        /// <param name="error">The desired error level.</param>
        public static void TrainToError(IMLTrain train, double error)
        {
            Console.Out.WriteLine(@"Beginning training...");

            // Iterate until the error target is met or the trainer stops itself.
            for (int epoch = 1; ; epoch++)
            {
                train.Iteration();

                Console.Out.WriteLine(@"Iteration #" + Format.FormatInteger(epoch)
                        + @" Error:" + Format.FormatPercent(train.Error)
                        + @" Target Error: " + Format.FormatPercent(error));

                if (train.Error <= error || train.TrainingDone)
                {
                    break;
                }
            }

            train.FinishTraining();
        }
 /// <summary>
 /// Initialize early-stopping state: capture the trainer and its error
 /// calculator, reset the best-seen errors, and clear the stop flag and
 /// checkpoint counter.
 /// </summary>
 /// <param name="theTrain">The training algorithm; its method must implement IMLError.</param>
 public void Init(IMLTrain theTrain)
 {
     _train = theTrain;
     _calc = (IMLError) theTrain.Method;

     // No error observed yet: start both optima at +infinity.
     _eOpt = Double.PositiveInfinity;
     _stripOpt = Double.PositiveInfinity;

     _stop = false;
     _lastCheck = 0;
 }
Example #49
0
 /// <summary>
 /// Train, using the specified training method, display progress to a dialog
 /// box.
 /// </summary>
 /// <param name="train">The training method to use.</param>
 /// <param name="network">The network to train (kept for API compatibility; the dialog drives training through the trainer alone).</param>
 /// <param name="trainingSet">The training set to use (kept for API compatibility).</param>
 public static void TrainDialog(IMLTrain train,
                                BasicNetwork network, IMLDataSet trainingSet)
 {
     var dialog = new TrainingDialog();
     dialog.Train = train;
     dialog.ShowDialog();
 }
 /// <summary>
 /// Initialize this strategy: capture the trainer, cast it to the
 /// momentum-setting interface, and start with zero momentum.
 /// </summary>
 ///
 /// <param name="train_0">The training algorithm; must implement IMomentum.</param>
 public void Init(IMLTrain train_0)
 {
     _train = train_0;
     _setter = (IMomentum) train_0;
     _ready = false;

     // Begin with no momentum; the strategy adjusts it later.
     _currentMomentum = 0;
     _setter.Momentum = 0.0d;
 }
 /// <inheritdoc/>
 public virtual void Init(IMLTrain train)
 {
     _started = true;
     // NOTE(review): DateTime.Now.Millisecond is the 0-999 millisecond
     // component, not an elapsed-time base; the sibling overload below uses
     // DateTime.Now.Ticks - confirm which is intended.
     _startedTime = DateTime.Now.Millisecond;
 }
 /// <summary>
 /// Write one progress line to the console: iteration number, current
 /// error, and elapsed time taken from the internal stopwatch.
 /// </summary>
 ///
 /// <param name="train">The trainer being reported on.</param>
 public void ReportTraining(IMLTrain train)
 {
     var elapsedSeconds = (int) (_stopwatch.ElapsedMilliseconds/Format.MiliInSec);
     string line = "Iteration #"
                   + Format.FormatInteger(train.IterationNumber)
                   + " Error:"
                   + Format.FormatPercent(train.Error)
                   + " elapsed time = "
                   + Format.FormatTimeSpan(elapsedSeconds);
     Console.Out.WriteLine(line);
 }
Example #53
0
        /// <summary>
        ///     Perform the training: one-pass trainers run a single iteration;
        ///     all others iterate until the target error, an analyst stop
        ///     request, trainer completion, or the iteration cap (-1 = unlimited).
        /// </summary>
        /// <param name="train">The training method.</param>
        /// <param name="method">The ML method.</param>
        /// <param name="trainingSet">The training set.</param>
        private void PerformTraining(IMLTrain train, IMLMethod method,
            IMLDataSet trainingSet)
        {
            ValidateNetwork.ValidateMethodToData(method, trainingSet);
            double targetError = Prop.GetPropertyDouble(
                ScriptProperties.MlTrainTargetError);
            Analyst.ReportTrainingBegin();
            int maxIteration = Analyst.MaxIteration;

            if (train.ImplementationType == TrainingImplementationType.OnePass)
            {
                // Single-pass trainers do all their work in one iteration.
                train.Iteration();
                Analyst.ReportTraining(train);
            }
            else
            {
                bool keepGoing = true;
                while (keepGoing)
                {
                    train.Iteration();
                    Analyst.ReportTraining(train);
                    keepGoing = (train.Error > targetError)
                                && !Analyst.ShouldStopCommand()
                                && !train.TrainingDone
                                && ((maxIteration == -1) || (train.IterationNumber < maxIteration));
                }
            }
            train.FinishTraining();

            Analyst.ReportTrainingEnd();
        }
Example #54
0
 /// <inheritdoc/>
 public virtual void Init(IMLTrain train)
 {
     // Record the wall-clock start (in ticks) and flag the strategy as running.
     _startedTime = DateTime.Now.Ticks;
     _started = true;
 }
Example #55
0
        /// <summary>
        /// Initialize this strategy: capture the trainer and its resettable
        /// machine learning method.
        /// </summary>
        ///
        /// <param name="train">The training algorithm; its method must implement IMLResettable.</param>
        /// <exception cref="TrainingError">If the method does not support resetting.</exception>
        public virtual void Init(IMLTrain train)
        {
            _train = train;

            var resettable = train.Method as IMLResettable;
            if (resettable == null)
            {
                throw new TrainingError(
                    "To use the reset strategy the machine learning method must support MLResettable.");
            }

            _method = resettable;
        }
 /// <summary>
 /// Report training progress by fanning the report out to every
 /// registered analyst listener.
 /// </summary>
 ///
 /// <param name="train">The trainer.</param>
 public void ReportTraining(IMLTrain train)
 {
     foreach (IAnalystListener subscriber in _listeners)
     {
         subscriber.ReportTraining(train);
     }
 }
Example #57
0
 /// <summary>
 /// Create a hybrid strategy.
 /// </summary>
 ///
 /// <param name="altTrain">The alternate training algorithm.</param>
 /// <param name="minImprovement">The minimum improvement to switch algorithms.</param>
 /// <param name="tolerateMinImprovement">How many sub-par cycles to tolerate before switching.</param>
 /// <param name="alternateCycles">How many cycles to run the alternate trainer for.</param>
 public HybridStrategy(IMLTrain altTrain, double minImprovement,
                       int tolerateMinImprovement, int alternateCycles)
 {
     _altTrain = altTrain;
     _minImprovement = minImprovement;
     _tolerateMinImprovement = tolerateMinImprovement;
     _alternateCycles = alternateCycles;

     // Not ready until Init() has seen the primary trainer.
     _ready = false;
     _lastHybrid = 0;
 }
Example #58
0
        /// <summary>
        /// Train the network, using the specified training algorithm, and send the
        /// output to the console. Stops when the time budget is used up or the
        /// trainer reports it is done.
        /// </summary>
        /// <param name="train">The training method to use.</param>
        /// <param name="network">The network to train.</param>
        /// <param name="trainingSet">The training set.</param>
        /// <param name="minutes">The number of minutes to train for.</param>
        public static void TrainConsole(IMLTrain train,
                                        BasicNetwork network, IMLDataSet trainingSet,
                                        int minutes)
        {
            Console.WriteLine(@"Beginning training...");
            long start = Environment.TickCount;

            int epoch = 1;
            long remaining;
            do
            {
                train.Iteration();

                // Whole seconds elapsed so far; minutes left via integer math.
                long elapsed = (Environment.TickCount - start)/1000;
                remaining = minutes - elapsed/60;

                Console.WriteLine(@"Iteration #" + Format.FormatInteger(epoch)
                                  + @" Error:" + Format.FormatPercent(train.Error)
                                  + @" elapsed time = " + Format.FormatTimeSpan((int) elapsed)
                                  + @" time left = "
                                  + Format.FormatTimeSpan((int) remaining*60));
                epoch++;
            } while (remaining > 0 && !train.TrainingDone);

            train.FinishTraining();
        }
Example #59
0
 /// <summary>
 /// Initialize this strategy by capturing the trainer it will act on.
 /// </summary>
 ///
 /// <param name="train_0">The training algorithm.</param>
 public virtual void Init(IMLTrain train_0)
 {
     _train = train_0;
 }
Example #60
0
 /// <summary>
 /// Initialize this strategy.
 /// </summary>
 ///
 /// <param name="train">The training algorithm.</param>
 public virtual void Init(IMLTrain train)
 {
     // Keep a reference to the primary trainer for later strategy decisions.
     _mainTrain = train;
 }