void TrainNetworkForWeight()
        {
            // Trains the weight-prediction network on the ideal input/output
            // pairs loaded from configuration files.
            inputWeight  = LoadFromFile("Ideal_Input_Weight.cfg");
            outputWeight = LoadFromFile("Ideal_Output_Weight.cfg");

            weightNetwork.Randomize();

            ResilientBackpropagationLearning learning = new ResilientBackpropagationLearning(weightNetwork);

            learning.LearningRate = 0.5;

            // Cap on the number of epochs (same bound as the original loop).
            const int maxIterations = 50000;

            // FIX: the original tested `error == 0`, an exact floating-point
            // equality that is effectively unreachable for a sum-of-squares
            // training error; use a tiny threshold instead.
            const double targetError = 1e-12;

            for (int iteration = 0; iteration <= maxIterations; iteration++)
            {
                double error = learning.RunEpoch(inputWeight, outputWeight);

                if (error <= targetError)
                {
                    break;
                }
            }
        }
Exemplo n.º 2
0
 public AccordNetwork()
 {
     // Deep belief network: Bernoulli units, hidden layers of 1200 and 600,
     // and 2 output neurons over `inputLength` inputs.
     var activation = new BernoulliFunction();
     network = new DeepBeliefNetwork(activation, inputLength, 1200, 600, 2);

     // Nguyen-Widrow weight initialization, then propagate to visible weights.
     var initializer = new NguyenWidrow(network);
     initializer.Randomize();
     network.UpdateVisibleWeights();

     // Teachers for unsupervised pre-training and supervised fine-tuning.
     unsuperVisedTeacher = GetUnsupervisedTeacherForNetwork(network);
     supervisedTeacher   = GetSupervisedTeacherForNetwork(network);
 }
        protected override ISupervisedLearning CreateTeacher()
        {
            // Resilient backpropagation teacher configured with this
            // instance's learning rate.
            var rprop = new ResilientBackpropagationLearning(_network);
            rprop.LearningRate = _learningRate;
            return rprop;
        }
Exemplo n.º 4
0
 private void button2_Click_1(object sender, EventArgs e)
 {
     // Disable the button while a fresh network is built.
     button2.Enabled = false;

     // Deep 12-layer network with a bipolar sigmoid activation.
     network2 = new ActivationNetwork(new BipolarSigmoidFunction(param1), 100, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10, 1);

     // BUG FIX: the original constructed the teacher on `network3`, so the
     // teacher would have trained a different network than the one just
     // created here. Pair it with `network2` instead.
     reprop = new ResilientBackpropagationLearning(network2);

     network2.Randomize();
 }
Exemplo n.º 5
0
        public AttackStateNN()
        {
            // FIX: removed an unused local GaussianFunction instance that the
            // original created but never used (the network below uses a
            // SigmoidFunction).

            // Inputs: 4 + 4 + 4*5*5 + 25 features; two hidden layers of 100
            // neurons each and a single output neuron.
            network = new ActivationNetwork(new SigmoidFunction(0.01), 4 + 4 + 4 * 5 * 5 + 25, new[] { 100, 100, 1 });

            // Gaussian-distributed initial weights with std dev 0.3.
            new GaussianWeights(network, 0.3).Randomize();

            teacher = new ResilientBackpropagationLearning(network);
            teacher.LearningRate = 0.1;
        }
Exemplo n.º 6
0
        /// <summary>
        /// Trains and validates a network on fold <paramref name="k"/> of the
        /// formatted data, using early stopping on the validation RSS error.
        /// </summary>
        /// <param name="k">Index of the fold held out for validation.</param>
        public void ExecuteFold(int k)
        {
            // Input/output sizes taken from the first formatted sample.
            int LengthOfInput  = this.FormattedData[0].Input.Count();
            int LengthOfOutput = this.FormattedData[0].Output.Count();

            // Single hidden layer; size chosen by NumberOfHiddenLayerNeurons.
            ActivationNetwork NeuralNetwork = new ActivationNetwork(
                new SigmoidFunction(2),
                LengthOfInput,
                this.NumberOfHiddenLayerNeurons(LengthOfInput, LengthOfOutput),
                LengthOfOutput);

            // Nguyen-Widrow weight initialization.
            NguyenWidrow weights = new NguyenWidrow(NeuralNetwork);

            weights.Randomize();

            ResilientBackpropagationLearning BackProp = new ResilientBackpropagationLearning(NeuralNetwork);

            BackProp.LearningRate = this.LearningRate;
            //BackProp.Momentum = 0.5;

            // Split the data into training and held-out validation sets for fold k.
            List <NetIO> TrainingData   = new List <NetIO>();
            List <NetIO> ValidationData = new List <NetIO>();

            ReadWrite.RemoveKFold(this.FormattedData, ref TrainingData, ref ValidationData, k);

            // for each epoch
            int epoch     = 0;
            int maxEpochs = int.MaxValue;
            EarlyStoppingTools netError = new EarlyStoppingTools(this.patience);

            // Train until validation error exceeds patience (early stopping).
            // NOTE(review): the Select(...).ToArray() projections rebuild both
            // jagged arrays on every epoch — hoisting them out of the loop
            // would avoid repeated allocation; verify before changing.
            do
            {
                ++epoch;

                double internalError = BackProp.RunEpoch(TrainingData.Select(l => l.Input.ToArray()).ToArray(),
                                                         TrainingData.Select(l => l.Output.ToArray()).ToArray());

                // Validation error drives the early-stopping decision below.
                this.RssError = EarlyStoppingTools.RssError(NeuralNetwork,
                                                            ValidationData.Select(l => l.Input.ToArray()).ToArray(),
                                                            ValidationData.Select(l => l.Output.ToArray()).ToArray());

                //Console.WriteLine("Epochs: " + epoch);
                //Console.WriteLine("Training error: " + internalError);
                //Console.WriteLine("CV Error: " + this.RssError);
            } while (!netError.ExceedsPatience(RssError) && epoch < maxEpochs);

            // Print one validation sample's target vs. the network's answer.
            Console.Write("Target: ");
            ValidationData[0].Output.ForEach(i => Console.Write(i));
            Console.WriteLine();
            Console.WriteLine("Result: " + string.Join(",", NeuralNetwork.Compute(ValidationData[0].Input.ToArray())));

            this.NumberOfEpochs = epoch;

            Console.WriteLine("Epochs required: " + epoch);
            Console.WriteLine("Error: " + RssError);
        }
Exemplo n.º 7
0
        /// <summary>
        /// Trains the network on the whole sample set, stopping after
        /// <paramref name="epochs_count"/> epochs or once the epoch error
        /// drops to <paramref name="acceptableError"/>.
        /// </summary>
        /// <param name="samplesSet">Samples providing input/output pairs.</param>
        /// <param name="epochs_count">Maximum number of training epochs.</param>
        /// <param name="acceptableError">Error threshold that stops training.</param>
        /// <param name="parallel">Use the parallel Rprop teacher when true.</param>
        /// <returns>The last epoch's training error.</returns>
        public override double TrainOnDataSet(SamplesSet samplesSet, int epochs_count, double acceptableError, bool parallel = true)
        {
            // Build jagged input and output arrays from the sample set.
            double[][] inputs  = new double[samplesSet.Count][];
            double[][] outputs = new double[samplesSet.Count][];

            for (int i = 0; i < samplesSet.Count; ++i)
            {
                inputs[i]  = samplesSet[i].input;
                outputs[i] = samplesSet[i].output;
            }

            // Current epoch counter.
            int epoch_to_run = 0;

            // Create the teacher — either parallel or sequential Rprop.
            ISupervisedLearning teacher;

            if (parallel)
            {
                teacher = new ParallelResilientBackpropagationLearning(network);
            }
            else
            {
                teacher = new ResilientBackpropagationLearning(network);
            }

            double error = double.PositiveInfinity;

            #if DEBUG
            // FIX: 'using' declaration guarantees the log file is flushed and
            // closed even if RunEpoch or updateDelegate throws (the original
            // leaked the StreamWriter on any exception).
            using StreamWriter errorsFile = File.CreateText("errors.csv");
            #endif

            stopWatch.Restart();

            while (epoch_to_run < epochs_count && error > acceptableError)
            {
                epoch_to_run++;
                error = teacher.RunEpoch(inputs, outputs);
                #if DEBUG
                errorsFile.WriteLine(error);
                #endif
                // Report progress in [0, 1] along with the error and elapsed time.
                updateDelegate((epoch_to_run * 1.0) / epochs_count, error, stopWatch.Elapsed);
            }

            updateDelegate(1.0, error, stopWatch.Elapsed);

            stopWatch.Stop();

            return(error);
        }
Exemplo n.º 8
0
        private ResilientBackpropagationLearning GetSupervisedTeacherForNetwork(DeepBeliefNetwork deepNetwork)
        {
            // Supervised fine-tuning teacher with a fixed learning rate of 0.1.
            var supervised = new ResilientBackpropagationLearning(deepNetwork);
            supervised.LearningRate = 0.1;
            //supervised.Momentum = 0.5;
            return supervised;
        }
        /// <summary>
        /// Sets up the form: background workers, file dialogs, the XOR
        /// training data, the 2-2-1 network, its Rprop teacher, and the chart.
        /// </summary>
        public ResilientBackpropagationLearningForm()
        {
            InitializeComponent();

            watch = new Stopwatch();

            // Training worker: cancellable, reports progress to the UI.
            backgroundWorkerTrainer.Disposed                  += backgroundWorkerTrainer_Disposed;
            backgroundWorkerTrainer.DoWork                    += backgroundWorkerTrainer_DoWork;
            backgroundWorkerTrainer.ProgressChanged           += backgroundWorkerTrainer_ProgressChanged;
            backgroundWorkerTrainer.WorkerSupportsCancellation = true;
            backgroundWorkerTrainer.WorkerReportsProgress      = true;
            // Save/load dialogs restricted to .rbn network files.
            saveFileDialog1.Filter           = "Resilient network files (*.rbn)|*.rbn";
            saveFileDialog1.Title            = "Save Resilient network file";
            saveFileDialog1.InitialDirectory = null;
            saveFileDialog1.FileName         = null;

            openFileDialog1.Filter           = "Resilient network files (*.rbn)|*.rbn";
            openFileDialog1.Title            = "Load Resilient network file";
            openFileDialog1.InitialDirectory = null;
            openFileDialog1.FileName         = null;

            // Signal worker: runs the trained network and reports completion.
            backgroundWorkerSignal.Disposed += backgroundWorkerSignal_Disposed;
            backgroundWorkerSignal.WorkerSupportsCancellation = true;
            backgroundWorkerSignal.WorkerReportsProgress      = true;
            backgroundWorkerSignal.DoWork             += backgroundWorkerSignal_DoWork;
            backgroundWorkerSignal.RunWorkerCompleted += backgroundWorkerSignal_RunWorkerCompleted;


            // initialize input and output values (XOR truth table)
            input = new double[4][] {
                new double[] { 0, 0 }, new double[] { 0, 1 },
                new double[] { 1, 0 }, new double[] { 1, 1 }
            };
            output = new double[4][] {
                new double[] { 0 }, new double[] { 1 },
                new double[] { 1 }, new double[] { 0 }
            };

            // 2 inputs, 2 hidden neurons, 1 output — minimal XOR network.
            network = new ActivationNetwork(new SigmoidFunction(2), 2, 2, 1);

            teacher = new  ResilientBackpropagationLearning(network);

            //logg.Show();

            // pane used to draw your chart
            myPane = new GraphPane();

            // poing pair lists
            listPointsOne = new PointPairList();
        }
Exemplo n.º 10
0
        /// <summary>
        /// Builds a 3-class up/flat/down classifier over normalized SPY close
        /// price and volume, trains it with Rprop, then sanity-checks the fit.
        /// </summary>
        public override void Initialize()
        {
            var history = History("SPY", TimeSpan.FromDays(1000), Resolution.Daily);

            var highestClose  = history.Max(h => h.Close);
            var lowestClose   = history.Min(h => h.Close);
            var highestVolume = history.Max(h => h.Volume);
            var lowestVolume  = history.Min(h => h.Volume);

            // Min-max normalize close price and volume into [0, 1].
            var inputs = history.Select(h =>
                                        new[]
            {
                (double)((h.Close - lowestClose) / (highestClose - lowestClose)),
                (double)(h.Volume - lowestVolume) / (highestVolume - lowestVolume)
            }).ToArray();

            // Label day i by the next day's move: 0 = down, 1 = flat, 2 = up.
            var classes = inputs.Take(inputs.Length - 1).Zip(inputs.Skip(1), (a, b) => b[0] < a[0] ? 0 : b[0] > a[0] ? 2 : 1).ToArray();

            var outputs = Jagged.OneHot(classes);

            // BUG FIX: `classes` has one fewer entry than `inputs` (the last
            // day has no "next day"), so only the first N-1 inputs have
            // targets; the original passed all N inputs to RunEpoch.
            var trainInputs = inputs.Take(inputs.Length - 1).ToArray();

            // BUG FIX: the one-hot targets have 3 columns (classes 0..2), so
            // the output layer needs 3 neurons; the original declared only 1,
            // mismatching the training targets.
            var network = new ActivationNetwork(new SigmoidFunction(), 2, 3, 3);

            new NguyenWidrow(network).Randomize();

            var teacher2 = new ResilientBackpropagationLearning(network);
            var maxError = double.MaxValue;
            var error    = 0d;

            // Run supervised learning while the epoch error keeps improving.
            while (error < maxError)
            {
                error = teacher2.RunEpoch(trainInputs, outputs);
                if (error < maxError)
                {
                    maxError = error;
                }
            }

            // Checks if the network has learned.
            // BUG FIX: iterate over classes.Length, not inputs.Length — the
            // original read classes[inputs.Length - 1], one past the end.
            for (var i = 0; i < classes.Length; i++)
            {
                var answer = network.Compute(inputs[i]);

                var expected = classes[i];
                int actual;
                answer.Max(out actual);
                // actual should be equal to expected
            }
        }
Exemplo n.º 11
0
        public double Train(double[][] input, double[][] outputs)
        {
            // Rprop teacher using the configured training speed.
            var teacher = new ResilientBackpropagationLearning(network)
            {
                LearningRate = topResults.TrainingSpeed
            };

            // Run the configured number of epochs; keep the last epoch error.
            double lastError = 0;
            for (int i = 0; i < topResults.Iterations; i++)
            {
                lastError = teacher.RunEpoch(input, outputs);
            }

            return lastError;
        }
Exemplo n.º 12
0
        public double Train(double[][] input, double[][] outputs, int iterations, float rate)
        {
            // Rprop teacher with the caller-supplied learning rate.
            var teacher = new ResilientBackpropagationLearning(network)
            {
                LearningRate = rate
            };

            // Run the requested number of epochs; keep the last epoch error.
            double lastError = 0;
            for (int i = 0; i < iterations; i++)
            {
                lastError = teacher.RunEpoch(input, outputs);
            }

            return lastError;
        }
Exemplo n.º 13
0
        /// <summary>
        /// Builds the Rprop network/teacher pair from the training list and
        /// runs a fixed 50 epochs of learning.
        /// </summary>
        /// <param name="datalist">Raw training records converted by GetData.</param>
        public void RBPBuild(List <train> datalist)
        {
            double[][] inputs;
            double[][] outputs;
            double[][] matrix;

            GetData(out inputs, out outputs, out matrix, datalist);

            // create neural network
            network_rbp = new ActivationNetwork(
                new BipolarSigmoidFunction(),
                9, // nine inputs to the network
                3, // three neurons in the hidden layer
                1  // one neuron in the output layer
                );
            // Randomly initialize the network
            new NguyenWidrow(network_rbp).Randomize();

            // create teacher
            teacher_rbp = new ResilientBackpropagationLearning(network_rbp);

            int times = 0;

            // Fixed 50 epochs; the epoch error is computed but not used to stop early.
            while (times++ < 50)
            {
                // run epoch of learning procedure
                double error = teacher_rbp.RunEpoch(inputs, outputs);
                // check error value to see if we need to stop
                // ...
            }


            // Checks if the network has learned

            /*  for (int i = 0; i < inputs.Length; i++)
             * {
             *    double[] answer = network.Compute(inputs[i]);
             *
             *    log(answer[0].ToString()) ;
             *
             *    // actual should be equal to expected
             * }*/
        }
Exemplo n.º 14
0
            /// <summary>
            /// Trains a new ANN forecasting model: validates the sample shapes,
            /// builds a network from the configured parameters, and runs epochs
            /// until the MSE improvement falls below the stop threshold or the
            /// iteration cap is reached.
            /// </summary>
            /// <param name="iInput">Sample inputs; all rows must share one length.</param>
            /// <param name="iOutput">Sample outputs; each row must have exactly one value.</param>
            /// <returns>The trained forecasting model wrapping the network.</returns>
            /// <exception cref="ArgumentException">Input/output dimensions are inconsistent.</exception>
            public IForecastingModel TrainNewModel(double[][] iInput, double[][] iOutput)
            {
                int inputSize = iInput[0].Length, samplesNum = iOutput.Length;

                if (samplesNum != iInput.Length)
                {
                    throw new ArgumentException();
                }

                for (int i = 0; i < samplesNum; ++i)
                {
                    if (iInput[i].Length != inputSize || iOutput[i].Length != 1) //iInput isn't a square matrix or iOutput isn't a vector
                    {
                        throw new ArgumentException();
                    }
                }

                // Network topology and stopping criteria come from the model parameter dictionary.
                int[]  neuronsCount       = (int[])ModelParametersDict[NeuronsInLayersKey];
                string activationFunction = (string)ModelParametersDict[ActivationFunctionKey];
                long   maxIterNum         = (long)ModelParametersDict[MaxIterationsNumberKey];
                double stopError          = (double)ModelParametersDict[StopErrorKey];

                ActivationNetwork   netToTrain = new ActivationNetwork(ActivationFunctionsDict[activationFunction], inputSize, neuronsCount);
                DataNormalizer      normalizer = new DataNormalizer(iInput.Concat(iOutput).ToArray());
                IForecastingModel   aModel     = new ANNforecastingModel(netToTrain, normalizer);
                ISupervisedLearning teacher    = new ResilientBackpropagationLearning(netToTrain);

                // Randomly drawn training subset, then normalized for the network.
                double[][] trainInputSet, trainOutputSet;
                TrainingSubsetGenerator.GenerateRandomly(iInput, iOutput, out trainInputSet, out trainOutputSet, iMultiplier: TrainSubsetMultiplier);

                trainInputSet = normalizer.Normalize(trainInputSet); trainOutputSet = normalizer.Normalize(trainOutputSet);

                long   epochsCount = 0;
                double nextError = ErrorCalculator.CalculateMSE(aModel, iInput, iOutput), prevError;

                // Train until the MSE change between epochs drops below stopError
                // (convergence) or maxIterNum epochs have run.
                do
                {
                    prevError = nextError;
                    teacher.RunEpoch(trainInputSet, trainOutputSet);
                    nextError = ErrorCalculator.CalculateMSE(aModel, iInput, iOutput);
                }while (epochsCount++ <= maxIterNum && Math.Abs(prevError - nextError) >= stopError);
                return(aModel);
            }
Exemplo n.º 15
0
 public BackpropagationTrainer(NeuralNetwork network, int type, double learningRate)
 {
     // Select the teacher implementation from the requested type code.
     if (type == resilient)
     {
         teacher = new ResilientBackpropagationLearning(network.ActivationNetwork)
         {
             LearningRate = learningRate
         };
     }
     //TODO: find a way to implement "learningRate" with PRBL network!
     else if (type == parallelResilient)
     {
         teacher = new ParallelResilientBackpropagationLearning(network.ActivationNetwork);
     }
     else
     {
         // Plain gradient backpropagation as the fallback.
         teacher = new BackPropagationLearning(network.ActivationNetwork)
         {
             LearningRate = learningRate
         };
     }
 }
Exemplo n.º 16
0
 public NeuralNetwork(Color color, string path, bool keepLearning, int depth = 2, int times = 1000, double eps = 1e-5) : base(color)
 {
     // Load a previously saved network from 'path' when one exists;
     // otherwise start from a fresh 64-32-16-1 sigmoid network.
     network = File.Exists(path)
         ? (ActivationNetwork)Network.Load(path)
         : new ActivationNetwork(new SigmoidFunction(2), 64, 32, 16, 1);

     teacher = new ResilientBackpropagationLearning(network);

     this.myDecisions   = 0;
     this.eps           = eps;
     this.path          = path;
     this.depth         = depth;
     this.times         = times;
     this.Substitute    = new AlphaBeta(color, depth);
     this.boardSnapshot = new List <double[]>();
     this.moveSnapshot  = new List <double[]>();
 }
Exemplo n.º 17
0
        //AForge.Neuro.ActivationNetwork network;
        //PerceptronLearning teacher;
        //BackPropagationLearning teacher;
        //ResilientBackpropagationLearning teacher;
        //EvolutionaryLearning teacher;
        public void InitNet()
        {
            // One input neuron per block in the square grid.
            int cnt_input = cnt_blocks_one_line * cnt_blocks_one_line;

            // Bipolar sigmoid network: two hidden layers (2x and 0.5x the
            // input size) feeding count_output output neurons.
            network = new ActivationNetwork(new BipolarSigmoidFunction(),
                                            cnt_input, new[] { cnt_input *2, cnt_input / 2, count_output });

            // Nguyen-Widrow weight initialization.
            new NguyenWidrow(network).Randomize();

            // Resilient backpropagation teacher with its default learning rate.
            teacher = new ResilientBackpropagationLearning(network);
        }
Exemplo n.º 18
0
        /// <summary>
        /// Trains the network on a single sample.
        /// </summary>
        /// <param name="sample">The sample to learn (input/output pair).</param>
        /// <param name="parallel">Use the parallel Rprop teacher when true.</param>
        /// <returns>Number of iterations needed to reach the target error level</returns>
        public override int Train(Sample sample, bool parallel = true)
        {
            // Create the teacher — either parallel or sequential Rprop.
            ISupervisedLearning teacher;

            if (parallel)
            {
                teacher = new ParallelResilientBackpropagationLearning(network);
            }
            else
            {
                teacher = new ResilientBackpropagationLearning(network);
            }

            int iters = 1;

            // NOTE(review): this loop has no iteration cap — if the network
            // never reaches desiredErrorValue on this sample it will not
            // terminate; confirm an upper bound is acceptable to add.
            while (teacher.Run(sample.input, sample.output) > desiredErrorValue)
            {
                ++iters;
            }
            return(iters);
        }
Exemplo n.º 19
0
        public Network Learn(double[][] features, double[] targets, int?hiddenLayerCount = null)
        {
            Debug.Assert(features.Length > 0);
            var inputsCount = features[0].Length;

            var outputs      = targets.Select(item => new[] { item }).ToArray();
            var outputsCount = 1;

            // Create an activation network with the function and
            var network = new ActivationNetwork(
                new BipolarSigmoidFunction(),
                inputsCount,
                hiddenLayerCount.GetValueOrDefault(2 * inputsCount),
                outputsCount);

            // Randomly initialize the network
            new NguyenWidrow(network).Randomize();

            // Teach the network using parallel Rprop:
            var teacher = new ResilientBackpropagationLearning(network);
            // var teacher = new LevenbergMarquardtLearning(network);
            //var teacher = new EvolutionaryLearning(network, 100);

            // Iterate until stop criteria is met
            var    error = teacher.RunEpoch(features, outputs);
            double previous;
            var    iteration = 0;

            do
            {
                previous = error;
                iteration++;
                // Compute one learning iteration
                error = teacher.RunEpoch(features, outputs);
            }while (Math.Abs(previous - error) > 0.000000001 * previous);

            // learn result
            var results = new (double, double, double)[features.Length];
            /// <summary>
            /// Trains a new ANN forecasting model: validates the sample shapes,
            /// builds a network from the configured parameters, and runs epochs
            /// until the MSE improvement falls below the stop threshold or the
            /// iteration cap is reached.
            /// </summary>
            /// <param name="iInput">Sample inputs; all rows must share one length.</param>
            /// <param name="iOutput">Sample outputs; each row must have exactly one value.</param>
            /// <returns>The trained forecasting model wrapping the network.</returns>
            /// <exception cref="ArgumentException">Input/output dimensions are inconsistent.</exception>
            public IForecastingModel TrainNewModel(double[][] iInput, double[][] iOutput)
            {
                int inputSize = iInput[0].Length, samplesNum = iOutput.Length;
                if (samplesNum != iInput.Length)
                    throw new ArgumentException();

                for (int i = 0; i < samplesNum;++i)
                    if (iInput[i].Length != inputSize || iOutput[i].Length != 1) //iInput isn't a square matrix or iOutput isn't a vector
                        throw new ArgumentException();

                // Network topology and stopping criteria come from the model parameter dictionary.
                int[] neuronsCount = (int[]) ModelParametersDict[NeuronsInLayersKey];
                string activationFunction = (string) ModelParametersDict[ActivationFunctionKey];
                long maxIterNum = (long) ModelParametersDict[MaxIterationsNumberKey];
                double stopError = (double)ModelParametersDict[StopErrorKey];

                ActivationNetwork netToTrain = new ActivationNetwork(ActivationFunctionsDict[activationFunction], inputSize, neuronsCount);
                DataNormalizer normalizer = new DataNormalizer(iInput.Concat(iOutput).ToArray());
                IForecastingModel aModel = new ANNforecastingModel(netToTrain, normalizer);
                ISupervisedLearning teacher = new ResilientBackpropagationLearning(netToTrain);

                // Randomly drawn training subset, then normalized for the network.
                double[][] trainInputSet, trainOutputSet;
                TrainingSubsetGenerator.GenerateRandomly(iInput, iOutput, out trainInputSet, out trainOutputSet, iMultiplier: TrainSubsetMultiplier);

                trainInputSet = normalizer.Normalize(trainInputSet); trainOutputSet = normalizer.Normalize(trainOutputSet);

                long epochsCount = 0;
                double nextError = ErrorCalculator.CalculateMSE(aModel, iInput, iOutput), prevError;
                // Train until the MSE change between epochs drops below stopError
                // (convergence) or maxIterNum epochs have run.
                do
                {
                    prevError = nextError;
                    teacher.RunEpoch(trainInputSet, trainOutputSet);
                    nextError = ErrorCalculator.CalculateMSE(aModel, iInput, iOutput);
                }
                while (epochsCount++ <= maxIterNum && Math.Abs(prevError - nextError) >= stopError);
                return aModel;
            }
Exemplo n.º 21
0
        /// <summary>
        /// <inheritdoc />
        /// </summary>
        public override void Train()
        {
            // Feature matrix and expected class labels from the data source.
            var inputs  = data.GetSelectedInput(features);
            var outputs = data.GetExpectedClassificationOutput();

            // Single hidden layer of 25 neurons, one output neuron.
            network = new ActivationNetwork(new SigmoidFunction(), inputs[0].Length, 25, 1);
            new NguyenWidrow(network).Randomize();

            var teacher = new ResilientBackpropagationLearning(network);

            // RunEpoch expects jagged targets: wrap each scalar label
            // in a one-element array.
            var targets = new double[inputs.Length][];
            for (int i = 0; i < targets.Length; i++)
            {
                targets[i] = new double[1] { outputs[i] };
            }

            // Train until the epoch error drops to 2.5 or 5000 epochs elapse.
            double error = double.PositiveInfinity;
            for (int epoch = 0; error > 2.5 && epoch < 5000; epoch++)
            {
                error = teacher.RunEpoch(inputs, targets);
            }

            Save();
        }
 private void button2_Click_1(object sender, EventArgs e)
 {
     // Disable the button while a fresh network is built.
     button2.Enabled = false;

     // Deep 12-layer network with a bipolar sigmoid activation.
     network2 = new ActivationNetwork(new BipolarSigmoidFunction(param1), 100, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10, 1);

     // BUG FIX: the original constructed the teacher on `network3`, so the
     // teacher would have trained a different network than the one just
     // created here. Pair it with `network2` instead.
     reprop = new ResilientBackpropagationLearning(network2);

     network2.Randomize();
 }
Exemplo n.º 23
0
        /// <summary>
        /// Handles stock selection: optionally trains a per-stock model, plots
        /// historical prices, rolling one-step predictions, a 5-day forecast,
        /// and a 4-day moving-average baseline.
        /// </summary>
        private void listBox4_SelectedIndexChanged(object sender, EventArgs e)
        {
            if (Global.Model == null || listBox4.SelectedItem == null)
            {
                return;
            }

            var selected = ((DailyPrice)listBox4.SelectedItem).StockCode;

            toolStripStatusLabel3.Text = $"Predicting {((DailyPrice)listBox4.SelectedItem).StockCode}";
            // Price history for the selected stock, oldest first.
            IEnumerable <DailyPrice> priceForDate = Global.DataList
                                                    .Where(x => x.StockCode == selected)
                                                    .OrderBy(x => x.CloseDate);

            var currentList = priceForDate.ToList();

            chart2.Series[0].Points.DataBindY(priceForDate.Select(x => x.ClosePrice).ToArray());

            ActivationNetwork _model = null;

            //Training a new model for the stock
            if (checkBox1.Checked)
            {
                _model = new ActivationNetwork(new BipolarSigmoidFunction(2),
                                               Global.FeaturesCount, 15, 1); //hardcoded neurons count
                var _iterations  = 1000;
                var _teacher     = new ResilientBackpropagationLearning(_model);
                var _initializer = new NguyenWidrow(_model);
                _initializer.Randomize();

                var _inputs  = DataHelper.DataHelper.GetInputArray(currentList);
                var _outputs = DataHelper.DataHelper.GetOutputArray(currentList);

                for (int i = 0; i < _iterations; i++)
                {
                    var trainingError = _teacher.RunEpoch(_inputs, _outputs);
                    toolStripStatusLabel3.Text = $"Predicting {((DailyPrice)listBox4.SelectedItem).StockCode} {i}/{_iterations} | e={trainingError / _inputs.Length}";
                    Application.DoEvents();
                }
            }

            //end of training
            var firstItem  = priceForDate.Select(x => x.ClosePrice).FirstOrDefault();
            var lastItem   = priceForDate.Select(x => x.ClosePrice).LastOrDefault();
            var firstStock = priceForDate.FirstOrDefault();

            lbStockCurrent.Text = $"{selected} | {priceForDate.Count()} days: Profit: {lastItem - firstItem} VND | {Math.Round((lastItem / firstItem - 1) * 100, 2)}%\n" +
                                  $"Volatility: {Math.Round(firstStock.Volatility * 100, 2)}%\n";
            linkLabel1.Text = firstStock.StockCode;
            linkLabel1.Links.Clear();
            linkLabel1.Links.Add(new LinkLabel.Link()
            {
                LinkData = firstStock.URL
            });

            var    listPredict = priceForDate.ToList();
            int    dayCount    = listPredict.Count;
            double error       = 0.0;

            if (listPredict.Count > 1)
            {
                // Rolling one-step-ahead predictions over the known history;
                // accumulate mean absolute profit error against the originals.
                // NOTE(review): _model is null unless checkBox1 was checked —
                // presumably PredictSingle falls back to Global.Model; confirm.
                for (int i = 0; i < listPredict.Count - 1; i++)
                {
                    var previous = currentList[i];
                    var original = currentList[i + 1];
                    listPredict[i + 1] = PredictSingle(previous, Utils.GetNextDay(previous.CloseDate), _model);
                    // FIX: Math.Abs replaces the original Sqrt(Pow(x, 2)),
                    // which is the same absolute value computed the hard way.
                    error += Math.Abs(listPredict[i + 1].Profit - original.Profit);
                }
                error /= dayCount;
                var    startIdx         = listPredict.Count - 1;
                string predictedDetails = String.Empty;

                // Forecast 5 more days by feeding each prediction back in.
                // FIX: removed the unused local `newIdx` the original computed.
                for (int i = 0; i < 5; i++)
                {
                    var previous     = listPredict.LastOrDefault();
                    var predictValue = PredictSingle(previous, Utils.GetNextDay(previous.CloseDate));
                    listPredict.Add(predictValue);
                    predictedDetails += $"{predictValue.CloseDate} - {predictValue.ClosePrice} | {predictValue.ProfitPretified}%\n";
                }

                chart2.Series[1].Points.DataBindY(listPredict.Select(x => x.ClosePrice).ToArray());

                var firstPredict = listPredict[dayCount - 1].ClosePrice;
                var lastPredict  = Math.Round(listPredict.Select(x => x.ClosePrice).LastOrDefault());

                lbForecast.Text = $"Average error: {error}\n" +
                                  $"Last predicted price: {lastPredict}\n" +
                                  $"Change from now: {lastPredict - firstPredict} VND | {Math.Round((lastPredict / firstPredict - 1) * 100, 2)}%\n\n" +
                                  predictedDetails;
            }

            //moving average (4 days) as baseline
            int numDays     = 4;
            var averageList = new List <double>();

            for (int i = 0; i < listPredict.Count; i++)
            {
                if (i >= (numDays - 1))
                {
                    var average = 0.0d;
                    for (int a = 0; a < numDays; a++)
                    {
                        // Use real prices while available, predictions afterwards.
                        if (i < currentList.Count)
                        {
                            average += currentList[i - a].ClosePrice / numDays;
                        }
                        else
                        {
                            average += listPredict[i - a].ClosePrice / numDays;
                        }
                    }
                    averageList.Add(average);
                }
                else
                {
                    // Not enough days yet for a full window — leave a gap.
                    averageList.Add(double.NaN);
                }
            }
            chart2.Series[2].Points.DataBindY(averageList.ToArray());

            toolStripStatusLabel3.Text = $"Ready";
            checkBox1.Checked          = false;
        }
Exemplo n.º 24
0
 public void LoadNetworkFromFile(string filePath)
 {
     // Restore the persisted deep belief network and rebuild both teachers for it.
     network             = DeepBeliefNetwork.Load(filePath);
     unsuperVisedTeacher = GetUnsupervisedTeacherForNetwork(network);
     supervisedTeacher   = GetSupervisedTeacherForNetwork(network);
 }
Exemplo n.º 25
0
 public NeuralNetworkManager(IActivationFunction function, int inputsCount, params int[] neuronsCount)
 {
     // Build the activation network and pair it with a resilient-backprop teacher.
     var net = new ActivationNetwork(function, inputsCount, neuronsCount);
     network = net;
     teacher = new ResilientBackpropagationLearning(net);
 }
        /// <summary>
        /// <inheritdoc />
        /// </summary>
        public override void Train()
        {
            var inputsOriginal  = data.GetSelectedInput(features);
            var outputsOriginal = data.GetExpectedRegressionOutput();

            // Keep only samples on one side of zero, depending on 'positive'.
            // NOTE(review): 'positive' selecting targets < 0 (and vice versa)
            // looks inverted — confirm this is the intended filtering.
            var tempInputs  = new List <double[]>();
            var tempOutputs = new List <double>();

            for (int i = 0; i < inputsOriginal.Length; i++)
            {
                if (positive && outputsOriginal[i] < 0.0)
                {
                    tempInputs.Add(inputsOriginal[i]);
                    tempOutputs.Add(outputsOriginal[i]);
                }

                if (!positive && outputsOriginal[i] > 0.0)
                {
                    tempInputs.Add(inputsOriginal[i]);
                    tempOutputs.Add(outputsOriginal[i]);
                }
            }

            var inputs  = tempInputs.ToArray();
            var outputs = tempOutputs.ToArray();

            var function = new SigmoidFunction();

            // Single hidden layer of 5 neurons, one regression output.
            network = new ActivationNetwork(function, inputs[0].Length, 5, 1);

            var teacher = new ResilientBackpropagationLearning(network);

            var initialization = new NguyenWidrow(network);

            initialization.Randomize();

            // Scale targets into [0, 1] to match the sigmoid output range.
            var scaledOutputs = Vector.Scale(outputs, range, new DoubleRange(0.0, 1.0));

            // RunEpoch expects jagged targets: one-element array per sample.
            var outputsNetwork = new double[outputs.Length][];

            for (int i = 0; i < outputs.Length; i++)
            {
                outputsNetwork[i] = new double[1] { scaledOutputs[i] };
            }

            double error = Double.PositiveInfinity;

            double maxError = outputs.Length / 5e2;

            int epoch = 0;

            while (error > maxError && epoch < 5000)
            {
                error = teacher.RunEpoch(inputs, outputsNetwork);

                // BUG FIX: the original never incremented 'epoch', so the
                // 5000-epoch cap was dead code and the loop could spin
                // forever if the error never dropped below the threshold.
                epoch++;
            }

            Save();
        }
Exemplo n.º 27
0
        /// <summary>
        /// Trains <c>Global.Model</c> on a background task using the teacher selected
        /// in <c>comboBox1</c>, reporting error, epoch count and elapsed time through
        /// <paramref name="progress"/>. Training runs until <c>IsTraining</c> is
        /// cleared or the user declines to continue after the epoch error increases.
        /// </summary>
        /// <param name="progress">Sink for per-epoch training progress updates.</param>
        private async Task TrainModel(IProgress <TrainingProgress> progress)
        {
            if (Global.Model != null)
            {
                // BUG FIX: double.TryParse writes 0 to its out argument on failure,
                // which silently discarded the intended default of 1. Fall back to 1
                // explicitly when the textbox does not contain a valid number.
                double learningRate;
                if (!double.TryParse(txLearnRate.Text, out learningRate))
                {
                    learningRate = 1;
                }

                // Never train with a rate below 0.1.
                learningRate = Math.Max(learningRate, 0.1);

                //                BackPropagationLearning
                //              LevenbergMarquardtLearning
                //              ResilientBackpropagationLearning
                ISupervisedLearning teacher;

                switch (comboBox1.SelectedIndex)
                {
                case 0:
                    teacher = new BackPropagationLearning(Global.Model);
                    ((BackPropagationLearning)teacher).LearningRate = learningRate;
                    break;

                case 1:
                    // `true` selects the Jacobian-by-backpropagation variant.
                    teacher = new LevenbergMarquardtLearning(Global.Model, true);
                    ((LevenbergMarquardtLearning)teacher).LearningRate = learningRate;
                    break;

                case 2:
                    teacher = new ResilientBackpropagationLearning(Global.Model);
                    ((ResilientBackpropagationLearning)teacher).LearningRate = learningRate;
                    break;

                case 3:
                    // Evolutionary learning with a population of 100 chromosomes.
                    teacher = new EvolutionaryLearning(Global.Model, 100);
                    break;

                default:
                    teacher = new LevenbergMarquardtLearning(Global.Model, true);
                    ((LevenbergMarquardtLearning)teacher).LearningRate = learningRate;
                    break;
                }

                this.IsTraining = true;

                bool _training = true;
                var  sw        = Stopwatch.StartNew();

                bool isKeepRunning = false;

                await Task.Run(() => {
                    var retVal   = new TrainingProgress();
                    retVal.error = double.PositiveInfinity;

                    while (_training)
                    {
                        lock (syncLock)
                        {
                            _training = this.IsTraining;
                            // Normalize the epoch error by the number of samples.
                            var error = teacher.RunEpoch(Global.inputs, Global.outputs) / Global.inputs.Length;

                            // NOTE(review): MessageBox.Show runs on a worker thread
                            // while syncLock is held, blocking training until the user
                            // answers — confirm this is acceptable for this app.
                            if (!isKeepRunning && (error > retVal.error))
                            {
                                if (MessageBox.Show("Training increases error. Continue?",
                                                    "Confirmation", MessageBoxButtons.YesNo) == DialogResult.Yes)
                                {
                                    isKeepRunning = true;
                                }
                                else
                                {
                                    break;
                                }
                            }
                            retVal.error = error;
                        }

                        retVal.epochs++;
                        retVal.timeElapsed = sw.ElapsedMilliseconds;

                        progress.Report(retVal);
                    }
                });

                sw.Stop();
            }
        }
Exemplo n.º 28
0
 /// <summary>
 /// Deserializes an activation network from <paramref name="path"/> and recreates
 /// the resilient-backpropagation teacher around the loaded instance.
 /// </summary>
 /// <param name="path">Path to a file previously written by <c>Network.Save</c>.</param>
 internal void Load(string path)
 {
     network = (ActivationNetwork)Network.Load(path);
     teacher = new ResilientBackpropagationLearning(network);
 }
        /// <summary>
        /// Wires up the form: builds the networks and their learning algorithms,
        /// configures the trainer/signal background workers and the save/load file
        /// dialogs, and prepares the error-vs-time ZedGraph plot.
        /// </summary>
        public BackPropogation()
        {
            InitializeComponent();

            // Shared network: 50 inputs, hidden layers of 50 and 10, single output.
            activation_nework = new ActivationNetwork(new BipolarSigmoidFunction(), 50, 50,  10, 1);

            watch1 = new Stopwatch();

            watch2 = new Stopwatch();

            watch3 = new Stopwatch();

            backgroundWorkerTrainer.Disposed += backgroundWorkerTrainer_Disposed;
            backgroundWorkerTrainer.DoWork += backgroundWorkerTrainer_DoWork;
            backgroundWorkerTrainer.ProgressChanged += backgroundWorkerTrainer_ProgressChanged;
            backgroundWorkerTrainer.WorkerSupportsCancellation = true;
            backgroundWorkerTrainer.WorkerReportsProgress = true;
            saveFileDialog1.Filter = "feed forward network files (*.ffn)|*.ffn";
            saveFileDialog1.Title = "Save neural networkfile";
            saveFileDialog1.InitialDirectory = null;
            saveFileDialog1.FileName = null;

            openFileDialog1.Filter = "feed forward network files (*.ffn)|*.ffn";
            openFileDialog1.Title = "Load neural network file";
            openFileDialog1.InitialDirectory = null;
            openFileDialog1.FileName = null;

            backgroundWorkerSignal.Disposed += backgroundWorkerSignal_Disposed;
            backgroundWorkerSignal.WorkerSupportsCancellation = true;
            backgroundWorkerSignal.WorkerReportsProgress = true;
            backgroundWorkerSignal.DoWork += backgroundWorkerSignal_DoWork;
            backgroundWorkerSignal.RunWorkerCompleted += backgroundWorkerSignal_RunWorkerCompleted;//80, 70, 60, 50, 40,
            // NOTE(review): network1..network4 all alias the SAME activation_nework
            // instance, so teacher/evteacher/reprop/lbteacher train one shared
            // network — confirm that is intended (the commented code suggests they
            // were once separate networks).
            network1 = activation_nework;
            network2 = activation_nework; // new ActivationNetwork(new BipolarSigmoidFunction(), 50, 50, 40, 1);
            network3 = activation_nework; // new ActivationNetwork(new BipolarSigmoidFunction(), 50, 50, 40, 1);
            network4 = activation_nework; // new ActivationNetwork(new BipolarSigmoidFunction(), 50, 50, 40, 1);
            network5 = new ActivationNetwork(new BipolarSigmoidFunction(), 50,1);
            network6 = new ActivationNetwork(new BipolarSigmoidFunction(), 50, 1);
            teacher = new BackPropagationLearning(network1);
            evteacher = new EvolutionaryLearning(network2, 100);
            reprop = new ResilientBackpropagationLearning(network3);
            lbteacher = new LevenbergMarquardtLearning(network4);
            delta = new DeltaRuleLearning(network5);
            perceptron = new PerceptronLearning(network6);
            delta.LearningRate = 1;
            perceptron.LearningRate = 0.1;
            // NOTE(review): this GraphPane is immediately replaced by the control's
            // pane two lines below — the allocation is redundant.
            myPane = new GraphPane();
            listPointsOne = new PointPairList();

            myPane = zedGraphControl1.GraphPane;

            // set a title
            myPane.Title.Text = "Error VS Time";

            // set X and Y axis titles
            myPane.XAxis.Title.Text = "Time in Milliseconds";
            myPane.YAxis.Title.Text = "Error";
            myCurveOne = myPane.AddCurve("Learning curve", listPointsOne, Color.Red, SymbolType.None);
               // myCurveOne = myPane.AddCurve("Resilient Back Propagation", listPointstwo, Color.Green, SymbolType.None);
               // myCurveOne = myPane.AddCurve("Genetic Learning", listPointsthree, Color.Blue, SymbolType.None);
        }
Exemplo n.º 30
0
        /// <summary>
        /// Wires up the form: builds the networks and their learning algorithms,
        /// configures the trainer/signal background workers and the save/load file
        /// dialogs, and prepares the error-vs-time ZedGraph plot.
        /// </summary>
        public BackPropogation()
        {
            InitializeComponent();

            // Shared network: 50 inputs, hidden layers of 50 and 10, single output.
            activation_nework = new ActivationNetwork(new BipolarSigmoidFunction(), 50, 50, 10, 1);

            watch1 = new Stopwatch();

            watch2 = new Stopwatch();

            watch3 = new Stopwatch();

            backgroundWorkerTrainer.Disposed                  += backgroundWorkerTrainer_Disposed;
            backgroundWorkerTrainer.DoWork                    += backgroundWorkerTrainer_DoWork;
            backgroundWorkerTrainer.ProgressChanged           += backgroundWorkerTrainer_ProgressChanged;
            backgroundWorkerTrainer.WorkerSupportsCancellation = true;
            backgroundWorkerTrainer.WorkerReportsProgress      = true;
            saveFileDialog1.Filter           = "feed forward network files (*.ffn)|*.ffn";
            saveFileDialog1.Title            = "Save neural networkfile";
            saveFileDialog1.InitialDirectory = null;
            saveFileDialog1.FileName         = null;

            openFileDialog1.Filter           = "feed forward network files (*.ffn)|*.ffn";
            openFileDialog1.Title            = "Load neural network file";
            openFileDialog1.InitialDirectory = null;
            openFileDialog1.FileName         = null;

            backgroundWorkerSignal.Disposed += backgroundWorkerSignal_Disposed;
            backgroundWorkerSignal.WorkerSupportsCancellation = true;
            backgroundWorkerSignal.WorkerReportsProgress      = true;
            backgroundWorkerSignal.DoWork             += backgroundWorkerSignal_DoWork;
            backgroundWorkerSignal.RunWorkerCompleted += backgroundWorkerSignal_RunWorkerCompleted; //80, 70, 60, 50, 40,
            // NOTE(review): network1..network4 all alias the SAME activation_nework
            // instance, so teacher/evteacher/reprop/lbteacher train one shared
            // network — confirm that is intended (the commented code suggests they
            // were once separate networks).
            network1                = activation_nework;
            network2                = activation_nework;                                            // new ActivationNetwork(new BipolarSigmoidFunction(), 50, 50, 40, 1);
            network3                = activation_nework;                                            // new ActivationNetwork(new BipolarSigmoidFunction(), 50, 50, 40, 1);
            network4                = activation_nework;                                            // new ActivationNetwork(new BipolarSigmoidFunction(), 50, 50, 40, 1);
            network5                = new ActivationNetwork(new BipolarSigmoidFunction(), 50, 1);
            network6                = new ActivationNetwork(new BipolarSigmoidFunction(), 50, 1);
            teacher                 = new BackPropagationLearning(network1);
            evteacher               = new EvolutionaryLearning(network2, 100);
            reprop                  = new ResilientBackpropagationLearning(network3);
            lbteacher               = new LevenbergMarquardtLearning(network4);
            delta                   = new DeltaRuleLearning(network5);
            perceptron              = new PerceptronLearning(network6);
            delta.LearningRate      = 1;
            perceptron.LearningRate = 0.1;
            // NOTE(review): this GraphPane is immediately replaced by the control's
            // pane two lines below — the allocation is redundant.
            myPane                  = new GraphPane();
            listPointsOne           = new PointPairList();

            myPane = zedGraphControl1.GraphPane;

            // set a title
            myPane.Title.Text = "Error VS Time";

            // set X and Y axis titles
            myPane.XAxis.Title.Text = "Time in Milliseconds";
            myPane.YAxis.Title.Text = "Error";
            myCurveOne = myPane.AddCurve("Learning curve", listPointsOne, Color.Red, SymbolType.None);
            // myCurveOne = myPane.AddCurve("Resilient Back Propagation", listPointstwo, Color.Green, SymbolType.None);
            // myCurveOne = myPane.AddCurve("Genetic Learning", listPointsthree, Color.Blue, SymbolType.None);
        }
        /// <summary>
        /// Trains the network on samples generated from the category's compositions
        /// (sigmoid activation with alpha = 2, one hidden layer of 11 neurons), then
        /// persists the result via <see cref="Save"/>.
        /// </summary>
        public void Train()
        {
            var samples = GenerateSamples(category.Compositions);

            // Split the sample pairs into the parallel jagged arrays RunEpoch expects.
            double[][] inputs = new double[samples.Length][];
            double[][] outputs = new double[samples.Length][];
            for (int i = 0; i < samples.Length; i++)
            {
                inputs[i] = samples[i].Inputs;
                outputs[i] = samples[i].Outputs;
            }

            var function = new SigmoidFunction(2);

            // Keep a strongly typed local so the teacher does not need an unchecked
            // `network as ActivationNetwork` cast.
            var activationNetwork = new ActivationNetwork(function, MAX_INPUTS, 11, MAX_OUTPUTS);
            network = activationNetwork;

            var teacher = new ResilientBackpropagationLearning(activationNetwork);

            for (int i = 0; i < Epochs; i++)
            {
                var e = teacher.RunEpoch(inputs, outputs);

                // Log percentage of epochs completed and the current epoch error.
                Console.WriteLine("{0} : {1}", i / (double)Epochs * 100, e);
            }

            Save();
        }
Exemplo n.º 32
0
        /// <summary>
        /// Runs 5-fold cross-validation over the class-level dataset for one network
        /// topology (breadth/depth) and learning rate, trying iteration counts of
        /// 10, 100 and 1000, and returns the configuration with the lowest averaged
        /// absolute error on the held-out folds.
        /// </summary>
        /// <param name="inputSize">Number of network inputs.</param>
        /// <param name="outputSize">Number of network outputs.</param>
        /// <param name="breadth">Neurons per hidden layer.</param>
        /// <param name="depth">Number of layer entries to fill (see NOTE below).</param>
        /// <param name="trainingweights">Learning rate for the RProp teacher.</param>
        async Task <KFoldData> kfold(int inputSize, int outputSize, int breadth, int depth, double trainingweights)
        {
            // Yield immediately so the rest of the method runs off the caller's context.
            await Task.Delay(1).ConfigureAwait(false);

            double    bestKVal = double.MaxValue;
            KFoldData bestVal  = new KFoldData(0, 0, 0, 0, 0);

            for (int iterations = 10; iterations < 10000; iterations = iterations * 10)
            {
                // NOTE(review): nodeArray has depth+1 entries but the loop fills only
                // `depth` of them, leaving a trailing zero-neuron layer; and the
                // output size is placed FIRST (index 0) rather than last, despite the
                // "// depth - 1)" remnant suggesting it was meant to be last. Confirm
                // against the ActivationNetwork layer-ordering convention.
                int[] nodeArray = new int[depth + 1];
                for (int fillVal = 0; fillVal < depth; fillVal++)
                {
                    if (fillVal == 0) // depth - 1)
                    {
                        nodeArray[0] = outputSize;
                    }
                    else
                    {
                        nodeArray[fillVal] = breadth;
                    }
                }



                double kSumAvg = 0;
                for (int i = 0; i < 5; i++)
                {
                    // Fresh network and teacher per fold so folds do not share weights.
                    var testNet     = new ActivationNetwork(new SigmoidFunction(), inputSize, nodeArray);
                    var testLearner = new ResilientBackpropagationLearning(testNet);
                    testLearner.LearningRate = trainingweights;

                    int length = dataset_in.GetLength(0) / 5;

                    // Fold i is held out for testing; the remaining 4/5 train.
                    var trainingArrayIn  = new double[dataset_in.GetLength(0) * 4 / 5][];
                    var trainingArrayOut = new double[dataset_out.GetLength(0) * 4 / 5][];
                    // NOTE(review): these two testing arrays are allocated and then
                    // immediately replaced by the Skip/Take results below — redundant.
                    var testingArrayIn   = new double[dataset_in.GetLength(0) / 5][];
                    var testingArrayOut  = new double[dataset_out.GetLength(0) / 5][];

                    dataset_in.Take(i * length).ToArray().CopyTo(trainingArrayIn, 0);
                    dataset_in.Skip((i * length) + length).Take((length * 5) - (i * length + length)).ToArray().CopyTo(trainingArrayIn, i * length);

                    testingArrayIn = dataset_in.Skip(i * length).Take(length).ToArray();

                    dataset_out.Take(i * length).ToArray().CopyTo(trainingArrayOut, 0);
                    dataset_out.Skip((i * length) + length).Take((length * 5) - (i * length + length)).ToArray().CopyTo(trainingArrayOut, i * length);

                    testingArrayOut = dataset_out.Skip(i * length).Take(length).ToArray();


                    for (int iteration = 0; iteration < iterations; iteration++)
                    {
                        testLearner.RunEpoch(trainingArrayIn, trainingArrayOut);
                    }



                    double kSum = 0;
                    for (int k = 0; k < testingArrayIn.GetLength(0); k++)
                    {
                        var testResults = testNet.Compute(testingArrayIn[k]);
                        for (int j = 0; j < testResults.Length; j++)
                        {
                            kSum += Math.Abs(testResults[j] - testingArrayOut[k][j]);
                        }

                        // NOTE(review): kSum is cumulative over samples and is added to
                        // kSumAvg on EVERY sample, so earlier samples' error is counted
                        // repeatedly — this looks like it belongs after the k loop.
                        kSumAvg += kSum;
                    }
                }

                kSumAvg = kSumAvg / dataset_in.GetLength(0);
                // A perfect score cannot be improved upon — return early.
                if (kSumAvg == 0)
                {
                    bestKVal = kSumAvg;
                    bestVal  = new KFoldData(breadth, depth, trainingweights, iterations, bestKVal);
                    Console.WriteLine("Thread Complete " + breadth + " " + depth + " " + trainingweights + " " + iterations + " " + bestKVal);
                    return(bestVal);
                }

                if (kSumAvg < bestKVal)
                {
                    bestKVal = kSumAvg;
                    bestVal  = new KFoldData(breadth, depth, trainingweights, iterations, bestKVal);
                }
            }
            Console.WriteLine("Thread Complete " + breadth + " " + depth + " " + trainingweights + " " + bestVal.Iterations + " " + bestKVal);

            return(bestVal);
            //return new Task<KFoldData>(() => helperFunction(inputSize, outputSize, breadth,depth,trainingweights));
        }