Activation network
Activation network is the base for multi-layer neural networks built from neurons with activation functions; it consists of activation layers.
Inheritance: Network
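A minimal usage sketch, assuming the AForge.Neuro API that the examples below exercise (ActivationNetwork, SigmoidFunction, BackPropagationLearning); the OR-gate data, layer sizes, learning rate, and stopping criterion are illustrative only:

        using System;
        using AForge.Neuro;
        using AForge.Neuro.Learning;

        class ActivationNetworkSketch
        {
            static void Main()
            {
                // training samples (OR gate): jagged arrays of inputs and expected outputs
                double[][] input  = { new double[] { 0, 0 }, new double[] { 0, 1 }, new double[] { 1, 0 }, new double[] { 1, 1 } };
                double[][] output = { new double[] { 0 },    new double[] { 1 },    new double[] { 1 },    new double[] { 1 } };

                // sigmoid activation; two inputs, a hidden layer of two neurons, one output neuron
                var network = new ActivationNetwork(new SigmoidFunction(2), 2, 2, 1);

                // back-propagation teacher; RunEpoch performs one pass over all samples and returns the summed error
                var teacher = new BackPropagationLearning(network) { LearningRate = 0.1 };

                double error = double.MaxValue;
                for (int epoch = 0; epoch < 10000 && error > 0.01; epoch++)
                    error = teacher.RunEpoch(input, output);

                // query the trained network
                Console.WriteLine(network.Compute(new double[] { 0, 1 })[0]);
            }
        }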
        public override ConfusionMatrix Execute()
        {
            //Create a network with one layer and one neuron in that layer
            var network = new ActivationNetwork(new ThresholdFunction(), 3, 1);

            //Bind the reference of the neuron
            var neuron = network.Layers[0].Neurons[0] as ActivationNeuron;

            //Create the Perceptron learning algorithm
            //The library's perceptron implements a single-layer linear classifier
            var teacher = new PerceptronLearning(network);

            teacher.LearningRate = 0.1;

            //Extend the vectors by one dimension, padding a 1 onto the end
            var richTraining = AlgorithmHelpers.PaddDimension(trainingSet);
            var richTesting = AlgorithmHelpers.PaddDimension(testSet);

            //Train the network until the error is small enough
            //or 500 iterations have been completed
            int epochs = 0;
            while (true)
            {
                double error = teacher.RunEpoch(richTraining, trainingOutput); // optionally divide by trainingSet.Length to normalize
                ++epochs;
                if (error < 0.025 * trainingSet.Length || epochs == 500) break;
            }

            var predicted = richTesting
                   .Select(x => neuron.Compute(x))
                   .Select(x => Convert.ToInt32(x))
                   .ToArray();


            //Create a confusion matrix with the calculated parameters
            ConfusionMatrix cmatrix = new ConfusionMatrix(predicted, expected, POSITIVE, NEGATIVE);

            OnAlgorithmEnded(Enumerable.Repeat(neuron, 1), cmatrix);
            return cmatrix;
        }
 public AdditionalNeuralNetwork(int numberOfTags)
 {
      this.network = new ActivationNetwork(new SigmoidFunction(1.0d), numberOfTags, numberOfTags);
      this.teacher = new ISupervisedLearning[]
      {
          new DeltaRuleLearning(this.network),
          new PerceptronLearning(this.network),
          new BackPropagationLearning(this.network)
      };
     //this.teacher.LearningRate = 0.10d;
     //this.teacher.Momentum = 0.10d;
 }
Example #3
 public void test() {
     // initialize input and output values
     var input = new double[4][] {
         new double[] { 0, 0 }, new double[] { 0, 1 },
         new double[] { 1, 0 }, new double[] { 1, 1 }
     };
     var output = new double[4][] {
         new double[] { 0 }, new double[] { 1 },
         new double[] { 1 }, new double[] { 1 }
     };
     // create neural network
     var network = new ActivationNetwork(
             new SigmoidFunction(2),
             2, // two inputs in the network
             //2, // two neurons in the first layer
             1); // one neuron in the output layer
     // create teacher
     var teacher =
             new BackPropagationLearning(network);
     // loop
     while (true) {
         // run epoch of learning procedure
         var error = teacher.RunEpoch(input, output);
         // check error value to see if we need to stop
         // ...
         if (error < 0.001) {
             break;
         }
     }
     Console.WriteLine(network.Compute(new double[] { 0, 0 })[0] + ","
                       + network.Compute(new double[] { 0, 1 })[0] + ","
                       + network.Compute(new double[] { 1, 0 })[0] + ","
                       + network.Compute(new double[] { 1, 1 })[0]);
 }
        public IA(Game game, int players)
            : base(game)
        {
            rndSeedGen = new Random();
            rndControl = new Random();
            rndMovControl = new Random();
            this.comidas = null;
            this.jugadores = null;
            this.numWeights = HIDDEN_UNITS0 * (INPUT_UNITS + 1) + HIDDEN_UNITS1 * (HIDDEN_UNITS0 + 1) + OUTPUT_UNITS * (HIDDEN_UNITS1 + 1);
            redes = new ActivationNetwork[players];
            for (int i = 0; i < redes.Length; i++)
            {
                redes[i] = new ActivationNetwork(new SigmoidFunction(400), INPUT_UNITS, HIDDEN_UNITS0, HIDDEN_UNITS1, OUTPUT_UNITS);
            }
            inputVector = new double[INPUT_UNITS];
            outputVector = new double[OUTPUT_UNITS];
            doneEvents = new ManualResetEvent[players];
            for (int i = 0; i < players; i++) doneEvents[i] = new ManualResetEvent(false);

            //You can play with the range parameters to modify how the networks evolve
            //The selection method can also be changed.
            chromosomeGenerator = new UniformGenerator(new Range(-10f, 10f), rndSeedGen.Next(-100, 100));
            mutationAdditionGenerator = new UniformGenerator(new Range(-8f, 8f), rndSeedGen.Next(-100, 100));
            mutationMultiplierGenerator = new UniformGenerator(new Range(-8f, 8f), rndSeedGen.Next(-100, 100));
            fitnessFunction = new GameFitnessFunction();
            selectionMethod = new EliteSelection();
            padre = new gameChromosome(chromosomeGenerator, mutationMultiplierGenerator, mutationAdditionGenerator, numWeights);
            poblacion = new Population(WorldGame.JUGADORES, padre, fitnessFunction, selectionMethod);
        }
Example #5
        public virtual void Prepare()
        {
            PrepareData();
            PrepareCharts();

            network = new ActivationNetwork(new Tanh(0.2),
                Sizes[0],
                Sizes.Skip(1).ToArray());

            network.ForEachWeight(z => rnd.NextDouble() * 2 - 1);

            teacher = new BackPropagationLearning(network);
            teacher.LearningRate = 1;

            Form = new Form()
            {
                Text = GetType().Name,
                Size = new Size(800, 600),
                FormBorderStyle = FormBorderStyle.FixedDialog,
                Controls =
                {
                    AreaChart,
                    HistoryChart
                }
            };
        }
Example #6
 static void ForEachWeight(ActivationNetwork network, Func<double, double> modifier)
 {
     foreach (var l in network.Layers)
         foreach (var n in l.Neurons)
             for (int i = 0; i < n.Weights.Length; i++)
                 n.Weights[i] = modifier(n.Weights[i]);
 }
        public void Test()
        {
            ActivationNetwork network = new ActivationNetwork(
                new SigmoidFunction(),
                2, // two inputs in the network
                2, // two neurons in the first layer
                1); // one neuron in the second layer

            BackPropagationLearning teacher = new BackPropagationLearning(network);

            double lastError = double.MaxValue;
            int counter = 0;
            while (true)
            {
                counter++;
                var error = teacher.RunEpoch(input, output);
                if (lastError - error < 0.0000001 && error < 0.001)
                    break;
                lastError = error;
            }

            //var bla = network.Compute(input[0])[0];
            //var round = Math.Round(network.Compute(input[0])[0], 2);
            //var result = output[0][0];
            //Assert.IsTrue(Math.Abs(round - result) < double.Epsilon);
            Assert.IsTrue(Math.Abs(network.Compute(input[0])[0] - output[0][0]) < 0.03);
            Assert.IsTrue(Math.Abs(network.Compute(input[1])[0] - output[1][0]) < 0.03);
            Assert.IsTrue(Math.Abs(network.Compute(input[2])[0] - output[2][0]) < 0.03);
            Assert.IsTrue(Math.Abs(network.Compute(input[3])[0] - output[3][0]) < 0.03);
            Console.WriteLine($"Loop counter = {counter}.");
        }
Example #8
        public void Learn()
        {
            var network = new ActivationNetwork(new BipolarSigmoidFunction(), Constants.StoneCount, 1);

            var teacher = new BackPropagationLearning(network);//new PerceptronLearning(network);

            var data = LoadData("4-6-2012-04-24.know");

            double error = 1.0;

            int index = 0;
            while (error > 0.001 && index < 100000) {
                error = teacher.RunEpoch(data.Item1, data.Item2);
                index++;
            }

            network.Save("4-6-2012-04-24.bp.net");

            var text = "□○○○●○○□○●●□□●□□";
            var i = ToDouble(text);//-2
            var o = network.Compute(i);

            var eval = o[0] * 2 * Constants.StoneCount - Constants.StoneCount;

            Console.WriteLine("{0} {1}", text, eval);
        }
Example #9
        protected virtual void CreateNetwork()
        {
            network = new ActivationNetwork(new Tanh(1), 1, 5, 1);
            network.ForEachWeight(z => rnd.NextDouble() * 2 - 1);

            teacher = new BackPropagationLearning(network);
            teacher.LearningRate = 1;
        }
Example #10
        protected override void CreateNetwork()
        {
            network = new ActivationNetwork(new Tanh(0.1), 1, 5, 1);
            network.ForEachWeight(z => rnd.NextDouble() * 2 - 1);

            teacher = new BackPropagationLearning(network);
            teacher.LearningRate = 1;

            teacher.Momentum = 0.3;
        }
Example #11
 public NeuralNetworkBot1()
     : base(
         "Neural Network Bot I",
         "Runs a neural network neural network to compute a score out of different simulated moves",
         true)
 {
     _network = new ActivationNetwork(new SigmoidFunction(), 4*4, 4*4, 1);
     //_network.Randomize();
     //ActivationNetwork.Load();
     //_network.Save();
 }
        public EstimationResult Estimate(IEnumerable<IDateValue> dateValues)
        {
            var data = dateValues.ToArray();
            var samplesCount = data.Length - LayerWidth;
            var factor = 1.7 / data.Length;
            var yMin = data.Min(x => x.Value);

            var input = new double[samplesCount][];
            var output = new double[samplesCount][];

            for (var i = 0; i < samplesCount; i++)
            {
                input[i] = new double[LayerWidth];
                output[i] = new double[1];

                for (var j = 0; j < LayerWidth; j++)
                    input[i][j] = (data[i + j].Value - yMin) * factor - 0.85;

                output[i][0] = (data[i + LayerWidth].Value - yMin) * factor - 0.85;
            }

            var network = new ActivationNetwork(
                new BipolarSigmoidFunction(SigmoidAlphaValue),
                LayerWidth, LayerWidth * 2, 1);

            var teacher = new BackPropagationLearning(network)
            {
                LearningRate = LearningRate,
                Momentum = Momentum
            };

            var solutionSize = data.Length - LayerWidth;
            var solution = new double[solutionSize, 2];
            var networkInput = new double[LayerWidth];

            for (var j = 0; j < solutionSize; j++)
                solution[j, 0] = j + LayerWidth;

            TimesLoop.Do(Iterations, () =>
            {
                teacher.RunEpoch(input, output);

                for (int i = 0, n = data.Length - LayerWidth; i < n; i++)
                {
                    for (var j = 0; j < LayerWidth; j++)
                        networkInput[j] = (data[i + j].Value - yMin) * factor - 0.85;

                    solution[i, 1] = (network.Compute(networkInput)[0] + 0.85) / factor + yMin;
                }
            });

            return EstimationResult.Create(solution[0, 1], this);
        }
Example #13
 public NeuralNetworkBot2()
     : base(
         "Neural Network Bot II",
         "Runs a neural network neural network to directly calculate outputs for each move",
         true)
 {
     _network = new ActivationNetwork(new SigmoidFunction(), 4*4, 4*4, 2);
     //_network.Randomize();
     Load();
     
     _teacher = new BackPropagationLearning(_network);
     _teacher.LearningRate = 0.05;
     _teacher.Momentum = 0.05;
 }
 public static void ChromosomeToNetwork(gameChromosome chromosome, ActivationNetwork network)
 {
     double[] values = chromosome.Value;
     int l = 0;
     for (int i = 0; i < network.LayersCount; i++)
         for (int j = 0; j < network[i].NeuronsCount; j++)
             for (int k = 0; k <= network[i][j].InputsCount; k++)
             {
                 if (k == 0)
                     network[i][j].Threshold = values[l];
                 else
                     network[i][j][k - 1] = values[l];
                 l++;
             }
 }
Example #15
        static void Main(string[] args)
        {
            // initialize input and output values
            double[][] input = new double[4][] {
                new double[] {0, 0}, new double[] {0, 1},
                new double[] {1, 0}, new double[] {1, 1}
            };

            double[][] output = new double[4][] {
                new double[] {0}, new double[] {1},
                new double[] {1}, new double[] {0}
            };

            // create neural network
            ActivationNetwork network = new ActivationNetwork(
                new SigmoidFunction(1),
                2, // two inputs in the network
                2, // two neurons in the first layer
                1); // one neuron in the second layer
            // create teacher
            BackPropagationLearning teacher =
                new BackPropagationLearning(network);
            // loop
            for (int i = 0; i < 10000; i++)
            {
                // run epoch of learning procedure
                double error = teacher.RunEpoch(input, output);
                // check error value to see if we need to stop
                // ...
                Console.Out.WriteLine("#" + i + "\t" + error);
            }

            double[] ret1 = network.Compute(new double[] { 0, 0 });
            double[] ret2 = network.Compute(new double[] { 1, 0 });
            double[] ret3 = network.Compute(new double[] { 0, 1 });
            double[] ret4 = network.Compute(new double[] { 1, 1 });

            Console.Out.WriteLine();

            Console.Out.WriteLine("Eval(0, 0) = " + ret1[0]);
            Console.Out.WriteLine("Eval(1, 0) = " + ret2[0]);
            Console.Out.WriteLine("Eval(0, 1) = " + ret3[0]);
            Console.Out.WriteLine("Eval(1, 1) = " + ret4[0]);
            Console.ReadLine();
        }
Example #16
        static void Main()
        {
            weights = new double[3];
            network = new ActivationNetwork(new SignumActivationFunction(), 2, 1);
            weights[0] = ((ActivationNeuron)network.Layers[0].Neurons[0]).Threshold = 0;
            weights[1] = network.Layers[0].Neurons[0].Weights[0] = 0.9;
            weights[2] = network.Layers[0].Neurons[0].Weights[1] = 0.2;

            learning = new PerceptronLearning(network);
            learning.LearningRate = 0.005;

            form = new MyForm() { WindowState = FormWindowState.Maximized };
            form.Paint += (s, a) => Redraw(a.Graphics);

            var timer = new System.Windows.Forms.Timer();
            timer.Interval = 10;
            timer.Tick += NextSample;
            timer.Start();
            Application.Run(form);
        }
Example #17
        public double Evaluate(IChromosome chromosome)
        {
            // Build a neural network and compute the result
            DoubleArrayChromosome dac = (DoubleArrayChromosome)chromosome;
            ActivationNetwork Network = new ActivationNetwork(
                     new BipolarSigmoidFunction(sigmoidAlphaValue),
                     mArchitecture[0], mArchitecture[1], mArchitecture[2]);

            int current = 0;
            int i = 0;

            // Hidden layer weights
            for (i = 0; i < mArchitecture[1]; i++)
            {
                for (int j = 0; j < mArchitecture[0]; j++) {
                    Network[0][i][j] = dac.Value[current++];
                }
            }

            // Output layer weights
            for (i = 0; i < mArchitecture[2]; i++)
            {
                for (int j = 0; j < mArchitecture[1]; j++)
                {
                    Network[1][i][j] = dac.Value[current++];
                }
            }

            double Sum = 0.0;
            for (int cnt = 0; cnt < mInput.Length; cnt++)
            {
                double[] predicted_output = Network.Compute(mInput[cnt]);
                for (int l = 0; l < predicted_output.Length; l++)
                {
                    Sum += (predicted_output[l] - mOutput[cnt][l]) * (predicted_output[l] - mOutput[cnt][l]);
                }

            }

            return 100 - Sum;
        }
        public void TestGenetic()
        {
            ActivationNetwork network = new ActivationNetwork(new SigmoidFunction(), 2, 2, 1);
            EvolutionaryLearning superTeacher = new EvolutionaryLearning(network, 10);

            double lastError = double.MaxValue;
            int counter = 0;
            while (true)
            {
                counter++;
                var error = superTeacher.RunEpoch(input, output);
                if (lastError - error < 0.0000001 && error < 0.0001)
                    break;
                lastError = error;
            }

            Assert.IsTrue(Math.Abs(network.Compute(input[0])[0] - output[0][0]) < 0.03);
            Assert.IsTrue(Math.Abs(network.Compute(input[1])[0] - output[1][0]) < 0.03);
            Assert.IsTrue(Math.Abs(network.Compute(input[2])[0] - output[2][0]) < 0.03);
            Assert.IsTrue(Math.Abs(network.Compute(input[3])[0] - output[3][0]) < 0.03);
            Console.WriteLine($"Loop counter = {counter}.");
        }
        public void RunEpochTest1()
        {
            Accord.Math.Tools.SetupGenerator(0);

            double[][] input = 
            {
                new double[] { -1, -1 },
                new double[] { -1,  1 },
                new double[] {  1, -1 },
                new double[] {  1,  1 }
            };

            double[][] output =
            {
                new double[] { -1 },
                new double[] {  1 },
                new double[] {  1 },
                new double[] { -1 }
            };

            Neuron.RandGenerator = new ThreadSafeRandom(0);
            ActivationNetwork network = new ActivationNetwork(
                   new BipolarSigmoidFunction(2), 2, 2, 1);

            var teacher = new ParallelResilientBackpropagationLearning(network);

            double error = 1.0;
            while (error > 1e-5)
                error = teacher.RunEpoch(input, output);

            for (int i = 0; i < input.Length; i++)
            {
                double actual = network.Compute(input[i])[0];
                double expected = output[i][0];

                Assert.AreEqual(expected, actual, 0.01);
                Assert.IsFalse(Double.IsNaN(actual));
            }
        }
        public DiscreteNeuralNetworkByChord(List<NGram<Chord>[]> bad, List<NGram<Chord>[]> okay, List<NGram<Chord>[]> good, IActivationFunction function)
        {
            bad.NullCheck();
            okay.NullCheck();
            good.NullCheck();

            bad.Any().AssertTrue();
            okay.Any().AssertTrue();
            good.Any().AssertTrue();

            List<Tuple<double[], double[]>> input = new List<Tuple<double[], double[]>>(bad.Count + okay.Count + good.Count);

            input.AddRange(
                bad.Select(x => new Tuple<double[], double[]>(
                    x.SelectMany(y => y.SelectMany(p => ConvertChordIntoTrainingInput(p))).ToArray(),
                    Enumerable.Repeat<double>(DiscreteNeuralNetworkByChord.BADWEIGHT, bad.Count).ToArray())));

            input.AddRange(
                okay.Select(x => new Tuple<double[], double[]>(
                    x.SelectMany(y => y.SelectMany(p => ConvertChordIntoTrainingInput(p))).ToArray(),
                    Enumerable.Repeat<double>(OkayWeight, okay.Count).ToArray())));

            input.AddRange(
                good.Select(x => new Tuple<double[], double[]>(
                    x.SelectMany(y => y.SelectMany(p => ConvertChordIntoTrainingInput(p))).ToArray(),
                    Enumerable.Repeat<double>(DiscreteNeuralNetworkByChord.GOODWEIGHT, good.Count).ToArray())));

            this.Max = input.Max(x => x.Item1.Max());
            int minIndex = input.Min(x => x.Item1.Length);

            var normalized = input.Select(item => Tuple.Create(item.Item1.Take(minIndex).Select(x => x / this.Max).ToArray(), item.Item2.Take(minIndex).ToArray())).ToArray();

            this.trainingData = normalized.ToArray();

            this.ActivationNetwork = new ActivationNetwork(function, this.trainingData.Max(y => y.Item1.Length), (HiddenLayerSize == 0) ? 23 : HiddenLayerSize, 1);
            this.LearningMethod = new ResilientBackpropagationLearning(this.ActivationNetwork);
            this.ActivationNetwork.Randomize();
        }
        public NeuralNetworkOperations(int characterSize)
        {
            neuralNet = new ActivationNetwork(new BipolarSigmoidFunction(2.0f), characterSize, 400, characterCount);

            neuralNet.Randomize();
            teacher = new AForge.Neuro.Learning.BackPropagationLearning(neuralNet);
            teacher.LearningRate = 0.5f;
            teacher.Momentum = 0.1f;

            prepareDataForTeacher();

            //var letters = treningLetterListInput.Zip(treningLetterListOutput, (i,o) => new { treningLetterListInput = i, treningLetterListOutput = o });

            double err = 400.0f;

            int count = 0;

            while(err >= 30.0f)
            {
                err = teacher.RunEpoch(treningLetterListInput.ToArray(), treningLetterListOutput.ToArray());
                count++;
            }
        }
Example #22
        public string Learn(string knowFile)
        {
            var network = new ActivationNetwork(new BipolarSigmoidFunction(), Constants.StoneCount, 1);

            var teacher = new BackPropagationLearning(network); //new PerceptronLearning(network);

            var data = LoadData(knowFile);

            double error = int.MaxValue;

            int index = 0;
            while (error > 1.0 && index++ < 5000) {
                error = teacher.RunEpoch(data.Item1, data.Item2);
            }

            var networkFile = knowFile + ".net";

            network.Save(networkFile);

            Console.WriteLine("Learn: {0}, Gen: {1}", knowFile, networkFile);

            return networkFile;
        }
        public void Test()
        {
            var network = new ActivationNetwork(new SigmoidFunction(), inputCount, firstLayerNeurons, secondLayerNeurons, thirdLayerNeurons, lastLayerNeurons);
            var teacher = new BackPropagationLearning(network);
            var lastError = double.MaxValue;
            var counter = 0;
            while (true)
            {
                counter++;
                var error = teacher.RunEpoch(input, output);
                if ((lastError - error < 0.00001 && error < 0.01) || counter > 1200000)
                    break;
                lastError = error;
            }

            var result1 = network.Compute(new double[] {1, 0, 1, 0, 1, 0, 1, 0});
            Console.WriteLine($"2 + 2, 2 * 2 = {result1[0]}, {result1[1]}");
            var result2 = network.Compute(new double[] {0, 1, 0, 1, 1, 0, 0, 1});
            Console.WriteLine($"1 + 1, 2 * 1 = {result2[0]}, {result2[1]}");
            var result3 = network.Compute(new double[] {1, 0, 1, 0, 0, 1, 0, 0});
            Console.WriteLine($"2 + 2, 1 * 0 = {result3[0]}, {result3[1]}");
            var result4 = network.Compute(new double[] {0, 1, 0, 0, 0, 1, 1, 0});
            Console.WriteLine($"1 + 0, 1 * 2 = {result4[0]}, {result4[1]}");
        }
Example #24
        static void NeuralNetworkAccompanimentTest()
        {
            // initialize input and output values
            double[][] input = new double[4][] {
            new double[] {0, 0}, new double[] {0, 1},
            new double[] {1, 0}, new double[] {1, 1}
            };
            double[][] output = new double[4][] {
            new double[] {0}, new double[] {1},
            new double[] {1}, new double[] {0}
            };
            SigmoidFunction sig = new SigmoidFunction();

            Accord.Neuro.Networks.RestrictedBoltzmannMachine boltz = new Accord.Neuro.Networks.RestrictedBoltzmannMachine(200, 200);

            // create neural network
            ActivationNetwork network = new ActivationNetwork(
                new SigmoidFunction(2),
                200,
                20,
                200);
            //BackPropagationLearning teacher = new BackPropagationLearning(network);
            //LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(network);
            Accord.Neuro.Learning.ParallelResilientBackpropagationLearning teacher = new ParallelResilientBackpropagationLearning(network);
            Accord.Neuro.Networks.DeepBeliefNetwork dpn = new Accord.Neuro.Networks.DeepBeliefNetwork(200, 20);

            // teacher.IncreaseFactor = 1.01;
            Composition c = Composition.LoadFromMIDI("test/other/ff7tifa.mid");

            MusicPlayer player = new MusicPlayer();
            //player.Play(c.Tracks[0]);

            List<double[]> inputs = new List<double[]>();
            List<double[]> outputs = new List<double[]>();

            inputs.Add(GetDoublesFromNotes((c.Tracks[0].GetMainSequence() as MelodySequence).ToArray()));
            outputs.Add(GetDoublesFromNotes((c.Tracks[1].GetMainSequence() as MelodySequence).ToArray()));

            //  inputs.Add(GetDoublesFromNotes((c.Tracks[1].GetMainSequence() as MelodySequence).ToArray()));
            //    outputs.Add(GetDoublesFromNotes((c.Tracks[2].GetMainSequence() as MelodySequence).ToArray()));

            // inputs.Add(GetDoublesFromNotes((c.Tracks[0].GetMainSequence() as MelodySequence).ToArray()));
            // outputs.Add(GetDoublesFromNotes((c.Tracks[3].GetMainSequence() as MelodySequence).ToArray()));

            int its = 0;
            while (its++ < 10000)
            {

                double error = teacher.RunEpoch(inputs.ToArray(), outputs.ToArray());
                Console.WriteLine("{0}: Error - {1}", its, error);
            }

            var input_melody = (c.Tracks[0].GetMainSequence() as MelodySequence);
            var new_notes = network.Compute(GetDoublesFromNotes(input_melody.ToArray()));

            var new_mel = GetMelodyFromDoubles(new_notes);

            player.Play(new_mel);

            Console.ReadLine();
        }
        /// <summary>
        /// Searches for a solution to the neural network model
        /// </summary>
        private void searchSolution()
        {
            // Normalize Data
            switch (this.selectedActivationFunction)
            {
                case ActivationFunctionEnumeration.SemiLinearFunction:
                    this.activationFunction = new SemiLinearFunction();
                    this.normalizeData(0.1, 0.9);
                    break;
                case ActivationFunctionEnumeration.SigmoidFunction:
                    this.activationFunction = new SigmoidFunction();
                    this.normalizeData(0.1, 0.9);
                    break;
                case ActivationFunctionEnumeration.BipolarSigmoidFunction:
                    this.activationFunction = new BipolarSigmoidFunction();
                    this.normalizeData(-0.9, 0.9);
                    break;
                case ActivationFunctionEnumeration.HyperbolicTangentFunction:
                    this.activationFunction = new HyperbolicTangentFunction();
                    this.normalizeData(-0.9, 0.9);
                    break;
                default:
                    this.activationFunction = new BipolarSigmoidFunction();
                    this.normalizeData(-0.9, 0.9);
                    break;
            }

            //create network
            this.network = new ActivationNetwork
                (this.activationFunction, this.inputLayerNeurons, this.hiddenLayerNeurons, this.outputLayerNeurons);
            this.network.Randomize();

            this.learning = new BackPropagationLearning(this.network);
            this.learning.LearningRate = this.learningRate;
            this.learning.Momentum = this.momentum;

            //variable for looping
            //needToStop = false;
            double mse = 0.0, error = 0.0, mae=0.0;
            int iteration = 1;

            double msle = 0.0, mspe = 0.0, generalizationLoss = 0.0, pq = 0.0;
            double[] trainingErrors = new double[this.strip];
            for (int i = 0; i < this.strip; i++) trainingErrors[i] = double.MaxValue / strip;

            double lastMSE = double.MaxValue;

            int n = this.data.Length - this.network.InputsCount;
            int validationSet = (int)Math.Round(this.validationSetRatio * n);
            int trainingSet = n - validationSet;
            double[][] networkTrainingInput = new double[trainingSet][];
            double[][] networkTrainingOutput = new double[trainingSet][];
            for (int i = 0; i < trainingSet; i++)
            {
                networkTrainingInput[i] = new double[this.network.InputsCount];
                networkTrainingOutput[i] = new double[1];
            }
            for (int i = 0; i < trainingSet; i++)
            {
                for (int j = 0; j < this.network.InputsCount; j++)
                {
                    networkTrainingInput[i][j] = this.networkInput[i][j];
                }
                networkTrainingOutput[i][0] = this.networkOutput[i][0];
            }

            double[] solutionValidation = new double[validationSet];
            double[] inputForValidation = new double[this.network.InputsCount];
            double[] inputForValidationNetwork = new double[this.network.InputsCount];

            this.bestValidationError = double.MaxValue;
            this.bestNetworkWeight = new double[this.network.LayersCount][][];
            this.bestNetworkBias = new double[this.network.LayersCount][];
            this.bestSolution = new double[n];
            for (int i = 0; i < this.network.LayersCount; i++)
            {
                this.bestNetworkWeight[i] = new double[this.network[i].NeuronsCount][];
                this.bestNetworkBias[i] = new double[this.network[i].NeuronsCount];
                for (int j = 0; j < this.network[i].NeuronsCount; j++)
                {
                    this.bestNetworkWeight[i][j] = new double[this.network[i][j].InputsCount];
                }
            }
            //best network criterion
            double bestNetworkError = double.MaxValue, bestNetworkMSE = double.MaxValue, bestNetworkMAE = double.MaxValue;

            //training
            while (!needToStop)
            {
                double sse = 0.0;
                double sae = 0.0;
                double ssle = 0.0;
                double sspe = 0.0;
                if (this.useAdvanceEarlyStopping)
                    error = this.learning.RunEpoch(networkTrainingInput, networkTrainingOutput);
                else
                    error = this.learning.RunEpoch(this.networkInput, this.networkOutput);

                this.solutionData = new double[n];
                this.solutionToShow = new double[n, 2];

                if (this.useAdvanceEarlyStopping)
                {
                    //validation
                    //for (int j = 0; j < this.network.InputsCount; j++)
                    //{
                    //    inputForValidation[this.network.InputsCount - 1 - j] = this.data[this.data.Length - validationSet - 1 - j];
                    //}
                }
                else
                {
                    seriesChart.UpdateDataSeries("Validation", null);
                }

                this.validationSolutionToShow = new double[validationSet, 2];

                for (int i = 0; i < n; i++)
                {
                    this.solutionData[i] = (this.network.Compute(this.networkInput[i])[0]
                        - this.minNormalizedData) / this.factor + this.minData;

                    this.solutionToShow[i, 0] = i + this.network.InputsCount;
                    this.solutionToShow[i,1] = this.solutionData[i];

                    sse += Math.Pow(this.solutionData[i] - this.data[i + this.network.InputsCount], 2);
                    sae += Math.Abs(this.solutionData[i] - this.data[i + this.network.InputsCount]);

                    //calculate advance early stopping
                    if (this.useAdvanceEarlyStopping)
                    {
                        if (i < n - validationSet)
                        {
                            ssle += Math.Pow(this.solutionData[i] - this.data[i + this.network.InputsCount], 2);
                        }
                        else
                        {

                            if (i == n - validationSet)
                            {
                                for (int j = 0; j < this.network.InputsCount; j++)
                                {
                                    inputForValidation[this.network.InputsCount - 1 - j] = this.data[this.data.Length - (n - i) - 1 - j];
                                }
                            }

                            for (int j = 0; j < this.network.InputsCount; j++)
                            {
                                inputForValidationNetwork[j] = (inputForValidation[j] - this.minData) * this.factor + this.minNormalizedData;
                            }

                            solutionValidation[i - n + validationSet] = (this.network.Compute(inputForValidationNetwork)[0] - this.minNormalizedData) / this.factor + this.minData;

                            this.validationSolutionToShow[i - n + validationSet, 0] = i + this.network.InputsCount;
                            this.validationSolutionToShow[i - n + validationSet, 1] = solutionValidation[i - n + validationSet];

                            sspe += Math.Pow(this.data[i + this.network.InputsCount] - solutionValidation[i - n + validationSet], 2);

                            for (int j = 0; j < this.network.InputsCount - 1; j++)
                            {
                                inputForValidation[j] = inputForValidation[j + 1];
                            }

                            inputForValidation[this.network.InputsCount - 1] = solutionValidation[i - n + validationSet];

                        }
                    }

                }

                mse = sse / this.solutionData.Length;
                mae = sae / this.solutionData.Length;

                //Display it
                this.iterationBox.Text = iteration.ToString();
                this.maeBox.Text = mae.ToString("F5");
                this.mseBox.Text = mse.ToString("F5");
                this.errorBox.Text = error.ToString("F5");

                if (this.previewGrapicRadio.Checked)
                {
                    seriesChart.UpdateDataSeries("Predicted", this.solutionToShow);
                }
                else if (this.previewTextRadio.Checked)
                {
                    if (!this.useAdvanceEarlyStopping)
                    {
                        txtProgress.AppendText("Iteration: " + iteration.ToString() + "\tError: " + error.ToString("F5") + "\tMAE: " + mae.ToString("F5") + "\tMSE: " + mse.ToString("F5") + "\n");
                        txtProgress.ScrollToCaret();
                    }
                }

                if (this.useAdvanceEarlyStopping)
                {
                    //calculate advance early stopping 2
                    mspe = sspe / validationSet;
                    msle = ssle / (this.solutionData.Length - validationSet);

                    //save best weight
                    if (this.bestValidationError > mspe)
                    {
                        this.bestValidationError = mspe;
                        this.bestSolution = this.solutionData;

                        for (int i = 0; i < this.network.LayersCount; i++)
                            for (int j = 0; j < this.network[i].NeuronsCount; j++)
                                for (int k = 0; k < this.network[i][j].InputsCount; k++)
                                    this.bestNetworkWeight[i][j][k] = this.network[i][j][k];

                        for (int i = 0; i < this.network.LayersCount; i++)
                            for (int j = 0; j < this.network[i].NeuronsCount; j++)
                                this.bestNetworkBias[i][j] = this.network[i][j].Threshold;

                        bestNetworkError = error;
                        bestNetworkMAE = mae;
                        bestNetworkMSE = mse;

                    }
                    //calculate generalization loss &pq
                    generalizationLoss = 100 * (mspe / this.bestValidationError - 1);

                    trainingErrors[(iteration - 1) % this.strip] = msle;
                    double minStripTrainingError = double.MaxValue, sumStripTrainingError = 0.0;
                    for (int i = 0; i < this.strip; i++)
                    {
                        sumStripTrainingError += trainingErrors[i];
                        if (trainingErrors[i] < minStripTrainingError) minStripTrainingError = trainingErrors[i];
                    }
                    double trainingProgress = 1000 * ((sumStripTrainingError / (this.strip * minStripTrainingError)) - 1);
                    pq = generalizationLoss / trainingProgress;

                    //display advance early stopping
                    this.learningErrorBox.Text = msle.ToString("F5");
                    this.validationErrorBox.Text = mspe.ToString("F5");
                    this.generalizationLossBox.Text = generalizationLoss.ToString("F5");
                    this.pqBox.Text = pq.ToString("F5");
                    seriesChart.UpdateDataSeries("Validation", this.validationSolutionToShow);

                    if (this.previewTextRadio.Checked)
                    {

                        txtProgress.AppendText("Iteration: " + iteration.ToString() + "\tError: " + error.ToString("F5") + "\tMAE: " + mae.ToString("F5") + "\tMSE: " + mse.ToString("F5") + "\tLearning Error: " + msle.ToString("F5") + "\tValidation Error: " + mspe.ToString("F5") + "\tGeneralization Loss: " + generalizationLoss.ToString("F5") + "\tPQ: " + pq.ToString("F5") + "\n");
                        txtProgress.ScrollToCaret();

                    }

                    //stopping
                    switch (this.advanceStoppingMethod)
                    {
                        case AdvanceStoppingMethodEnumeration.GeneralizationLoss:
                            if (generalizationLoss > this.generalizationLossTreshold) needToStop = true;
                            break;
                        case AdvanceStoppingMethodEnumeration.ProgressQuotient:
                            if (pq > this.pqTreshold) needToStop = true;
                            break;
                    }

                }

                if (this.withCheckingCycle && iteration % this.checkingCycle == 0)
                {
                    switch (this.checkingMethod)
                    {
                        case CheckingMethodEnumeration.byMSEValue:
                            if (mse <= this.byMSEValueStopping) needToStop = true;
                            break;
                        case CheckingMethodEnumeration.byMSEChange:
                            if (lastMSE - mse <= this.byMSEChangeStopping) needToStop = true;
                            break;
                    }
                    lastMSE = mse;
                }
                if (iteration >= this.maxIteration)
                {
                    needToStop = true;
                }

                iteration++;
            }

            //restore weight
            if (this.useAdvanceEarlyStopping)
            {
                this.solutionData = this.bestSolution;

                for (int i = 0; i < this.network.LayersCount; i++)
                    for (int j = 0; j < this.network[i].NeuronsCount; j++)
                        for (int k = 0; k < this.network[i][j].InputsCount; k++)
                            this.network[i][j][k] = this.bestNetworkWeight[i][j][k];

                for (int i = 0; i < this.network.LayersCount; i++)
                    for (int j = 0; j < this.network[i].NeuronsCount; j++)
                        this.network[i][j].Threshold = this.bestNetworkBias[i][j];

                //best network criterion
                this.error = bestNetworkError;
                this.mse = bestNetworkMSE;
                this.mae = bestNetworkMAE;
            }
            else
            {
                this.error = error;
                this.mse = mse;
                this.mae = mae;
            }

            this.enableControls(true);
        }
Example #26
        // Worker thread
        void SearchSolution()
        {
            // number of learning samples
            int samples = data.Length - predictionSize - windowSize;
            // data transformation factor
            double factor = 1.7 / chart.RangeY.Length;
            double yMin = chart.RangeY.Min;
            // prepare learning data
            double[][] input = new double[samples][];
            double[][] output = new double[samples][];

            for (int i = 0; i < samples; i++)
            {
                input[i] = new double[windowSize];
                output[i] = new double[1];

                // set input
                for (int j = 0; j < windowSize; j++)
                {
                    input[i][j] = (data[i + j] - yMin) * factor - 0.85;
                }
                // set output
                output[i][0] = (data[i + windowSize] - yMin) * factor - 0.85;
            }

            // create multi-layer neural network
            ActivationNetwork network = new ActivationNetwork(
                new BipolarSigmoidFunction(sigmoidAlphaValue),
                windowSize, windowSize * 2, 1);

            // create teacher
            var teacher = new ParallelResilientBackpropagationLearning(network);

            teacher.Reset(initialStep);

            // run at least one backpropagation epoch
            //teacher.RunEpoch(input, output);

            // iterations
            int iteration = 1;

            // solution array
            int solutionSize = data.Length - windowSize;
            double[,] solution = new double[solutionSize, 2];
            double[] networkInput = new double[windowSize];

            // calculate X values to be used with solution function
            for (int j = 0; j < solutionSize; j++)
            {
                solution[j, 0] = j + windowSize;
            }

            // loop
            while (!needToStop)
            {
                // run epoch of learning procedure
                double error = teacher.RunEpoch(input, output) / samples;

                // calculate solution and learning and prediction errors
                double learningError = 0.0;
                double predictionError = 0.0;
                // go through all the data
                for (int i = 0, n = data.Length - windowSize; i < n; i++)
                {
                    // put values from current window as network's input
                    for (int j = 0; j < windowSize; j++)
                    {
                        networkInput[j] = (data[i + j] - yMin) * factor - 0.85;
                    }

                    // evaluate the function
                    solution[i, 1] = (network.Compute(networkInput)[0] + 0.85) / factor + yMin;

                    // calculate prediction error
                    if (i >= n - predictionSize)
                    {
                        predictionError += Math.Abs(solution[i, 1] - data[windowSize + i]);
                    }
                    else
                    {
                        learningError += Math.Abs(solution[i, 1] - data[windowSize + i]);
                    }
                }
                // update solution on the chart
                chart.UpdateDataSeries("solution", solution);

                // set current iteration's info
                SetText(currentIterationBox, iteration.ToString());
                SetText(currentLearningErrorBox, learningError.ToString("F3"));
                SetText(currentPredictionErrorBox, predictionError.ToString("F3"));

                // increase current iteration
                iteration++;

                // check if we need to stop
                if ((iterations != 0) && (iteration > iterations))
                    break;
            }

            // show new solution
            for (int j = windowSize, k = 0, n = data.Length; j < n; j++, k++)
            {
                AddSubItem(dataList, j, solution[k, 1].ToString());
            }

            // enable settings controls
            EnableControls(true);
        }
Example #27
        // Worker thread
        void SearchSolution()
        {
            // number of learning samples
            int samples = data.GetLength(0);
            // data transformation factor
            double yFactor = 1.7 / chart.RangeY.Length;
            double yMin = chart.RangeY.Min;
            double xFactor = 2.0 / chart.RangeX.Length;
            double xMin = chart.RangeX.Min;

            // prepare learning data
            double[][] input = new double[samples][];
            double[][] output = new double[samples][];

            for (int i = 0; i < samples; i++)
            {
                input[i] = new double[1];
                output[i] = new double[1];

                // set input
                input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0;
                // set output
                output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85;
            }

            // create multi-layer neural network
            ActivationNetwork network = new ActivationNetwork(
                new BipolarSigmoidFunction(sigmoidAlphaValue),
                1, neuronsInFirstLayer, 1);
            // create teacher
            BackPropagationLearning teacher = new BackPropagationLearning(network);
            // set learning rate and momentum
            teacher.LearningRate = learningRate;
            teacher.Momentum = momentum;

            // iterations
            int iteration = 1;

            // solution array
            double[,] solution = new double[50, 2];
            double[] networkInput = new double[1];

            // calculate X values to be used with solution function
            for (int j = 0; j < 50; j++)
            {
                solution[j, 0] = chart.RangeX.Min + (double)j * chart.RangeX.Length / 49;
            }

            // loop
            while (!needToStop)
            {
                // run epoch of learning procedure
                double error = teacher.RunEpoch(input, output) / samples;

                // calculate solution
                for (int j = 0; j < 50; j++)
                {
                    networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
                    solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
                }
                chart.UpdateDataSeries("solution", solution);
                // calculate error
                double learningError = 0.0;
                for (int j = 0, k = data.GetLength(0); j < k; j++)
                {
                    networkInput[0] = input[j][0];
                    learningError += Math.Abs(data[j, 1] - ((network.Compute(networkInput)[0] + 0.85) / yFactor + yMin));
                }

                // set current iteration's info
                UpdateTextbox(currentIterationBox, iteration.ToString());
                //currentIterationBox.Text = iteration.ToString();
                UpdateTextbox(currentErrorBox, learningError.ToString("F3"));
                //currentErrorBox.Text = learningError.ToString("F3");

                // increase current iteration
                iteration++;

                // check if we need to stop
                if ((iterations != 0) && (iteration > iterations))
                    break;
            }

            // enable settings controls
            EnableControls(true);
        }
Example #28
        // Worker thread
        void SearchSolution()
        {
            // initialize input and output values
            double[][] input = null;
            double[][] output = null;

            if (sigmoidType == 0)
            {
                // unipolar data
                input = new double[4][] {
                                            new double[] {0, 0},
                                            new double[] {0, 1},
                                            new double[] {1, 0},
                                            new double[] {1, 1}
                                        };
                output = new double[4][] {
                                             new double[] {0},
                                             new double[] {1},
                                             new double[] {1},
                                             new double[] {0}
                                         };
            }
            else
            {
                // bipolar data
                input = new double[4][] {
                                            new double[] {-1, -1},
                                            new double[] {-1,  1},
                                            new double[] { 1, -1},
                                            new double[] { 1,  1}
                                        };
                output = new double[4][] {
                                             new double[] {-1},
                                             new double[] { 1},
                                             new double[] { 1},
                                             new double[] {-1}
                                         };
            }

            // create perceptron
            ActivationNetwork network = new ActivationNetwork(
                (sigmoidType == 0) ?
                    (IActivationFunction)new SigmoidFunction(sigmoidAlphaValue) :
                    (IActivationFunction)new BipolarSigmoidFunction(sigmoidAlphaValue),
                2, 2, 1);
            // create teacher
            BackPropagationLearning teacher = new BackPropagationLearning(network);
            // set learning rate and momentum
            teacher.LearningRate = learningRate;
            teacher.Momentum = momentum;

            // iterations
            int iteration = 1;

            // statistic files
            StreamWriter errorsFile = null;

            try
            {
                // check if we need to save statistics to files
                if (saveStatisticsToFiles)
                {
                    // open files
                    errorsFile = File.CreateText("errors.csv");
                }

                // errors list
                List<double> errorsList = new List<double>();

                // loop
                while (!needToStop)
                {
                    // run epoch of learning procedure
                    double error = teacher.RunEpoch(input, output);
                    errorsList.Add(error);

                    // save current error
                    if (errorsFile != null)
                    {
                        errorsFile.WriteLine(error);
                    }

                    // show current iteration & error
                    UpdateTextbox(currentIterationBox, iteration.ToString());
                    //currentIterationBox.Text = iteration.ToString();
                    UpdateTextbox(currentErrorBox, error.ToString());
                    //currentErrorBox.Text = error.ToString();

                    iteration++;

                    // check if we need to stop
                    if (error <= learningErrorLimit)
                        break;
                }

                // show error's dynamics
                double[,] errors = new double[errorsList.Count, 2];

                for (int i = 0, n = errorsList.Count; i < n; i++)
                {
                    errors[i, 0] = i;
                    errors[i, 1] = errorsList[i];
                }

                errorChart.RangeX = new DoubleRange(0, errorsList.Count - 1);
                errorChart.UpdateDataSeries("error", errors);
            }
            catch (IOException)
            {
                MessageBox.Show("Failed writing file", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
            }
            finally
            {
                // close files
                if (errorsFile != null)
                    errorsFile.Close();
            }

            // enable settings controls
            EnableControls(true);
        }
        /// <summary>
        ///   Gets the number of parameters in a network.
        /// </summary>
        private static int getNumberOfParameters(ActivationNetwork network)
        {
            int sum = 0;

            for (int i = 0; i < network.Layers.Length; i++)
            {
                for (int j = 0; j < network.Layers[i].Neurons.Length; j++)
                {
                    // number of weights plus the bias value
                    sum += network.Layers[i].Neurons[j].InputsCount + 1;
                }
            }
            return sum;
        }
        /// <summary>
        ///   Initializes a new instance of the <see cref="LevenbergMarquardtLearning"/> class.
        /// </summary>
        /// 
        /// <param name="network">Network to teach.</param>
        /// <param name="useRegularization">True to use bayesian regularization, false otherwise.</param>
        /// <param name="method">The method by which the Jacobian matrix will be calculated.</param>
        /// 
        public LevenbergMarquardtLearning(ActivationNetwork network, bool useRegularization, JacobianMethod method)
        {
            this.network = network;
            this.numberOfParameters = getNumberOfParameters(network);
            this.outputCount = network.Layers[network.Layers.Length - 1].Neurons.Length;

            this.useBayesianRegularization = useRegularization;
            this.method = method;

            this.weights = new float[numberOfParameters];
            this.hessian = new float[numberOfParameters][];
            for (int i = 0; i < hessian.Length; i++)
                hessian[i] = new float[numberOfParameters];
            this.diagonal = new float[numberOfParameters];
            this.gradient = new float[numberOfParameters];
            this.jacobian = new float[numberOfParameters][];


            // Will use backpropagation method for Jacobian computation
            if (method == JacobianMethod.ByBackpropagation)
            {
                // create weight derivatives arrays
                this.weightDerivatives = new float[network.Layers.Length][][];
                this.thresholdsDerivatives = new float[network.Layers.Length][];

                // initialize arrays
                for (int i = 0; i < network.Layers.Length; i++)
                {
                    ActivationLayer layer = (ActivationLayer)network.Layers[i];

                    this.weightDerivatives[i] = new float[layer.Neurons.Length][];
                    this.thresholdsDerivatives[i] = new float[layer.Neurons.Length];

                    for (int j = 0; j < layer.Neurons.Length; j++)
                        this.weightDerivatives[i][j] = new float[layer.InputsCount];
                }
            }
            else // Will use finite difference method for Jacobian computation
            {
                // create differential coefficient arrays
                this.differentialCoefficients = createCoefficients(3);
                this.derivativeStepSize = new double[numberOfParameters];

                // initialize arrays
                for (int i = 0; i < numberOfParameters; i++)
                    this.derivativeStepSize[i] = derivativeStep;
            }
        }