Example #1
        public NeutralNetwork(double[][] input, double[][] output, double[][] testInput, double[][] testOutput)
        {
            var network = new DeepBeliefNetwork(28 * 28, new int[] { 1000, 10 });

            new GaussianWeights(network).Randomize();
            network.UpdateVisibleWeights();

            var teacher = new DeepNeuralNetworkLearning(network)
            {
                Algorithm  = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
                LayerIndex = network.Machines.Count - 1,
            };
            var layerData = teacher.GetLayerInput(input);

            for (int i = 0; i < 5000; i++)
            {
                teacher.RunEpoch(layerData, output);
            }
            network.UpdateVisibleWeights();
            // Classify the first test sample (the intermediate copy into a
            // separate array was redundant, since Compute takes the sample directly).
            var a = network.Compute(testInput[0]);

            Console.WriteLine(Array.IndexOf(a, a.Max()));
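
            // Hypothetical extension (not in the original): score the whole test
            // set by argmax, assuming testOutput holds one-hot labels as in the
            // other examples on this page.
            int correct = 0;
            for (int i = 0; i < testInput.Length; i++)
            {
                double[] predicted = network.Compute(testInput[i]);
                if (Array.IndexOf(predicted, predicted.Max()) == Array.IndexOf(testOutput[i], testOutput[i].Max()))
                {
                    correct++;
                }
            }
            Console.WriteLine("Test accuracy: " + (double)correct / testInput.Length);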
        }
Example #2
        public override void Create(int input, int layers, int neurons, int output)
        {
            IStochasticFunction function = new GaussianFunction();

            //Setup network
            switch (layers)
            {
            case 1:
                DeepAccordNetwork = new DeepBeliefNetwork(function, input, neurons, output);     //Activation function, input, hidden, output.
                break;

            case 2:
                DeepAccordNetwork = new DeepBeliefNetwork(function, input, neurons, neurons, output);     //Activation function, input, hidden x2, output.
                break;

            case 3:
                DeepAccordNetwork = new DeepBeliefNetwork(function, input, neurons, neurons, neurons, output);     //Activation function, input, hidden x3, output.
                break;

            case 4:
                DeepAccordNetwork = new DeepBeliefNetwork(function, input, neurons, neurons, neurons, neurons, output);     //Activation function, input, hidden x4, output.
                break;

            case 5:
                DeepAccordNetwork = new DeepBeliefNetwork(function, input, neurons, neurons, neurons, neurons, neurons, output);     //Activation function, input, hidden x5, output.
                break;
            }

            new GaussianWeights(DeepAccordNetwork, 0.1).Randomize();
            DeepAccordNetwork.UpdateVisibleWeights();
        }
Example #3
        /// <summary>
        /// Override the Epoch() method
        /// </summary>
        /// <param name="trainingSet"></param>
        /// <param name="modalitiesMeanSquarredError"></param>
        /// <param name="globalMeanSquarred"></param>
        public override void Epoch(List <Dictionary <Signal, double[, ]> > trainingSet, out Dictionary <Signal, double> modalitiesMeanSquarredError, out double globalMeanSquarred)
        {
            //Convert to Accord format
            double[][] samples = new double[trainingSet.Count][];
            for (int i = 0; i < trainingSet.Count; i++)
            {
                samples[i] = concatenateTrainingSample(trainingSet[i]);
            }

            for (int _layerIndex = 0; _layerIndex < network.Layers.Length; _layerIndex++)
            {
                //Create the teacher for the layer
                teacher = new DeepBeliefNetworkLearning(network)
                {
                    Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                    {
                        LearningRate = learningRate,
                        Momentum     = momentum,
                        Decay        = weightDecay,
                    },

                    LayerIndex = _layerIndex,
                };

                //Run the epoch for the layer
                double[][] sampleForLayer = teacher.GetLayerInput(samples);
                teacher.RunEpoch(sampleForLayer);
            }
            network.UpdateVisibleWeights();

            //Manually run a base-class epoch so the error measurement matches the other algorithms
            learningLocked = true;
            base.Epoch(trainingSet, out modalitiesMeanSquarredError, out globalMeanSquarred);
            learningLocked = false;
        }
Example #4
 public AccordNetwork()
 {
     network = new DeepBeliefNetwork(new BernoulliFunction(), inputLength, 1200, 600, 2);
     new NguyenWidrow(network).Randomize();
     network.UpdateVisibleWeights();
     unsuperVisedTeacher = GetUnsupervisedTeacherForNetwork(network);
     supervisedTeacher   = GetSupervisedTeacherForNetwork(network);
 }
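
Neither teacher factory is shown in this example. A plausible reconstruction, following the contrastive-divergence and backpropagation configurations used throughout this page; the method bodies and hyperparameter values below are assumptions:

        // Hypothetical reconstruction: layer-wise contrastive-divergence pre-training.
        private static DeepBeliefNetworkLearning GetUnsupervisedTeacherForNetwork(DeepBeliefNetwork network)
        {
            return(new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1,
                    Momentum     = 0.5,
                    Decay        = 0.001,
                }
            });
        }

        // Hypothetical reconstruction: supervised fine-tuning teacher.
        private static BackPropagationLearning GetSupervisedTeacherForNetwork(DeepBeliefNetwork network)
        {
            return(new BackPropagationLearning(network)
            {
                LearningRate = 0.1,
                Momentum     = 0.5
            });
        }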
Example #5
        public static DeepBeliefNetwork CreateAndGetNetwork(int inputsCount)
        {
            DeepBeliefNetwork network = new DeepBeliefNetwork(inputsCount, hiddenNeurons, hiddenNeurons);

            new GaussianWeights(network, stdDev).Randomize();
            network.UpdateVisibleWeights();

            return(network);
        }
Example #6
        public void TrainNetwork(double[][] input, double[][] output, int epochNumber = 5000)
        {
            var layerData = _teacher.GetLayerInput(input);

            for (int i = 0; i < epochNumber; i++)
            {
                _teacher.RunEpoch(layerData, output);
            }
            _network.UpdateVisibleWeights();
        }
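
TrainNetwork assumes _network and _teacher fields configured elsewhere. A minimal sketch of that setup, modeled on Example #9's constructor (the Setup method name and its parameters are assumptions, not part of the original class):

        private DeepBeliefNetwork         _network;
        private DeepNeuralNetworkLearning _teacher;

        public void Setup(int inputsCount, int[] hiddenNeurons)
        {
            _network = new DeepBeliefNetwork(inputsCount, hiddenNeurons);
            new GaussianWeights(_network).Randomize();
            _network.UpdateVisibleWeights();

            // Train only the top layer with resilient backpropagation.
            _teacher = new DeepNeuralNetworkLearning(_network)
            {
                Algorithm  = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
                LayerIndex = _network.Machines.Count - 1,
            };
        }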
Example #7
    // Use this for initialization
    void Start()
    {
        // Setup the deep belief network and initialize with random weights.
        DeepBeliefNetwork network = new DeepBeliefNetwork(20, 10, 10);

        new GaussianWeights(network, 0.1).Randomize();
        network.UpdateVisibleWeights();

        Debug.Log("update");

        Debug.Log(network.Layers.Length);
    }
Example #8
        private static DeepBeliefNetwork CreateNetworkToTeach(TrainerConfiguration configuration)
        {
            var inputs = configuration.InputsOutputsData.Inputs;

            // Setup the deep belief network and initialize with random weights.
            var network = new DeepBeliefNetwork(inputs.First().Length, configuration.Layers);

            new GaussianWeights(network).Randomize();
            network.UpdateVisibleWeights();

            return(network);
        }
Example #9
        public NeutralNetwork()
        {
            _network = new DeepBeliefNetwork(784, new int[] { 1000, 10 });
            new GaussianWeights(_network).Randomize();
            _network.UpdateVisibleWeights();

            _teacher = new DeepNeuralNetworkLearning(_network)
            {
                Algorithm  = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
                LayerIndex = _network.Machines.Count - 1,
            };
        }
Example #10
        public double Learn(double[][] learnData, int[] learnLabel, double[][] testData, int[] testLabel)
        {
            Log.Write(this.GetType(), "Begin Learning");

            int nInputs  = learnData[0].Length;
            int nOutputs = learnLabel.Distinct().Count();

            double[][] labelDoubles = DictionaryTools.IntToDoubles(nOutputs, learnLabel);

            int[] layers =
            {
                nOutputs * 2, nOutputs
            };

            _neuralNetwork = new DeepBeliefNetwork(nInputs, layers);
            new GaussianWeights(_neuralNetwork).Randomize();
            _neuralNetwork.UpdateVisibleWeights();

            BackPropagationLearning learning = new BackPropagationLearning(_neuralNetwork);

            List <double> errorList = new List <double>();
            int           counter   = 1;

            while (true)
            {
                double error    = learning.RunEpoch(learnData, labelDoubles);
                double tmpError = 0;
                if (errorList.Count > 0)
                {
                    tmpError = errorList.Last();
                }
                errorList.Add(error);

                if (counter % 10 == 0)
                {
                    Log.Write(this.GetType(), $"Iteration {counter} | Score {Score(testData, testLabel)} | Error {error}");
                }

                if (Math.Abs(errorList.Last() - tmpError) < 0.01)
                {
                    break;
                }

                counter++;
            }

            double scoreResult = Score(testData, testLabel);

            Log.Write(this.GetType(), $"Final Score {scoreResult}");
            Log.Write(this.GetType(), "End Learning");

            return(scoreResult);
        }
Example #11
        private static DeepBeliefNetwork createNetwork(double[][] inputs)
        {
            DeepBeliefNetwork network = new DeepBeliefNetwork(6, 2, 1);

            network.Machines[0].Hidden.Neurons[0].Weights[0] = 0.00461421;
            network.Machines[0].Hidden.Neurons[0].Weights[1] = 0.04337112;
            network.Machines[0].Hidden.Neurons[0].Weights[2] = -0.10839599;
            network.Machines[0].Hidden.Neurons[0].Weights[3] = -0.06234004;
            network.Machines[0].Hidden.Neurons[0].Weights[4] = -0.03017057;
            network.Machines[0].Hidden.Neurons[0].Weights[5] = 0.09520391;
            network.Machines[0].Hidden.Neurons[0].Threshold  = 0;

            network.Machines[0].Hidden.Neurons[1].Weights[0] = 0.08263872;
            network.Machines[0].Hidden.Neurons[1].Weights[1] = -0.118437;
            network.Machines[0].Hidden.Neurons[1].Weights[2] = -0.21710971;
            network.Machines[0].Hidden.Neurons[1].Weights[3] = 0.02332903;
            network.Machines[0].Hidden.Neurons[1].Weights[4] = 0.00953116;
            network.Machines[0].Hidden.Neurons[1].Weights[5] = 0.09870652;
            network.Machines[0].Hidden.Neurons[1].Threshold  = 0;

            network.Machines[0].Visible.Neurons[0].Threshold = 0;
            network.Machines[0].Visible.Neurons[1].Threshold = 0;
            network.Machines[0].Visible.Neurons[2].Threshold = 0;
            network.Machines[0].Visible.Neurons[3].Threshold = 0;
            network.Machines[0].Visible.Neurons[4].Threshold = 0;
            network.Machines[0].Visible.Neurons[5].Threshold = 0;

            network.UpdateVisibleWeights();


            DeepBeliefNetworkLearning target = new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
            };

            for (int layer = 0; layer < 2; layer++)
            {
                target.LayerIndex = layer;

                double[][] layerInputs = target.GetLayerInput(inputs);

                int      iterations = 5000;
                double[] errors     = new double[iterations];
                for (int i = 0; i < iterations; i++)
                {
                    errors[i] = target.RunEpoch(layerInputs);
                }
            }

            return(network);
        }
Example #12
    /// <summary>
    /// Machine Learning
    /// </summary>
    private void Training()
    {
        // Create the network
        network = new DeepBeliefNetwork(
            new GaussianFunction(),                      // Activation function
            inputsCount: 4,                              // Number of inputs
            hiddenNeurons: new int[] { 1 });             // Layer sizes (here, a single output neuron)

        // Initialize the network weight with gaussian distribution
        new GaussianWeights(network).Randomize();
        network.UpdateVisibleWeights();

        // Create the learning algorithm (plain perceptron learning over the network)
        var teacher = new PerceptronLearning(network);

        // Train for 1000 epochs.
        for (int i = 0; i < 1000; i++)
        {
            teacher.RunEpoch(inputs, outputs);
        }

        // Update the visible weights
        network.UpdateVisibleWeights();
    }
Example #13
        public void Initialize(int inputCount, int hiddenLayerNodes, double usLR = 0.8, double usM = 0.8, double usD = 0.9, double sLR = 0.2, double sM = 0.21)
        {
            IsReady          = false;
            HiddenLayerNodes = hiddenLayerNodes;
            unsupervisedLR   = usLR;
            unsupervisedM    = usM;
            unsupervisedD    = usD;
            supervisedLR     = sLR;
            supervisedM      = sM;

            network = new DeepBeliefNetwork(inputCount, HiddenLayerNodes, 1);
            new GaussianWeights(network, 0.1).Randomize();
            network.UpdateVisibleWeights();

            CreateTeachers(network);
        }
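
CreateTeachers is referenced but not shown. A hypothetical sketch, assuming the hyperparameters stored by Initialize configure contrastive-divergence pre-training and backpropagation fine-tuning; the teacher field names are assumptions:

        // Hypothetical reconstruction (field names are assumptions).
        private DeepBeliefNetworkLearning unsupervisedTeacher;
        private BackPropagationLearning   supervisedTeacher;

        private void CreateTeachers(DeepBeliefNetwork network)
        {
            // Layer-wise pre-training with contrastive divergence,
            // driven by the hyperparameters stored in Initialize().
            unsupervisedTeacher = new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = unsupervisedLR,
                    Momentum     = unsupervisedM,
                    Decay        = unsupervisedD,
                }
            };

            // Supervised fine-tuning of the whole network.
            supervisedTeacher = new BackPropagationLearning(network)
            {
                LearningRate = supervisedLR,
                Momentum     = supervisedM
            };
        }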
Example #14
        public MainViewModel()
        {
            // Create settings for Optidigits dataset
            Network = new DeepBeliefNetwork(new BernoulliFunction(), 1024, 50, 10);

            Database = new Optdigits()
            {
                IsNormalized = false
            };

            new GaussianWeights(Network).Randomize();
            Network.UpdateVisibleWeights();


            Learn    = new LearnViewModel(this);
            Use      = new UseViewModel(this);
            Dream    = new DreamViewModel(this);
            Discover = new DiscoverViewModel(this);

            NewLayerNeurons = 10;
        }
Example #15
        public void train()
        {
            network = new DeepBeliefNetwork(inputsCount: inputs[0].Length,                   // number of input features (inputs.Length would be the sample count)
                                            hiddenNeurons: new int[] { 4, outputs[0].Length });     // hidden and output layer sizes
            // Create the DNN learning algorithm
            var teacher = new DeepNeuralNetworkLearning(network)
            {
                Algorithm  = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
                LayerIndex = network.Machines.Count - 1
            };

            // Train for 5000 epochs
            var layerData = teacher.GetLayerInput(inputs);

            for (int i = 0; i < 5000; i++)
            {
                teacher.RunEpoch(layerData, outputs);
            }

            // Update the weights
            network.UpdateVisibleWeights();
        }
Example #16
        public static unsafe DeepBeliefNetwork Train(ref double[][] inputs, ref bool[] outputsClasses)
        {
            // One-hot encode the boolean classes: true -> {1, 0}, false -> {0, 1}.
            double[][] outputs = (from output in outputsClasses
                                  select output ? new double[] { 1, 0 } : new double[] { 0, 1 }).ToArray();

            DeepBeliefNetwork network = new DeepBeliefNetwork(inputsCount: inputs[0].Length, // feature count, not the sample count
                                                              hiddenNeurons: new int[] { 250, 200, 200, 25 });
            var teacher = new DeepNeuralNetworkLearning(network)
            {
                Algorithm  = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
                LayerIndex = network.Machines.Count - 1
            };

            var layerData = teacher.GetLayerInput(inputs);

            for (int i = 0; i < 5000; i++)
            {
                teacher.RunEpoch(layerData, outputs);
            }
            network.UpdateVisibleWeights();
            return(network);
        }
Example #17
        public override void addModality(Signal s, string label = null)
        {
            base.addModality(s, label);

            //Each time we add a modality the structure of the network changes

            //int[] wholeStructure = new int[hiddenLayers.Length + 2];
            //wholeStructure[0] = inputCount;
            //for (int i = 0; i < hiddenLayers.Length; i++)
            //    wholeStructure[i + 1] = hiddenLayers[i];
            //wholeStructure[hiddenLayers.Length+1] = inputCount;

            //int[] wholeStructure = new int[hiddenLayers.Length + 1];
            //wholeStructure[0] = inputCount;
            //for (int i = 0; i < hiddenLayers.Length; i++)
            //    wholeStructure[i + 1] = hiddenLayers[i];
            //wholeStructure[hiddenLayers.Length] = inputCount;

            network = new DeepBeliefNetwork(new BernoulliFunction(), InputCount, hiddenLayers);

            new GaussianWeights(network).Randomize();
            network.UpdateVisibleWeights();
        }
Example #18
        public MainViewModel()
        {
            Network = new DeepBeliefNetwork(_activationFunction, 1024, 50, 10);

            Database = new OptdigitsDatabase()
            {
                IsNormalized = false
            };

            new GaussianWeights(Network).Randomize();
            Network.UpdateVisibleWeights();
            NewLayerNeurons = 10;

            ProcessCommand    = new RelayCommand(LearnHandler, () => Learn.CanStart);
            StartLearnCommand = new RelayCommand(StartLearnHandler);
            PauseLearnCommand = new RelayCommand(PauseLearnHandler);
            ResetLearnCommand = new RelayCommand(ResetLearnHandler);
            ComputeCommand    = new RelayCommand(ComputHandler);

            Use   = new UseViewModel(this);
            Learn = new LearnViewModel(this);
            Learn.OpenDatabase();
        }
Example #19
        /// <summary>
        /// Trains a network on the given environments
        /// </summary>
        /// <param name="environments">List of environments</param>
        /// <param name="trainCount">Number of training epochs</param>
        /// <returns></returns>
        public DeepBeliefNetwork Train(Environment[] environments, int trainCount)
        {
            var network = new DeepBeliefNetwork(
                _simulator.NumInputDimensions,                            // number of input dimensions
                hiddenNeurons: new [] { 1, _simulator.NumOutputDimensions } // layer sizes (hidden, output)
                );
            var teacher = CreateTeacher(network);

            foreach (var env in environments)
            {
                var result = _simulator.Simulate(env);

                var data = teacher.GetLayerInput(result.Inputs);

                // Use a per-environment counter: the original while (trainCount-- > 0)
                // spent the whole budget on the first environment.
                for (int i = 0; i < trainCount; i++)
                {
                    teacher.RunEpoch(data, result.Outputs);
                }

                network.UpdateVisibleWeights();
            }

            return(network);
        }
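
CreateTeacher is not shown. Because Train calls GetLayerInput and the supervised RunEpoch(data, outputs) overload, it presumably returns a DeepNeuralNetworkLearning; a sketch under that assumption:

        // Hypothetical reconstruction of the missing helper.
        private DeepNeuralNetworkLearning CreateTeacher(DeepBeliefNetwork network)
        {
            return(new DeepNeuralNetworkLearning(network)
            {
                Algorithm  = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
                LayerIndex = network.Machines.Count - 1
            });
        }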
Example #20
        static void Main(string[] args)
        {
            double[][] inputs;
            double[][] outputs;
            double[][] testInputs;
            double[][] testOutputs;

            const int SampleTrainingCount = 120;
            const int SampleTestCount     = 30;

            // Load the Iris dataset.
            inputs = DataManager.LoadCSV(@"../../../data/iris.data", out outputs);
            //inputs = DataManager.Load(@"../../../data/data.txt", out outputs);

            // The first SampleTrainingCount data rows will be for training. The rest will be for testing.
            testInputs  = inputs.Skip(SampleTrainingCount).ToArray();
            testOutputs = outputs.Skip(SampleTrainingCount).ToArray();
            inputs      = inputs.Take(SampleTrainingCount).ToArray();
            outputs     = outputs.Take(SampleTrainingCount).ToArray();

            // Setup the deep belief network and initialize with random weights.
            DeepBeliefNetwork network = new DeepBeliefNetwork(inputs.First().Length, 10, 1);

            new GaussianWeights(network, 0.1).Randomize();
            network.UpdateVisibleWeights();

            // Setup the learning algorithm.
            DeepBeliefNetworkLearning teacher = new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1,
                    Momentum     = 0.5,
                    Decay        = 0.001,
                }
            };

            // Setup batches of input for learning.
            int batchCount = Math.Max(1, inputs.Length / 100);

            // Create mini-batches to speed learning.
            int[]        groups  = Accord.Statistics.Tools.RandomGroups(inputs.Length, batchCount);
            double[][][] batches = inputs.Subgroups(groups);
            // Learning data for the specified layer.
            double[][][] layerData;

            // Unsupervised learning on each hidden layer, except for the output layer.
            for (int layerIndex = 0; layerIndex < network.Machines.Count - 1; layerIndex++)
            {
                teacher.LayerIndex = layerIndex;
                layerData          = teacher.GetLayerInput(batches);
                for (int i = 0; i < 200; i++)
                {
                    double error = teacher.RunEpoch(layerData) / inputs.Length;
                    if (i % 10 == 0)
                    {
                        Console.WriteLine(i + ", Error = " + error);
                    }
                }
            }

            // Supervised learning on entire network, to provide output classification.
            var teacher2 = new BackPropagationLearning(network)
            {
                LearningRate = 0.1,
                Momentum     = 0.5
            };

            // Run supervised learning.
            for (int i = 0; i < SampleTrainingCount; i++)
            {
                double error = teacher2.RunEpoch(inputs, outputs) / inputs.Length;
                if (i % 10 == 0)
                {
                    Console.WriteLine(i + ", Error = " + error);
                }
            }

            // Test the resulting accuracy on the SampleTestCount held-out samples.
            int correct = 0;

            for (int i = 0; i < SampleTestCount; i++)
            {
                double[] outputValues = network.Compute(testInputs[i]);
                if (DataManager.FormatOutputResult(outputValues) == DataManager.FormatOutputResult(testOutputs[i]))
                {
                    correct++;
                }
            }

            Console.WriteLine("Correct " + correct + "/" + SampleTestCount + ", " + Math.Round(((double)correct / (double)SampleTestCount * 100), 2) + "%");
            Console.Write("Press any key to quit ..");
            Console.ReadKey();
        }
Example #21
        public void ExampleTest1()
        {
            Accord.Math.Tools.SetupGenerator(0);

            // We'll use a simple XOR function as input.

            double[][] inputs =
            {
                new double[] { 0, 0 }, // 0 xor 0
                new double[] { 0, 1 }, // 0 xor 1
                new double[] { 1, 0 }, // 1 xor 0
                new double[] { 1, 1 }, // 1 xor 1
            };

            // XOR output, corresponding to the input.
            double[][] outputs =
            {
                new double[] { 0 }, // 0 xor 0 = 0
                new double[] { 1 }, // 0 xor 1 = 1
                new double[] { 1 }, // 1 xor 0 = 1
                new double[] { 0 }, // 1 xor 1 = 0
            };

            // Setup the deep belief network (2 inputs, 3 hidden, 1 output)
            DeepBeliefNetwork network = new DeepBeliefNetwork(2, 3, 1);

            // Initialize the network with Gaussian weights
            new GaussianWeights(network, 0.1).Randomize();

            // Update the visible layer with the new weights
            network.UpdateVisibleWeights();


            // Setup the learning algorithm.
            DeepBeliefNetworkLearning teacher = new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1,
                    Momentum     = 0.5,
                    Decay        = 0.001,
                }
            };



            // Unsupervised learning on each hidden layer, except for the output.
            for (int i = 0; i < network.Layers.Length - 1; i++)
            {
                teacher.LayerIndex = i;

                // Compute the learning data that should be used
                var layerInput = teacher.GetLayerInput(inputs);

                // Train the layer iteratively
                for (int j = 0; j < 5000; j++)
                {
                    teacher.RunEpoch(layerInput);
                }
            }



            // Supervised learning on entire network, to provide output classification.
            var backpropagation = new BackPropagationLearning(network)
            {
                LearningRate = 0.1,
                Momentum     = 0.5
            };

            // Run supervised learning.
            for (int i = 0; i < 5000; i++)
            {
                backpropagation.RunEpoch(inputs, outputs);
            }


            // Test the resulting accuracy.
            int correct = 0;

            for (int i = 0; i < inputs.Length; i++)
            {
                double[] outputValues = network.Compute(inputs[i]);
                double   outputResult = outputValues.First() >= 0.5 ? 1 : 0;

                if (outputResult == outputs[i].First())
                {
                    correct++;
                }
            }

            Assert.AreEqual(4, correct);
        }
Example #22
        private void train_Click(object sender, EventArgs e)
        {
            double[][] inputs;
            double[][] outputs;
            double[][] testInputs;
            double[][] testOutputs;
            GetData(out inputs, out outputs, out testInputs, out testOutputs);

            Stopwatch sw = Stopwatch.StartNew();

            // Setup the deep belief network and initialize with random weights.
            _network = new DeepBeliefNetwork(inputs.First().Length, LAYERS);
            new GaussianWeights(_network, 0.1).Randomize();
            _network.UpdateVisibleWeights();

            // Setup the learning algorithm.
            DeepBeliefNetworkLearning teacher = new DeepBeliefNetworkLearning(_network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1,
                    Momentum     = 0.5,
                    Decay        = 0.001,
                }
            };

            // Setup batches of input for learning.
            int batchCount = Math.Max(1, inputs.Length / 100);

            // Create mini-batches to speed learning.
            int[]        groups  = Accord.Statistics.Tools.RandomGroups(inputs.Length, batchCount);
            double[][][] batches = inputs.Subgroups(groups);
            // Learning data for the specified layer.
            double[][][] layerData;

            // Unsupervised learning on each hidden layer, except for the output layer.
            for (int layerIndex = 0; layerIndex < _network.Machines.Count - 1; layerIndex++)
            {
                teacher.LayerIndex = layerIndex;
                layerData          = teacher.GetLayerInput(batches);
                for (int i = 0; i < UNSUPERVISED_EPOCHS; i++)
                {
                    double error = teacher.RunEpoch(layerData) / inputs.Length;
                    if (i % 10 == 0)
                    {
                        label1.Text = "Layer: " + layerIndex + " Epoch: " + i + ", Error: " + error;
                        label1.Refresh();
                    }
                }
            }

            // Supervised learning on entire network, to provide output classification.
            var teacher2 = new BackPropagationLearning(_network)
            {
                LearningRate = 0.1,
                Momentum     = 0.5
            };

            // Run supervised learning.
            for (int i = 0; i < SUPERVISED_EPOCHS; i++)
            {
                double error = teacher2.RunEpoch(inputs, outputs) / inputs.Length;
                if (i % 10 == 0)
                {
                    label1.Text = "Supervised: " + i + ", Error = " + error;
                    label1.Refresh();
                }
            }

            // Test the resulting accuracy.
            label1.Text = "";
            int correct = 0;

            for (int i = 0; i < testInputs.Length; i++)
            {
                double[] outputValues = _network.Compute(testInputs[i]);
                int      y            = GetResult(outputValues);
                int      t            = GetResult(testOutputs[i]);
                label1.Text += "predicted: " + y + " actual: " + t + "\n";
                label1.Refresh();
                if (y == t)
                {
                    correct++;
                }
            }
            sw.Stop();

            label1.Text  = "Correct " + correct + "/" + testInputs.Length + ", " + Math.Round(((double)correct / (double)testInputs.Length * 100), 2) + "%";
            label1.Text += "\nElapsed train+test time: " + sw.Elapsed;
            label1.Refresh();
        }
Example #23
        public static void Excute2()
        {
            double[][] inputs;
            double[][] outputs;
            double[][] testInputs;
            double[][] testOutputs;

            // Load ascii digits dataset.
            inputs = DataManager.Load(@"data.txt", out outputs);

            // The first 500 data rows will be for training; the rest will be for testing.
            testInputs  = inputs.Skip(500).ToArray();
            testOutputs = outputs.Skip(500).ToArray();
            inputs      = inputs.Take(500).ToArray();
            outputs     = outputs.Take(500).ToArray();

            // Setup the deep belief network and initialize with random weights.
            DeepBeliefNetwork network = new DeepBeliefNetwork(inputs.First().Length, 10, 10); //input count = feature length; two hidden layers of 10 neurons each

            new GaussianWeights(network, 0.1).Randomize();                                    //Gaussian weights; typical standard deviations are 0.001-0.1 (default 0.1)
            network.UpdateVisibleWeights();                                                   //update the visible layer's weights by mirroring the hidden layer's weights

            // Setup the learning algorithm.
            DeepBeliefNetworkLearning teacher = new DeepBeliefNetworkLearning(network);

            // Configuration function that creates the learning algorithm
            // for each layer of the deep network.
            teacher.Algorithm = (h, v, i) => {
                return(new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1, //learning rate
                    Momentum = 0.5,     //momentum
                    Decay = 0.001,      //weight decay
                });
            };

            // Setup batches of input for learning.
            int batchCount = System.Math.Max(1, inputs.Length / 100);

            // Create mini-batches to speed learning.
            int[]        groups  = Accord.Statistics.Classes.Random(inputs.Length, batchCount);
            double[][][] batches = inputs.Separate(groups);
            // Learning data for the specified layer.
            double[][][] layerData;

            // Unsupervised learning on each hidden layer, except for the output layer.
            // network.Machines.Count yields the restricted Boltzmann machine of each layer in the deep network.
            for (int layerIndex = 0; layerIndex < network.Machines.Count - 1; layerIndex++)
            {
                teacher.LayerIndex = layerIndex;

                /*
                 * Gets the learning data needed to train this layer.
                 * The result should be passed to RunEpoch to run one learning epoch.
                 */
                layerData = teacher.GetLayerInput(batches);
                for (int i = 0; i < 200; i++) //200 epochs
                {
                    var    learningResult = teacher.RunEpoch(layerData);
                    double error          = learningResult / inputs.Length; //RunEpoch returns the sum of learning errors
                    if (i % 10 == 0)
                    {
                        Console.WriteLine(i + ", Error = " + error);
                    }
                }
            }

            // Supervised learning on the entire network, to provide output classification.
            var teacher2 = new Neuro.Learning.BackPropagationLearning(network)
            {
                LearningRate = 0.1, //learning rate
                Momentum     = 0.5  //momentum
            };

            // Run supervised learning.
            for (int i = 0; i < 500; i++) //500 epochs
            {
                double error = teacher2.RunEpoch(inputs, outputs) / inputs.Length;
                if (i % 10 == 0)
                {
                    Console.WriteLine(i + ", Error = " + error);
                }
            }

            // Test the resulting accuracy.
            int correct = 0;

            for (int i = 0; i < testInputs.Length; i++)
            {
                // The original concatenated two consecutive test samples, producing a
                // vector twice the network's input length; compute on a single sample.
                double[] outputValues = network.Compute(testInputs[i]);
                if (DataManager.FormatOutputResult(outputValues) == DataManager.FormatOutputResult(testOutputs[i]))
                {
                    correct++;
                }
            }

            Console.WriteLine("Correct " + correct + "/" + testInputs.Length + ", " + System.Math.Round(((double)correct / (double)testInputs.Length * 100), 2) + "%");
            Console.Write("Press any key to quit ..");
            Console.ReadKey();
        }
Example #24
        public static void Learn(double[][] inputs, double[][] outputs)
        {
            var n           = (int)(count * 0.8);
            var testInputs  = inputs.Skip(n).ToArray();
            var testOutputs = outputs.Skip(n).ToArray();

            inputs  = inputs.Take(n).ToArray();
            outputs = outputs.Take(n).ToArray();

            var network = new DeepBeliefNetwork(inputs.First().Length, 10, 10);

            new GaussianWeights(network, 0.1).Randomize();
            network.UpdateVisibleWeights();

            // Setup the learning algorithm.
            var teacher = new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1,
                    Momentum     = 0.5,
                    Decay        = 0.001,
                }
            };
            // Setup batches of input for learning.
            int batchCount = Math.Max(1, inputs.Length / 100);

            // Create mini-batches to speed learning.
            int[]        groups  = Classes.Random(inputs.Length, batchCount);
            double[][][] batches = inputs.Subgroups(groups);
            // Learning data for the specified layer.
            double[][][] layerData;

            // Unsupervised learning on each hidden layer, except for the output layer.
            for (int layerIndex = 0; layerIndex < network.Machines.Count - 1; layerIndex++)
            {
                teacher.LayerIndex = layerIndex;
                layerData          = teacher.GetLayerInput(batches);
                for (int i = 0; i < 200; i++)
                {
                    double error = teacher.RunEpoch(layerData) / inputs.Length;
                    if (i % 10 == 0)
                    {
                        Console.WriteLine(i + ", Error = " + error);
                    }
                }
            }


            // Supervised learning on entire network, to provide output classification.
            var teacher2 = new BackPropagationLearning(network)
            {
                LearningRate = 0.1,
                Momentum     = 0.5
            };

            // Run supervised learning.
            for (int i = 0; i < n; i++)
            {
                double error = teacher2.RunEpoch(inputs, outputs) / inputs.Length;
                if (i % 10 == 0)
                {
                    Console.WriteLine(i + ", Error = " + error);
                }
            }

            // Test the resulting accuracy.
            int correct = 0;

            for (int i = 0; i < testInputs.Length; i++)
            {
                double[] outputValues = network.Compute(testInputs[i]);
                if (Compare(outputValues, testOutputs[i]))
                {
                    correct++;
                }
            }
            network.Save("deeplearning-countbits.net");
            Console.WriteLine("Correct " + correct + "/" + testInputs.Length + ", " + Math.Round(((double)correct / (double)testInputs.Length * 100), 2) + "%");
        }
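
The Compare helper is not shown. A plausible sketch that matches a prediction to its one-hot target by argmax (this reconstruction is an assumption):

        // Hypothetical reconstruction: match prediction to one-hot target by argmax.
        private static bool Compare(double[] predicted, double[] expected)
        {
            int predictedIndex = Array.IndexOf(predicted, predicted.Max());
            int expectedIndex  = Array.IndexOf(expected, expected.Max());
            return(predictedIndex == expectedIndex);
        }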
Example #25
        static double Neural_Network(bool show)
        {
            double       error      = 0;
            DataTable    entireData = DataController.MakeDataTable("../../drug_consumption.txt");
            Codification codebook   = new Codification(entireData);
            //"Alcohol", "Amfet", !!"Amyl", "Benzos", "Cofeine", "Cannabis", "Chocolate", "Coke", (1)"Crac", ///"Ecstasy", !!"Heroine",
            //    !!"Ketamine", //"LegalH", "LSD", !!"Meth", //"Mushrooms", "Nicotine", lol "Semeron", "VSA"
            string LookingFor = "Heroine";
            int    good       = 0;

            string[][] outputs;
            string[][] inputs = DataController.MakeString("../../drug_consumption_500.txt", out outputs);
            string[][] testOutputs;
            string[][] testInputs = DataController.MakeString("../../drug_consumption_500.txt", out testOutputs);

            DataTable outputs1     = DataController.MakeDataFromString(outputs, "output");
            DataTable inputs1      = DataController.MakeDataFromString(inputs, "input");
            DataTable testOutputs1 = DataController.MakeDataFromString(testOutputs, "output");
            DataTable testInputs1  = DataController.MakeDataFromString(testInputs, "input");

            DataTable Isymbols  = codebook.Apply(inputs1);
            DataTable Osymbols  = codebook.Apply(outputs1);
            DataTable TIsymbols = codebook.Apply(testInputs1);
            DataTable TOsymbols = codebook.Apply(testOutputs1);

            double[][] inputsD  = Isymbols.ToJagged <double>("Age", "Gender", "Education", "Country", "Eticnity", "Nscore", "Escore", "Oscore", "Ascore", "Cscore", "Impulsive", "SS");
            double[][] outputsD = Osymbols.ToJagged <double>(LookingFor);
            outputsD = DataController.convertDT(outputsD);
            double[][] inputsT  = TIsymbols.ToJagged <double>("Age", "Gender", "Education", "Country", "Eticnity", "Nscore", "Escore", "Oscore", "Ascore", "Cscore", "Impulsive", "SS");
            double[][] outputsT = TOsymbols.ToJagged <double>(LookingFor);
            outputsT = DataController.convertDT(outputsT);

            DeepBeliefNetwork network = new DeepBeliefNetwork(inputs.First().Length, 10, 7);

            new GaussianWeights(network, 0.1).Randomize();
            network.UpdateVisibleWeights();
            DeepBeliefNetworkLearning FirstLearner = new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1,
                    Momentum     = 0.5,
                    Decay        = 0.001,
                }
            };

            int batchCount = Math.Max(1, inputs.Length / 100);

            int[]        groupsNew  = Accord.Statistics.Classes.Random(inputsD.Length, batchCount);
            double[][][] batchesNew = Accord.Statistics.Classes.Separate(inputsD, groupsNew);
            double[][][] layerData;

            for (int layerIndex = 0; layerIndex < network.Machines.Count - 1; layerIndex++)
            {
                FirstLearner.LayerIndex = layerIndex;
                layerData = FirstLearner.GetLayerInput(batchesNew);
                for (int i = 0; i < 500; i++)
                {
                    error = FirstLearner.RunEpoch(layerData) / inputsD.Length;
                    if (i % 10 == 0 && show == true)
                    {
                        Console.WriteLine("Error value(" + LookingFor + ", test: " + i + ") = " + error);
                    }
                }
            }

            var SecondLearner = new BackPropagationLearning(network)
            {
                LearningRate = 0.15,
                Momentum     = 0.7
            };
            EvolutionaryLearning teacher = new EvolutionaryLearning(network, 100);

            for (int i = 0; i < 800; i++)
            {
                error = teacher.RunEpoch(inputsD, outputsD) / inputsD.Length;
                if (i % 50 == 0 && show == true)
                {
                    Console.WriteLine("Error value(" + LookingFor + ", test: " + i + ") = " + error);
                }
            }

            for (int i = 0; i < 800; i++)
            {
                error = SecondLearner.RunEpoch(inputsD, outputsD) / inputsD.Length;
                if (i % 10 == 0 && show == true)
                {
                    Console.WriteLine("Error value(" + LookingFor + ", test: " + i + ") = " + error);
                }
            }

            for (int i = 0; i < inputsT.Length; i++)   // iterate over the test set
            {
                double[] outputValues = network.Compute(inputsT[i]);
                if (outputValues.ToList().IndexOf(outputValues.Max()) == outputsT[i].ToList().IndexOf(outputsT[i].Max()))
                {
                    good++;
                }
            }
            if (show == true)
            {
                Console.WriteLine("Poprawność - " + Math.Round(((double)good / (double)inputsD.Length * 100), 4) + "%");
                Console.ReadKey();
            }

            return(error);
        }
Example #26
        public static void test()
        {
            //double[][] inputs;
            //double[][] outputs;
            //double[][] testInputs;
            //double[][] testOutputs;

            //// Load ascii digits dataset.
            //inputs = DataManager.Load(@"../../../data/data.txt", out outputs);

            //// The first 500 data rows will be for training. The rest will be for testing.
            //testInputs = inputs.Skip(500).ToArray();
            //testOutputs = outputs.Skip(500).ToArray();
            //inputs = inputs.Take(500).ToArray();
            //outputs = outputs.Take(500).ToArray();
            //double[][] inputs = new double[4][] {
            //    new double[] {0, 0}, new double[] {0, 1},
            //    new double[] {1, 0}, new double[] {1, 1}
            //};
            //double[][] outputs = new double[4][] {
            //    new double[] {1, 0}, new double[] {0, 1},
            //    new double[] {0, 1}, new double[] {1, 0}
            //};

            double[][] inputs =
            {
                //               input         output
                new double[] { 0, 1, 1, 0 }, //  0
                new double[] { 0, 1, 0, 0 }, //  0
                new double[] { 0, 0, 1, 0 }, //  0
                new double[] { 0, 1, 1, 0 }, //  0
                new double[] { 0, 1, 0, 0 }, //  0
                new double[] { 1, 0, 0, 0 }, //  1
                new double[] { 1, 0, 0, 0 }, //  1
                new double[] { 1, 0, 0, 1 }, //  1
                new double[] { 0, 0, 0, 1 }, //  1
                new double[] { 0, 0, 0, 1 }, //  1
                new double[] { 1, 1, 1, 1 }, //  2
                new double[] { 1, 0, 1, 1 }, //  2
                new double[] { 1, 1, 0, 1 }, //  2
                new double[] { 0, 1, 1, 1 }, //  2
                new double[] { 1, 1, 1, 1 }, //  2
            };

            double[][] outputs = // those are the class labels
            {
                new double[] { 1, 0, 0 },
                new double[] { 1, 0, 0 },
                new double[] { 1, 0, 0 },
                new double[] { 1, 0, 0 },
                new double[] { 1, 0, 0 },
                new double[] { 0, 1, 0 },
                new double[] { 0, 1, 0 },
                new double[] { 0, 1, 0 },
                new double[] { 0, 1, 0 },
                new double[] { 0, 1, 0 },
                new double[] { 0, 0, 1 },
                new double[] { 0, 0, 1 },
                new double[] { 0, 0, 1 },
                new double[] { 0, 0, 1 },
                new double[] { 0, 0, 1 },
            };


            // Setup the deep belief network and initialize with random weights.
            Console.WriteLine(inputs.First().Length);
            DeepBeliefNetwork network = new DeepBeliefNetwork(inputs.First().Length, 2, outputs.First().Length);

            new GaussianWeights(network, 0.1).Randomize();
            network.UpdateVisibleWeights();

            // Setup the learning algorithm.
            DeepBeliefNetworkLearning teacher = new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1,
                    Momentum     = 0.5,
                    Decay        = 0.001,
                }
            };

            // Setup batches of input for learning.
            int batchCount = Math.Max(1, inputs.Length / 100);

            // Create mini-batches to speed learning.
            int[]        groups  = Accord.Statistics.Tools.RandomGroups(inputs.Length, batchCount);
            double[][][] batches = inputs.Subgroups(groups);
            // Learning data for the specified layer.
            double[][][] layerData;

            // Unsupervised learning on each hidden layer, except for the output layer.
            for (int layerIndex = 0; layerIndex < network.Machines.Count - 1; layerIndex++)
            {
                teacher.LayerIndex = layerIndex;
                layerData          = teacher.GetLayerInput(batches);
                for (int i = 0; i < 50000; i++)
                {
                    double error = teacher.RunEpoch(layerData) / inputs.Length;
                    //if (i % 10 == 0)
                    //{
                    //    Console.WriteLine(i + ", Error = " + error);
                    //}
                }
            }

            // Supervised learning on entire network, to provide output classification.
            var teacher2 = new Accord.Neuro.Learning.BackPropagationLearning(network)
            {
                LearningRate = 0.1,
                Momentum     = 0.5
            };

            // Run supervised learning.
            for (int i = 0; i < 50000; i++)
            {
                double error = teacher2.RunEpoch(inputs, outputs) / inputs.Length;
                //if (i % 10 == 0)
                //{
                //    Console.WriteLine(i + ", Error = " + error);
                //}
            }

            // Test the resulting accuracy.
            //int correct = 0;
            //for (int i = 0; i < inputs.Length; i++)
            //{
            //    double[] outputValues = network.Compute(testInputs[i]);
            //    if (DataManager.FormatOutputResult(outputValues) == DataManager.FormatOutputResult(testOutputs[i]))
            //    {
            //        correct++;
            //    }
            //}

            //Console.WriteLine("Correct " + correct + "/" + inputs.Length + ", " + Math.Round(((double)correct / (double)inputs.Length * 100), 2) + "%");

            //double[] probs = network.GenerateOutput(new double[] { 0, 0 });
            //foreach (double p in probs)
            //{
            //    Console.Write(p + ", ");
            //}
            for (int i = 0; i < inputs.Length; i++)
            {
                double[] output = network.Compute(inputs[i]);
                int      imax; output.Max(out imax);
                Console.Write(imax + " -- ");
                foreach (double p in output)
                {
                    Console.Write(p + ", ");
                }
                Console.WriteLine("\n------------------");
            }
        }
Example #27
        private static void Learn(string networkFile, double[][] inputs, double[][] outputs, double trainRate = 0.8)
        {
            var count          = inputs.Length;
            var n              = (int)(count * trainRate);
            var trainedInputs  = inputs.Take(n).ToArray();
            var trainedOutputs = outputs.Take(n).ToArray();
            var testInputs     = inputs.Skip(n).ToArray();
            var testOutputs    = outputs.Skip(n).ToArray();

            Console.WriteLine($"trained items: {trainedInputs.Length}, tested items: {testInputs.Length}");

            var network = new DeepBeliefNetwork(trainedInputs.First().Length, 10, trainedOutputs.First().Length);

            new GaussianWeights(network, 0.1).Randomize();
            network.UpdateVisibleWeights();

            // Setup the learning algorithm.
            var teacher = new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1,
                    Momentum     = 0.5,
                    Decay        = 0.001,
                }
            };
            // Setup batches of input for learning.
            int batchCount = Math.Max(1, trainedInputs.Length / 100);

            // Create mini-batches to speed learning.
            int[]        groups  = Classes.Random(trainedInputs.Length, batchCount);
            double[][][] batches = trainedInputs.Subgroups(groups);
            // Learning data for the specified layer.
            double[][][] layerData;

            // Unsupervised learning on each hidden layer, except for the output layer.
            for (int layerIndex = 0; layerIndex < network.Machines.Count - 1; layerIndex++)
            {
                teacher.LayerIndex = layerIndex;
                layerData          = teacher.GetLayerInput(batches);
                for (int i = 0; i < 200; i++)
                {
                    double error = teacher.RunEpoch(layerData) / trainedInputs.Length;
                    if (i % 10 == 0)
                    {
                        Console.WriteLine(i + ", Error = " + error);
                    }
                }
            }


            // Supervised learning on entire network, to provide output classification.
            var teacher2 = new BackPropagationLearning(network)
            {
                LearningRate = 0.1,
                Momentum     = 0.5
            };

            // Run supervised learning.
            for (int i = 0; i < Math.Min(2000, n); i++)
            {
                double error = teacher2.RunEpoch(trainedInputs, trainedOutputs) / trainedInputs.Length;
                if (i % 10 == 0)
                {
                    Console.WriteLine(i + ", Error = " + error);
                }
            }
            network.Save(networkFile);
            Console.WriteLine($"save network: {networkFile}");

            // Test the resulting accuracy.
            Test(networkFile, testInputs, testOutputs);
        }
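
Test is not shown. A plausible sketch, assuming it reloads the saved network and scores by argmax; the Network.Load cast and the scoring rule are assumptions:

        // Hypothetical reconstruction of the missing Test helper.
        private static void Test(string networkFile, double[][] testInputs, double[][] testOutputs)
        {
            var network = (DeepBeliefNetwork)Network.Load(networkFile);

            int correct = 0;
            for (int i = 0; i < testInputs.Length; i++)
            {
                double[] predicted = network.Compute(testInputs[i]);
                // Score by comparing the argmax of the prediction and the (assumed one-hot) target.
                if (Array.IndexOf(predicted, predicted.Max()) == Array.IndexOf(testOutputs[i], testOutputs[i].Max()))
                {
                    correct++;
                }
            }

            Console.WriteLine($"correct: {correct}/{testInputs.Length}");
        }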
Example #28
        //Start training
        public bool Run()
        {
            bool IsDone = false;

            try
            {
                FlowDatas db = new FlowDatas();
                (double[][] Inputs, double[][] Outputs)
                    = DeepLearningTools.FlowSampleToLearningData(db.FlowSampleStatistics.Where(c => c.BehaviorNumber != 0).ToArray());
                db.Dispose();
                //Create the DBN network
                DBNetwork = new DeepBeliefNetwork(Inputs.First().Length,
                                                  (int)((Inputs.First().Length + Outputs.First().Length) / 1.5),
                                                  (int)((Inputs.First().Length + Outputs.First().Length) / 2),
                                                  Outputs.First().Length);
                //Randomize all of the network's weights
                new GaussianWeights(DBNetwork, 0.1).Randomize();
                DBNetwork.UpdateVisibleWeights();
                //Configure unsupervised learning
                DeepBeliefNetworkLearning teacher
                    = new DeepBeliefNetworkLearning(DBNetwork)
                    {
                    Algorithm = (h, v, i) =>
                                new ContrastiveDivergenceLearning(h, v)
                    {
                        LearningRate = 0.01,
                        Momentum     = 0.5,
                        Decay        = 0.001,
                    }
                    };

                //Set up batches of input for learning.
                int batchCount1 = Math.Max(1, Inputs.Length / 10);
                //Create mini-batches to speed up learning.
                int[] groups1
                    = Accord.Statistics.Classes.Random(Inputs.Length, batchCount1);
                double[][][] batches = Inputs.Subgroups(groups1);
                //Learning data for the specified layer.
                double[][][] layerData;
                //Run unsupervised learning.
                for (int layerIndex = 0; layerIndex < DBNetwork.Machines.Count - 1; layerIndex++)
                {
                    teacher.LayerIndex = layerIndex;
                    layerData          = teacher.GetLayerInput(batches);
                    for (int i = 0; i < 200; i++)
                    {
                        double error = teacher.RunEpoch(layerData) / Inputs.Length;
                        if (i % 10 == 0)
                        {
                            Console.WriteLine(i + ", Error = " + error);
                        }
                    }
                }

                //Supervised learning on the entire network, to provide output classification.
                var teacher2 = new ParallelResilientBackpropagationLearning(DBNetwork);

                double error1 = double.MaxValue;

                //Run supervised learning.
                for (int i = 0; i < 500; i++)
                {
                    error1 = teacher2.RunEpoch(Inputs, Outputs) / Inputs.Length;
                    Console.WriteLine(i + ", Error = " + error1);

                    DBNetwork.Save(Path);
                    Console.WriteLine("Save Done");
                }

                DBNetwork.Save(Path);
                Console.WriteLine("Save Done");

                IsDone = true;
            }
            catch (Exception ex)
            {
                Debug.Write(ex.ToString());
            }

            return(IsDone);
        }
Example #29
        static void Main(string[] args)
        {
#if Cluster
            // output file
            List <string> outputLines = new List <string>();

            DateTime timeStart = DateTime.Now;   // start time for the elapsed-time logs below
            // Some example documents.
            string[] documents = new GetTweets().GetTweetsFromExcelFile("Train_NN.xlsx");

            // Apply TF*IDF to the documents and get the resulting vectors.
            double[][] inputs = TFIDF.Transform(documents, 0);
            Console.WriteLine("time to transformation " + (DateTime.Now - timeStart));
            outputLines.Add("time to transformation " + (DateTime.Now - timeStart));
            Console.WriteLine("TFIDF transformation done...");

            inputs = TFIDF.Normalize(inputs);
            Console.WriteLine("time to Normalization " + (DateTime.Now - timeStart));
            outputLines.Add("time to Normalization " + (DateTime.Now - timeStart));
            Console.WriteLine("TFIDF Normalization done...");
            //inputs = Accord.Math.Norm.Norm2(inputs);

            string[] topics = TFIDF.Topics(documents, 5);
            Console.WriteLine("time to topics " + (DateTime.Now - timeStart));
            outputLines.Add("time to topics " + (DateTime.Now - timeStart));
            Console.WriteLine("Topics gathered...");

            //Random random = new Random();
            //double[][] rand = new double[inputs.Length][];

            //for (int i = 0; i < inputs.Length; i++)
            //{

            //    rand[i] = new double[inputs[i].Length];
            //    for (int j = 0; j < inputs[i].Length; j++)
            //    {

            //        rand[i][j] = random.NextDouble();
            //    }
            //}
            //Console.WriteLine("time to generate random numbers " + (DateTime.Now - timeStart));
            //outputLines.Add("time to topics " + (DateTime.Now - timeStart));
            //Console.WriteLine("Randoms generated...");

            KMeans cluster = new KMeans(topics.Length, Distance.Cosine);

            //cluster.MaxIterations = 1;
            //cluster.Randomize(rand);
            int[] index = cluster.Compute(inputs);
            Console.WriteLine("time to cluster " + (DateTime.Now - timeStart));
            outputLines.Add("time to cluster " + (DateTime.Now - timeStart));
            Console.WriteLine("Clustering done...");
            //Accord.Statistics.Analysis.PrincipalComponentAnalysis pca = new Accord.Statistics.Analysis.PrincipalComponentAnalysis(inputs, Accord.Statistics.Analysis.AnalysisMethod.Center);
            //pca.Compute();
            //double[][] newinput = pca.Transform(inputs, 2);

            //ScatterplotBox.Show("KMeans Clustering of Tweets", newinput, index).Hold();



            // Walk over each cluster; the labels returned by KMeans.Compute are 0-based.
            for (int i = 0; i < topics.Length; i++)
            {
                outputLines.Add(Convert.ToString(i + 1));
                List <string> topicDecider = new List <string>();
                string[]      topicString;

                int j = 0;
                foreach (int x in index)
                {
                    if (x == i)
                    {
                        topicDecider.Add(documents[j]);
                    }
                    j++;
                }

                topicString = TFIDF.Topics(topicDecider.ToArray(), topicDecider.Count / 2);

                if (topicString.Length == 0)
                {
                    outputLines.Add("--------------------------------------------------------");
                    outputLines.Add("TOPIC: other");
                    outputLines.Add("--------------------------------------------------------");
                }
                else
                {
                    outputLines.Add("--------------------------------------------------------");
                    outputLines.Add("TOPIC: " + topicString[0]);
                    outputLines.Add("--------------------------------------------------------");
                }

                j = 0;
                foreach (int x in index)
                {
                    if (x == i)
                    {
                        outputLines.Add("Tweet ID " + j + ":\t" + documents[j]);
                    }
                    j++;
                }
                outputLines.Add("");
                outputLines.Add("");
                outputLines.Add("");
                outputLines.Add("");
            }

            System.IO.File.WriteAllLines(@"Train_NN_2.txt", outputLines.ToArray());
            Console.WriteLine("Output is written...");
#else
            // output file
            List <string> outputLines = new List <string>();

            DateTime timeStart = DateTime.Now;
            // Some example documents.
            string[]   documents_Train = new GetTweets().GetTweetsFromExcelFile("Train_NN.xlsx");
            double[][] Train_Labels    = new GetTweets().GetLabelsFromExcelFile("Train_Labels.xlsx");

            // Apply TF*IDF to the documents and get the resulting vectors.
            double[][] inputs = TFIDF.Transform(documents_Train, 0);
            Console.WriteLine("time to transformation " + (DateTime.Now - timeStart));
            outputLines.Add("time to transformation " + (DateTime.Now - timeStart));
            Console.WriteLine("TFIDF transformation done...");

            inputs = TFIDF.Normalize(inputs);
            Console.WriteLine("time to Normalization " + (DateTime.Now - timeStart));
            outputLines.Add("time to Normalization " + (DateTime.Now - timeStart));
            Console.WriteLine("TFIDF Normalization done...");


            //double[][] inputs;
            double[][] train_input = new double[140][];
            double[][] outputs;
            double[][] testInputs  = new double[1000 - 140][];
            double[][] testOutputs = new double[1000 - 140][];

            for (int i = 0; i < 140; i++)
            {
                train_input[i] = new double[inputs[i].Length];
                for (int j = 0; j < inputs[i].Length; j++)
                {
                    train_input[i][j] = inputs[i][j];
                }
            }

            // The remaining 860 rows (indices 140..999) form the test set.
            for (int i = 0; i < 1000 - 140; i++)
            {
                testInputs[i] = new double[inputs[i + 140].Length];
                for (int j = 0; j < inputs[i + 140].Length; j++)
                {
                    testInputs[i][j] = inputs[i + 140][j];
                }
            }


            // The first 140 data rows are for training; the rest are for testing.
            // An equivalent split using LINQ:
            //testInputs = inputs.Skip(140).ToArray();
            //testOutputs = outputs.Skip(140).ToArray();
            //inputs = inputs.Take(140).ToArray();
            //outputs = outputs.Take(140).ToArray();

            // Setup the deep belief network and initialize with random weights.
            DeepBeliefNetwork network = new DeepBeliefNetwork(train_input.First().Length, 7);
            new GaussianWeights(network, 0.1).Randomize();
            network.UpdateVisibleWeights();
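            // The network here has a single RBM: visible units sized to the
            // vocabulary and 7 hidden units, one per topic class.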

            // Setup the learning algorithm.
            DeepBeliefNetworkLearning teacher = new DeepBeliefNetworkLearning(network)
            {
                Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
                {
                    LearningRate = 0.1,
                    Momentum     = 0.5,
                    Decay        = 0.001,
                }
            };
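            // Contrastive divergence pre-trains one RBM layer at a time; the
            // rate/momentum/decay values above are common sample settings, not tuned.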

            // Setup batches of input for learning.
            int batchCount = Math.Max(1, train_input.Length / 100);
            // Create mini-batches to speed learning.
            int[]        groups  = Accord.Statistics.Tools.RandomGroups(train_input.Length, batchCount);
            double[][][] batches = train_input.Subgroups(groups);
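            // Mini-batches trade per-step gradient accuracy for more frequent
            // weight updates on each pass over the data.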
            // Learning data for the specified layer.
            double[][][] layerData;

            // Unsupervised learning on each hidden layer, except for the output layer.
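            // Note: with a single RBM (network.Machines.Count == 1) this loop body
            // never runs, so all training comes from the supervised step below.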
            for (int layerIndex = 0; layerIndex < network.Machines.Count - 1; layerIndex++)
            {
                teacher.LayerIndex = layerIndex;
                layerData          = teacher.GetLayerInput(batches);
                for (int i = 0; i < 200; i++)
                {
                    double error = teacher.RunEpoch(layerData) / train_input.Length;
                    if (i % 10 == 0)
                    {
                        Console.WriteLine(i + ", Error = " + error);
                    }
                }
            }

            // Supervised learning on entire network, to provide output classification.
            var teacher2 = new BackPropagationLearning(network)
            {
                LearningRate = 0.1,
                Momentum     = 0.5
            };
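            // Backpropagation fine-tunes the whole network against the one-hot labels.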

            // Transpose the labels so that rows index samples and columns index the 7 classes.
            double[][] Train_Labels_T = new double[140][];
            for (int i = 0; i < 140; i++)
            {
                Train_Labels_T[i] = new double[7];
                for (int j = 0; j < 7; j++)
                {
                    Train_Labels_T[i][j] = Train_Labels[j][i];
                }
            }

            // Run supervised learning.
            for (int i = 0; i < 500; i++)
            {
                double error = teacher2.RunEpoch(train_input, Train_Labels_T) / train_input.Length;
                if (i % 10 == 0)
                {
                    Console.WriteLine(i + ", Error = " + error);
                }
            }
            outputLines.Add("time to Training " + (DateTime.Now - timeStart));
            // Compute the network's outputs for every test vector.
            double[][] outputValues = new double[testInputs.Length][];
            for (int i = 0; i < testInputs.Length; i++)
            {
                outputValues[i] = network.Compute(testInputs[i]);
            }
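            // No labeled test set is loaded in this example, so accuracy is never
            // actually measured; the predictions are only written to the report
            // below. If a one-hot label matrix for the test rows were available
            // (a hypothetical Test_Labels, shaped like Train_Labels_T), accuracy
            // could be estimated like this:
            //int correct = 0;
            //for (int i = 0; i < testInputs.Length; i++)
            //{
            //    int predicted = outputValues[i].IndexOf(outputValues[i].Max());
            //    int expected  = Test_Labels[i].IndexOf(Test_Labels[i].Max());
            //    if (predicted == expected) correct++;
            //}
            //Console.WriteLine("Accuracy: " + (double)correct / testInputs.Length);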
            outputLines.Add("time to Testing/clustering " + (DateTime.Now - timeStart));
            outputLines.Add("");
            outputLines.Add("");
            outputLines.Add("");

            // Group tweets by topic class. The first 140 documents are the labeled
            // training set (ten per topic for the first six topics, then eighty
            // "other" tweets); the rest are assigned by the trained network.
            string[] topicNames = { "WEATHER", "MUSIC", "ITALY", "FOOD", "FASHION", "FOOTBALL", "OTHER" };
            List <string>[] classes = new List <string> [topicNames.Length];

            for (int c = 0; c < classes.Length; c++)
            {
                classes[c] = new List <string>
                {
                    "-------------------------------",
                    "TOPIC: " + topicNames[c],
                    "-------------------------------"
                };
            }

            //creating output file
            for (int i = 0; i < documents_Train.Length; i++)
            {
                if (i < 60)
                {
                    // Training tweets 0..59: ten per topic.
                    classes[i / 10].Add("Training_Tweet:\t" + documents_Train[i]);
                }
                else if (i < 140)
                {
                    // Training tweets 60..139 all belong to the "other" class.
                    classes[6].Add("Training_Tweet:\t" + documents_Train[i]);
                }
                else
                {
                    // Test tweets: pick the class whose output neuron fired strongest.
                    int what = outputValues[i - 140].IndexOf(outputValues[i - 140].Max());
                    classes[what].Add("Test_Tweet:\t" + documents_Train[i]);
                }
            }

            outputLines.Add("");
            outputLines.Add("");
            outputLines.Add("");
            outputLines.AddRange(class1);
            outputLines.Add("");
            outputLines.Add("");
            outputLines.Add("");
            outputLines.AddRange(class2);
            outputLines.Add("");
            outputLines.Add("");
            outputLines.Add("");
            outputLines.AddRange(class3);
            outputLines.Add("");
            outputLines.Add("");
            outputLines.Add("");
            outputLines.AddRange(class4);
            outputLines.Add("");
            outputLines.Add("");
            outputLines.Add("");
            outputLines.AddRange(class5);
            outputLines.Add("");
            outputLines.Add("");
            outputLines.Add("");
            outputLines.AddRange(class6);
            outputLines.Add("");
            outputLines.Add("");
            outputLines.Add("");
            outputLines.AddRange(class7);
            outputLines.Add("");
            outputLines.Add("");
            outputLines.Add("");


            System.IO.File.WriteAllLines(@"Train_NN_With_Test_2.txt", outputLines.ToArray());

            Console.Write("Press any key to quit ..");
#endif

            Console.ReadKey();
        }