/// <summary>
/// Runs one round of supervised fine-tuning of the top network layer on a
/// background task, reporting per-epoch error back to the UI thread.
/// Bails out immediately when the model is not ready to classify.
/// </summary>
private void learnLayerSupervised() {
    if (!Main.CanClassify) { return; }
    // Capture the UI dispatcher now, on the calling (UI) thread, so the
    // background task can marshal progress updates back to it.
    Dispatcher dispatcher = Dispatcher.CurrentDispatcher;
    new Task(() => {
        // Supervised teacher targeting the last layer only.
        // NOTE(review): this uses Layers.Length - 1 while sibling code uses
        // Machines.Count - 1 for LayerIndex — confirm both resolve to the
        // same layer for this network topology.
        DeepNeuralNetworkLearning teacher = new DeepNeuralNetworkLearning(Main.Network) {
            Algorithm = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
            LayerIndex = Main.Network.Layers.Length - 1,
        };

        double[][] inputs, outputs;
        Main.Database.Training.GetInstances(out inputs, out outputs);

        // Gather learning data for the layer: propagate the raw inputs up to
        // the layer being trained.
        double[][] layerData = teacher.GetLayerInput(inputs);

        // Start running the learning procedure; shouldStop lets the UI cancel.
        for (int i = 0; i < Epochs && !shouldStop; i++) {
            double error = teacher.RunEpoch(layerData, outputs);
            // ContextIdle priority keeps error reporting from starving the UI.
            dispatcher.BeginInvoke((Action<int, double>)updateError,
                DispatcherPriority.ContextIdle, i + 1, error);
        }

        // Push the fine-tuned weights back into the visible network.
        Main.Network.UpdateVisibleWeights();
        IsLearning = false;
    }).Start();
}
/// <summary>
/// Builds a 28x28-input deep belief network (hidden layers 1000 and 10),
/// fine-tunes it for 5000 epochs on the supplied training pairs, and prints
/// the predicted class index for the first test sample.
/// </summary>
/// <param name="input">Training inputs, each of length 28*28.</param>
/// <param name="output">Training targets, one-hot per class.</param>
/// <param name="testInput">Test inputs; only element 0 is evaluated here.</param>
/// <param name="testOutput">Unused; kept for caller compatibility.</param>
public NeutralNetwork(double[][] input, double[][] output, double[][] testInput, double[][] testOutput)
{
    var network = new DeepBeliefNetwork(28 * 28, new int[] { 1000, 10 });
    new GaussianWeights(network).Randomize();
    network.UpdateVisibleWeights();

    // Supervised teacher targeting the topmost layer.
    var teacher = new DeepNeuralNetworkLearning(network)
    {
        Algorithm = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
        LayerIndex = network.Machines.Count - 1,
    };

    // Propagate inputs up to the trained layer once; lower layers are fixed.
    var layerData = teacher.GetLayerInput(input);
    for (int i = 0; i < 5000; i++)
    {
        teacher.RunEpoch(layerData, output);
    }
    network.UpdateVisibleWeights();

    // Fix: removed a dead loop that copied testInput[0] into a local array
    // that was never used — Compute already takes testInput[0] directly.
    var a = network.Compute(testInput[0]);
    // Index of the strongest output activation = predicted class.
    Console.WriteLine(Array.IndexOf(a, a.Max()));
}
/// <summary>
/// Creates a supervised teacher for <paramref name="network"/> that trains
/// its top layer with parallel resilient backpropagation.
/// </summary>
/// <param name="network">The deep belief network to be fine-tuned.</param>
/// <returns>A configured <see cref="DeepNeuralNetworkLearning"/> instance.</returns>
private DeepNeuralNetworkLearning CreateTeacher(DeepBeliefNetwork network)
{
    return new DeepNeuralNetworkLearning(network)
    {
        Algorithm = (net, layer) => new ParallelResilientBackpropagationLearning(net),
        LayerIndex = network.Machines.Count - 1,
    };
}
/// <summary>
/// Initializes a 784-input deep belief network (hidden layers 1000 and 10)
/// with Gaussian-randomized weights and a supervised teacher for its top layer.
/// </summary>
public NeutralNetwork()
{
    _network = new DeepBeliefNetwork(784, new int[] { 1000, 10 });

    // Randomize the machine weights, then mirror them into the visible network.
    var initializer = new GaussianWeights(_network);
    initializer.Randomize();
    _network.UpdateVisibleWeights();

    // Fine-tuning teacher: resilient backprop on the last layer.
    _teacher = new DeepNeuralNetworkLearning(_network);
    _teacher.Algorithm = (net, layer) => new ParallelResilientBackpropagationLearning(net);
    _teacher.LayerIndex = _network.Machines.Count - 1;
}
/// <summary>
/// Minimal demonstration: builds a tiny deep belief network and performs a
/// single supervised learning step on zero-valued input/output vectors.
/// </summary>
public void Run()
{
    // (inputsCount, layers) — two inputs, two-layer network.
    var neuralNetwork = new DeepBeliefNetwork(2, 2);

    // Attach the learner and select plain backpropagation for each layer.
    var deepLearning = new DeepNeuralNetworkLearning(neuralNetwork)
    {
        Algorithm = (net, layerIndex) => new BackPropagationLearning(net)
    };

    deepLearning.Run(new double[2], new double[2]);
}
/// <summary>
/// Trains the network <c>n</c> on inputs <paramref name="i"/>. When
/// <paramref name="o"/> is null, runs unsupervised contrastive-divergence
/// pre-training; otherwise runs supervised resilient-backprop fine-tuning.
/// Lazily loads the network from file <c>p</c> or builds one sized from the data.
/// WARNING: both training loops run forever by design; the per-epoch error is
/// published through field <c>e</c> and the caller is expected to stop the thread.
/// </summary>
/// <param name="i">Training inputs (one row per sample).</param>
/// <param name="o">Optional supervised targets; null selects unsupervised mode.</param>
/// <param name="outputLength">Output layer size when no targets are given.</param>
/// <param name="hiddenLayer">Hidden layer count; -1 derives it from the data.</param>
public void Train(double[][] i, double[][] o = null, int outputLength = 10, int hiddenLayer = -1)
{
    if (n == null)
    {
        if (File.Exists(p))
        {
            n = DeepBeliefNetwork.Load(p);
        }
        else
        {
            // Derive sizes from the data when not explicitly provided.
            outputLength = (o == null) ? outputLength : o[0].Length;
            hiddenLayer = (hiddenLayer == -1) ? (int)Math.Log(i[0].Length, outputLength) : hiddenLayer;

            // hiddenLayer full-width layers, then the output layer.
            List<int> layers = new List<int>();
            for (int j = 0; j < hiddenLayer; j++)
            {
                layers.Add(i[0].Length);
            }
            layers.Add(outputLength);

            n = new DeepBeliefNetwork(new BernoulliFunction(), i[0].Length, layers.ToArray());
            new GaussianWeights(n).Randomize();
        }
    }

    // Fix: replaced `dynamic t` with statically-typed teachers in each branch —
    // the types are known at compile time, so dynamic only hid typos from the compiler.
    if (o == null)
    {
        // Unsupervised pre-training of the top machine.
        var teacher = new DeepBeliefNetworkLearning(n)
        {
            Algorithm = (h, v, j) => new ContrastiveDivergenceLearning(h, v),
            LayerIndex = n.Machines.Count - 1,
        };
        while (true)
        {
            // NOTE(review): GetLayerInput is recomputed every epoch as in the
            // original; it could be hoisted if lower layers are fixed — confirm.
            e = teacher.RunEpoch(teacher.GetLayerInput(i));
        }
    }
    else
    {
        // Supervised fine-tuning of the top layer.
        var teacher = new DeepNeuralNetworkLearning(n)
        {
            Algorithm = (ann, j) => new ParallelResilientBackpropagationLearning(ann),
            LayerIndex = n.Machines.Count - 1,
        };
        while (true)
        {
            e = teacher.RunEpoch(teacher.GetLayerInput(i), o);
        }
    }
}
/// <summary>
/// Builds a deep belief network sized for the instance's training data and
/// fine-tunes it for 5000 supervised epochs.
/// </summary>
public void train()
{
    // Fix: inputsCount must be the per-sample feature dimension
    // (inputs[0].Length), not the number of samples (inputs.Length) —
    // every other network in this codebase is sized by feature count.
    network = new DeepBeliefNetwork(
        inputsCount: inputs[0].Length,
        hiddenNeurons: new int[] { 4, outputs[0].Length }); // hidden + output layer sizes

    // Create the DNN learning algorithm (resilient backprop on the top layer).
    var teacher = new DeepNeuralNetworkLearning(network)
    {
        Algorithm = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
        LayerIndex = network.Machines.Count - 1
    };

    // Train for 5000 epochs on the inputs propagated up to the trained layer.
    var layerData = teacher.GetLayerInput(inputs);
    for (int i = 0; i < 5000; i++)
    {
        teacher.RunEpoch(layerData, outputs);
    }

    // Push the learned weights into the visible network.
    network.UpdateVisibleWeights();
}
/// <summary>
/// Trains a deep belief network as a binary classifier: each boolean class
/// label is one-hot encoded ({1,0} for true, {0,1} for false) and the network
/// is fine-tuned for 5000 supervised epochs.
/// </summary>
/// <param name="inputs">Training samples (one feature row per sample).</param>
/// <param name="outputsClasses">Per-sample boolean class labels.</param>
/// <returns>The trained network.</returns>
public static unsafe DeepBeliefNetwork Train(ref double[][] inputs, ref bool[] outputsClasses)
{
    // One-hot encode the boolean labels. (`output == true` simplified to `output`.)
    double[][] outputs = outputsClasses
        .Select(output => output ? new double[] { 1, 0 } : new double[] { 0, 1 })
        .ToArray();

    // Fix: inputsCount must be the feature dimension of one sample
    // (inputs[0].Length), not the sample count (inputs.Length).
    DeepBeliefNetwork network = new DeepBeliefNetwork(
        inputsCount: inputs[0].Length,
        hiddenNeurons: new int[] { 250, 200, 200, 25 });

    // Supervised fine-tuning of the last layer with resilient backprop.
    var teacher = new DeepNeuralNetworkLearning(network)
    {
        Algorithm = (ann, i) => new ParallelResilientBackpropagationLearning(ann),
        LayerIndex = network.Machines.Count - 1
    };

    var layerData = teacher.GetLayerInput(inputs);
    for (int i = 0; i < 5000; i++)
    {
        teacher.RunEpoch(layerData, outputs);
    }

    network.UpdateVisibleWeights();
    return network;
}
/// <summary>
/// Experiment: tries to learn a DES key from (plaintext, ciphertext) pairs with
/// a deep belief network. Trains until the error drops below a threshold or the
/// epoch budget is exhausted, saving the best network seen, then reports how
/// many key bytes the network gets wrong per test triplet.
/// </summary>
static void Main(string[] args)
{
    // Generate the training data
    int keySize = 64;
    int messageSize = 64;
    int trainingSetSize = 100;
    List<Triplet> trainingSet = GenerateDESDataset(trainingSetSize, keySize, messageSize);
    double[][] inputTraining, outputTraining;
    Triplet.Transform2IO(trainingSet, out inputTraining, out outputTraining);

    // Generate the test data (same size as the training set)
    List<Triplet> testSet = GenerateDESDataset(trainingSetSize, keySize, messageSize);
    double[][] inputTest, outputTest;
    Triplet.Transform2IO(testSet, out inputTest, out outputTest);

    // Find the right sizes from the first triplet:
    // input = plaintext bits + ciphertext bits, output = key bits.
    int inputSize = trainingSet.First().original.Count() + trainingSet.First().encrypted.Count();
    int outputSize = trainingSet.First().key.Count();

    // Create a network
    var function = new SigmoidFunction(2.0);
    //ActivationNetwork network = new ActivationNetwork(function, inputSize, 25, outputSize);
    //ParallelResilientBackpropagationLearning teacher = new ParallelResilientBackpropagationLearning(network);
    DeepBeliefNetwork network = new DeepBeliefNetwork(inputSize, 10, outputSize);
    // NOTE(review): RunEpoch below is fed raw inputs, without GetLayerInput as
    // in the other samples — confirm DeepNeuralNetworkLearning supports this.
    Accord.Neuro.Learning.DeepNeuralNetworkLearning teacher = new DeepNeuralNetworkLearning(network);

    // Train the network
    int epoch = 0;
    double stopError = 0.1;
    int resets = 0;
    double minimumErrorReached = double.PositiveInfinity;
    // NOTE(review): `resets < 1` means this outer restart loop runs at most
    // once — likely left over from experimenting with multiple restarts.
    while (minimumErrorReached > stopError && resets < 1)
    {
        network.Randomize();
        //teacher.Reset(0.0125);
        double errorTrain = double.PositiveInfinity;
        for (epoch = 0; epoch < 500000 && errorTrain > stopError; epoch++)
        {
            errorTrain = teacher.RunEpoch(inputTraining, outputTraining) / (double)trainingSetSize;
            //Console.WriteLine("Epoch " + epoch + " = \t" + error);
            // Checkpoint whenever a new best training error is reached.
            if (errorTrain < minimumErrorReached)
            {
                minimumErrorReached = errorTrain;
                network.Save("cryptoDESNetwork.mlp");
            }
            Console.Clear();
            Console.WriteLine("Epoch : " + epoch);
            Console.WriteLine("Train Set Error : " + errorTrain.ToString("N2"));
            // Test error is computed every epoch — for monitoring only;
            // it does not influence training or checkpointing.
            double errorTest = teacher.ComputeError(inputTest, outputTest) / (double)inputTest.Count();
            Console.WriteLine("Test  Set Error : " + errorTest.ToString("N2"));
        }
        //Console.Write("Reset (" + error+")->");
        resets++;
    }
    Console.WriteLine();

    // Compute the real error: count wrong predicted key bytes per test triplet.
    foreach (Triplet tReal in testSet)
    {
        double[] rIn, rOut, pOut;
        byte[] brMsg, brEncrypted, brKey;
        tReal.ToBytes(out brMsg, out brEncrypted, out brKey);
        tReal.ToIO(out rIn, out rOut);
        pOut = network.Compute(rIn);
        // Rebuild a triplet from the real input and the predicted key bits.
        Triplet tPredicted = new Triplet(rIn, pOut, messageSize);
        byte[] bpMsg, bpEncrypted, bpKey;
        tPredicted.ToBytes(out bpMsg, out bpEncrypted, out bpKey);
        int wrongBytes = 0;
        for (int i = 0; i < keySize / 8; i++)
        {
            if (brKey[i] != bpKey[i]) { wrongBytes++; }
        }
        Console.WriteLine("Wrong bytes = " + wrongBytes);
        //Console.WriteLine("REAL = \n" + tReal.GetBytesForm());
        //Console.WriteLine("Predicted = \n" + tPredicted.GetBytesForm());
    }
    Console.ReadKey();
}