private static void TrainNetwork(LearningData learningData, String networkPath)
{
    ActivationNetwork network = new ActivationNetwork(
        UseBipolar ? (IActivationFunction)new BipolarSigmoidFunction(1)
                   : (IActivationFunction)new SigmoidFunction(),
        784, 784, 10);

    network.Randomize();
    new NguyenWidrow(network).Randomize();

    // Create teacher (earlier attempts kept for reference):
    //PerceptronLearning teacher = new PerceptronLearning(network);
    //BackPropagationLearning teacher = new BackPropagationLearning(network);
    ParallelResilientBackpropagationLearning teacher =
        new ParallelResilientBackpropagationLearning(network);
    //teacher.LearningRate = 0.0125;
    //teacher.Momentum = 0.5f;

    Double error = Double.MaxValue;
    Double previousError = Double.MaxValue;
    Stopwatch sw = new Stopwatch();
    Int32 epochIndex = 0;
    Int32 counter = 100;

    // Run a fixed budget of 100 epochs, logging error and wall time per epoch.
    while (counter > 0)
    {
        sw.Restart();
        error = teacher.RunEpoch(learningData.Input, learningData.Output);
        sw.Stop();

        //if (error > previousError)
        //{
        //    teacher.LearningRate = teacher.LearningRate * 0.5f;
        //}

        Console.WriteLine("{0} {1} {2}", epochIndex, error, sw.Elapsed.TotalSeconds);
        epochIndex++;
        previousError = error;
        counter--;
    }

    network.Save(networkPath);

    //Double[] output = network.Compute(learningData.Input[0]);
}
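A minimal inference sketch to pair with the trainer above: it reloads the file written by network.Save and classifies the first training sample. Network.Load and the Max(out index) extension are existing AForge/Accord calls; the rest is illustrative and not part of the original code.

    // Sketch only: reload the saved network and classify one 784-element sample.
    // Requires: using Accord.Math; using AForge.Neuro;
    ActivationNetwork trained = (ActivationNetwork)Network.Load(networkPath);
    double[] scores = trained.Compute(learningData.Input[0]); // 10 class scores
    int predicted;
    scores.Max(out predicted); // Accord extension: index of the largest score
    Console.WriteLine("Predicted class: {0}", predicted);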
public override void Learn()
{
    var inputs = new double[this.TrainingData.Count][];
    var outputs = new double[this.TrainingData.Count][];

    for (var i = 0; i < this.TrainingData.Count; i++)
    {
        var (x, y) = this.TrainingData[i];
        inputs[i] = x;

        // Bipolar one-hot target: +1 for the true class, -1 elsewhere.
        var o = new double[OutputCount];
        for (var j = 0; j < o.Length; j++)
        {
            o[j] = j == (int)y ? 1 : -1;
        }
        outputs[i] = o;
    }

    new NguyenWidrow(this._network).Randomize();
    var teacher = new ParallelResilientBackpropagationLearning(this._network);

    double error;
    var count = 0;
    do
    {
        error = teacher.RunEpoch(inputs, outputs);
        Console.WriteLine("Epoch {0}: {1}", ++count, error); // was "{0}回目: {1}"
    } while (error > 1e-5); // note: unbounded; loops forever if the error plateaus
}
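The stop condition above has no epoch cap, so Learn() never returns if the error plateaus above 1e-5. A guarded variant of the same loop is sketched below; maxEpochs is a hypothetical bound, everything else reuses the names defined in Learn().

    // Sketch: the same training loop with an epoch cap as a safety net.
    const int maxEpochs = 10000; // hypothetical bound
    double error;
    var count = 0;
    do
    {
        error = teacher.RunEpoch(inputs, outputs);
        Console.WriteLine("Epoch {0}: {1}", ++count, error);
    } while (error > 1e-5 && count < maxEpochs);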
private void BtnLearn_Click(object sender, EventArgs e)
{
    lblStatus.Text = "Please wait a few minutes!";
    lblStatus.ForeColor = Color.Red;
    lblStatus.Visible = true;

    int rows = dgvLearnSet.Rows.Count;
    double[][] input = new double[rows - 1][];
    double[][] output = new double[rows - 1][];

    teacherBL = new ParallelResilientBackpropagationLearning(network);
    double error = double.MaxValue;

    for (int i = 0; i < rows - 1; i++)
    {
        input[i] = (double[])dgvLearnSet.Rows[i].Cells[3].Value;
        output[i] = Out(Convert.ToInt32(dgvLearnSet.Rows[i].Cells[2].Value));
    }

    // Train until the epoch error drops below 0.1 (no upper bound on epochs).
    while (true)
    {
        error = teacherBL.RunEpoch(input, output);
        if (error < 0.1)
        {
            break;
        }
    }

    btnVerify.Enabled = true;
    btnClassify.Enabled = true;
    lblStatus.Text = "Training complete!";
    lblStatus.ForeColor = Color.DarkGreen;
}
// Use this for initialization
void Start()
{
    _neuralNetwork = new ActivationNetwork(new SigmoidFunction(sigmoidAlphaValue), 2, 2, 1);
    _neuralTeacher = new ParallelResilientBackpropagationLearning(_neuralNetwork);

    // Rprop adapts per-weight step sizes; Reset() would set the initial update step.
    //_neuralTeacher.Reset(initialStep);
}
public override double TrainOnDataSet(SamplesSet samplesSet, int epochs_count,
    double acceptableError, bool parallel = true)
{
    // First, build the input and output arrays.
    double[][] inputs = new double[samplesSet.Count][];
    double[][] outputs = new double[samplesSet.Count][];

    // Copy the arrays from samplesSet into inputs and outputs.
    for (int i = 0; i < samplesSet.Count; ++i)
    {
        inputs[i] = samplesSet[i].input;
        outputs[i] = samplesSet[i].output;
    }

    // Current epoch counter.
    int epoch_to_run = 0;

    // Create the teacher - either parallel or sequential.
    ISupervisedLearning teacher;
    if (parallel)
    {
        teacher = new ParallelResilientBackpropagationLearning(network);
    }
    else
    {
        teacher = new ResilientBackpropagationLearning(network);
    }

    double error = double.PositiveInfinity;

#if DEBUG
    StreamWriter errorsFile = File.CreateText("errors.csv");
#endif

    stopWatch.Restart();
    while (epoch_to_run < epochs_count && error > acceptableError)
    {
        epoch_to_run++;
        error = teacher.RunEpoch(inputs, outputs);
#if DEBUG
        errorsFile.WriteLine(error);
#endif
        updateDelegate((epoch_to_run * 1.0) / epochs_count, error, stopWatch.Elapsed);
    }
#if DEBUG
    errorsFile.Close();
#endif

    updateDelegate(1.0, error, stopWatch.Elapsed);
    stopWatch.Stop();
    return error;
}
public Network(int input_sz, int output_sz = 10)
{
    input_size = input_sz;
    output_size = output_sz;

    net = new ActivationNetwork(new Accord.Neuro.BipolarSigmoidFunction(),
        input_size, input_size * 3, input_size * 2, input_size, 100, output_size);

    backprop = new ParallelResilientBackpropagationLearning(net);
    nguen = new NguyenWidrow(net);
    nguen.Randomize();
}
public override void addModality(Signal s, string label = null)
{
    base.addModality(s, label);

    // Append the output layer size to the hidden-layer spec.
    int[] hiddenWithOutput = new int[hiddenLayers.Count() + 1];
    hiddenLayers.CopyTo(hiddenWithOutput, 0);
    hiddenWithOutput[hiddenLayers.Count()] = InputCount;

    network = new ActivationNetwork(new SigmoidFunction(sigmoidAlphaValue), InputCount, hiddenWithOutput);
    teacher = new ParallelResilientBackpropagationLearning(network);

    // Set the initial Rprop update step (Rprop has no learning rate/momentum to tune here).
    teacher.Reset(initialStep);
}
public double TrainNetwork(float[][] inputs, float[][] outputs, int samplingCellsCount,
    int inputNeuronsCount, int outputNeuronsCount, int hiddenLayerNeuronsCount,
    double learningRate, int iterations,
    System.Windows.Forms.Label labelTrainTest,
    System.Windows.Forms.Label labelTrainingAccuracy,
    System.Windows.Forms.Label labelValidationAccuracy,
    ZedGraphControl zedGraphControl)
{
    network = new ActivationNetwork(
        new SigmoidFunction(),
        inputNeuronsCount,
        hiddenLayerNeuronsCount,
        outputNeuronsCount);

    ParallelResilientBackpropagationLearning teacher =
        new ParallelResilientBackpropagationLearning(network);
    teacher.LearningRate = learningRate;

    // 80/20 train/validation split.
    double[][] trainInput = GeneralOpertor.GetArray(inputs, 0.8, true, inputNeuronsCount);
    double[][] testInput = GeneralOpertor.GetArray(inputs, 0.8, false, inputNeuronsCount);
    double[][] trainOutput = GeneralOpertor.GetArray(outputs, 0.8, true, outputNeuronsCount);
    double[][] testOutput = GeneralOpertor.GetArray(outputs, 0.8, false, outputNeuronsCount);

    int iteration = 0;
    double error = 1;

    zedGraphControl.GraphPane.CurveList.Clear();
    PointPairList list = new PointPairList();

    while ((error > 0.01) && (iteration <= iterations))
    {
        error = teacher.RunEpoch(trainInput, trainOutput) * 2 / samplingCellsCount;

        if (iteration == 0)
        {
            list.Add(iteration, error);
            zedGraphControl.GraphPane.AddCurve("Error", list,
                System.Drawing.Color.FromArgb(255, 0, 0));
            zedGraphControl.GraphPane.AxisChange();
        }
        else if ((iteration % 30 == 0) || (iteration == iterations))
        {
            zedGraphControl.GraphPane.CurveList[0].AddPoint(iteration, error);
            zedGraphControl.GraphPane.AxisChange();
            labelTrainTest.Text = "Iteration:" + iteration + ", Error:" + error.ToString("0.00000");
            Application.DoEvents();
        }

        zedGraphControl.Refresh();
        iteration++;
    }

    double accuracy = GetAccuracy(trainInput, trainOutput, network);
    labelTrainingAccuracy.Text = (accuracy * 100).ToString("0.000") + " %";
    accuracy = GetAccuracy(testInput, testOutput, network);
    labelValidationAccuracy.Text = (accuracy * 100).ToString("0.000") + " %";

    return error;
}
/// <summary>
/// Trains the neural network.
/// </summary>
public void Train()
{
    updateConsoleEvent("--------- neural network training started ------");

    var samples = getSamples(this.numOfSample);
    double[][] inputs = (from cell in samples select getOneInput(cell)).ToArray<double[]>();
    int[] classes = (from cell in samples select getOneClass(cell)).ToArray<int>();
    double[][] outputs = Accord.Statistics.Tools.Expand(classes, 0, +1);

    // Create an activation function for the net
    //var function = new BipolarSigmoidFunction();
    var function = new SigmoidFunction();

    // Create an activation network sized from the data: the hidden layer
    // gets two thirds of the input count.
    int numOfInput = inputs[0].Length;
    int numOfHidden = numOfInput * 2 / 3;
    int numOfOut = outputs[0].Length;
    this.network = new ActivationNetwork(function, numOfInput, numOfHidden, numOfOut);

    // Randomly initialize the network
    new NguyenWidrow(this.network).Randomize();

    // Teach the network using parallel Rprop:
    var teacher = new ParallelResilientBackpropagationLearning(this.network);

    double correctRate = 0.0;
    int times = this.timesOfTrain;
    int cnt = 0;
    while (cnt < times)
    {
        teacher.RunEpoch(inputs, outputs);
        if (cnt % 10 == 0)
        {
            correctRate = GetError(inputs, outputs, classes);
            updateConsoleEvent("accuracy: " + correctRate * 100 + "%");
        }
        cnt++;
    }

    updateConsoleEvent("training finished; final accuracy: " + correctRate * 100 + "%");
    updateConsoleEvent("--------- neural network training finished ------");
}
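The Tools.Expand call above is the step that is easiest to misread: it turns a vector of class indices into one indicator row per sample. A small self-contained sketch of its behavior follows; the signature is the one already used in these snippets, and the sample values are illustrative.

    using System;
    using Accord.Statistics;

    class ExpandDemo
    {
        static void Main()
        {
            // Expand(labels, negative, positive): one row per label, one column
            // per class; 'positive' marks the true class, 'negative' the rest.
            int[] classes = { 0, 2, 1 };
            double[][] targets = Tools.Expand(classes, 0, +1);
            foreach (double[] row in targets)
                Console.WriteLine(string.Join(", ", row));
            // Prints: 1, 0, 0
            //         0, 0, 1
            //         0, 1, 0
        }
    }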
public void CreateActivationNetworkTest()
{
    double[][] inputs =
    {
        new double[] { 1, 1, 1, 0, 0, 0 },
        new double[] { 1, 0, 1, 0, 0, 0 },
        new double[] { 1, 1, 1, 0, 0, 0 },
        new double[] { 0, 0, 1, 1, 1, 0 },
        new double[] { 0, 0, 1, 1, 0, 0 },
        new double[] { 0, 0, 1, 1, 1, 0 }
    };

    double[][] outputs =
    {
        new double[] { 0 },
        new double[] { 0 },
        new double[] { 0 },
        new double[] { 1 },
        new double[] { 1 },
        new double[] { 1 },
    };

    RestrictedBoltzmannMachine network = createNetwork(inputs);

    ActivationNetwork ann = network.ToActivationNetwork(new SigmoidFunction(1), outputs: 1);

    ParallelResilientBackpropagationLearning teacher =
        new ParallelResilientBackpropagationLearning(ann);

    for (int i = 0; i < 100; i++)
    {
        teacher.RunEpoch(inputs, outputs);
    }

    double[] actual = new double[outputs.Length];
    for (int i = 0; i < inputs.Length; i++)
    {
        actual[i] = ann.Compute(inputs[i])[0];
    }

    Assert.AreEqual(0, actual[0], 1e-10);
    Assert.AreEqual(0, actual[1], 1e-10);
    Assert.AreEqual(0, actual[2], 1e-10);
    Assert.AreEqual(1, actual[3], 1e-10);
    Assert.AreEqual(1, actual[4], 1e-10);
    Assert.AreEqual(1, actual[5], 1e-10);
}
public void CreateActivationNetworkTest()
{
    double[][] inputs =
    {
        new double[] { 1, 1, 1, 0, 0, 0 },
        new double[] { 1, 0, 1, 0, 0, 0 },
        new double[] { 1, 1, 1, 0, 0, 0 },
        new double[] { 0, 0, 1, 1, 1, 0 },
        new double[] { 0, 0, 1, 1, 0, 0 },
        new double[] { 0, 0, 1, 1, 1, 0 }
    };

    double[][] outputs =
    {
        new double[] { 0 },
        new double[] { 0 },
        new double[] { 0 },
        new double[] { 1 },
        new double[] { 1 },
        new double[] { 1 },
    };

    DeepBeliefNetwork network = createNetwork(inputs);

    ParallelResilientBackpropagationLearning teacher =
        new ParallelResilientBackpropagationLearning(network);

    for (int i = 0; i < 100; i++)
    {
        teacher.RunEpoch(inputs, outputs);
    }

    double[] actual = new double[outputs.Length];
    for (int i = 0; i < inputs.Length; i++)
    {
        actual[i] = network.Compute(inputs[i])[0];
    }

    Assert.AreEqual(0, actual[0], 1e-10);
    Assert.AreEqual(0, actual[1], 1e-10);
    Assert.AreEqual(0, actual[2], 1e-10);
    Assert.AreEqual(1, actual[3], 1e-10);
    Assert.AreEqual(1, actual[4], 1e-10);
    Assert.AreEqual(1, actual[5], 1e-10);
}
public void Train(int epoch, FinishEpoch finishEpochEvent, FinishLearning finish)
{
    new NguyenWidrow(network).Randomize();

    ParallelResilientBackpropagationLearning teacher =
        new ParallelResilientBackpropagationLearning(network);

    double[,] s = new double[outputs.Length, 2];
    int iteration = 1;

    while (true)
    {
        double error = teacher.RunEpoch(inputs, outputs) / outputs.Length;

        // calculate solution
        for (int j = 0; j < outputs.Length; j++)
        {
            double y = network.Compute(inputs[j])[0];
            s[j, 1] = y.Scale(-1, 1, -100, 100);
            s[j, 0] = j;
        }

        // calculate error
        double learningError = 0.0;
        for (int j = 0; j < outputs.Length; j++)
        {
            double[] x = inputs[j];
            double expected = outputs[j][0];
            double actual = network.Compute(x)[0];
            learningError += Math.Abs(expected - actual);
        }

        finishEpochEvent(s, error);

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((epoch != 0) && (iteration > epoch))
        {
            break;
        }
    }

    finish();
}
public void RunEpochTest1()
{
    Accord.Math.Tools.SetupGenerator(0);

    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1,  1 },
        new double[] {  1, -1 },
        new double[] {  1,  1 }
    };

    double[][] output =
    {
        new double[] { -1 },
        new double[] {  1 },
        new double[] {  1 },
        new double[] { -1 }
    };

    Neuron.RandGenerator = new ThreadSafeRandom(0);
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 2, 1);

    var teacher = new ParallelResilientBackpropagationLearning(network);

    double error = 1.0;
    while (error > 1e-5)
    {
        error = teacher.RunEpoch(input, output);
    }

    for (int i = 0; i < input.Length; i++)
    {
        double actual = network.Compute(input[i])[0];
        double expected = output[i][0];

        Assert.AreEqual(expected, actual, 0.01);
        Assert.IsFalse(Double.IsNaN(actual));
    }
}
public void TrainNN(List<double[]> inputs, List<int> outputs, List<double> weights = null)
{
    // Z-score the new samples together with the existing ones, then take
    // back only the rows belonging to the new samples.
    var tempInputs = _inputs.Concat(inputs).ToArray();
    tempInputs = Accord.Statistics.Tools.ZScores(tempInputs);
    var trainingInputs = tempInputs.Skip(_inputs.Count).Take(inputs.Count).ToArray();

    var trainingOutputs = Jagged.OneHot(outputs.ToArray());

    var network = new ActivationNetwork(new GaussianFunction(), trainingInputs.First().Length, 5, 2);
    _dbn = network;

    // Initialize the network with Gaussian weights
    new GaussianWeights(network, 0.1).Randomize();

    // Setup the learning algorithm.
    var teacher = new ParallelResilientBackpropagationLearning(network);

    double error = Double.MaxValue;
    for (int i = 0; i < 5000; i++)
    {
        error = teacher.RunEpoch(trainingInputs, trainingOutputs);
    }

    // Test the resulting accuracy.
    int correct = 0;
    for (int i = 0; i < trainingInputs.Length; i++)
    {
        // Evaluate on the normalized training inputs (the original code
        // computed on the raw, un-normalized samples here, which skewed
        // the reported accuracy).
        double[] outputValues = network.Compute(trainingInputs[i]);
        double outputResult = outputValues.First() >= 0.5 ? 1 : 0;
        if (outputResult == trainingOutputs[i].First())
        {
            correct++;
        }
    }

    Console.WriteLine("DBN Correct: {0} Total: {1} Accuracy: {2}, Training Error: {3}",
        correct, trainingOutputs.Length,
        (double)correct / (double)trainingOutputs.Length, error);
}
/// <summary>
/// Constructs a new IONodeAFMLP node with separate bottom-up and top-down MLPs.
/// </summary>
/// <param name="inputDim">The dimension of the input</param>
/// <param name="outputDim">The dimension of the output</param>
/// <param name="hiddenLayersBottomUp">Hidden layer sizes for the bottom-up network</param>
/// <param name="hiddenLayerTopDown">Hidden layer sizes for the top-down network</param>
public IONodeAFMLP(Point2D inputDim, Point2D outputDim, int[] hiddenLayersBottomUp, int[] hiddenLayerTopDown)
    : base(inputDim, outputDim)
{
    int inputCount = (int)inputDim.X * (int)inputDim.Y;
    int outputCount = (int)outputDim.X * (int)outputDim.Y;

    // Bottom-up network: hidden layers plus an output layer of outputCount.
    int[] hiddenWithOutput = new int[hiddenLayersBottomUp.Count() + 1];
    hiddenLayersBottomUp.CopyTo(hiddenWithOutput, 0);
    hiddenWithOutput[hiddenLayersBottomUp.Count()] = outputCount;
    bottomUpNet = new ActivationNetwork(new SigmoidFunction(sigmoidAlphaValue), inputCount, hiddenWithOutput);
    bottomUpTeacher = new ParallelResilientBackpropagationLearning(bottomUpNet);

    // Top-down network: mirrors the bottom-up one, mapping outputs back to inputs.
    int[] hiddenWithInput = new int[hiddenLayerTopDown.Count() + 1];
    hiddenLayerTopDown.CopyTo(hiddenWithInput, 0);
    hiddenWithInput[hiddenLayerTopDown.Count()] = inputCount;
    topDownNet = new ActivationNetwork(new SigmoidFunction(sigmoidAlphaValue), outputCount, hiddenWithInput);
    topDownTeacher = new ParallelResilientBackpropagationLearning(topDownNet);

    // Set the initial Rprop update step.
    bottomUpTeacher.Reset(initialStep);
    topDownTeacher.Reset(initialStep);
}
/// <summary>
/// Trains the network on a single sample.
/// </summary>
/// <param name="sample"></param>
/// <returns>Number of iterations needed to reach the desired error level</returns>
public override int Train(Sample sample, bool parallel = true)
{
    // Create the teacher - either parallel or sequential.
    ISupervisedLearning teacher;
    if (parallel)
    {
        teacher = new ParallelResilientBackpropagationLearning(network);
    }
    else
    {
        teacher = new ResilientBackpropagationLearning(network);
    }

    int iters = 1;
    // Note: unbounded; loops until the single-sample error falls below the threshold.
    while (teacher.Run(sample.input, sample.output) > desiredErrorValue)
    {
        ++iters;
    }
    return iters;
}
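For contrast with the epoch-based snippets elsewhere in this listing: Run performs one learning iteration on a single input/output pair and returns that sample's error, while RunEpoch iterates over the whole sample set and returns the summed error. A minimal sketch, reusing the network and sample from above; the inputs and outputs arrays are hypothetical.

    // Sketch: single-sample vs. whole-set training with the same teacher.
    var teacher = new ParallelResilientBackpropagationLearning(network);
    double sampleError = teacher.Run(sample.input, sample.output); // one pattern
    double epochError = teacher.RunEpoch(inputs, outputs);         // all patterns, summed error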
public EgoAlloValue(IEqualityComparer<int[]> StateComparer, IEqualityComparer<int[]> ActionComparer,
    List<int[]> AvailableActions, int[] StartState, params object[] parameters)
    : base(StateComparer, ActionComparer, AvailableActions, StartState, parameters)
{
    if (parameters.Length > 0)
    {
        fullPredictionMode = (bool)parameters[0];
    }

    stateComparer = StateComparer;
    actionComparer = ActionComparer;
    availableActions = AvailableActions;

    alloModel = new ModelBasedValue<int[], int[]>(StateComparer, ActionComparer,
        availableActions, StartState, true)
    {
        defaultQ = 10.3
    };
    egoModel = new ModelFreeValue<int[], int[]>(StateComparer, actionComparer,
        availableActions, StartState)
    {
        alpha = 0.9
    };

    network = new ActivationNetwork(new BipolarSigmoidFunction(2), 10, 10, 3);
    teacher = new ParallelResilientBackpropagationLearning(network);
}
static void Main(string[] args)
{
    if (Directory.Exists("./Source/Network") == false)
    {
        Directory.CreateDirectory("./Source/Network");
    }

    cfg = new Config("Classifier.config");
    log("info", "Config: " + cfg.ToString());
    //File.WriteAllText("Classifier.config", JSonParser.Save(cfg, typeof(Config)));

    // Force '.' as the decimal separator regardless of the OS locale.
    System.Globalization.CultureInfo customCulture =
        (System.Globalization.CultureInfo)System.Threading.Thread.CurrentThread.CurrentCulture.Clone();
    customCulture.NumberFormat.NumberDecimalSeparator = ".";
    System.Threading.Thread.CurrentThread.CurrentCulture = customCulture;

    // Convert each CSV file to input and output vectors.
    foreach (var file in cfg.Input)
    {
        List<Tuple<double[], double>> dataset = new List<Tuple<double[], double>>();
        string root = Path.GetFileNameWithoutExtension(file);

        using (CSVParser parser = new CSVParser().Load(file))
        {
            string[] buff;
            double[] inputBuff;
            double outputBuff;
            while ((buff = parser.Next()) != null)
            {
                inputBuff = buff.Take(buff.Length - 1).ToArray().ToDouble();
                outputBuff = Double.Parse(buff.Skip(buff.Length - 1).ToArray()[0]);
                dataset.Add(new Tuple<double[], double>(inputBuff, outputBuff));
            }
        }

        dataset = dataset
            .Where(x => cfg.Filter.IsInside(x.Item2))
            .Take(cfg.Filter.Max)
            .ToList();
        log("info", "Final dataset size: " + dataset.Count);

        if (cfg.Network.Shuffle)
        {
            dataset.Shuffle();
        }

        // Train/validation split.
        var trainData = dataset
            .Take((int)(dataset.Count * (1 - cfg.Network.ValidationPercent)))
            .ToArray();
        var validData = dataset
            .Skip((int)(dataset.Count * (1 - cfg.Network.ValidationPercent)))
            .ToArray();

        var trainInput = trainData.Select(x => x.Item1).ToArray();
        var trainOutput = trainData.Select(x => new double[] { x.Item2 }).ToArray();
        var validInput = validData.Select(x => x.Item1).ToArray();
        var validOutput = validData.Select(x => new double[] { x.Item2 }).ToArray();

        // Hidden layers from the config plus a single output neuron.
        var topology = new List<int>(cfg.Network.Layers) { 1 };
        var network = new ActivationNetwork(
            new SigmoidFunction(),
            trainInput[0].Length,
            topology.ToArray());
        var teacher = new ParallelResilientBackpropagationLearning(network);

        LogInfo current = new LogInfo()
        {
            error = double.PositiveInfinity,
            iteration = 0,
            percent = 0,
            validError = double.PositiveInfinity
        };
        LogInfo better = current;
        double previous;

        do
        {
            previous = current.error;
            current.error = teacher.RunEpoch(trainInput, trainOutput);

            if (cfg.MoreInfoLog)
            {
                // Validation accuracy (threshold the single output at 0.5).
                int[] answers = validInput.Apply(network.Compute).GetColumn(0)
                    .Apply(x => x > 0.5 ? 1 : 0);
                current.validError = teacher.ComputeError(validInput, validOutput);
                int[] outputs = validOutput.Apply(x => x[0] > 0.5 ? 1 : 0);
                int pos = 0;
                for (int j = 0; j < answers.Length; j++)
                {
                    if (answers[j] == outputs[j]) { pos++; }
                }
                current.validPercent = (double)pos / (double)answers.Length;

                // Training accuracy.
                answers = trainInput.Apply(network.Compute).GetColumn(0)
                    .Apply(x => x > 0.5 ? 1 : 0);
                outputs = trainOutput.Apply(x => x[0] > 0.5 ? 1 : 0);
                pos = 0;
                for (int j = 0; j < answers.Length; j++)
                {
                    if (answers[j] == outputs[j]) { pos++; }
                }
                current.percent = (double)pos / (double)answers.Length;

                log(current.iteration, current.error, current.validError,
                    current.percent, current.validPercent);
            }
            else
            {
                smalllog(current.iteration, current.error);
            }

            // Stopping criteria.
            if (current.error < cfg.Cancelation.Error) { break; }
            if (Math.Abs(previous - current.error) < cfg.Cancelation.Step) { break; }
            if (current.iteration == cfg.Cancelation.MaxEpoch) { break; }
            if (current.percent >= cfg.Validation.Percent) { break; }

            current.iteration++;

            // Keep the best network seen so far, judged by validation accuracy.
            if (better.validPercent < current.validPercent)
            {
                better = current;
                SaveNetwork($"Best_{root}", validInput, validOutput, network, better, root);
            }
            better.WriteTop();
        } while (true);

        SaveNetwork(root, trainInput, trainOutput, network, current, root);
    }
}
// Worker thread
void SearchSolution()
{
    // number of learning samples
    int samples = data.Length - predictionSize - windowSize;
    // data transformation factor
    double factor = 1.7 / chart.RangeY.Length;
    double yMin = chart.RangeY.Min;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[windowSize];
        output[i] = new double[1];

        // set input: sliding window of past values, scaled to [-0.85, 0.85]
        for (int j = 0; j < windowSize; j++)
        {
            input[i][j] = (data[i + j] - yMin) * factor - 0.85;
        }
        // set output: the next value after the window
        output[i][0] = (data[i + windowSize] - yMin) * factor - 0.85;
    }

    // create multi-layer neural network
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        windowSize, windowSize * 2, 1);

    // create teacher
    var teacher = new ParallelResilientBackpropagationLearning(network);
    teacher.Reset(initialStep);

    // iterations
    int iteration = 1;

    // solution array
    int solutionSize = data.Length - windowSize;
    double[,] solution = new double[solutionSize, 2];
    double[] networkInput = new double[windowSize];

    // calculate X values to be used with solution function
    for (int j = 0; j < solutionSize; j++)
    {
        solution[j, 0] = j + windowSize;
    }

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution and learning and prediction errors
        double learningError = 0.0;
        double predictionError = 0.0;

        // go through all the data
        for (int i = 0, n = data.Length - windowSize; i < n; i++)
        {
            // put values from current window as network's input
            for (int j = 0; j < windowSize; j++)
            {
                networkInput[j] = (data[i + j] - yMin) * factor - 0.85;
            }

            // evaluate the function
            solution[i, 1] = (network.Compute(networkInput)[0] + 0.85) / factor + yMin;

            // calculate prediction error (the last predictionSize points are held out)
            if (i >= n - predictionSize)
            {
                predictionError += Math.Abs(solution[i, 1] - data[windowSize + i]);
            }
            else
            {
                learningError += Math.Abs(solution[i, 1] - data[windowSize + i]);
            }
        }

        // update solution on the chart
        chart.UpdateDataSeries("solution", solution);

        // set current iteration's info
        SetText(currentIterationBox, iteration.ToString());
        SetText(currentLearningErrorBox, learningError.ToString("F3"));
        SetText(currentPredictionErrorBox, predictionError.ToString("F3"));

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
        {
            break;
        }
    }

    // show new solution
    for (int j = windowSize, k = 0, n = data.Length; j < n; j++, k++)
    {
        AddSubItem(dataList, j, solution[k, 1].ToString());
    }

    // enable settings controls
    EnableControls(true);
}
// Worker thread
void SearchSolution()
{
    // number of learning samples
    int samples = data.GetLength(0);
    // data transformation factors
    double yFactor = 1.7 / chart.RangeY.Length;
    double yMin = chart.RangeY.Min;
    double xFactor = 2.0 / chart.RangeX.Length;
    double xMin = chart.RangeX.Min;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[1];
        output[i] = new double[1];

        // set input
        input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0;
        // set output
        output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85;
    }

    // create multi-layer neural network
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        1, neuronsInFirstLayer, 1);

    if (useNguyenWidrow)
    {
        NguyenWidrow initializer = new NguyenWidrow(network);
        initializer.Randomize();
    }

    // create teacher
    var teacher = new ParallelResilientBackpropagationLearning(network);

    // iterations
    int iteration = 1;

    // solution array
    double[,] solution = new double[50, 2];
    double[] networkInput = new double[1];

    // calculate X values to be used with solution function
    for (int j = 0; j < 50; j++)
    {
        solution[j, 0] = chart.RangeX.Min + (double)j * chart.RangeX.Length / 49;
    }

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution
        for (int j = 0; j < 50; j++)
        {
            networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
            solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
        }
        chart.UpdateDataSeries("solution", solution);

        // calculate error
        double learningError = 0.0;
        for (int j = 0, k = data.GetLength(0); j < k; j++)
        {
            networkInput[0] = input[j][0];
            learningError += Math.Abs(data[j, 1] -
                ((network.Compute(networkInput)[0] + 0.85) / yFactor + yMin));
        }

        // set current iteration's info
        SetText(currentIterationBox, iteration.ToString());
        SetText(currentErrorBox, learningError.ToString("F3"));

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
        {
            break;
        }
    }

    // enable settings controls
    EnableControls(true);
}
private void trainToolStripMenuItem_Click_1(object sender, EventArgs e)
{
    ImageTrainDataSet testDataSet = FeatureDetector.GetAllTrainImageData(testDataPath, configure.trainFolders);
    testDataSet.flgCache = true;

    int[] labelIndexs = trainData.GetLabelIndexs();
    String[] labels = trainData.GetLabels();

    var bow = Accord.IO.Serializer.Load<BagOfVisualWords>(dataPath + String.Format(@"\train-{0}.bow", bowSize));
    double[][] features = trainData.GetFeature(bow, mask);
    int numOutput = trainData.GetNumOutput();
    var function = new SigmoidFunction();

    logger.logStr("Start Training");

    // Retrain from scratch (up to 100 restarts) until a network classifies
    // the train, test and extra test sets without error.
    bool flgFound = false;
    int count = 0;
    while ((flgFound == false) && (count < 100))
    {
        count++;
        var network = new ActivationNetwork(function, bow.NumberOfOutputs, 20, numOutput);
        new NguyenWidrow(network).Randomize();
        var teacher = new ParallelResilientBackpropagationLearning(network);

        BowImageClassifier trainImgClassifier = new BowImageClassifier();
        trainImgClassifier.Init(bow, network, mask);

        // create output
        double[][] outputs = trainData.GetOutputs(numOutput);

        double avgError = 10000.0;
        double prevError = avgError;
        double bestError = avgError;
        int errorCount = 0;

        while ((errorCount < 3) && (avgError > 0.00001))
        {
            // run 10 epochs and average their errors
            double[] errors = new double[10];
            for (int i = 0; i < 10; i++)
            {
                errors[i] = teacher.RunEpoch(features, outputs);
            }
            avgError = errors.Average();

            if (prevError > avgError)
            {
                int trainError = trainImgClassifier.Evaluate(trainData);
                int testError = trainImgClassifier.Evaluate(testData);
                int testSetError = trainImgClassifier.Evaluate(testDataSet);
                logger.logStr(String.Format("{0} {1} {2} {3} {4} #{5}",
                    avgError, prevError, trainError, testError, testSetError, errorCount));
                prevError = avgError;

                // save best error
                if (bestError > avgError)
                {
                    bestError = avgError;
                    Accord.IO.Serializer.Save(network, dataPath + String.Format(@"\train-{0}.net", bow.NumberOfOutputs));
                }
                if (trainError + testError + testSetError == 0)
                {
                    flgFound = true;
                    Accord.IO.Serializer.Save(network, dataPath + String.Format(@"\train-{0}.net", bow.NumberOfOutputs));
                    break;
                }
            }
            else
            {
                logger.logStr(String.Format("{0}", avgError));
                prevError = 10000.0;
                errorCount++;
            }
            Application.DoEvents();
        }
        logger.logStr("Done " + bestError + " " + count);
    }
}
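Since the loop above persists the best network with Accord.IO.Serializer.Save, a matching reload may be useful for context. The path format below mirrors the save call; using features[0] as the probe input is illustrative only.

    // Sketch: reload the best serialized network and score one feature vector.
    var best = Accord.IO.Serializer.Load<ActivationNetwork>(
        dataPath + String.Format(@"\train-{0}.net", bow.NumberOfOutputs));
    double[] scores = best.Compute(features[0]);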
static void Main(string[] args)
{
    int VERBOSITY = 0;

    StreamWriter logFile = new StreamWriter("logFileParallel.csv");
    logFile.WriteLine("maze,trainingSet,trainSetSize,testSet,performance,lengthPerformance,trainingError,sizeOfSetUniqueElements");

    for (int run = 0; run < 10; run++)
    {
        Console.WriteLine("****************************************************RUN NUMBER " + run);

        //--------------------------------------------- CREATE MAZE ---------------------------------------------
        Maze maze = null;
        string mazeType = "";
        Dictionary<string, List<Sequence>> setsToUse = new Dictionary<string, List<Sequence>>();

        for (int mazeToUse = 1; mazeToUse < 2; mazeToUse++)
        {
            if (mazeToUse == 0) { Generate_T(out maze, out setsToUse); mazeType = "Maze-T"; }
            if (mazeToUse == 1) { Generate_21(out maze, out setsToUse); mazeType = "Maze-21"; }
            if (mazeToUse == 2) { Generate_25(out maze, out setsToUse); mazeType = "Maze-25"; }

            //double randomPathQuality = 0.0;
            //ComputePathQualityRandom(maze, out randomPathQuality, 1000);
            //Console.WriteLine("Random Path Quality = " + randomPathQuality.ToString("N2"));
            //Console.ReadKey();

            // Loop through all the training sets
            foreach (string trainSetName in setsToUse.Keys)
            {
                Console.WriteLine("------------------");
                Console.WriteLine(mazeType + " " + trainSetName);
                List<Sequence> setToUse = setsToUse[trainSetName];

                //--------------------------------------------- PREPARE DATA ---------------------------------------------
                bool forceBidirectionality = true;
                List<Triplet> triplets = GetTripletsFromSequences(setToUse, forceBidirectionality);

                if (VERBOSITY > 1)
                {
                    Console.WriteLine("\n---------------------------------------------------------");
                    Console.WriteLine("TRAINING SET : " + trainSetName);
                    Console.WriteLine("---------------------------------------------------------");
                    Console.WriteLine("Training set elements");
                    foreach (Triplet item in triplets)
                    {
                        Console.WriteLine(item.ToString());
                    }
                    Console.WriteLine("---------------------------------------------------------");
                }

                double[][] input;
                double[][] output;
                GetTrainingSet(maze, triplets, out input, out output);

                //FOR AUTOASSOCIATION
                //double[][] io;
                //GetTrainingSet(maze, triplets, out io);

                //--------------------------------------------- CREATE NETWORK ---------------------------------------------
                // Create the network & train
                //var function = new BipolarSigmoidFunction();
                var function = new SigmoidFunction(2.0);
                ActivationNetwork goalNetwork = new ActivationNetwork(function, 2 * maze.StatesCount, 20, maze.StatesCount);
                ParallelResilientBackpropagationLearning goalTeacher = new ParallelResilientBackpropagationLearning(goalNetwork);
                //BackPropagationLearning goalTeacher = new BackPropagationLearning(goalNetwork);

                // Restart training from scratch up to 5 times, keeping the best
                // network seen so far on disk.
                int epoch = 0;
                double stopError = 0.1;
                int resets = 0;
                double minimumErrorReached = double.PositiveInfinity;
                while (minimumErrorReached > stopError && resets < 5)
                {
                    goalNetwork.Randomize();
                    goalTeacher.Reset(0.0125);

                    double error = double.PositiveInfinity;
                    for (epoch = 0; epoch < 500 && error > stopError; epoch++)
                    {
                        error = goalTeacher.RunEpoch(input, output);
                        //Console.WriteLine("Epoch " + epoch + " = \t" + error);
                        if (error < minimumErrorReached)
                        {
                            minimumErrorReached = error;
                            goalNetwork.Save("goalNetwork.mlp");
                        }
                    }

                    Console.Write(".(" + error.ToString("N2") + ") ");
                    resets++;
                }
                Console.WriteLine();
                //Console.WriteLine("Best error obtained =" + minimumErrorReached);

                goalNetwork = ActivationNetwork.Load("goalNetwork.mlp") as ActivationNetwork;

                if (VERBOSITY > 0)
                {
                    GenerateReport(maze, triplets, goalNetwork);
                }

                //--------------------------------------------- TEST ---------------------------------------------
                double score, lengthScore;
                int totalElements;
                double[,] pathMatrix;
                ComputePathMatrix(maze, goalNetwork, out score, out lengthScore, out pathMatrix, trainSetName, mazeType);

                //totalElements = maze.StatesCount * maze.StatesCount - maze.StatesCount;
                //Console.WriteLine("Success over whole input space = " + score.ToString("N2") + "% and lengthScore=" + lengthScore.ToString("N2") + " over " + totalElements + " elements");
                //logFile.WriteLine(mazeType + "," + trainSetName + "," + triplets.Count + "," + "whole-input-space" + "," + score + "," + lengthScore + "," + minimumErrorReached + "," + totalElements);

                List<Triplet> setToEvaluate = triplets;
                EvaluateSpecificSet(maze, pathMatrix, setToEvaluate, out score, out lengthScore, out totalElements);
                Console.WriteLine("Success percentage over training set = " + score.ToString("N2") + "% and lengthScore=" + lengthScore.ToString("N2") + " over " + totalElements + " elements");
                logFile.WriteLine(mazeType + "," + trainSetName + "," + triplets.Count + "," + "training-set" + "," + score + "," + lengthScore + "," + minimumErrorReached + "," + totalElements);

                EvaluateWithoutSpecificSet(maze, pathMatrix, setToEvaluate, out score, out lengthScore, out totalElements);
                Console.WriteLine("Success percentage over generalization set = " + score.ToString("N2") + "% and lengthScore=" + lengthScore.ToString("N2") + " over " + totalElements + " elements");
                logFile.WriteLine(mazeType + "," + trainSetName + "," + triplets.Count + "," + "generalization-set" + "," + score + "," + lengthScore + "," + minimumErrorReached + "," + totalElements);

                //setToEvaluate = GenerateTestSet_1LengthPath(maze);
                //EvaluateSpecificSet(maze, pathMatrix, setToEvaluate, out score, out lengthScore, out totalElements);
                //Console.WriteLine("Success percentage over 1-length set = " + score.ToString("N2") + "% and lengthScore=" + lengthScore.ToString("N2") + " over " + totalElements + " elements");
                //logFile.WriteLine(mazeType + "," + trainSetName + "," + triplets.Count + "," + "length-1-sequences" + "," + score + "," + lengthScore + "," + minimumErrorReached + "," + totalElements);

                logFile.Flush();
                //Console.ReadKey();
            }
        }
    }

    logFile.Close();
    Console.ReadKey();
}
public void MulticlassTest1()
{
    Accord.Math.Tools.SetupGenerator(0);

    // Suppose we would like to teach a network to recognize
    // the following input vectors into 3 possible classes:
    //
    double[][] inputs =
    {
        new double[] { 0, 1, 1, 0 }, // 0
        new double[] { 0, 1, 0, 0 }, // 0
        new double[] { 0, 0, 1, 0 }, // 0
        new double[] { 0, 1, 1, 0 }, // 0
        new double[] { 0, 1, 0, 0 }, // 0
        new double[] { 1, 0, 0, 0 }, // 1
        new double[] { 1, 0, 0, 0 }, // 1
        new double[] { 1, 0, 0, 1 }, // 1
        new double[] { 0, 0, 0, 1 }, // 1
        new double[] { 0, 0, 0, 1 }, // 1
        new double[] { 1, 1, 1, 1 }, // 2
        new double[] { 1, 0, 1, 1 }, // 2
        new double[] { 1, 1, 0, 1 }, // 2
        new double[] { 0, 1, 1, 1 }, // 2
        new double[] { 1, 1, 1, 1 }, // 2
    };

    int[] classes =
    {
        0, 0, 0, 0, 0,
        1, 1, 1, 1, 1,
        2, 2, 2, 2, 2,
    };

    // First we have to convert this problem into a way that the neural
    // network can handle. The first step is to expand the classes into
    // indicator vectors, where a 1 into a position signifies that this
    // position indicates the class the sample belongs to.
    //
    double[][] outputs = Statistics.Tools.Expand(classes, -1, +1);

    // Create an activation function for the net
    var function = new BipolarSigmoidFunction();

    // Create an activation network with the function and
    // 4 inputs, 5 hidden neurons and 3 possible outputs:
    var network = new ActivationNetwork(function, 4, 5, 3);

    // Randomly initialize the network
    new NguyenWidrow(network).Randomize();

    // Teach the network using parallel Rprop:
    var teacher = new ParallelResilientBackpropagationLearning(network);

    double error = 1.0;
    while (error > 1e-5)
    {
        error = teacher.RunEpoch(inputs, outputs);
    }

    // Checks if the network has learned
    for (int i = 0; i < inputs.Length; i++)
    {
        double[] answer = network.Compute(inputs[i]);

        int expected = classes[i];
        int actual;
        answer.Max(out actual);

        Assert.AreEqual(expected, actual, 0.01);
    }
}
// Start learning
public bool Run()
{
    bool IsDone = false;
    try
    {
        FlowDatas db = new FlowDatas();
        (double[][] Inputs, double[][] Outputs) =
            DeepLearningTools.FlowSampleToLearningData(
                db.FlowSampleStatistics.Where(c => c.BehaviorNumber != 0).ToArray());
        db.Dispose();

        // Create the DBN network
        DBNetwork = new DeepBeliefNetwork(
            Inputs.First().Length,
            (int)((Inputs.First().Length + Outputs.First().Length) / 1.5),
            (int)((Inputs.First().Length + Outputs.First().Length) / 2),
            Outputs.First().Length);

        // Randomize all network weights
        new GaussianWeights(DBNetwork, 0.1).Randomize();
        DBNetwork.UpdateVisibleWeights();

        // Configure unsupervised (layer-wise) learning
        DeepBeliefNetworkLearning teacher = new DeepBeliefNetworkLearning(DBNetwork)
        {
            Algorithm = (h, v, i) => new ContrastiveDivergenceLearning(h, v)
            {
                LearningRate = 0.01,
                Momentum = 0.5,
                Decay = 0.001,
            }
        };

        // Set up batched input learning.
        int batchCount1 = Math.Max(1, Inputs.Length / 10);
        // Create mini-batches to speed up learning.
        int[] groups1 = Accord.Statistics.Classes.Random(Inputs.Length, batchCount1);
        double[][][] batches = Inputs.Subgroups(groups1);

        // Input data for the layer currently being trained.
        double[][][] layerData;

        // Run unsupervised pre-training on every layer except the last one.
        for (int layerIndex = 0; layerIndex < DBNetwork.Machines.Count - 1; layerIndex++)
        {
            teacher.LayerIndex = layerIndex;
            layerData = teacher.GetLayerInput(batches);
            for (int i = 0; i < 200; i++)
            {
                double error = teacher.RunEpoch(layerData) / Inputs.Length;
                if (i % 10 == 0)
                {
                    Console.WriteLine(i + ", Error = " + error);
                }
            }
        }

        // Supervised fine-tuning of the whole network for output classification.
        var teacher2 = new ParallelResilientBackpropagationLearning(DBNetwork);
        double error1 = double.MaxValue;

        // Run supervised learning (the network is saved after every epoch).
        for (int i = 0; i < 500; i++)
        {
            error1 = teacher2.RunEpoch(Inputs, Outputs) / Inputs.Length;
            Console.WriteLine(i + ", Error = " + error1);
            DBNetwork.Save(Path);
            Console.WriteLine("Save Done");
        }

        DBNetwork.Save(Path);
        Console.WriteLine("Save Done");
        IsDone = true;
    }
    catch (Exception ex)
    {
        Debug.Write(ex.ToString());
    }

    return IsDone;
}
/// <summary>
/// The main application entry point.
/// </summary>
/// <param name="args">Command line arguments.</param>
public static void Main(string[] args)
{
    // get data
    Console.WriteLine("Loading data....");
    var path = Path.GetFullPath(Path.Combine(AppDomain.CurrentDomain.BaseDirectory,
        @"..\..\..\..\california_housing.csv"));
    var housing = Frame.ReadCsv(path, separators: ",");
    housing = housing.Where(kv => ((decimal)kv.Value["median_house_value"]) < 500000);

    // shuffle the frame
    var rnd = new Random();
    var indices = Enumerable.Range(0, housing.Rows.KeyCount).OrderBy(v => rnd.NextDouble());
    housing = housing.IndexRowsWith(indices).SortRowsByKey();

    // convert the house value range to thousands
    housing["median_house_value"] /= 1000;

    // create training, validation, and test partitions
    var training = housing.Rows[Enumerable.Range(0, 12000)];
    var validation = housing.Rows[Enumerable.Range(12000, 2500)];
    var test = housing.Rows[Enumerable.Range(14500, 2500)];

    // set up model columns
    var columns = new string[] {
        "latitude", "longitude", "housing_median_age", "total_rooms",
        "total_bedrooms", "population", "households", "median_income"
    };

    // build a neural network
    var network = new ActivationNetwork(
        new RectifiedLinearFunction(), // ReLU activation
        8,                             // number of input features
        8,                             // hidden layer with 8 nodes
        1);                            // output layer with 1 node

    // set up a backpropagation learner
    var learner = new ParallelResilientBackpropagationLearning(network);

    // prep training feature and label arrays
    var features = training.Columns[columns].ToArray2D<double>().ToJagged();
    var labels = (from v in training["median_house_value"].Values
                  select new double[] { v }).ToArray();

    // prep validation feature and label arrays
    var features_v = validation.Columns[columns].ToArray2D<double>().ToJagged();
    var labels_v = (from v in validation["median_house_value"].Values
                    select new double[] { v }).ToArray();

    // randomize the network
    new GaussianWeights(network, 0.1).Randomize();

    // train the neural network
    var errors = new List<double>();
    var errors_v = new List<double>();
    for (var epoch = 0; epoch < 100; epoch++)
    {
        learner.RunEpoch(features, labels);
        var rmse = Math.Sqrt(learner.ComputeError(features, labels) / labels.GetLength(0));
        var rmse_v = Math.Sqrt(learner.ComputeError(features_v, labels_v) / labels_v.GetLength(0));
        errors.Add(rmse);
        errors_v.Add(rmse_v);
        Console.WriteLine($"Epoch: {epoch}, Training RMSE: {rmse}, Validation RMSE: {rmse_v}");
    }

    // plot the training curve
    Plot(errors, "Training", "Epoch", "RMSE");

    // plot the training and validation curves
    Plot(errors, errors_v, "Training and validation", "Epoch", "RMSE");

    Console.ReadLine();
}
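A natural follow-up, not in the original listing: score the held-out test partition created above, reusing the same column prep and the learner's ComputeError helper. The _t-suffixed names are illustrative.

    // Sketch: evaluate the trained network on the test partition.
    var features_t = test.Columns[columns].ToArray2D<double>().ToJagged();
    var labels_t = (from v in test["median_house_value"].Values
                    select new double[] { v }).ToArray();
    var rmse_t = Math.Sqrt(learner.ComputeError(features_t, labels_t) / labels_t.GetLength(0));
    Console.WriteLine($"Test RMSE: {rmse_t}");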
private static void NeuralNetworkLearningSingleAttributes(LearningData learningData)
{
    var stopWatch = new Stopwatch();
    stopWatch.Start();

    var testMatcher = new LoggingNeuralNetworkMatcher(learningData.TestData);
    var trainingMatcher = new LoggingNeuralNetworkMatcher(learningData.TrainingData);

    // Train one network per metadata attribute, in parallel.
    Parallel.ForEach(learningData.ActualMetadata.Keys, metadataKey =>
    {
        var metadata = new Dictionary<string, IndexableAttributeMetadata>
        {
            { metadataKey, learningData.ActualMetadata[metadataKey] }
        };
        var trainingInputs = learningData.TrainingData.Select(data => data.ToVectorArray(metadata)).ToArray();
        var trainingOutputs = learningData.TrainingData.Select(data => new[] { data.PercentMatch }).ToArray();
        var testInputs = learningData.TestData.Select(data => data.ToVectorArray(metadata)).ToArray();
        var testOutputs = learningData.TestData.Select(data => new[] { data.PercentMatch }).ToArray();

        if (testInputs.Length != testOutputs.Length || trainingInputs.Length != trainingOutputs.Length)
        {
            throw new ArgumentException("Inputs and outputs data are not the same size");
        }

        var vectorSize = trainingInputs.First().Length;
        if (trainingInputs.Any(input => input.Length != vectorSize))
        {
            throw new ArgumentException("Not all training inputs have the same vector size");
        }
        if (testInputs.Any(input => input.Length != vectorSize))
        {
            throw new ArgumentException("Not all test inputs have the correct vector size");
        }

        var results = new List<Tuple<int[], double, double>>();

        // Try hidden-layer sizes 0..15 in parallel.
        Parallel.For(0, 16, i =>
        {
            var parameters = new[] { i, 1 };
            var network = new ActivationNetwork(new BipolarSigmoidFunction(),
                trainingInputs[0].Length, parameters); //new DeepBeliefNetwork();
            var teacher = new ParallelResilientBackpropagationLearning(network);
            var random = new Random();

            var error = double.MaxValue;
            var iteration = 0;
            while (error > 0.0005 && iteration < 200)
            {
                iteration++;
                //for (var i = 0; i < 10; i++)
                {
                    // Train on one randomly chosen sample per iteration
                    // (the commented alternative runs a full epoch instead).
                    var pair = random.Next(0, trainingInputs.Length - 1);
                    error = teacher.Run(trainingInputs[pair], trainingOutputs[pair]);
                    //error = teacher.RunEpoch(trainingInputs, trainingOutputs);

                    // Score by accuracy, recall and precision on the training set.
                    var accuracyRecallPrecision = trainingMatcher.MatchCount(network, metadata, new List<string>());
                    error = 3 - accuracyRecallPrecision.Item1 - accuracyRecallPrecision.Item2 - accuracyRecallPrecision.Item3;
                }
                if (iteration % 100 == 0)
                {
                    Logger.DebugFormat("NeuralNetwork: Iteration {0} Error {1}", iteration, error);
                }
            }

            var inSampleError = teacher.ComputeError(trainingInputs, trainingOutputs);
            var outOfSampleError = teacher.ComputeError(testInputs, testOutputs);
            lock (results)
            {
                results.Add(new Tuple<int[], double, double>(parameters, inSampleError, outOfSampleError));
            }
            testMatcher.LogMatchCount(
                string.Format("{0} ({1})", metadataKey, learningData.ActualMetadata[metadataKey].Attribute.GetType().FullName),
                network, metadata, new List<string>());
        });

        Logger.InfoFormat("Results for {1} ({2}):\n{0}",
            string.Join(", ", results.Select(result => $"{string.Join("-", result.Item1)}: In: {result.Item2} Out: {result.Item3}")),
            metadataKey,
            learningData.ActualMetadata[metadataKey].Attribute.GetType().FullName);
    });

    stopWatch.Stop();
    Logger.InfoFormat("Neural Network learning (single attribute) took {0}", stopWatch.Elapsed);
}
// Worker thread
void SearchSolution()
{
    // initialize input and output values
    double[][] input = null;
    double[][] output = null;

    if (sigmoidType == 0)
    {
        // unipolar data
        input = new double[4][] {
            new double[] { 0, 0 },
            new double[] { 0, 1 },
            new double[] { 1, 0 },
            new double[] { 1, 1 }
        };
        output = new double[4][] {
            new double[] { 0 },
            new double[] { 1 },
            new double[] { 1 },
            new double[] { 0 }
        };
    }
    else
    {
        // bipolar data
        input = new double[4][] {
            new double[] { -1, -1 },
            new double[] { -1,  1 },
            new double[] {  1, -1 },
            new double[] {  1,  1 }
        };
        output = new double[4][] {
            new double[] { -1 },
            new double[] {  1 },
            new double[] {  1 },
            new double[] { -1 }
        };
    }

    // create neural network
    ActivationNetwork network = new ActivationNetwork(
        (sigmoidType == 0)
            ? (IActivationFunction)new SigmoidFunction(sigmoidAlphaValue)
            : (IActivationFunction)new BipolarSigmoidFunction(sigmoidAlphaValue),
        2, 2, 1);

    // create teacher
    var teacher = new ParallelResilientBackpropagationLearning(network);

    // set the initial Rprop update step
    teacher.Reset(initialStep);

    // iterations
    int iteration = 0;

    // statistics files
    StreamWriter errorsFile = null;

    try
    {
        // check if we need to save statistics to files
        if (saveStatisticsToFiles)
        {
            // open files
            errorsFile = File.CreateText("errors.csv");
        }

        // errors list
        ArrayList errorsList = new ArrayList();

        // loop
        while (!needToStop)
        {
            // run epoch of learning procedure
            double error = teacher.RunEpoch(input, output);
            errorsList.Add(error);

            // save current error
            if (errorsFile != null)
            {
                errorsFile.WriteLine(error);
            }

            // show current iteration & error
            SetText(currentIterationBox, iteration.ToString());
            SetText(currentErrorBox, error.ToString());
            iteration++;

            // check if we need to stop
            if (error <= learningErrorLimit)
            {
                break;
            }
        }

        // show error's dynamics
        double[,] errors = new double[errorsList.Count, 2];
        for (int i = 0, n = errorsList.Count; i < n; i++)
        {
            errors[i, 0] = i;
            errors[i, 1] = (double)errorsList[i];
        }

        errorChart.RangeX = new Range(0, errorsList.Count - 1);
        errorChart.UpdateDataSeries("error", errors);
    }
    catch (IOException)
    {
        MessageBox.Show("Failed writing file", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
    }
    finally
    {
        // close files
        if (errorsFile != null)
        {
            errorsFile.Close();
        }
    }

    // enable settings controls
    EnableControls(true);
}
public void SearchSolution()
{
    if (amostras_treinamento == null || !amostras_treinamento.Any())
    {
        MessageBox.Show("You must load the training data first!");
        return;
    }

    erros.Clear();

    double alfaSigmoide;
    double erro_desejado;
    int numNeuronios;

    // Parse the UI parameters, clamping to sane ranges and falling back to defaults.
    try { alfaSigmoide = Math.Max(0.001, Math.Min(50, double.Parse(textBox12.Text))); }
    catch { alfaSigmoide = 2; }
    try { numNeuronios = Math.Max(1, Math.Min(1000, int.Parse(textBox13.Text))); }
    catch { numNeuronios = 5; }
    try { erro_desejado = Math.Max(0, Math.Min(100, double.Parse(textBox18.Text))); }
    catch { erro_desejado = 1; }

    SetText(textBox12, alfaSigmoide.ToString());
    SetText(textBox13, numNeuronios.ToString());
    SetText(textBox18, erro_desejado.ToString());

    RNA = new ActivationNetwork(new BipolarSigmoidFunction(alfaSigmoide), 9, numNeuronios, 1);
    new NguyenWidrow(RNA).Randomize();
    var teacher = new ParallelResilientBackpropagationLearning(RNA);

    double error = double.PositiveInfinity;
    double previous;

    double[][] input = new double[amostras_treinamento.Count][];
    double[][] output = new double[amostras_treinamento.Count][];
    int i = 0;
    foreach (Amostra_Paciente amostra in amostras_treinamento)
    {
        input[i] = amostra.arrayDados;
        output[i] = new double[] { amostra.label };
        i++;
    }

    int iter = 0;
    do
    {
        previous = error;
        error = teacher.RunEpoch(input, output);
        SetText(textBox15, error.ToString());
        SetText(textBox14, iter.ToString());
        iter++;
    } while (error > erro_desejado);

    erros.Clear();
    string erro_string = "Training results\r\n";
    i = 0;
    foreach (Amostra_Paciente amostra in amostras_treinamento)
    {
        double[] resultado = RNA.Compute(amostra.arrayDados);

        // Clamp the network output to [0, 1].
        if (resultado[0] > 1) { resultado[0] = 1; }
        else if (resultado[0] < 0) { resultado[0] = 0; }

        double erro = Math.Abs(amostra.label - resultado[0]);
        erros.Add(erro);
        i++;
    }

    erro_string += "Accuracy rate: " + ((1 - erros.Average()) * 100);
    MessageBox.Show(erro_string);
}
// Worker thread
void SearchSolution()
{
    // number of learning samples
    int samples = data.GetLength(0);

    // prepare learning data
    DoubleRange unit = new DoubleRange(-1, 1);
    double[][] input = data.GetColumn(0).Scale(fromRange: xRange, toRange: unit).ToJagged();
    double[][] output = data.GetColumn(1).Scale(fromRange: yRange, toRange: unit).ToJagged();

    // create multi-layer neural network
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        1, neuronsInFirstLayer, 1);

    if (useNguyenWidrow)
    {
        new NguyenWidrow(network).Randomize();
    }

    // create teacher
    var teacher = new ParallelResilientBackpropagationLearning(network);

    // iterations
    int iteration = 1;

    // solution array
    double[,] solution = new double[samples, 2];

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution
        for (int j = 0; j < samples; j++)
        {
            double x = input[j][0];
            double y = network.Compute(new[] { x })[0];
            solution[j, 0] = x.Scale(fromRange: unit, toRange: xRange);
            solution[j, 1] = y.Scale(fromRange: unit, toRange: yRange);
        }
        chart.UpdateDataSeries("solution", solution);

        // calculate error
        double learningError = 0.0;
        for (int j = 0; j < samples; j++)
        {
            double x = input[j][0];
            double expected = data[j, 1];
            double actual = network.Compute(new[] { x })[0];
            learningError += Math.Abs(expected - actual);
        }

        // set current iteration's info
        SetText(currentIterationBox, iteration.ToString());
        SetText(currentErrorBox, learningError.ToString("F3"));

        // increase current iteration
        iteration++;

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
        {
            break;
        }
    }

    // enable settings controls
    EnableControls(true);
}
private bool Train(dynamic bow, SceneFeatureData scenefeatureData)
{
    Bitmap mask = Utils.CreateMaskBitmap(new Size(1280, 720), new Rectangle[] { scenefeatureData.feature.area });
    mask.Save(scenefeatureData.feature.name + "-mask.png");

    var trainData = scenefeatureData.trainData;
    int[] labelIndexs = trainData.GetLabelIndexs();
    String[] labels = trainData.GetLabels();
    double[][] features = trainData.GetFeature(bow, mask);
    int numOutput = trainData.GetNumOutput();
    var function = new SigmoidFunction();

    // Retrain from scratch (up to 100 restarts) until a network classifies
    // the training set without error.
    bool flgFound = false;
    int count = 0;
    while ((flgFound == false) && (count < 100))
    {
        count++;
        var network = new ActivationNetwork(function, bow.NumberOfOutputs, 20, numOutput);
        new NguyenWidrow(network).Randomize();
        var teacher = new ParallelResilientBackpropagationLearning(network);

        BowImageClassifier trainImgClassifier = new BowImageClassifier();
        trainImgClassifier.Init(bow, network, mask);

        // create output
        double[][] outputs = trainData.GetOutputs(numOutput);

        double avgError = 10000.0;
        double prevError = avgError;
        double bestError = avgError;
        int errorCount = 0;

        while ((errorCount < 3) && (avgError > 0.00001))
        {
            // run 10 epochs and average their errors
            double[] errors = new double[10];
            for (int i = 0; i < 10; i++)
            {
                errors[i] = teacher.RunEpoch(features, outputs);
            }
            avgError = errors.Average();

            if (prevError > avgError)
            {
                int trainError = trainImgClassifier.Evaluate(trainData);
                //int testError = trainImgClassifier.Evaluate(testData);
                //int testSetError = trainImgClassifier.Evaluate(testDataSet);
                logger.logStr(String.Format("{0} {1} {2}", avgError, prevError, trainError));
                prevError = avgError;

                // save best error
                if (bestError > avgError)
                {
                    bestError = avgError;
                    //Accord.IO.Serializer.Save(network, dataPath + String.Format(@"\train-{0}.net", bow.NumberOfOutputs));
                }
                if (trainError /*+ testError + testSetError*/ == 0)
                {
                    Accord.IO.Serializer.Save(network,
                        path + @"\" + scenefeatureData.feature.name + String.Format(@"\train-{0}.net", bow.NumberOfOutputs));
                    logger.logStr("Done " + bestError + " " + trainError);
                    return true;
                }
            }
            else
            {
                logger.logStr(String.Format("{0}", avgError));
                prevError = 10000.0;
                errorCount++;
            }
            //Application.DoEvents();
        }
        logger.logStr("Done " + bestError + " " + count);
    }
    return false;
}