/// <summary>
/// Builds a multi-layer bipolar-sigmoid ActivationNetwork sized to the input
/// fields and the given hidden-layer topology, optionally applies
/// Nguyen-Widrow weight initialization, and creates a Levenberg-Marquardt
/// teacher with the given learning rate.
/// </summary>
/// <param name="inputFieldNames">One network input per field name.</param>
/// <param name="outputFieldName">Name of the field the network predicts.</param>
/// <param name="neuronsCount">Neurons per layer (hidden layers + output layer).</param>
/// <param name="learningRate">Teacher learning rate.</param>
/// <param name="sigmoidAlphaValue">Steepness of the bipolar sigmoid activation.</param>
/// <param name="useRegularization">Enables Bayesian regularization in the teacher.</param>
/// <param name="useNguyenWidrow">Enables Nguyen-Widrow weight initialization.</param>
/// <param name="useSameWeights">Fixes the RNG seed so repeated runs start from identical weights.</param>
/// <param name="method">Jacobian computation method for Levenberg-Marquardt.</param>
public AdvancedNeuralNetwork(string[] inputFieldNames, string outputFieldName, int[] neuronsCount, double learningRate = 0.1, double sigmoidAlphaValue = 2, bool useRegularization = false, bool useNguyenWidrow = false, bool useSameWeights = false, JacobianMethod method = JacobianMethod.ByBackpropagation)
{
    this.neuronsCount = neuronsCount;
    this.learningRate = learningRate;
    this.useRegularization = useRegularization;
    this.useNguyenWidrow = useNguyenWidrow;
    this.useSameWeights = useSameWeights;
    this.method = method;
    this.sigmoidAlphaValue = sigmoidAlphaValue;
    this.inputFieldNames = inputFieldNames;
    this.outputFieldName = outputFieldName;

    // create multi-layer neural network
    theNetwork = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue), // other activation functions possible? (translated original note) -- TODO confirm
        inputFieldNames.Length,
        neuronsCount);

    if (useNguyenWidrow)
    {
        if (useSameWeights)
        {
            // fixed seed so repeated runs start from identical weights
            Accord.Math.Random.Generator.Seed = 0;
        }

        NguyenWidrow initializer = new NguyenWidrow(theNetwork);
        initializer.Randomize();
    }

    // create teacher
    teacher = new LevenbergMarquardtLearning(theNetwork, useRegularization, method);

    // set learning rate and momentum
    teacher.LearningRate = learningRate;
}
/// <summary>
/// Configures the network topology: hidden layers taken from <c>neurons</c>
/// plus one output layer of <c>predictionSize</c> neurons, then randomizes
/// the weights with Nguyen-Widrow initialization, layer by layer.
/// </summary>
public void configuraRede()
{
    // Drop every zero-sized layer from the list. BUGFIX: the original used
    // neurons.Remove(0), which removes only the FIRST occurrence; the
    // original comment ("if there is any 0 in the neuron list, remove it")
    // calls for removing them all.
    neurons.RemoveAll(n => n == 0);

    // One slot per hidden layer plus the final output layer.
    int hiddenLayerCount = neurons.Count;
    int[] layers = new int[hiddenLayerCount + 1];
    for (int i = 0; i < hiddenLayerCount; i++)
    {
        layers[i] = neurons[i];
    }
    layers[hiddenLayerCount] = predictionSize;

    network = new ActivationNetwork(new BipolarSigmoidFunction(2.0), windowSize + predictionSize * 6, layers);

    // Randomize the weights with Nguyen-Widrow, one layer at a time.
    NguyenWidrow weightRandom = new NguyenWidrow(network);
    for (int i = 0; i < network.Layers.Length; i++)
    {
        weightRandom.Randomize(i);
    }
}
/// <summary>
/// Runs one cross-validation fold: trains a sigmoid network on everything
/// except fold <paramref name="k"/>, early-stopping on the fold's
/// validation RSS error, then prints the first validation sample's target
/// and prediction plus the epoch count.
/// </summary>
/// <param name="k">Index of the fold held out for validation.</param>
public void ExecuteFold(int k)
{
    int inputWidth = this.FormattedData[0].Input.Count();
    int outputWidth = this.FormattedData[0].Output.Count();

    ActivationNetwork net = new ActivationNetwork(
        new SigmoidFunction(2),
        inputWidth,
        this.NumberOfHiddenLayerNeurons(inputWidth, outputWidth),
        outputWidth);

    // Nguyen-Widrow weight initialization.
    new NguyenWidrow(net).Randomize();

    ResilientBackpropagationLearning trainer = new ResilientBackpropagationLearning(net);
    trainer.LearningRate = this.LearningRate;
    //trainer.Momentum = 0.5;

    List<NetIO> trainSet = new List<NetIO>();
    List<NetIO> validSet = new List<NetIO>();
    ReadWrite.RemoveKFold(this.FormattedData, ref trainSet, ref validSet, k);

    // Train until validation error stops improving (patience) or the
    // epoch ceiling is reached.
    int epoch = 0;
    int epochCap = int.MaxValue;
    EarlyStoppingTools stopper = new EarlyStoppingTools(this.patience);
    do
    {
        ++epoch;
        double trainError = trainer.RunEpoch(
            trainSet.Select(l => l.Input.ToArray()).ToArray(),
            trainSet.Select(l => l.Output.ToArray()).ToArray());

        this.RssError = EarlyStoppingTools.RssError(
            net,
            validSet.Select(l => l.Input.ToArray()).ToArray(),
            validSet.Select(l => l.Output.ToArray()).ToArray());
        //Console.WriteLine("Epochs: " + epoch);
        //Console.WriteLine("Training error: " + trainError);
        //Console.WriteLine("CV Error: " + this.RssError);
    } while (!stopper.ExceedsPatience(RssError) && epoch < epochCap);

    Console.Write("Target: ");
    validSet[0].Output.ForEach(i => Console.Write(i));
    Console.WriteLine();
    Console.WriteLine("Result: " + string.Join(",", net.Compute(validSet[0].Input.ToArray())));

    this.NumberOfEpochs = epoch;
    Console.WriteLine("Epochs required: " + epoch);
    Console.WriteLine("Error: " + RssError);
}
/// <summary>
/// Creates the network: bipolar-sigmoid activations with hidden layers of
/// 3n, 2n, n and 100 neurons (n = input size), a parallel resilient
/// backpropagation teacher, and Nguyen-Widrow weight initialization.
/// </summary>
/// <param name="input_sz">Number of input neurons.</param>
/// <param name="output_sz">Number of output neurons (default 10).</param>
public Network(int input_sz, int output_sz = 10)
{
    input_size = input_sz;
    output_size = output_sz;

    // Hidden topology 3n -> 2n -> n -> 100, then the output layer.
    int[] layerSizes = { input_size * 3, input_size * 2, input_size, 100, output_size };
    net = new ActivationNetwork(new Accord.Neuro.BipolarSigmoidFunction(), input_size, layerSizes);

    backprop = new ParallelResilientBackpropagationLearning(net);

    nguen = new NguyenWidrow(net);
    nguen.Randomize();
}
/// <summary>
/// Builds a new sigmoid ActivationNetwork from the dash-separated layer
/// sizes typed into txNodes (e.g. "10-5"), after confirming the loss of any
/// unsaved model. An output layer of 1 neuron is always appended.
/// </summary>
private void button4_Click(object sender, EventArgs e)
{
    var result = true;
    if (Global.Model != null)
    {
        result = (MessageBox.Show("Current model is not saved. Continue?", "Network not saved", MessageBoxButtons.YesNo) == DialogResult.Yes);
        // NOTE(review): the model file reference is cleared even when the
        // user answers "No" -- confirm this is intentional.
        Global.ModelFile = null;
    }

    if (result)
    {
        string[] temp = txNodes.Text.Split('-');
        List<int> listNeurons = new List<int>();
        foreach (var str in temp)
        {
            // BUGFIX: the original clamped neuronsCount with Math.Max BEFORE
            // TryParse overwrote it, so the clamp was dead code and zero or
            // negative layer sizes slipped through. Validate the parsed
            // value instead.
            if (Int32.TryParse(str, out int neuronsCount) && neuronsCount >= 1)
            {
                listNeurons.Add(neuronsCount);
            }
        }

        listNeurons.Add(1); // last layer
        if (listNeurons.Count == 1)
        {
            // No valid hidden layer sizes were entered.
            return;
        }

        Global.Model = new ActivationNetwork(new SigmoidFunction(), Global.FeaturesCount, listNeurons.ToArray());
        //Global.Model = new ActivationNetwork(new IdentityFunction(), Global.FeaturesCount, neuronsCount, 1);

        NguyenWidrow initializer = new NguyenWidrow(Global.Model);
        initializer.Randomize();
    }

    ResetTrainingButtons();
}
/// <summary>
/// <inheritdoc />
/// Trains the classification network: one hidden layer of 25 sigmoid
/// neurons, Nguyen-Widrow initialized, fitted with resilient
/// backpropagation until the epoch error drops to 2.5 or 5000 epochs pass,
/// then saves the result.
/// </summary>
public override void Train()
{
    var inputs = data.GetSelectedInput(features);
    var outputs = data.GetExpectedClassificationOutput();

    network = new ActivationNetwork(new SigmoidFunction(), inputs[0].Length, 25, 1);
    new NguyenWidrow(network).Randomize();

    var teacher = new ResilientBackpropagationLearning(network);

    // RunEpoch expects one output vector per sample; wrap each scalar label.
    var targets = new double[inputs.Length][];
    for (int i = 0; i < targets.Length; i++)
    {
        targets[i] = new double[1] { outputs[i] };
    }

    double error = double.PositiveInfinity;
    for (int epoch = 0; error > 2.5 && epoch < 5000; epoch++)
    {
        error = teacher.RunEpoch(inputs, targets);
    }

    Save();
}
// Worker thread.
// Trains a 2-input/1-output classifier with Levenberg-Marquardt and
// redraws the decision surface after every epoch until asked to stop.
void SearchSolution()
{
    // number of learning samples
    int samples = sourceMatrix.GetLength(0);

    // prepare learning data: first two columns are inputs, third column is the label
    double[][] inputs = sourceMatrix.Submatrix(null, 0, 1).ToArray();
    double[][] outputs = sourceMatrix.GetColumn(2).Transpose().ToArray();

    // create multi-layer neural network
    ann = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        2, neuronsInFirstLayer, 1);

    if (useNguyenWidrow)
    {
        if (useSameWeights)
        {
            // fixed seed so repeated runs start from identical weights
            Accord.Math.Tools.SetupGenerator(0);
        }

        NguyenWidrow initializer = new NguyenWidrow(ann);
        initializer.Randomize();
    }

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(ann, useRegularization);

    // set learning rate and momentum
    teacher.LearningRate = learningRate;

    // iterations
    iteration = 1;

    // grid of points covering the input range, used to plot the decision surface
    var ranges = Matrix.Range(sourceMatrix);
    double[][] map = Matrix.Mesh(ranges[0], ranges[1], 0.05, 0.05);
    var sw = Stopwatch.StartNew();

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        error = teacher.RunEpoch(inputs, outputs) / samples;

        // classify every grid point (sign of the network output) and redraw
        var result = map.Apply(ann.Compute).GetColumn(0).Apply(Math.Sign);
        var graph = map.ToMatrix().InsertColumn(result.ToDouble());
        CreateScatterplot(zedGraphControl2, graph);

        // increase current iteration
        iteration++;
        elapsed = sw.Elapsed;
        updateStatus();

        // check if we need to stop (iterations == 0 means run until cancelled)
        if ((iterations != 0) && (iteration > iterations))
        {
            break;
        }
    }
    sw.Stop();

    // enable settings controls
    EnableControls(true);
}
// Worker thread.
// Fits a 1-in/1-out approximation network to the chart data and redraws
// the fitted curve after every epoch until asked to stop.
void SearchSolution()
{
    // number of learning samples
    int samples = data.GetLength(0);
    // data transformation factors: map chart Y into [-0.85, 0.85] and X into [-1, 1]
    double yFactor = 1.7 / chart.RangeY.Length;
    double yMin = chart.RangeY.Min;
    double xFactor = 2.0 / chart.RangeX.Length;
    double xMin = chart.RangeX.Min;

    // prepare learning data
    double[][] input = new double[samples][];
    double[][] output = new double[samples][];

    for (int i = 0; i < samples; i++)
    {
        input[i] = new double[1];
        output[i] = new double[1];

        // set input
        input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0;
        // set output
        output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85;
    }

    // create multi-layer neural network
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        1, neuronsInFirstLayer, 1);

    if (useNguyenWidrow)
    {
        NguyenWidrow initializer = new NguyenWidrow(network);
        initializer.Randomize();
    }

    // create teacher
    var teacher = new ParallelResilientBackpropagationLearning(network);

    // iterations
    int iteration = 1;

    // solution array: 50 evenly spaced X values and the network's prediction at each
    double[,] solution = new double[50, 2];
    double[] networkInput = new double[1];

    // calculate X values to be used with solution function
    for (int j = 0; j < 50; j++)
    {
        solution[j, 0] = chart.RangeX.Min + (double)j * chart.RangeX.Length / 49;
    }

    // loop
    while (!needToStop)
    {
        // run epoch of learning procedure
        double error = teacher.RunEpoch(input, output) / samples;

        // calculate solution: run each grid X through the network and undo the scaling
        for (int j = 0; j < 50; j++)
        {
            networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
            solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
        }
        chart.UpdateDataSeries("solution", solution);

        // calculate error: sum of absolute deviations in original (un-scaled) Y units
        double learningError = 0.0;
        for (int j = 0, k = data.GetLength(0); j < k; j++)
        {
            networkInput[0] = input[j][0];
            learningError += Math.Abs(data[j, 1] - ((network.Compute(networkInput)[0] + 0.85) / yFactor + yMin));
        }

        // set current iteration's info
        SetText(currentIterationBox, iteration.ToString());
        SetText(currentErrorBox, learningError.ToString("F3"));

        // increase current iteration
        iteration++;

        // check if we need to stop (iterations == 0 means run until cancelled)
        if ((iterations != 0) && (iteration > iterations))
        {
            break;
        }
    }

    // enable settings controls
    EnableControls(true);
}
// Trains a 9-input/1-output classifier with Levenberg-Marquardt, setting
// ANN_END when the configured number of iterations has been run.
public void SearchSolution()
{
    int length = tmp.Count;
    double[][] inputs;
    double[][] outputs;
    double[][] matrix;
    GetData(out inputs, out outputs, out matrix, tmp);

    // create multi-layer neural network
    this.ann = new ActivationNetwork(new BipolarSigmoidFunction(sigmoidAlphaValue), 9, neuronsInFirstLayer, 1);

    if (useNguyenWidrow)
    {
        if (useSameWeights)
        {
            // fixed seed so repeated runs start from identical weights
            Accord.Math.Random.Generator.Seed = 1;
        }

        NguyenWidrow initializer = new NguyenWidrow(ann);
        initializer.Randomize();
    }

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(ann, useRegularization);

    // set learning rate and momentum
    teacher.LearningRate = learningRate;

    // iterations
    iteration = 1;

    // 200-step mesh over the feature ranges (presumably for a decision map -- see note below)
    var ranges = matrix.GetRange(0);
    double[][] map = Matrix.Mesh(ranges[0], 200, ranges[1], 200);
    // var sw = Stopwatch.StartNew();

    // loop
    // NOTE(review): when iterations == 0 this loop never exits -- confirm
    // callers always set iterations > 0.
    while (true)
    {
        // run epoch of learning procedure
        error = teacher.RunEpoch(inputs, outputs) / length;

        // classify every mesh point (sign of the network output)
        var result = map.Apply(ann.Compute).GetColumn(0).Apply(Math.Sign);
        var graph = map.ToMatrix().InsertColumn(result.ToDouble()); // NOTE(review): graph is built but never consumed here

        // increase current iteration
        iteration++;
        //elapsed = sw.Elapsed;
        // updateStatus();

        // check if we need to stop
        if ((iterations != 0) && (iteration > iterations))
        {
            break;
        }
    }

    ANN_END = true;
    // sw.Stop();
}
//Machine Learning
/// <summary>
/// Trains a buy/sell classifier network on the indicator fields stored in
/// MongoDB for one instrument, then reports in-sample success/fail rates.
/// </summary>
/// <param name="inputFields">Document fields used as network inputs.</param>
/// <param name="outcomeField">Base name of the "_buy"/"_sell" outcome fields.</param>
/// <param name="instrument">Instrument whose price documents are used.</param>
/// <param name="savePath">If non-null, the network is saved here after every epoch.</param>
void IDataminingDatabase.doMachineLearning(string[] inputFields, string outcomeField, string instrument, string savePath)
{
    string name = "ANN";

    double learningRate = 0.1;
    double sigmoidAlphaValue = 2;
    int iterations = 100;
    bool useRegularization = false;
    bool useNguyenWidrow = false;
    bool useSameWeights = false;

    progress.setProgress(name, "Creating ANN...");

    // create multi-layer neural network: one output per decision (buy, sell)
    ActivationNetwork ann = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        inputFields.Length, 20, 2); // NOTE(review): hidden layer size of 20 was marked as a guess -- tune

    if (useNguyenWidrow)
    {
        progress.setProgress(name, "Creating NguyenWidrow...");
        if (useSameWeights)
        {
            // fixed seed so repeated runs start from identical weights
            Accord.Math.Random.Generator.Seed = 0;
        }

        NguyenWidrow initializer = new NguyenWidrow(ann);
        initializer.Randomize();
    }

    progress.setProgress(name, "Creating LevenbergMarquardtLearning...");

    // create teacher
    LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(ann, useRegularization); //, JacobianMethod.ByBackpropagation

    // set learning rate and momentum
    teacher.LearningRate = learningRate;

    // only use documents where all required fields are present
    IMongoQuery fieldsExistQuery = Query.And(Query.Exists(outcomeField + "_buy"), Query.Exists(outcomeField + "_sell"));
    foreach (string inputField in inputFields)
    {
        fieldsExistQuery = Query.And(fieldsExistQuery, Query.Exists(inputField));
    }

    progress.setProgress(name, "Importing...");

    // Load Data
    long start = database.getFirstTimestamp();
    long end = database.getLastTimestamp();

    var collection = mongodb.getDB().GetCollection("prices");
    var docs = collection.FindAs<BsonDocument>(Query.And(fieldsExistQuery, Query.EQ("instrument", instrument), Query.LT("timestamp", end), Query.GTE("timestamp", start))).SetSortOrder(SortBy.Ascending("timestamp"));
    docs.SetFlags(QueryFlags.NoCursorTimeout);

    long resultCount = docs.Count();

    // Press into array form
    progress.setProgress(name, "Casting to array...");

    double[][] inputs = new double[resultCount][]; // [inputFields.Length]
    double[][] outputs = new double[resultCount][]; // [2]

    int row = 0;
    foreach (var doc in docs)
    {
        outputs[row] = new double[] { doc[outcomeField + "_buy"].AsInt32, doc[outcomeField + "_sell"].AsInt32 };

        double[] inputRow = new double[inputFields.Length];
        for (int i = 0; i < inputFields.Length; i++)
        {
            double value = doc[inputFields[i]].AsDouble;
            // reject non-finite values so training never sees NaN/Inf
            if (double.IsInfinity(value) || double.IsNegativeInfinity(value) || double.IsNaN(value))
            {
                throw new Exception("Invalid value!");
            }
            inputRow[i] = value;
        }
        inputs[row] = inputRow;

        row++;
    }

    // Teach the ANN
    for (int iteration = 0; iteration < iterations; iteration++)
    {
        progress.setProgress(name, "Teaching... " + iteration + " of " + iterations);
        double error = teacher.RunEpoch(inputs, outputs);

        if (savePath != null)
        {
            ann.Save(savePath);
        }
    }

    //Compute Error
    progress.setProgress(name, "Calculating error...");
    int successes = 0;
    int fails = 0;

    for (int i = 0; i < inputs.Length; i++)
    {
        var realOutput = outputs[i];
        double[] calculated = ann.Compute(inputs[i]);

        // BUGFIX: the network emits continuous activations, so the original
        // exact comparisons (calculated[x] == 0 / == 1) essentially never
        // matched. Threshold the outputs to binary decisions first.
        int buy = calculated[0] >= 0.5 ? 1 : 0;
        int sell = calculated[1] >= 0.5 ? 1 : 0;

        //Buys
        if (buy == 0 || buy == realOutput[0])
        {
            successes++;
        }
        if (buy == 1 && realOutput[0] == 0)
        {
            fails++;
        }

        //Sells
        if (sell == 0 || sell == realOutput[1])
        {
            successes++;
        }
        if (sell == 1 && realOutput[1] == 0)
        {
            fails++;
        }
    }

    double successRate = (double)successes / (inputs.Length * 2);
    double failRate = (double)fails / (inputs.Length * 2);

    progress.setProgress(name, "Finished with successRate of " + successRate + " failRate of " + failRate);
}
/// <summary>
/// Trains a single-hidden-layer bipolar-sigmoid network on X_train/y_train,
/// prints validation accuracy after every epoch, and reports final accuracy
/// on the holdout split.
/// </summary>
static void Main(string[] args)
{
    double learningRate = 0.1;
    double sigmoidAlphaValue = 2;

    // iterations
    int iterations = 100;

    bool useNguyenWidrow = false;

    var X_train = LoadData("X_train.csv");
    var y_train = LoadData("y_train.csv");

    var X_val = LoadData("X_val.csv");
    var y_val = LoadData("y_val.csv");

    var X_holdout = LoadData("X_holdout.csv");
    var y_holdout = LoadData("y_holdout.csv");

    int nb_samples = X_train.Length;
    int input_size = X_train[0].Length;

    // create multi-layer neural network
    ActivationNetwork ann = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        input_size,
        input_size,
        1);

    if (useNguyenWidrow)
    {
        NguyenWidrow initializer = new NguyenWidrow(ann);
        initializer.Randomize();
    }

    // create teacher
    //LevenbergMarquardtLearning teacher = new LevenbergMarquardtLearning(ann, useRegularization);
    BackPropagationLearning teacher = new BackPropagationLearning(ann);

    // set learning rate and momentum
    teacher.LearningRate = learningRate;
    teacher.Momentum = 0.8;

    // iterations
    int iteration = 1;

    //var ranges = Matrix.Range(sourceMatrix, 0);
    //double[][] map = Matrix.Mesh(ranges[0], ranges[1], 0.05, 0.05);
    // var sw = Stopwatch.StartNew();

    bool use_batches = false;
    int batch_size = 5000;
    double[][] X_batch = new double[batch_size][];
    double[][] y_batch = new double[batch_size][];
    var rng = new Random();

    while (true)
    {
        double error = 0.0;

        if (use_batches)
        {
            // shuffle the sample order, then walk it in batch_size chunks
            int[] sample_indeces = Enumerable.Range(0, nb_samples).OrderBy(z => rng.Next()).ToArray();

            int nb_batch = nb_samples / batch_size;
            int n_grad = 50;
            Console.Write("|{0}| error=\r", new string('-', n_grad));

            for (int ibatch = 0; ibatch < nb_batch; ++ibatch)
            {
                // BUGFIX: the original indexed sample_indeces[i], so every
                // mini-batch re-used the same first batch_size samples;
                // offset by the batch number so each batch sees distinct
                // samples from the shuffled order.
                for (int i = 0; i < batch_size; ++i)
                {
                    X_batch[i] = X_train[sample_indeces[ibatch * batch_size + i]];
                    y_batch[i] = y_train[sample_indeces[ibatch * batch_size + i]];
                }

                error += teacher.RunEpoch(X_batch, y_batch);

                // text progress bar with running mean error
                int ngrad1 = (int)Math.Ceiling((ibatch + 1) * n_grad / (double)(nb_batch));
                int ngrad0 = n_grad - ngrad1;
                double cur_err = error / (batch_size * (ibatch + 1));
                Console.Write("|{0}{1}| error={2:0.####}\r", new string('#', ngrad1), new string('-', ngrad0), cur_err);
            }

            error /= (batch_size * nb_batch);
        }
        else
        {
            error = teacher.RunEpoch(X_train, y_train) / nb_samples;
        }

        // Estimate accuracy on the validation set.
        int n_hit2 = 0;
        for (int i = 0; i < X_val.Length; ++i)
        {
            double[] output = ann.Compute(X_val[i]);
            double y_bool = output[0] > 0.5 ? 1.0 : 0.0;
            n_hit2 += y_bool == y_val[i][0] ? 1 : 0;
        }

        double val_acc = n_hit2 / (double)X_val.Length;

        // TODO: track whether validation accuracy improved versus the
        // previous epoch, keep a deep copy of the best network so far, and
        // stop early when there has been no improvement for 5-10 epochs.
        Console.WriteLine($"\niteration={iteration} train_mse={error} val_acc={val_acc}");

        iteration++;
        if ((iterations != 0) && (iteration > iterations))
        {
            break;
        }
    }

    // Estimate accuracy on the holdout set.
    int n_hit = 0;
    for (int i = 0; i < X_holdout.Length; ++i)
    {
        double[] output = ann.Compute(X_holdout[i]);
        double y_bool = output[0] > 0.5 ? 1.0 : 0.0;
        n_hit += y_bool == y_holdout[i][0] ? 1 : 0;
    }

    double holdout_acc = n_hit / (double)X_holdout.Length;
    Console.WriteLine($"holdout_acc={holdout_acc}");

    return;
}
// Shows price history, model predictions and a 4-day moving-average
// baseline for the stock selected in listBox4; when checkBox1 is checked a
// per-stock model is trained first.
private void listBox4_SelectedIndexChanged(object sender, EventArgs e)
{
    if (Global.Model == null || listBox4.SelectedItem == null)
    {
        return;
    }
    var selected = ((DailyPrice)listBox4.SelectedItem).StockCode;
    toolStripStatusLabel3.Text = $"Predicting {((DailyPrice)listBox4.SelectedItem).StockCode}";

    // All prices for the selected stock, oldest first.
    IEnumerable<DailyPrice> priceForDate = Global.DataList
        .Where(x => x.StockCode == selected)
        .OrderBy(x => x.CloseDate);
    var currentList = priceForDate.ToList();
    chart2.Series[0].Points.DataBindY(priceForDate.Select(x => x.ClosePrice).ToArray());

    ActivationNetwork _model = null;
    //Training a new model for the stock
    if (checkBox1.Checked)
    {
        _model = new ActivationNetwork(new BipolarSigmoidFunction(2), Global.FeaturesCount, 15, 1); //hardcoded neurons count
        var _iterations = 1000;
        var _teacher = new ResilientBackpropagationLearning(_model);
        var _initializer = new NguyenWidrow(_model);
        _initializer.Randomize();
        var _inputs = DataHelper.DataHelper.GetInputArray(currentList);
        var _outputs = DataHelper.DataHelper.GetOutputArray(currentList);
        for (int i = 0; i < _iterations; i++)
        {
            var trainingError = _teacher.RunEpoch(_inputs, _outputs);
            toolStripStatusLabel3.Text = $"Predicting {((DailyPrice)listBox4.SelectedItem).StockCode} {i}/{_iterations} | e={trainingError / _inputs.Length}";
            // keep the UI responsive during the long training loop
            Application.DoEvents();
        }
    } //end of training

    var firstItem = priceForDate.Select(x => x.ClosePrice).FirstOrDefault();
    var lastItem = priceForDate.Select(x => x.ClosePrice).LastOrDefault();
    var firstStock = priceForDate.FirstOrDefault();
    lbStockCurrent.Text = $"{selected} | {priceForDate.Count()} days: Profit: {lastItem - firstItem} VND | {Math.Round((lastItem / firstItem - 1) * 100, 2)}%\n" +
        $"Volatility: {Math.Round(firstStock.Volatility * 100, 2)}%\n";
    linkLabel1.Text = firstStock.StockCode;
    linkLabel1.Links.Clear();
    linkLabel1.Links.Add(new LinkLabel.Link() { LinkData = firstStock.URL });

    var listPredict = priceForDate.ToList();
    int dayCount = listPredict.Count;
    double error = 0.0;
    if (listPredict.Count > 1)
    {
        // Predict each historical day from the real previous day and
        // accumulate the absolute profit error (Sqrt(Pow(x, 2)) == |x|).
        // NOTE(review): _model is null unless checkBox1 was checked --
        // presumably PredictSingle falls back to Global.Model; confirm.
        for (int i = 0; i < listPredict.Count - 1; i++)
        {
            var previous = currentList[i];
            var original = currentList[i + 1];
            listPredict[i + 1] = PredictSingle(previous, Utils.GetNextDay(previous.CloseDate), _model);
            error += Math.Sqrt(Math.Pow(listPredict[i + 1].Profit - original.Profit, 2));
        }
        error /= dayCount;

        // Forecast 5 further days, each one fed by the previous prediction.
        var startIdx = listPredict.Count - 1;
        string predictedDetails = String.Empty;
        for (int i = 0; i < 5; i++)
        {
            var newIdx = startIdx + i; // NOTE(review): computed but never used
            var previous = listPredict.LastOrDefault();
            var predictValue = PredictSingle(previous, Utils.GetNextDay(previous.CloseDate));
            listPredict.Add(predictValue);
            predictedDetails += $"{predictValue.CloseDate} - {predictValue.ClosePrice} | {predictValue.ProfitPretified}%\n";
        }
        chart2.Series[1].Points.DataBindY(listPredict.Select(x => x.ClosePrice).ToArray());

        var firstPredict = listPredict[dayCount - 1].ClosePrice;
        var lastPredict = Math.Round(listPredict.Select(x => x.ClosePrice).LastOrDefault());
        lbForecast.Text = $"Average error: {error}\n" +
            $"Last predicted price: {lastPredict}\n" +
            $"Change from now: {lastPredict - firstPredict} VND | {Math.Round((lastPredict / firstPredict - 1) * 100, 2)}%\n\n" +
            predictedDetails;
    }

    //moving average (4 days) as baseline
    int numDays = 4;
    var averageList = new List<double>();
    for (int i = 0; i < listPredict.Count; i++)
    {
        if (i >= (numDays - 1))
        {
            var average = 0.0d;
            for (int a = 0; a < numDays; a++)
            {
                // real prices while available, predictions beyond that
                if (i < currentList.Count)
                {
                    average += currentList[i - a].ClosePrice / numDays;
                }
                else
                {
                    average += listPredict[i - a].ClosePrice / numDays;
                }
            }
            averageList.Add(average);
        }
        else
        {
            averageList.Add(double.NaN);
        }
    }
    chart2.Series[2].Points.DataBindY(averageList.ToArray());

    toolStripStatusLabel3.Text = $"Ready";
    checkBox1.Checked = false;
}
/// <summary>
/// <inheritdoc />
/// Trains the regression network on the subset of samples selected by the
/// <c>positive</c> flag, scaling the targets into [0, 1] for the sigmoid
/// output, until the epoch error falls below the threshold or 5000 epochs
/// pass, then saves the result.
/// </summary>
public override void Train()
{
    var inputsOriginal = data.GetSelectedInput(features);
    var outputsOriginal = data.GetExpectedRegressionOutput();

    var tempInputs = new List<double[]>();
    var tempOutputs = new List<double>();

    for (int i = 0; i < inputsOriginal.Length; i++)
    {
        // NOTE(review): when positive is true this keeps NEGATIVE outputs
        // (and vice versa) -- this looks inverted relative to the flag's
        // name; behavior preserved, confirm against the callers.
        if (positive && outputsOriginal[i] < 0.0)
        {
            tempInputs.Add(inputsOriginal[i]);
            tempOutputs.Add(outputsOriginal[i]);
        }
        if (!positive && outputsOriginal[i] > 0.0)
        {
            tempInputs.Add(inputsOriginal[i]);
            tempOutputs.Add(outputsOriginal[i]);
        }
    }

    var inputs = tempInputs.ToArray();
    var outputs = tempOutputs.ToArray();

    var function = new SigmoidFunction();
    network = new ActivationNetwork(function, inputs[0].Length, 5, 1);

    var teacher = new ResilientBackpropagationLearning(network);
    var initialization = new NguyenWidrow(network);
    initialization.Randomize();

    // Map the regression targets into the sigmoid's [0, 1] output range.
    var scaledOutputs = Vector.Scale(outputs, range, new DoubleRange(0.0, 1.0));

    // RunEpoch expects one output vector per sample; wrap each scalar.
    var outputsNetwork = new double[outputs.Length][];
    for (int i = 0; i < outputs.Length; i++)
    {
        outputsNetwork[i] = new double[1] { scaledOutputs[i] };
    }

    double error = Double.PositiveInfinity;
    double maxError = outputs.Length / 5e2;
    int epoch = 0;
    while (error > maxError && epoch < 5000)
    {
        error = teacher.RunEpoch(inputs, outputsNetwork);
        // BUGFIX: the original never incremented epoch, so the 5000-epoch
        // cap was dead code and the loop could spin forever whenever the
        // error target was unreachable.
        epoch++;
    }

    Save();
}
// Builds the training network and its genetic-algorithm teacher for the
// given topology. (Comment translated from the original Russian.)
private void createLearn(int[] topology)
{
    // Use the sigmoid steepness from the textbox when one was entered,
    // otherwise fall back to the library default.
    string alphaText = this.alphaBox.Text;
    activationFunc = alphaText != ""
        ? new BipolarSigmoidFunction(Double.Parse(alphaText))
        : new BipolarSigmoidFunction();

    // One input per data column except the target column.
    network = new ActivationNetwork(activationFunc, colCountData - 1, topology);
    //ActivationLayer layer = network.Layers[0] as ActivationLayer;

    // Nguyen-Widrow weight initialization.
    new NguyenWidrow(network).Randomize();

    // create teacher
    teacher = new GeneticLearning(network, chromosomes);
}