/// <summary>
/// Train the neural network on the accumulated exploration data.
/// </summary>
/// <param name="countLifes">How many lives, counting back from the most recent one, to train on. Defaults to 50.</param>
/// <param name="epochs">Number of training epochs. Defaults to 1.</param>
/// <param name="learningRate">Learning rate. Defaults to 1e-3.</param>
/// <param name="trainType">Training mode. Defaults to online.</param>
/// <param name="minLoss">Loss value at which training stops early.</param>
/// <param name="optimizer">Optimizer. Defaults to Adam.</param>
/// <param name="loss">Loss metric. Defaults to MSE-like LossMeanSqrSqrt.</param>
public void Train(int countLifes = 50, int epochs = 1, double learningRate = 1e-3, TrainType trainType = TrainType.Online, double minLoss = 0.0, IOptimizer optimizer = null, ILoss loss = null)
{
    if (loss == null)
    {
        loss = new LossMeanSqrSqrt();
    }

    if (optimizer == null)
    {
        optimizer = new Adam();
    }

    // Clamp so that requesting more lives than were recorded does not
    // produce a negative start index into `lifes`.
    int start = lifes.Count - countLifes;
    if (start < 0)
    {
        start = 0;
    }

    Vector rewards = GetRewards(start, lifes.Count);
    var inputs = new List<NNValue>();
    var outputs = new List<NNValue>();

    for (int i = 0; i < rewards.N; i++)
    {
        var conditions = lifes[start + i].GetConditions();

        foreach (var condition in conditions)
        {
            var state = condition.Item1;
            var action = condition.Item2;
            inputs.Add(state.ToNNValue());

            // Positive reward: reinforce the taken action (one-hot of the argmax);
            // otherwise reinforce the complement distribution instead.
            if (rewards[i] > 0)
            {
                outputs.Add(new NNValue(action.probabilities.MaxOutVector().TransformVector(x => (x == -1) ? 0 : 1)));
            }
            else
            {
                outputs.Add(new NNValue((1.0 - action.probabilities).MaxOutVector().TransformVector(x => (x == -1) ? 0 : 1)));
            }
        }
    }

    #region Shuffle
    // BUG FIX: the original loop iterated from `start` (an index into `lifes`,
    // not into the sample lists) and drew swap indices from [start, inputs.Count),
    // so with start > 0 the leading samples were never shuffled, and with
    // start >= inputs.Count nothing was shuffled at all. Shuffle the whole
    // sample list, keeping inputs and outputs paired.
    for (int i = 0; i < inputs.Count; i++)
    {
        var a = random.Next(0, inputs.Count);
        var b = random.Next(0, inputs.Count);

        var tempInput = inputs[a];
        var tempOutput = outputs[a];
        inputs[a] = inputs[b];
        outputs[a] = outputs[b];
        inputs[b] = tempInput;
        outputs[b] = tempOutput;
    }
    #endregion

    #region Train
    DataSetNoReccurent dataSetNoReccurent = new DataSetNoReccurent(inputs.ToArray(), outputs.ToArray(), loss);
    Trainer trainer = new Trainer(graphBackward, trainType, optimizer);
    trainer.Train(epochs, learningRate, model, dataSetNoReccurent, minLoss);
    #endregion
}
// Train the neural network on the data prepared by GetData().
private void button2_Click(object sender, EventArgs e)
{
    GetData();

    var dataSet = new DataSetNoReccurent(x, y, new CrossEntropy(), 0.4);
    var trainer = new Trainer(new GraphCPU(), TrainType.Online, new Adam());

    trainer.Train(8, 0.001, NNW, dataSet, 0.0006);
}
static void Main(string[] args)
{
    // Fixed seed so the run is reproducible.
    Random random = new Random(13);

    // Build a small convolutional network for two-class 28x28 inputs.
    NeuralNetwork cNN = new NeuralNetwork(random, 0.2);

    var firstConv = new ConvolutionLayer(new RectifiedLinearUnit(0.01), 8, 3, 3);
    firstConv.IsSame = true;

    cNN.AddNewLayer(new Shape(28, 28), firstConv);
    cNN.AddNewLayer(new MaxPooling(2, 2));
    cNN.AddNewLayer(new ConvolutionLayer(new RectifiedLinearUnit(0.01), 16, 3, 3));
    cNN.AddNewLayer(new MaxPooling(2, 2));
    cNN.AddNewLayer(new ConvolutionLayer(new RectifiedLinearUnit(0.01), 32, 3, 3));
    cNN.AddNewLayer(new UnPooling(2, 2));
    cNN.AddNewLayer(new ConvolutionLayer(new RectifiedLinearUnit(0.01), 16, 3, 3));
    cNN.AddNewLayer(new MaxPooling(2, 2));
    cNN.AddNewLayer(new Flatten());
    cNN.AddNewLayer(new FeedForwardLayer(20, new RectifiedLinearUnit(0.01)));
    cNN.AddNewLayer(new FeedForwardLayer(2, new SoftmaxUnit()));

    Console.WriteLine(cNN);

    GraphCPU graph = new GraphCPU(false);

    // Two random 28x28x2 samples with opposite one-hot targets.
    NNValue sampleA = NNValue.Random(28, 28, 2, random);
    NNValue sampleB = NNValue.Random(28, 28, 2, random);
    NNValue targetA = new NNValue(new double[] { 0, 1 });
    NNValue targetB = new NNValue(new double[] { 1, 0 });

    DataSetNoReccurent data = new DataSetNoReccurent(
        new NNValue[] { sampleA, sampleB },
        new NNValue[] { targetA, targetB },
        new CrossEntropyWithSoftmax());

    TrainerCPU trainer = new TrainerCPU(TrainType.MiniBatch, new Adam());
    trainer.BatchSize = 2;
    trainer.Train(10000, 0.001, cNN, data, 2, 0.0001);

    // Run both samples through the trained network.
    double[] predictionA = cNN.Activate(sampleA, graph).DataInTensor;
    double[] predictionB = cNN.Activate(sampleB, graph).DataInTensor;
}
/// <summary>
/// Train the neural network on the accumulated exploration data.
/// </summary>
/// <param name="countGenerations">How many generations, counting back from the most recent one, to train on. -1 (the default) means all of them.</param>
/// <param name="epochs">Number of training epochs. Defaults to 1.</param>
/// <param name="learningRate">Learning rate. Defaults to 1e-3.</param>
/// <param name="trainType">Training mode. Defaults to online. NOTE(review): not used by the body — the field `trainer` is configured elsewhere; kept for interface compatibility.</param>
/// <param name="minLoss">Loss value at which training stops early.</param>
/// <param name="loss">Loss metric. Defaults to MSE-like LossMeanSqrSqrt.</param>
public void Train(int countGenerations = -1, int epochs = 1, float learningRate = 1e-3f, TrainType trainType = TrainType.Online, float minLoss = 0.0f, ILoss loss = null)
{
    if (loss == null)
    {
        loss = new LossMeanSqrSqrt();
    }

    // -1 means "use everything"; also clamp so that a request for more
    // generations than exist cannot produce a negative start index.
    if (countGenerations == -1 || countGenerations > Generations.Count)
    {
        countGenerations = Generations.Count;
    }

    int start = Generations.Count - countGenerations;
    // By construction start + countGenerations == Generations.Count,
    // so the original "clamp end" branch was dead code.
    int end = Generations.Count;

    Vector rewards = GetRewards(start, end);
    var inputs = new List<NNValue>();
    var outputs = new List<NNValue>();

    // Soft target: the reinforced action gets probability 1 - p instead of a hard 1.
    const double p = 0.01;

    for (int i = 0; i < rewards.Count; i++)
    {
        var conditions = Generations[start + i].GetConditions();

        foreach (var condition in conditions)
        {
            var state = condition.Item1;
            var action = condition.Item2;

            if (rewards[i] > 0)
            {
                // Positive reward: reinforce the action that was taken.
                inputs.Add(state.Input);
                Vector target = new Vector(degreesOfFreedom);
                target[action.index] = 1.0 - p;
                outputs.Add(new NNValue(target));
            }
            else if (rewards[i] < 0 && degreesOfFreedom > 1)
            {
                // Negative reward: reinforce a uniformly chosen *different* action.
                // BUG FIX: the original rejection-sampled with
                // `while ((u = random.Next(0, degreesOfFreedom)) == action.index) ;`,
                // which spins forever when degreesOfFreedom == 1. Pick an
                // alternative index directly (uniform over the other actions)
                // and skip the sample entirely when no alternative exists.
                inputs.Add(state.Input);
                int alternative = (action.index + 1 + random.Next(degreesOfFreedom - 1)) % degreesOfFreedom;
                Vector target = new Vector(degreesOfFreedom);
                target[alternative] = 1.0 - p;
                outputs.Add(new NNValue(target));
            }
            // Zero reward: no training sample, as in the original.
        }
    }

    if (inputs.Count > 0)
    {
        Shuffle(inputs, outputs);

        #region Train
        DataSetNoReccurent dataSetNoReccurent = new DataSetNoReccurent(inputs.ToArray(), outputs.ToArray(), loss);
        trainer.Train(epochs, learningRate, model, dataSetNoReccurent, minLoss);
        #endregion
    }
}