// This function is used in backpropagation and applies techniques such as the derivative of the sigmoid function.
public double CalculateGradient(double? target = null)
{
    if (target == null)
    {
        // Calculate the error for each neuron in a hidden layer.
        return Gradient = OutputSynapses.Sum(a => a.OutputNeuron.Gradient * a.Weight) * Sigmoid.Derivative(Value);
    }

    // Calculate the error in the output layer.
    return Gradient = CalculateError(target.Value) * Sigmoid.Derivative(Value);
}
/// <summary>
/// Calculates the gradient.
/// </summary>
/// <param name="target">The target.</param>
/// <returns></returns>
public double CalculateGradient(double? target = null)
{
    if (target == null)
    {
        return Gradient = OutputSynapses.Sum(a => a.OutputNeuron.Gradient * a.Weight) * Sigmoid.Derivative(Value);
    }

    return Gradient = CalculateError(target.Value) * Sigmoid.Derivative(Value);
}
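Most of these snippets call Sigmoid.Derivative(Value) without showing it. Here is a minimal sketch of what such a helper typically looks like, assuming Value already holds the neuron's activated output, so the derivative is expressed as y * (1 - y) rather than re-applying the sigmoid; the class and method names mirror the snippets, but the bodies are an assumption, not code from any of these repos:

using System;

public static class Sigmoid
{
    // Logistic activation: maps any real input to (0, 1).
    public static double Output(double x)
    {
        return 1.0 / (1.0 + Math.Exp(-x));
    }

    // Derivative expressed in terms of the *activated* value:
    // if y = Output(x), then dy/dx = y * (1 - y).
    public static double Derivative(double y)
    {
        return y * (1.0 - y);
    }
}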
/*** Added by Benson ***/
public void UpdateInput(double learnRate)
{
    // Calculate the gradient.
    double inputGradient = OutputSynapses.Sum(a => a.OutputNeuron.Gradient * a.Weight);

    // Update the input value.
    Value += learnRate * inputGradient;
}
public double CalculateSigmoidError(double? target = null)
{
    if (target == null)
    {
        return Error = OutputSynapses.Sum(a => a.OutputNeuron.Error * a.Weight) * Sigmoid.Derivative(Output);
    }

    return Error = Sigmoid.Derivative(Output) * CalculateError(target.Value);
}
public float CalculateGradient(float? target = null)
{
    if (target == null)
    {
        return Gradient = OutputSynapses.Sum(x => x.OutputNeuron.Gradient * x.Weight) * Sigmoid.Derivative(Value);
    }

    return Gradient = CalculateError(target.Value) * Sigmoid.Derivative(Value);
}
// Error value: if a target is provided, this is an output-layer neuron.
// If no target is provided, this is a hidden-layer neuron; its error is propagated back from the
// next layer, computed as the sum, over all synapses leaving this node, of each synapse's weight
// multiplied by the error of the neuron it connects to.
public double CalculateError(double? target = null)
{
    if (target == null)
    {
        return Error = OutputSynapses.Sum(a => a.Weight * a.OutputNeuron.Error);
    }

    return Error = target.Value - OutputValue;
}
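To make the two branches concrete, here is a small worked usage sketch; outputNeuron and hiddenNeuron are assumed instances of the Neuron class these snippets come from, and the numbers are illustrative:

// Output neuron: target = 1.0, OutputValue = 0.73  =>  Error = 1.0 - 0.73 = 0.27
double outputError = outputNeuron.CalculateError(1.0);

// Hidden neuron with two outgoing synapses (weights 0.4 and -0.2,
// downstream neuron errors 0.27 and 0.10):
//   Error = 0.4 * 0.27 + (-0.2) * 0.10 = 0.088
double hiddenError = hiddenNeuron.CalculateError();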
/// <summary>
/// Update the weights of the InputSynapses in order to improve precision
/// </summary>
/// <param name="_learnRate">learning rate of the Neural Network</param>
public IEnumerator UpdateSynapses(float _learnRate)
{
    LocalGradient = NeuralMath.SigmoidDerivative(Value) * OutputSynapses.Sum(s => s.OutPutNeuron.LocalGradient * s.Weight);

    foreach (Synapse synapse in InputSynapses)
    {
        synapse.WeightDelta = synapse.Weight - (_learnRate * (synapse.InputNeuron.Value * LocalGradient));

        // Yield after each synapse so the work is spread across frames.
        yield return new WaitForEndOfFrame();
    }
}
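Because UpdateSynapses is a Unity coroutine (it yields a WaitForEndOfFrame after each synapse), it has to be driven by Unity's coroutine scheduler rather than called directly. A minimal, assumed usage from a MonoBehaviour; TrainerBehaviour, the neuron field, and the learning rate are hypothetical:

using UnityEngine;

public class TrainerBehaviour : MonoBehaviour
{
    // Hypothetical reference to a neuron from the snippet above.
    private Neuron neuron;

    void Start()
    {
        // StartCoroutine advances the iterator once per yield,
        // spreading the weight updates across frames.
        StartCoroutine(neuron.UpdateSynapses(0.05f));
    }
}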
// Gradient calculation
public double CalculateGradient(double? target = null)
{
    // For hidden-layer neurons
    if (target == null)
    {
        return Gradient = OutputSynapses.Sum(a => a.OutputNeuron.Gradient * a.Weight) * ActivationFunction.Derivative(Value);
    }

    // For output-layer neurons
    return Gradient = CalculateError(target.Value) * ActivationFunction.Derivative(Value);
}
public double CalculateGradient(double? target = null)
{
    var derivative = ActivationFunction.Derivative(Value);

    if (target != null)
    {
        Gradient = derivative * CalculateError(target.Value);
    }
    else
    {
        Gradient = derivative * OutputSynapses.Sum(synapse => synapse.OutputNeuron.Gradient * synapse.Weight);
    }

    return Gradient;
}
public double CalculateGradient() => Gradient = OutputSynapses.Sum(synapse => synapse.OutputNeuron.Gradient * synapse.Weight) * Derivative(Value);
public double CalculateGradient()
{
    return Gradient = OutputSynapses.Sum(a => a.OutputNeuron.Gradient * a.Weight) * CalculateDerivative();
}
public double CalculateGradient()
{
    // _dact is the derivative of the activation function.
    return Gradient = OutputSynapses.Sum(a => a.OutputNeuron.Gradient * a.Weight) * _dact(Value);
}
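All of these variants assume that the gradients of the layer ahead have already been computed, so a correct backward pass must walk the network from the output layer toward the input. A minimal sketch under that assumption; the Network class with its Layers and Neurons collections is hypothetical, modeled on the snippets, and the weight updates are omitted for brevity:

public void BackPropagate(double[] targets)
{
    // Output layer first: gradients come directly from the targets.
    var outputLayer = Layers[Layers.Count - 1];
    for (int i = 0; i < outputLayer.Neurons.Count; i++)
    {
        outputLayer.Neurons[i].CalculateGradient(targets[i]);
    }

    // Then the hidden layers in reverse order, so every neuron's
    // OutputSynapses already point at neurons with fresh gradients.
    for (int l = Layers.Count - 2; l > 0; l--)
    {
        foreach (var neuron in Layers[l].Neurons)
        {
            neuron.CalculateGradient();
        }
    }
}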