/// <summary>
/// Builds a layer of freshly-initialised sigmoid neurons, each constructed
/// against <paramref name="PreviousLayer"/> as its upstream layer.
/// </summary>
/// <param name="NumNodes">Number of neurons to allocate in this layer.</param>
/// <param name="PreviousLayer">The layer whose neurons feed into this one.</param>
public NeuronLayer(int NumNodes, NeuronLayer PreviousLayer)
{
    Nodes = new SigmoidNeuron[NumNodes];
    for (int node = 0; node < Nodes.Length; ++node)
    {
        Nodes[node] = new SigmoidNeuron(PreviousLayer);
    }
}
/// <summary>
/// Builds a layer of sigmoid neurons restored from serialised per-neuron data.
/// </summary>
/// <param name="NumNodes">Number of neurons to allocate in this layer.</param>
/// <param name="NeuronData">Serialised data, one entry per neuron. Neurons
/// without a corresponding entry are passed null (default initialisation).</param>
/// <param name="PreviousLayer">The layer whose neurons feed into this one.</param>
public NeuronLayer(int NumNodes, string[] NeuronData, NeuronLayer PreviousLayer)
{
    Nodes = new SigmoidNeuron[NumNodes];
    for (int i = 0; i < NumNodes; ++i)
    {
        // Bounds-check per index: the original only tested Length > 0, so a
        // non-empty NeuronData shorter than NumNodes threw
        // IndexOutOfRangeException, and a null array threw NRE on .Length.
        string data = (NeuronData != null && i < NeuronData.Length) ? NeuronData[i] : null;
        Nodes[i] = new SigmoidNeuron(data, PreviousLayer);
    }
}
/// <summary>
/// Frame-sliced forward pass: identical to FeedForward, but periodically
/// yields (per ShouldYield) so a large network does not stall a single frame.
/// </summary>
private IEnumerator CoFeedForward()
{
    yield return new WaitForEndOfFrame();

    // Load the latest Inputs into the input layer's activations.
    for (int n = 0; n < LayerNeuronCounts[0]; ++n)
    {
        NetLayers[0].Nodes[n].Activation = Inputs[n];
        if (ShouldYield())
        {
            yield return new WaitForEndOfFrame();
        }
    }

    // Propagate activations through every layer after the input layer.
    for (int layer = 1; layer < NumLayers; ++layer)
    {
        for (int n = 0; n < LayerNeuronCounts[layer]; ++n)
        {
            SigmoidNeuron neuron = NetLayers[layer].Nodes[n];
            neuron.Activation = Sigmoid(neuron.GetZ());
            if (ShouldYield())
            {
                yield return new WaitForEndOfFrame();
            }
        }
    }

    // Publish the output layer's activations into Outputs.
    for (int n = 0; n < LayerNeuronCounts[NumLayers - 1]; ++n)
    {
        Outputs[n] = NetLayers[NumLayers - 1].Nodes[n].Activation;
        if (ShouldYield())
        {
            yield return new WaitForEndOfFrame();
        }
    }

    // Notify every registered (non-null) callback that the pass completed.
    for (int cb = 0; cb < FeedforwardCallBacks.Count; ++cb)
    {
        FeedforwardCallBacks[cb]?.Invoke();
    }
}
/// <summary>
/// Runs one forward pass, then backpropagates the error, accumulating the
/// per-neuron bias gradients (NablaBias) and per-weight gradients
/// (NablaWeights) for a later parameter update.
/// </summary>
public void Backprop()
{
    FeedForward();

    // Zero the deltas of every non-output neuron. Downstream contributions
    // are SUMMED below; the original assigned with '=', so each neuron in
    // layer i+1 overwrote the previous neuron's contribution to layer i and
    // only the last one survived — the weighted-delta sum of standard
    // backpropagation requires accumulation.
    for (int i = 0; i < NumLayers - 1; ++i)
    {
        for (int j = 0; j < LayerNeuronCounts[i]; ++j)
        {
            NetLayers[i].Nodes[j].Delta = 0;
        }
    }

    // Output layer: delta = dC/da * sigma'(z).
    for (int i = 0; i < LayerNeuronCounts[NumLayers - 1]; ++i)
    {
        SigmoidNeuron neuron = NetLayers[NumLayers - 1].Nodes[i];
        neuron.Delta = CostDerivertive(Outputs[i], ExpectedOutputs[i]) * SigmoidPrime(neuron.z);
        neuron.NablaBias += neuron.Delta;
        for (int j = 0; j < neuron.Inputs.Length; ++j)
        {
            // Weight gradient: delta * upstream activation.
            neuron.NablaWeights[j] += neuron.Delta * neuron.Inputs[j].Activation;
            // Accumulate (not overwrite) the error sent back to the upstream
            // neuron: sum over downstream of w * delta, scaled by sigma'(z).
            neuron.Inputs[j].Delta += (neuron.Weights[j] * neuron.Delta) * SigmoidPrime(neuron.Inputs[j].z);
        }
    }

    // Hidden layers, from the last hidden layer back to (not including) the
    // input layer: each neuron's Delta was accumulated by the layer above.
    for (int i = NumLayers - 2; i > 0; --i)
    {
        for (int j = 0; j < LayerNeuronCounts[i]; ++j)
        {
            SigmoidNeuron neuron = NetLayers[i].Nodes[j];
            neuron.NablaBias += neuron.Delta;
            for (int k = 0; k < neuron.NablaWeights.Length; ++k)
            {
                neuron.NablaWeights[k] += neuron.Delta * neuron.Inputs[k].Activation;
                neuron.Inputs[k].Delta += (neuron.Weights[k] * neuron.Delta) * SigmoidPrime(neuron.Inputs[k].z);
            }
        }
    }
}
/// <summary>
/// Synchronous forward pass: copies Inputs into the input layer's
/// activations, propagates through each subsequent layer, then copies the
/// output layer's activations into Outputs.
/// </summary>
public void FeedForward()
{
    NeuronLayer inputLayer = NetLayers[0];
    for (int n = 0; n < LayerNeuronCounts[0]; ++n)
    {
        inputLayer.Nodes[n].Activation = Inputs[n];
    }

    // Every layer after the input layer: activation = sigma(z).
    for (int layer = 1; layer < NumLayers; ++layer)
    {
        for (int n = 0; n < LayerNeuronCounts[layer]; ++n)
        {
            SigmoidNeuron neuron = NetLayers[layer].Nodes[n];
            neuron.Activation = Sigmoid(neuron.GetZ());
        }
    }

    NeuronLayer outputLayer = NetLayers[NumLayers - 1];
    for (int n = 0; n < LayerNeuronCounts[NumLayers - 1]; ++n)
    {
        Outputs[n] = outputLayer.Nodes[n].Activation;
    }
}