SLink(float w, SNeuron from, SNeuron to, bool rec = false)
{
    weight = w;
    fromNeuron = from.neuronID;
    toNeuron = to.neuronID;
    isRecurrent = rec;
}
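The fields assigned by this constructor are not part of the listing. For reference, here is a minimal sketch of the SLink connection gene, assuming it stores only the values assigned above; the real class may carry additional NEAT bookkeeping such as an innovation number.

public class SLink
{
    public float weight;       // connection weight, applied during Update
    public int fromNeuron;     // neuronID of the neuron the link comes from
    public int toNeuron;       // neuronID of the neuron the link feeds into
    public bool isRecurrent;   // true if the link loops back in the topology

    public SLink(float w, SNeuron from, SNeuron to, bool rec = false)
    {
        weight = w;
        fromNeuron = from.neuronID;
        toNeuron = to.neuronID;
        isRecurrent = rec;
    }
}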
/*
 * The NEAT update method runs the inputs through the network
 * and returns the outputs. However, with NEAT the network can
 * have any topology, with connections running forward, backward
 * or recurrently. Therefore, the layer-based update method used
 * in traditional feed-forward neural networks is not practical.
 *
 * The network can be updated in two modes:
 *
 * active: Each neuron sums the activations, calculated from all
 * of its incoming neurons, during the previous time-step.
 * Essentially, the conceptual difference is that in a layer-based
 * approach the activations are summed per layer, whereas in this
 * mode the activations travel from one neuron to the next over
 * successive updates. This mode is useful for unsupervised learning.
 *
 * snapshot: To completely flush the network and achieve the same
 * result as the layer-based method, this mode flushes the
 * activations all the way through from the input neurons to the
 * output neurons. To do this, the update needs to iterate through
 * all the neurons "depth" times. This mode is useful for supervised
 * learning.
 */
public List<float> Update(List<float> inputs, UpdateType type)
{
    List<float> outputs = new List<float>();

    int flushCount = 1;
    if (type == UpdateType.Snapshot)
    {
        flushCount = depth;
    }

    for (int i = 0; i < flushCount; i++)
    {
        // clear the outputs from the previous iteration
        outputs.Clear();

        int neuronIndex = 0;

        // set the input neurons' outputs from the 'inputs' parameter
        for (; neurons[neuronIndex].type == NeuronType.Input; neuronIndex++)
        {
            neurons[neuronIndex].output = inputs[neuronIndex];
        }

        Debug.Assert(neurons[neuronIndex].type == NeuronType.Bias);

        // set the bias neuron's output to 1
        neurons[neuronIndex++].output = 1;

        // iterate through the remaining neurons in the network
        for (; neuronIndex < neurons.Count; neuronIndex++)
        {
            /*
             * The result of adding up all the activations from the
             * neurons linking into the current neuron.
             */
            float sum = 0;

            SNeuron neuron = neurons[neuronIndex];
            for (int j = 0; j < neuron.linksTo.Count; j++)
            {
                SLink link = neuron.linksTo[j];
                sum += link.weight * FindNeuronById(link.fromNeuron).output;
            }

            // apply the activation function
            neuron.output = Sigmoid(sum, neuron.activationResponse);

            if (neuron.type == NeuronType.Output)
            {
                outputs.Add(neuron.output);
            }
        }
    }

    /*
     * The implementation difference in snapshot mode is that,
     * instead of dynamically updating the network and keeping
     * the results from previous updates, only a "snapshot" of
     * the network is taken. This is ensured by resetting all
     * neuron outputs after each update.
     */
    if (type == UpdateType.Snapshot)
    {
        for (int i = 0; i < neurons.Count; i++)
        {
            SNeuron neuron = neurons[i];
            neuron.output = 0;
        }
    }

    return outputs;
}
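The Update method relies on a Sigmoid helper that is not part of this listing. A minimal sketch, assuming the common NEAT-style logistic activation in which the neuron's activationResponse scales the steepness of the curve (the actual implementation may differ):

// Assumed helper: logistic activation with an adjustable response curve.
// Uses System.Math; in Unity, Mathf.Exp would work equally well.
float Sigmoid(float netInput, float response)
{
    return 1.0f / (1.0f + (float)System.Math.Exp(-netInput / response));
}

To make the difference between the two modes concrete, a hypothetical caller might look like the following; 'brain' (an instance of the network class), the sensor values, and the UpdateType.Active member are illustrative assumptions, not names taken from the listing:

// Hypothetical sensor readings fed into the input neurons.
List<float> sensors = new List<float> { 0.25f, -0.5f, 1.0f };

// Active mode: activations travel one link per call, so the network
// is typically updated once per time-step and keeps its state.
List<float> activeOutputs = brain.Update(sensors, UpdateType.Active);

// Snapshot mode: the network is flushed 'depth' times and then reset,
// so each call behaves like a stateless feed-forward pass.
List<float> snapshotOutputs = brain.Update(sensors, UpdateType.Snapshot);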