        // Walks the layers in reverse order, backpropagating the errors held in each
        // layer's training state. Recurrent layers are also given the surrounding
        // network states so they can see both the previous and the current step.
        public void BackPropogate(NetworkState last, NetworkState now)
        {
            for (int i = states.Count - 1; i >= 0; i--)
            {
                object state = states[i];
                Layer  layer = Network.Layers[i];

                RecurrentLayer   recurrentLayer   = layer as RecurrentLayer;
                FeedForwardLayer feedForwardLayer = layer as FeedForwardLayer;

                if (recurrentLayer != null)
                {
                    RecurrentTrainingState recurrentState = (RecurrentTrainingState)state;
                    recurrentState.Last = (RecurrentState)last.states[i];
                    recurrentState.Now  = (RecurrentState)now.states[i];
                    recurrentLayer.BackPropogate(recurrentState);
                }
                else
                {
                    FeedForwardTrainingState feedForwardState = (FeedForwardTrainingState)state;
                    feedForwardState.Now = (FeedForwardState)now.states[i];
                    feedForwardLayer.BackPropogate(feedForwardState);
                }
            }
        }
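
        // Applies the weight-error values each layer accumulated during
        // backpropagation to that layer's weights, scaled by learningCoefficient.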
        public void ApplyWeightChanges(double learningCoefficient)
        {
            for (int i = 0; i < states.Count; i++)
            {
                object state = states[i];
                Layer  layer = Network.Layers[i];

                RecurrentTrainingState   recurrentState   = state as RecurrentTrainingState;
                FeedForwardTrainingState feedForwardState = state as FeedForwardTrainingState;
                if (recurrentState != null)
                {
                    ((RecurrentLayer)layer).ApplyWeightChanges(recurrentState, learningCoefficient);
                }
                else
                {
                    ((FeedForwardLayer)layer).ApplyWeightChanges(feedForwardState, learningCoefficient);
                }
            }
        }
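
        // Backpropagates through a single feed-forward layer: finishes the error
        // terms for this layer's units, propagates errors back to its inputs, and
        // accumulates the per-weight errors later consumed by ApplyWeightChanges.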
        public void BackPropogate(FeedForwardTrainingState state)
        {
            Debug.Assert(state.Now.WeightedSums != state.Now.Outputs);

            // Initially, state.Errors contains just the errors backpropagated from the
            // next layer. These are d(E)/d(outputs of this layer).

            // Multiply by the derivative of the activation function to finalise the
            // errors with respect to the weighted sums of the units in this layer.
            // A layer with a single output skips this step, which amounts to treating
            // that output as linear (a derivative of 1).
            if (state.Now.Outputs.Length != 1)
            {
                MultiplyByActivationDerivative(state.Now.WeightedSums, state.Errors);
            }

            // Propagate the errors back to the input.
            Array.Clear(state.InputErrors, 0, state.InputErrors.Length);
            WeightedOutputSum(state.Errors, inputWeights, state.InputErrors);

            // Accumulate the error of each weight: the value of the input it connects
            // to times the error of the output it connects to.
            Sum(state.Errors, state.BiasWeightErrors);
            SumProducts(state.Now.Inputs, state.Errors, state.InputWeightErrors);
        }
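
        // In equation form, writing a for the weighted sums, f for the activation
        // function, and delta = dE/da = dE/d(outputs) * f'(a), the accumulated
        // errors above are:
        //
        //     dE/d(biasWeights[j])     = delta[j]
        //     dE/d(inputWeights[i, j]) = inputs[i] * delta[j]
        //     dE/d(inputs[i])          = sum over j of inputWeights[i, j] * delta[j]
        //
        // The [i, j] indexing is an assumption about how inputWeights is laid out;
        // WeightedOutputSum and SumProducts (not shown) define the actual layout.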
        public void ApplyWeightChanges(FeedForwardTrainingState state, double learningCoefficient)
        {
            Layer.ApplyWeightChanges(biasWeights, state.BiasWeightErrors, learningCoefficient);
            Layer.ApplyWeightChanges(inputWeights, state.InputWeightErrors, learningCoefficient);
        }
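
        // Layer.ApplyWeightChanges is not shown in this listing. A minimal sketch,
        // assuming the weights and their accumulated errors are flat arrays of equal
        // length, the errors hold dE/dW, and a plain gradient-descent step is wanted:
        //
        //     public static void ApplyWeightChanges(double[] weights, double[] errors,
        //                                           double learningCoefficient)
        //     {
        //         // Step each weight against its accumulated error gradient.
        //         for (int i = 0; i < weights.Length; i++)
        //         {
        //             weights[i] -= learningCoefficient * errors[i];
        //         }
        //     }
        //
        // The array shapes, the sign convention, and whether the error arrays are
        // reset afterwards are all assumptions; the real implementation may differ.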