        // Backpropagate one timestep of errors through this recurrent layer (BPTT).
        public void BackPropogate(RecurrentTrainingState state)
        {
            // The derivative step below reads the raw weighted sums, so they
            // must not alias the outputs array.
            Debug.Assert(state.Now.WeightedSums != state.Now.Outputs);

            // On entry, state.Errors holds only the errors backpropagated from the
            // next layer: d(E)/d(outputs of this layer).

            // Add the errors fed back from the following timestep through the internal weights.
            WeightedOutputSum(state.InternalErrors, internalWeights, state.Errors);

            // Multiply by the derivative of the activation function to finalise the
            // errors with respect to each unit's weighted sum for this layer.
            MultiplyByActivationDerivative(state.Now.WeightedSums, state.Errors);

            // Propagate the errors back to this layer's inputs.
            Array.Clear(state.InputErrors, 0, state.InputErrors.Length);
            WeightedOutputSum(state.Errors, inputWeights, state.InputErrors);

            // The errors for this layer become the errors at timestep T+1
            // as we move back to the previous timestep.
            Array.Copy(state.Errors, state.InternalErrors, state.Errors.Length);

            // Accumulate the error of each weight: the value of the input it
            // connects from, times the error of the output it connects to.
            Sum(state.Errors, state.BiasWeightErrors);
            SumProducts(state.Now.Inputs, state.Errors, state.InputWeightErrors);
            SumProducts(state.Last.Outputs, state.Errors, state.InternalWeightErrors);
        }
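The vector and matrix helpers called above are not shown in this excerpt. The sketch below is my reconstruction of what they plausibly compute, inferred purely from the call sites; the [unit, input] weight-indexing convention and the logistic sigmoid activation are assumptions, not taken from the original.

        // Assumed helper implementations, inferred from the call sites above.
        // Weights are taken to be indexed as [unit, input]; this is a guess.

        // destination[j] += sum over i of errors[i] * weights[i, j],
        // i.e. push a vector of unit errors backwards through a weight matrix.
        static void WeightedOutputSum(double[] errors, double[,] weights, double[] destination)
        {
            for (int i = 0; i < errors.Length; i++)
                for (int j = 0; j < destination.Length; j++)
                    destination[j] += errors[i] * weights[i, j];
        }

        // errors[i] *= f'(weightedSums[i]); shown here for the logistic sigmoid,
        // whose derivative at x is s(x) * (1 - s(x)).
        static void MultiplyByActivationDerivative(double[] weightedSums, double[] errors)
        {
            for (int i = 0; i < errors.Length; i++)
            {
                double s = 1.0 / (1.0 + Math.Exp(-weightedSums[i]));
                errors[i] *= s * (1.0 - s);
            }
        }

        // accumulator[i] += values[i]. Used for the bias gradients, since a bias
        // behaves like a weight whose input is always 1.
        static void Sum(double[] values, double[] accumulator)
        {
            for (int i = 0; i < values.Length; i++)
                accumulator[i] += values[i];
        }

        // accumulator[i, j] += errors[i] * inputs[j] -- an outer product that
        // accumulates d(E)/d(weight) for every weight in the matrix.
        static void SumProducts(double[] inputs, double[] errors, double[,] accumulator)
        {
            for (int i = 0; i < errors.Length; i++)
                for (int j = 0; j < inputs.Length; j++)
                    accumulator[i, j] += errors[i] * inputs[j];
        }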
        // Backpropagate one timestep across the whole network, from the output
        // layer back towards the input layer.
        public void BackPropogate(NetworkState last, NetworkState now)
        {
            for (int i = states.Count - 1; i >= 0; i--)
            {
                object state = states[i];
                Layer  layer = Network.Layers[i];

                RecurrentLayer   recurrentLayer   = layer as RecurrentLayer;
                FeedForwardLayer feedForwardLayer = layer as FeedForwardLayer;

                if (recurrentLayer != null)
                {
                    RecurrentTrainingState recurrentState = (RecurrentTrainingState)state;
                    recurrentState.Last = (RecurrentState)last.states[i];
                    recurrentState.Now  = (RecurrentState)now.states[i];
                    recurrentLayer.BackPropogate(recurrentState);
                }
                else
                {
                    FeedForwardTrainingState feedForwardState = (FeedForwardTrainingState)state;
                    feedForwardState.Now = (FeedForwardState)now.states[i];
                    feedForwardLayer.BackPropogate(feedForwardState);
                }
            }
        }
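For context, the overload above is presumably called once per timestep, walking backwards over the states recorded during the forward pass. The driver below is a hypothetical sketch of that loop: the class name Trainer, the method TrainSequence, and the helper SeedOutputErrors do not appear in the original and are illustrative only.

        // Hypothetical BPTT driver -- none of these names come from the source.
        // recordedStates[t] holds the NetworkState captured at timestep t during
        // the forward pass; targets[t] is the desired output at that timestep.
        static void TrainSequence(Trainer trainer, NetworkState[] recordedStates, double[][] targets)
        {
            for (int t = recordedStates.Length - 1; t >= 1; t--)
            {
                // Seed the output layer's errors for timestep t (e.g. output - target)
                // before propagating them back through every layer at that timestep.
                SeedOutputErrors(trainer, recordedStates[t], targets[t]);
                trainer.BackPropogate(recordedStates[t - 1], recordedStates[t]);
            }

            // The weight errors only accumulate during backpropagation, so the
            // update is applied once, after the whole sequence has been processed.
            trainer.ApplyWeightChanges(0.1);
        }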
        // Apply every layer's accumulated weight errors, scaled by the learning rate.
        public void ApplyWeightChanges(double learningCoefficient)
        {
            for (int i = 0; i < states.Count; i++)
            {
                object state = states[i];
                Layer  layer = Network.Layers[i];

                RecurrentTrainingState   recurrentState   = state as RecurrentTrainingState;
                FeedForwardTrainingState feedForwardState = state as FeedForwardTrainingState;
                if (recurrentState != null)
                {
                    ((RecurrentLayer)layer).ApplyWeightChanges(recurrentState, learningCoefficient);
                }
                else
                {
                    ((FeedForwardLayer)layer).ApplyWeightChanges(feedForwardState, learningCoefficient);
                }
            }
        }
        // Apply the weight changes accumulated for a recurrent layer.
        public void ApplyWeightChanges(RecurrentTrainingState state, double learningCoefficient)
        {
            Layer.ApplyWeightChanges(biasWeights, state.BiasWeightErrors, learningCoefficient);
            Layer.ApplyWeightChanges(inputWeights, state.InputWeightErrors, learningCoefficient);
            Layer.ApplyWeightChanges(internalWeights, state.InternalWeightErrors, learningCoefficient);
        }
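The static Layer.ApplyWeightChanges helper is not part of this excerpt. A plausible body is sketched below, assuming the error arrays hold the accumulated d(E)/d(weight) values, so that plain gradient descent subtracts them; the sign convention and the resetting of the accumulators are assumptions.

        // Assumed body of the static Layer.ApplyWeightChanges helper; only its
        // call sites appear above. Plain gradient descent over accumulated errors.
        public static void ApplyWeightChanges(double[] weights, double[] errors, double learningCoefficient)
        {
            for (int i = 0; i < weights.Length; i++)
            {
                weights[i] -= learningCoefficient * errors[i];
                errors[i] = 0; // reset the accumulator for the next training pass
            }
        }

        // A matrix overload for inputWeights and internalWeights would do the
        // same update element-wise over a double[,] array.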