Example #1
File: BiRNN.cs Project: My-Khan/RNNSharp
        private void LearnTwoRNN(Sequence pSequence, SimpleLayer[] mergedHiddenLayer, SimpleLayer[] seqOutput)
        {
            int numStates = pSequence.States.Length;

            //Reset both directional networks and give each its own copy of the shared hidden-to-output weights
            Parallel.Invoke(() =>
            {
                forwardRNN.netReset(true);
                forwardRNN.Hidden2OutputWeight = Hidden2OutputWeight.CopyTo();
            },
                            () =>
            {
                backwardRNN.netReset(true);
                backwardRNN.Hidden2OutputWeight = Hidden2OutputWeight.CopyTo();
            });

            //Run three tasks in parallel: update the hidden-to-output weights, train the forward network, train the backward network
            Parallel.Invoke(() =>
            {
                for (int curState = 0; curState < numStates; curState++)
                {
                    SimpleLayer outputCells       = seqOutput[curState];
                    SimpleLayer mergedHiddenCells = mergedHiddenLayer[curState];
                    for (int i = 0; i < Hidden2OutputWeight.Height; i++)
                    {
                        //Update weights for the hidden-to-output layer
                        double er = outputCells.er[i];
                        double[] vector_hidden = mergedHiddenCells.cellOutput;
                        double[] vector_i      = Hidden2OutputWeight[i];
                        double[] vector_lr     = Hidden2OutputWeightLearningRate[i];
                        int k = 0;

                        Vector<double> vecErr = new Vector<double>(er);
                        while (k < Hidden2OutputWeight.Width - Vector<double>.Count)
                        {
                            Vector<double> vecDelta = new Vector<double>(vector_hidden, k);
                            Vector<double> vecLearningRateWeights = new Vector<double>(vector_lr, k);
                            Vector<double> vecB = new Vector<double>(vector_i, k);

                            //Compute the gradient: error term * hidden activation
                            vecDelta *= vecErr;

                            //Normalize (clip) the gradient
                            vecDelta = NormalizeGradient(vecDelta);

                            //Compute the new per-weight learning rates and store the updated accumulators
                            Vector<double> vecLearningRate = ComputeLearningRate(vecDelta, ref vecLearningRateWeights);
                            vecLearningRateWeights.CopyTo(vector_lr, k);

                            //Update weights
                            vecB += (vecLearningRate * vecDelta);
                            vecB.CopyTo(vector_i, k);

                            k += Vector<double>.Count;
                        }

                        //Scalar tail: process any remaining columns that do not fill a full SIMD vector
                        while (k < Hidden2OutputWeight.Width)
                        {
                            double delta           = NormalizeGradient(mergedHiddenCells.cellOutput[k] * er);
                            double newLearningRate = UpdateLearningRate(Hidden2OutputWeightLearningRate, i, k, delta);

                            vector_i[k] += newLearningRate * delta;

                            k++;
                        }
                    }
                }
            },
                            () =>
            {
                //Learn forward network
                for (int curState = 0; curState < numStates; curState++)
                {
                    // error propagation
                    State state = pSequence.States[curState];

                    forwardRNN.SetInputLayer(state, curState, numStates, null);
                    forwardRNN.computeHiddenLayer(state, true);

                    //Copy the shared output layer to the forward network's output
                    forwardRNN.OutputLayer = seqOutput[curState];
                    forwardRNN.ComputeHiddenLayerErr();

                    //Update net weights
                    forwardRNN.LearnNet(state, numStates, curState);
                }
            },
                            () =>
            {
                //Learn backward network, processing the sequence in reverse order
                for (int curState = 0; curState < numStates; curState++)
                {
                    int curState2 = numStates - 1 - curState;
                    State state2  = pSequence.States[curState2];

                    backwardRNN.SetInputLayer(state2, curState2, numStates, null, false);
                    backwardRNN.computeHiddenLayer(state2, true);

                    //Copy the shared output layer to the backward network's output
                    backwardRNN.OutputLayer = seqOutput[curState2];
                    backwardRNN.ComputeHiddenLayerErr();

                    //Update net weights
                    backwardRNN.LearnNet(state2, numStates, curState);
                }
            });
        }
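
The heart of the first parallel task is its SIMD inner loop: for each output cell's error term it computes the gradient across a lane of hidden-to-output weights, clips it, derives a per-weight adaptive learning rate, and applies the update, with a scalar loop covering the leftover columns. NormalizeGradient, ComputeLearningRate, and UpdateLearningRate are defined elsewhere in RNNSharp; the self-contained sketch below substitutes assumed AdaGrad-style stand-ins (a fixed clipping cutoff and a base rate scaled by accumulated squared gradients) to illustrate the same System.Numerics.Vector<double> pattern in isolation.

using System;
using System.Numerics;

class VectorizedUpdateSketch
{
    //Assumed stand-ins for RNNSharp's NormalizeGradient/ComputeLearningRate:
    //clip the gradient to a fixed range, then scale a base rate by the inverse
    //square root of the accumulated squared gradients (AdaGrad-style).
    const double GradientCutoff = 15.0;
    const double BaseLearningRate = 0.1;

    static Vector<double> NormalizeGradient(Vector<double> g)
    {
        return Vector.Min(new Vector<double>(GradientCutoff),
                          Vector.Max(new Vector<double>(-GradientCutoff), g));
    }

    static Vector<double> ComputeLearningRate(Vector<double> delta, ref Vector<double> accumulated)
    {
        accumulated += delta * delta; //accumulate squared gradients per weight
        return new Vector<double>(BaseLearningRate) /
               (Vector<double>.One + Vector.SquareRoot(accumulated));
    }

    static void Main()
    {
        int width = 10;               //one row of the hidden-to-output matrix
        double er = 0.5;              //error of one output cell
        var rnd = new Random(42);
        double[] hidden  = new double[width]; //merged hidden activations
        double[] weights = new double[width]; //the weight row being updated
        double[] lr      = new double[width]; //per-weight rate accumulators
        for (int j = 0; j < width; j++) hidden[j] = rnd.NextDouble() - 0.5;

        var vecErr = new Vector<double>(er);
        int k = 0;
        //Vectorized body: update Vector<double>.Count weights per iteration
        while (k <= width - Vector<double>.Count)
        {
            Vector<double> vecDelta = new Vector<double>(hidden, k) * vecErr;
            vecDelta = NormalizeGradient(vecDelta);

            var vecAcc  = new Vector<double>(lr, k);
            var vecRate = ComputeLearningRate(vecDelta, ref vecAcc);
            vecAcc.CopyTo(lr, k);

            var vecW = new Vector<double>(weights, k) + vecRate * vecDelta;
            vecW.CopyTo(weights, k);

            k += Vector<double>.Count;
        }
        //Scalar tail for the columns that do not fill a full vector
        for (; k < width; k++)
        {
            double delta = Math.Max(-GradientCutoff, Math.Min(GradientCutoff, hidden[k] * er));
            lr[k] += delta * delta;
            weights[k] += BaseLearningRate / (1.0 + Math.Sqrt(lr[k])) * delta;
        }

        Console.WriteLine(string.Join(", ", weights));
    }
}

On a typical AVX2 machine Vector<double>.Count is 4, so the vectorized body updates four weights per iteration and the scalar tail handles at most three leftovers; LearnTwoRNN above uses the same split between a vector body and a scalar remainder.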