Example #1
 public void matrixXvectorADD(LSTMCell[] dest, neuron[] srcvec, LSTMWeight[][] srcmatrix, int from, int to, int from2, int to2)
 {
     // Accumulate each source activation, weighted by its input-gate weight, into the destination cells' net input
     Parallel.For(0, (to - from), parallelOption, i =>
     {
         for (int j = 0; j < to2 - from2; j++)
         {
             dest[i + from].netIn += srcvec[j + from2].ac * srcmatrix[i][j].wInputInputGate;
         }
     });
 }
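For each destination cell i, this accumulates netIn[i + from] += sum over j of srcvec[j + from2].ac * srcmatrix[i][j].wInputInputGate; each row is independent, which is why parallelizing over i is safe. The LSTMWeight type itself is not shown in these examples. A hypothetical declaration consistent with the one field accessed here (every member other than wInputInputGate is an assumption, named by analogy):

 // Hypothetical shape of LSTMWeight, inferred from the usage above.
 // Only wInputInputGate appears in these examples; the remaining gate
 // weights are assumptions for illustration.
 public struct LSTMWeight
 {
     public float wInputInputGate;   // input -> input gate (used above)
     public float wInputForgetGate;  // assumed: input -> forget gate
     public float wInputCell;        // assumed: input -> cell candidate
     public float wInputOutputGate;  // assumed: input -> output gate
 }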
Example #2
 public void matrixXvectorADD(neuron[] dest, LSTMCell[] srcvec, Matrix srcmatrix, int from, int to, int from2, int to2)
 {
     // Accumulate each source cell's output, weighted by the transposed matrix entry, into the destination activations
     Parallel.For(0, (to - from), parallelOption, i =>
     {
         for (int j = 0; j < to2 - from2; j++)
         {
             dest[i + from].ac += srcvec[j + from2].cellOutput * srcmatrix[j][i];
         }
     });
 }
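This overload differs from Example #1 in two ways: the source values are LSTM cell outputs (srcvec[j].cellOutput), and the matrix is indexed srcmatrix[j][i], i.e. rows correspond to source cells and columns to destination neurons, so the loop computes dest += transpose(srcmatrix) * srcvec. A hypothetical call projecting L1 hidden cell outputs onto L2 output neurons (hiddenToOutput is an illustrative name, assumed to be an L1 x L2 Matrix; it is not taken from these examples):

 // Hypothetical usage: project L1 hidden LSTM outputs onto L2 output
 // neurons. hiddenToOutput is assumed to store source cells as rows and
 // destination neurons as columns, matching the [j][i] access above.
 matrixXvectorADD(OutputLayer, neuHidden, hiddenToOutput, 0, L2, 0, L1);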
Example #3
 private void CreateCells()
 {
     neuFeatures = new SingleVector(DenseFeatureSize);
     OutputLayer = new neuron[L2];
     neuHidden = new neuron[L1];
 }
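Note that Example #4 writes fields such as OutputLayer[a].cellOutput immediately after allocating the array, without constructing individual elements, which only works if neuron is a value type. A hypothetical declaration consistent with the fields these examples touch (a sketch, not the original definition):

 // Hypothetical neuron struct, inferred from the fields used across
 // these examples; the original declaration is not shown here.
 public struct neuron
 {
     public double ac;         // activation, read as input in Example #1
     public double cellOutput; // output value, set and read in Examples #4-#6
     public double er;         // error term, used during learning
 }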
Example #4
        private void CreateCell(BinaryReader br)
        {
            neuFeatures = new SingleVector(DenseFeatureSize);
            OutputLayer = new neuron[L2];

            for (int a = 0; a < L2; a++)
            {
                OutputLayer[a].cellOutput = 0;
                OutputLayer[a].er = 0;
            }

            neuHidden = new LSTMCell[L1];
            for (int i = 0; i < L1; i++)
            {
                neuHidden[i] = new LSTMCell();
                LSTMCellInit(neuHidden[i]);
            }

            if (br != null)
            {
                //Load weights from the input stream
                for (int i = 0; i < L1; i++)
                {
                    neuHidden[i].wCellIn = br.ReadSingle();
                    neuHidden[i].wCellForget = br.ReadSingle();
                    neuHidden[i].wCellOut = br.ReadSingle();
                }
            }
            else
            {
                //Initialize weights with random values
                for (int i = 0; i < L1; i++)
                {
                    //peephole weights from the cell state to the input, forget and output gates
                    neuHidden[i].wCellIn = RandInitWeight();
                    neuHidden[i].wCellForget = RandInitWeight();
                    neuHidden[i].wCellOut = RandInitWeight();
                }
            }
        }
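RandInitWeight is not shown in these examples. A common choice for scalar gate weights like these is a small symmetric uniform draw; a minimal sketch under that assumption (the [-0.1, 0.1) range and the shared Random instance are illustrative, not taken from the source):

        // Hypothetical RandInitWeight: small symmetric uniform initialization.
        private static readonly Random rnd = new Random();

        private float RandInitWeight()
        {
            return (float)(rnd.NextDouble() * 0.2 - 0.1); // uniform in [-0.1, 0.1)
        }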
Example #5
        private void LearnTwoRNN(Sequence pSequence, Matrix<neuron> mergedHiddenLayer, neuron[][] seqOutput)
        {
            netReset(true);

            int numStates = pSequence.States.Length;
            forwardRNN.Hidden2OutputWeight = Hidden2OutputWeight.CopyTo();
            backwardRNN.Hidden2OutputWeight = Hidden2OutputWeight.CopyTo();

            Parallel.Invoke(() =>
                {
                    for (int curState = 0; curState < numStates; curState++)
                    {
                        for (int i = 0; i < Hidden2OutputWeight.GetHeight(); i++)
                        {
                            //update weights for the hidden-to-output layer
                            for (int k = 0; k < Hidden2OutputWeight.GetWidth(); k++)
                            {
                                Hidden2OutputWeight[i][k] += LearningRate * mergedHiddenLayer[curState][k].cellOutput * seqOutput[curState][i].er;
                            }
                        }
                    }

                },
                () =>
            {
                //Learn forward network
                for (int curState = 0; curState < numStates; curState++)
                {
                    // error propagation
                    State state = pSequence.States[curState];
                    forwardRNN.setInputLayer(state, curState, numStates, null);
                    forwardRNN.computeNet(state, null);      //compute probability distribution

                    //Copy the shared output result into the forward network's output layer
                    forwardRNN.OutputLayer = seqOutput[curState];

                    forwardRNN.learnNet(state, curState, true);
                    forwardRNN.LearnBackTime(state, numStates, curState);
                }
            },
            () =>
            {
                //Learn backward network (iterate states in reverse)
                for (int curState = 0; curState < numStates; curState++)
                {
                    int curState2 = numStates - 1 - curState;

                    // error propagation
                    State state2 = pSequence.States[curState2];
                    backwardRNN.setInputLayer(state2, curState2, numStates, null, false);
                    backwardRNN.computeNet(state2, null);      //compute probability distribution

                    //Copy the shared output result into the backward network's output layer
                    backwardRNN.OutputLayer = seqOutput[curState2];

                    backwardRNN.learnNet(state2, curState2, true);
                    backwardRNN.LearnBackTime(state2, numStates, curState2);
                }
            });
        }
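The three tasks run concurrently: the first updates the shared Hidden2OutputWeight matrix, while the forward and backward learners read the snapshots copied into their own Hidden2OutputWeight fields before Parallel.Invoke, so the concurrent update does not race with their reads. Both sub-networks reuse seqOutput, whose error terms (er) are assumed to have been filled in by the caller after decoding. A hypothetical per-sequence training driver showing how this method pairs with InnerDecode from Example #6 (trainingSet and the error computation are illustrative):

        // Hypothetical driver: decode each sequence, fill in output errors,
        // then learn from it. trainingSet is an illustrative name.
        foreach (Sequence seq in trainingSet)
        {
            Matrix<neuron> mergedHiddenLayer;
            Matrix<double> rawOutputLayer;
            neuron[][] seqOutput = InnerDecode(seq, out mergedHiddenLayer, out rawOutputLayer);

            // ...set seqOutput[t][i].er from the gold labels here (not shown)...

            LearnTwoRNN(seq, mergedHiddenLayer, seqOutput);
        }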
Example #6
        public neuron[][] InnerDecode(Sequence pSequence, out Matrix<neuron> outputHiddenLayer, out Matrix<double> rawOutputLayer)
        {
            int numStates = pSequence.States.Length;
            Matrix<double> mForward = null;
            Matrix<double> mBackward = null;

            //Reset the network
            netReset(false);

            Parallel.Invoke(() =>
            {
                //Computing forward RNN
                mForward = new Matrix<double>(numStates, forwardRNN.L1);
                for (int curState = 0; curState < numStates; curState++)
                {
                    State state = pSequence.States[curState];
                    forwardRNN.setInputLayer(state, curState, numStates, null);
                    forwardRNN.computeNet(state, null);      //compute probability distribution

                    forwardRNN.GetHiddenLayer(mForward, curState);
                }
            },
             () =>
             {
                 //Computing backward RNN
                 mBackward = new Matrix<double>(numStates, backwardRNN.L1);
                 for (int curState = numStates - 1; curState >= 0; curState--)
                 {
                     State state = pSequence.States[curState];
                     backwardRNN.setInputLayer(state, curState, numStates, null, false);
                     backwardRNN.computeNet(state, null);      //compute probability distribution

                     backwardRNN.GetHiddenLayer(mBackward, curState);
                 }
             });

            //Merge forward and backward hidden layers by summing their activations
            Matrix<neuron> mergedHiddenLayer = new Matrix<neuron>(numStates, forwardRNN.L1);
            Parallel.For(0, numStates, parallelOption, curState =>
            {
                for (int i = 0; i < forwardRNN.L1; i++)
                {
                    mergedHiddenLayer[curState][i].cellOutput = mForward[curState][i] + mBackward[curState][i];
                }
            });

            //Calculate output layer
            Matrix<double> tmp_rawOutputLayer = new Matrix<double>(numStates, L2);
            neuron[][] seqOutput = new neuron[numStates][];
            Parallel.For(0, numStates, parallelOption, curState =>
            {
                seqOutput[curState] = new neuron[L2];
                matrixXvectorADD(seqOutput[curState], mergedHiddenLayer[curState], Hidden2OutputWeight, 0, L2, 0, L1, 0);

                for (int i = 0; i < L2; i++)
                {
                    tmp_rawOutputLayer[curState][i] = seqOutput[curState][i].cellOutput;
                }

                //Apply softmax activation to the output layer
                SoftmaxLayer(seqOutput[curState]);
            });

            outputHiddenLayer = mergedHiddenLayer;
            rawOutputLayer = tmp_rawOutputLayer;

            return seqOutput;
        }
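SoftmaxLayer is also not shown in these examples. A numerically stable in-place softmax over the cellOutput fields would look like the sketch below; whether the original subtracts the maximum before exponentiating is an assumption:

        // Hypothetical SoftmaxLayer: in-place, numerically stable softmax
        // over cellOutput. The original implementation is not shown here.
        private void SoftmaxLayer(neuron[] layer)
        {
            double max = double.MinValue;
            for (int i = 0; i < layer.Length; i++)
            {
                if (layer[i].cellOutput > max) max = layer[i].cellOutput;
            }

            double sum = 0;
            for (int i = 0; i < layer.Length; i++)
            {
                layer[i].cellOutput = Math.Exp(layer[i].cellOutput - max);
                sum += layer[i].cellOutput;
            }

            for (int i = 0; i < layer.Length; i++)
            {
                layer[i].cellOutput /= sum;
            }
        }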