public override Matrix InnerDecode(Sequence pSequence)
{
    //Reset the network
    netReset();

    int numStates = pSequence.GetSize();
    predicted_fnn = new int[numStates];
    predicted_bnn = new int[numStates];
    Matrix mForward = new Matrix(numStates, forwardRNN.L2);
    Matrix mBackward = new Matrix(numStates, backwardRNN.L2);

    Parallel.Invoke(() =>
    {
        //Computing forward RNN
        for (int curState = 0; curState < numStates; curState++)
        {
            State state = pSequence.Get(curState);
            forwardRNN.setInputLayer(state, curState, numStates, predicted_fnn);
            forwardRNN.computeNet(state, mForward[curState]); //compute probability distribution
            predicted_fnn[curState] = forwardRNN.GetBestOutputIndex();

            forwardRNN.copyHiddenLayerToInput();
        }
    },
    () =>
    {
        //Computing backward RNN
        for (int curState = numStates - 1; curState >= 0; curState--)
        {
            State state = pSequence.Get(curState);
            backwardRNN.setInputLayer(state, curState, numStates, predicted_bnn, false);
            backwardRNN.computeNet(state, mBackward[curState]); //compute probability distribution
            predicted_bnn[curState] = backwardRNN.GetBestOutputIndex();

            backwardRNN.copyHiddenLayerToInput();
        }
    });

    //Merge forward and backward scores by summing them per output unit
    Matrix m = new Matrix(numStates, forwardRNN.L2);
    for (int curState = 0; curState < numStates; curState++)
    {
        for (int i = 0; i < forwardRNN.L2; i++)
        {
            m[curState][i] = mForward[curState][i] + mBackward[curState][i];
        }
    }

    return m;
}
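//Illustrative sketch (not part of the original code): one way to turn the merged
//score matrix from InnerDecode into per-token label predictions by taking the
//argmax over output units at each state. Member names (forwardRNN.L2, Matrix row
//indexing) follow the usage above; the helper itself is hypothetical.
public int[] DecodeToLabelsSketch(Sequence pSequence)
{
    Matrix m = InnerDecode(pSequence);
    int numStates = pSequence.GetSize();
    int[] labels = new int[numStates];
    for (int curState = 0; curState < numStates; curState++)
    {
        int best = 0;
        for (int i = 1; i < forwardRNN.L2; i++)
        {
            //Higher merged score (forward + backward) wins
            if (m[curState][i] > m[curState][best]) best = i;
        }
        labels[curState] = best;
    }
    return labels;
}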
public override int[] learnSentenceForRNNCRF(Sequence pSequence)
{
    int numStates = pSequence.GetSize();

    //Predict output
    Matrix m = InnerDecode(pSequence);
    ForwardBackward(numStates, m);

    //Get the best result
    int[] predicted = new int[numStates];
    for (int i = 0; i < numStates; i++)
    {
        State state = pSequence.Get(i);
        logp += Math.Log10(m_Diff[i][state.GetLabel()]);
        counter++;

        predicted[i] = GetBestZIndex(i);
    }

    UpdateBigramTransition(pSequence);

    //Reset the network and share the error matrix with both sub-networks
    netReset();
    forwardRNN.m_Diff = m_Diff;
    backwardRNN.m_Diff = m_Diff;

    double[] output_fnn = new double[L2];
    double[] output_bnn = new double[L2];

    Parallel.Invoke(() =>
    {
        //Learn forward network
        for (int curState = 0; curState < numStates; curState++)
        {
            //Error propagation
            State state = pSequence.Get(curState);
            forwardRNN.setInputLayer(state, curState, numStates, predicted_fnn);
            forwardRNN.computeNet(state, output_fnn); //compute probability distribution

            forwardRNN.learnNet(state, curState);
            forwardRNN.LearnBackTime(state, numStates, curState);
            forwardRNN.copyHiddenLayerToInput();
        }
    },
    () =>
    {
        //Learn backward network
        for (int curState = numStates - 1; curState >= 0; curState--)
        {
            //Error propagation
            State state = pSequence.Get(curState);
            backwardRNN.setInputLayer(state, curState, numStates, predicted_bnn, false);
            backwardRNN.computeNet(state, output_bnn); //compute probability distribution

            backwardRNN.learnNet(state, curState);
            backwardRNN.LearnBackTime(state, numStates, curState);
            backwardRNN.copyHiddenLayerToInput();
        }
    });

    return predicted;
}
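//Illustrative sketch (not part of the original code): how a training epoch might
//drive learnSentenceForRNNCRF over a corpus and report the average per-token
//log10 probability. The List<Sequence> container and the use of logp/counter as
//running accumulators are assumptions based on how the method updates them above.
public void TrainEpochSketch(List<Sequence> corpus)
{
    logp = 0;
    counter = 0;
    foreach (Sequence sequence in corpus)
    {
        //Each call runs decode, forward-backward, transition update and BPTT
        learnSentenceForRNNCRF(sequence);
    }
    //Average per-token log10 probability over the epoch
    double avgLogp = counter > 0 ? logp / counter : 0.0;
    Console.WriteLine("Average log10 prob per token: {0}", avgLogp);
}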
public override int[] PredictSentence(Sequence pSequence)
{
    int numStates = pSequence.GetSize();
    int[] predicted = new int[numStates];

    //Predict output
    Matrix m = InnerDecode(pSequence);

    //Apply softmax on the merged forward and backward scores
    for (int curState = 0; curState < numStates; curState++)
    {
        State state = pSequence.Get(curState);

        //sum is used for normalization: it's better to have larger precision as many numbers are summed together here
        double sum = 0;
        for (int c = 0; c < forwardRNN.L2; c++)
        {
            //Clip values into [-50, 50] before exponentiation for numerical stability
            if (m[curState][c] > 50) m[curState][c] = 50;
            if (m[curState][c] < -50) m[curState][c] = -50;

            double val = Math.Exp(m[curState][c]);
            sum += val;
            m[curState][c] = val;
        }

        for (int c = 0; c < forwardRNN.L2; c++)
        {
            m[curState][c] /= sum;
        }

        logp += Math.Log10(m[curState][state.GetLabel()]);
        counter++;

        predicted[curState] = GetBestOutputIndex(m, curState);
    }

    //Reset the network before learning
    netReset();
    double[] output = new double[L2];

    //Learn forward network
    for (int curState = 0; curState < numStates; curState++)
    {
        //Error propagation
        State state = pSequence.Get(curState);
        forwardRNN.setInputLayer(state, curState, numStates, predicted_fnn);
        forwardRNN.computeNet(state, output); //compute probability distribution

        //Copy the merged result into the forward network's output layer
        for (int i = 0; i < forwardRNN.L2; i++)
        {
            forwardRNN.neuOutput[i].ac = m[curState][i];
        }

        forwardRNN.learnNet(state, curState);
        forwardRNN.LearnBackTime(state, numStates, curState);
        forwardRNN.copyHiddenLayerToInput();
    }

    //Learn backward network
    for (int curState = numStates - 1; curState >= 0; curState--)
    {
        //Error propagation
        State state = pSequence.Get(curState);
        backwardRNN.setInputLayer(state, curState, numStates, predicted_bnn, false);
        backwardRNN.computeNet(state, output); //compute probability distribution

        //Copy the merged result into the backward network's output layer
        for (int i = 0; i < backwardRNN.L2; i++)
        {
            backwardRNN.neuOutput[i].ac = m[curState][i];
        }

        backwardRNN.learnNet(state, curState);
        backwardRNN.LearnBackTime(state, numStates, curState);
        backwardRNN.copyHiddenLayerToInput();
    }

    return predicted;
}
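//Illustrative sketch (not part of the original code): the clipped softmax used in
//PredictSentence, factored into a standalone helper for clarity. The +/-50 clipping
//bounds Math.Exp to avoid overflow; the behavior mirrors the loop above, assuming
//a Matrix row can be passed as a double[].
private static void SoftmaxInPlaceSketch(double[] row)
{
    double sum = 0;
    for (int c = 0; c < row.Length; c++)
    {
        //Clip into [-50, 50] before exponentiation for numerical stability
        if (row[c] > 50) row[c] = 50;
        if (row[c] < -50) row[c] = -50;

        row[c] = Math.Exp(row[c]);
        sum += row[c];
    }
    for (int c = 0; c < row.Length; c++)
    {
        row[c] /= sum;
    }
}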
public Sequence ExtractFeatures(Sentence sentence)
{
    Sequence sequence = new Sequence();
    int n = sentence.GetTokenSize();
    List<string[]> features = sentence.GetFeatureSet();

    //For each token, extract its sparse and dense feature sets according to the configuration and training corpus
    sequence.SetSize(n);
    for (int i = 0; i < n; i++)
    {
        State state = sequence.Get(i);
        ExtractSparseFeature(i, n, features, state);

        var spDenseFeature = ExtractDenseFeature(i, n, features);
        state.SetDenseData(spDenseFeature);
    }

    return sequence;
}
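//Illustrative sketch (not part of the original code): the typical end-to-end path,
//turning a raw Sentence into features and then into predicted label indices per
//token. Note that PredictSentence as defined above also performs a learning pass,
//so this sketch reflects a training-time decode rather than pure inference.
public int[] LabelSentenceSketch(Sentence sentence)
{
    //Build sparse/dense features for every token
    Sequence sequence = ExtractFeatures(sentence);

    //Run the bi-directional RNN and pick the best label per token
    return PredictSentence(sequence);
}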