/// <summary>
/// Primes the search for a new utterance: resets the per-utterance counters,
/// fetches the initial state from the linguist's search graph, seeds the
/// emitting active list with a single token, and grows the initial
/// (emitting and non-emitting) branches.
/// </summary>
protected void localStart()
{
    ISearchGraph graph = linguist.getSearchGraph();

    // Reset per-utterance bookkeeping.
    currentFrameNumber = 0;
    curTokensScored.value = 0;

    // The active-list manager needs to know how many state orders the
    // graph distinguishes before any tokens are added.
    numStateOrder = graph.getNumStateOrder();
    activeListManager.setNumStateOrder(numStateOrder);

    if (buildWordLattice)
    {
        loserManager = new AlternateHypothesisManager(maxLatticeEdges);
    }

    // Seed the emitting list with a token for the graph's entry state.
    ISearchState initialState = graph.getInitialState();
    activeList = activeListManager.getEmittingList();
    activeList.add(new Token(initialState, currentFrameNumber));

    clearCollectors();

    growBranches();
    growNonEmittingBranches();
    // tokenTracker.setEnabled(false);
    // tokenTracker.startUtterance();
}
/// <summary>
/// Adds the given token to the active list responsible for its search-state type.
/// </summary>
/// <param name="token">the token to add</param>
/// <exception cref="Exception">
/// if no active list can be found for the token's search-state class
/// </exception>
public override void add(Token token)
{
    var targetList = findListFor(token);
    if (targetList == null)
    {
        throw new Exception("Cannot find ActiveList for " + token.getSearchState().GetType().Name);
    }
    targetList.add(token);
}
/// <summary>
/// Gets the initial grammar node from the linguist and creates a GrammarNodeToken.
/// </summary>
protected void localStart()
{
    // Fresh utterance: reset counters before seeding the search.
    currentFrameNumber = 0;
    curTokensScored.value = 0;

    // Build a brand-new active list containing a single token for the
    // search graph's entry state, then install it and expand from there.
    var initialList = activeListFactory.newInstance();
    var initialState = linguist.getSearchGraph().getInitialState();
    initialList.add(new Token(initialState, currentFrameNumber));
    activeList = initialList;

    growBranches();
}
/// <summary>
/// Collects the next set of emitting tokens from a token and accumulates them
/// in the active or result lists. Non-emitting successors are expanded
/// recursively within the current frame; emitting successors compete via
/// Viterbi scoring for the best token per search state.
/// </summary>
/// <param name="token">the token to collect successors from</param>
protected void collectSuccessorTokens(Token token)
{
    ISearchState state = token.getSearchState();
    // If this is a final state, add it to the final list
    if (token.isFinal())
    {
        resultList.Add(token);
    }
    // Beam pruning: drop tokens that fall below the global threshold.
    if (token.getScore() < threshold)
    {
        return;
    }
    // Word states are additionally pruned against the (tighter) word beam.
    if (state is IWordSearchState && token.getScore() < wordThreshold)
    {
        return;
    }
    ISearchStateArc[] arcs = state.getSuccessors();
    // For each successor
    // calculate the entry score for the token based upon the
    // predecessor token score and the transition probabilities
    // if the score is better than the best score encountered for
    // the SearchState and frame then create a new token, add
    // it to the lattice and the SearchState.
    // If the token is an emitting token add it to the list,
    // otherwise recursively collect the new tokens successors.
    foreach (ISearchStateArc arc in arcs)
    {
        ISearchState nextState = arc.getState();
        // We're actually multiplying the variables, but since
        // these come in log(), multiply gets converted to add
        float logEntryScore = token.getScore() + arc.getProbability();
        if (wantEntryPruning) // false by default
        {
            if (logEntryScore < threshold)
            {
                continue;
            }
            if (nextState is IWordSearchState && logEntryScore < wordThreshold)
            {
                continue;
            }
        }
        Token predecessor = getResultListPredecessor(token);
        // if not emitting, check to see if we've already visited
        // this state during this frame. Expand the token only if we
        // haven't visited it already. This prevents the search
        // from getting stuck in a loop of states with no
        // intervening emitting nodes. This can happen with nasty
        // jsgf grammars such as ((foo*)*)*
        if (!nextState.isEmitting())
        {
            Token newToken = new Token(predecessor, nextState, logEntryScore, arc.getInsertionProbability(), arc.getLanguageProbability(), currentFrameNumber);
            tokensCreated.value++;
            if (!isVisited(newToken))
            {
                collectSuccessorTokens(newToken);
            }
            continue;
        }
        // Emitting successor: Viterbi competition for the best token
        // reaching nextState in this frame.
        Token bestToken = getBestToken(nextState);
        if (bestToken == null)
        {
            // First token to reach this state this frame — record it and
            // schedule it for scoring in the next frame.
            Token newToken = new Token(predecessor, nextState, logEntryScore, arc.getInsertionProbability(), arc.getLanguageProbability(), currentFrameNumber);
            tokensCreated.value++;
            setBestToken(newToken, nextState);
            activeList.add(newToken);
        }
        else
        {
            if (bestToken.getScore() <= logEntryScore)
            {
                // New path is at least as good: overwrite the existing best
                // token in place (it is already in the active list, so no
                // re-insertion is needed).
                bestToken.update(predecessor as Token, nextState, logEntryScore, arc.getInsertionProbability(), arc.getLanguageProbability(), currentFrameNumber);
                viterbiPruned.value++;
            }
            else
            {
                // Existing path wins; the new path is discarded.
                viterbiPruned.value++;
            }
        }
        // NOTE(review): viterbiPruned is incremented on both outcomes of the
        // competition above — it appears to count "a merge happened at this
        // state", not "the new path lost". Confirm against the statistics
        // consumers before relying on its meaning.
        // Commented-out alternative implementation (replace-based instead of
        // update-in-place), kept from the upstream port for reference:
        //Token bestToken = getBestToken(nextState);
        //Boolean firstToken = bestToken == null;
        //if (firstToken || bestToken.getScore() <= logEntryScore) {
        //    Token newToken = new Token(predecessor, nextState, logEntryScore,
        //        arc.getInsertionProbability(),
        //        arc.getLanguageProbability(),
        //        currentFrameNumber);
        //    tokensCreated.value++;
        //    setBestToken(newToken, nextState);
        //    if (!newToken.isEmitting()) {
        //        if (!isVisited(newToken)) {
        //            collectSuccessorTokens(newToken);
        //        }
        //    } else {
        //        if (firstToken) {
        //            activeList.add(newToken);
        //        } else {
        //            activeList.replace(bestToken, newToken);
        //            viterbiPruned.value++;
        //        }
        //    }
        //} else {
        //    viterbiPruned.value++;
        //}
    }
}