private void Train(IList<Pair<string, IFileFilter>> trainTreebankPath, Pair<string, IFileFilter> devTreebankPath, string serializedPath)
{
    log.Info("Training method: " + op.TrainOptions().trainingMethod);
    IList<Tree> binarizedTrees = Generics.NewArrayList();
    foreach (Pair<string, IFileFilter> treebank in trainTreebankPath)
    {
        Sharpen.Collections.AddAll(binarizedTrees, ReadBinarizedTreebank(treebank.First(), treebank.Second()));
    }
    int nThreads = op.trainOptions.trainingThreads;
    nThreads = nThreads <= 0 ? Runtime.GetRuntime().AvailableProcessors() : nThreads;
    Edu.Stanford.Nlp.Tagger.Common.Tagger tagger = null;
    if (op.testOptions.preTag)
    {
        Timing retagTimer = new Timing();
        tagger = Edu.Stanford.Nlp.Tagger.Common.Tagger.LoadModel(op.testOptions.taggerSerializedFile);
        RedoTags(binarizedTrees, tagger, nThreads);
        retagTimer.Done("Retagging");
    }
    ICollection<string> knownStates = FindKnownStates(binarizedTrees);
    ICollection<string> rootStates = FindRootStates(binarizedTrees);
    ICollection<string> rootOnlyStates = FindRootOnlyStates(binarizedTrees, rootStates);
    log.Info("Known states: " + knownStates);
    log.Info("States which occur at the root: " + rootStates);
    log.Info("States which only occur at the root: " + rootOnlyStates);
    Timing transitionTimer = new Timing();
    IList<IList<ITransition>> transitionLists = CreateTransitionSequence.CreateTransitionSequences(binarizedTrees, op.compoundUnaries, rootStates, rootOnlyStates);
    IIndex<ITransition> transitionIndex = new HashIndex<ITransition>();
    foreach (IList<ITransition> transitions in transitionLists)
    {
        transitionIndex.AddAll(transitions);
    }
    transitionTimer.Done("Converting trees into transition lists");
    log.Info("Number of transitions: " + transitionIndex.Size());
    Random random = new Random(op.trainOptions.randomSeed);
    Treebank devTreebank = null;
    if (devTreebankPath != null)
    {
        devTreebank = ReadTreebank(devTreebankPath.First(), devTreebankPath.Second());
    }
    PerceptronModel newModel = new PerceptronModel(this.op, transitionIndex, knownStates, rootStates, rootOnlyStates);
    newModel.TrainModel(serializedPath, tagger, random, binarizedTrees, transitionLists, devTreebank, nThreads);
    this.model = newModel;
}
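// For intuition: CreateTransitionSequences turns each binarized tree into the
// shift-reduce transition list that rebuilds it. A hypothetical fragment like
// (NP (DT the) (NN dog)) would map to roughly
//   Shift, Shift, BinaryTransition(NP)
// and the transitionIndex built above assigns each distinct transition an integer id.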
public static Edu.Stanford.Nlp.Tagger.Maxent.Distsim InitLexicon(string path)
{
    lock (lexiconMap)
    {
        Edu.Stanford.Nlp.Tagger.Maxent.Distsim lex = lexiconMap[path];
        if (lex == null)
        {
            Timing timer = new Timing();
            lex = new Edu.Stanford.Nlp.Tagger.Maxent.Distsim(path);
            lexiconMap[path] = lex;
            timer.Done(log, "Loading distsim lexicon from " + path);
        }
        return lex;
    }
}
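// Usage note: the map acts as a lock-guarded cache, so concurrent taggers that
// share a distsim path also share one loaded Distsim instance. A hypothetical call:
//   Distsim lex = Distsim.InitLexicon("/path/to/distsim/clusters");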
public static ParserGrammar LoadModel(string path, params string[] extraFlags)
{
    ParserGrammar parser;
    try
    {
        Timing timing = new Timing();
        parser = IOUtils.ReadObjectFromURLOrClasspathOrFileSystem(path);
        timing.Done(logger, "Loading parser from serialized file " + path);
    }
    catch (Exception e)
    {
        throw new RuntimeIOException(e);
    }
    if (extraFlags.Length > 0)
    {
        parser.SetOptionFlags(extraFlags);
    }
    return parser;
}
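// Usage sketch for LoadModel. Hedged: this assumes the converted API keeps the
// Java method names; the model path is the stock CoreNLP English PCFG model,
// shown for illustration, and Parse stands in for the ParserGrammar parse entry point.
ParserGrammar parser = ParserGrammar.LoadModel(
    "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz",  // resolved via URL, classpath, or file system
    "-maxLength", "80");                                     // extra flags forwarded to SetOptionFlags
Tree tree = parser.Parse(SentenceUtils.ToWordList("This", "is", "an", "example", "."));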
public virtual void ExecuteOneTrainingBatch(IList<Tree> trainingBatch, IdentityHashMap<Tree, byte[]> compressedParses, double[] sumGradSquare)
{
    Timing convertTiming = new Timing();
    convertTiming.Doing("Converting trees");
    IdentityHashMap<Tree, IList<Tree>> topParses = CacheParseHypotheses.ConvertToTrees(trainingBatch, compressedParses, op.trainOptions.trainingThreads);
    convertTiming.Done();
    DVParserCostAndGradient gcFunc = new DVParserCostAndGradient(trainingBatch, topParses, dvModel, op);
    double[] theta = dvModel.ParamsToVector();
    switch (Minimizer)
    {
        case 1:
        {
            // 1: QNMinimizer, 2: SGD
            //maxFuncIter = 10;
            QNMinimizer qn = new QNMinimizer(op.trainOptions.qnEstimates, true);
            qn.UseMinPackSearch();
            qn.UseDiagonalScaling();
            qn.TerminateOnAverageImprovement(true);
            qn.TerminateOnNumericalZero(true);
            qn.TerminateOnRelativeNorm(true);
            theta = qn.Minimize(gcFunc, op.trainOptions.qnTolerance, theta, op.trainOptions.qnIterationsPerBatch);
            break;
        }
        case 2:
        {
            //Minimizer smd = new SGDMinimizer(); double tol = 1e-4; theta = smd.minimize(gcFunc,tol,theta,op.trainOptions.qnIterationsPerBatch);
            double lastCost = 0;
            double currCost = 0;
            bool firstTime = true;
            for (int i = 0; i < op.trainOptions.qnIterationsPerBatch; i++)
            {
                //gcFunc.calculate(theta);
                double[] grad = gcFunc.DerivativeAt(theta);
                currCost = gcFunc.ValueAt(theta);
                log.Info("batch cost: " + currCost);
                // if(!firstTime){
                //   if(currCost > lastCost){
                //     System.out.println("HOW IS FUNCTION VALUE INCREASING????!!! ... still updating theta");
                //   }
                //   if(Math.abs(currCost - lastCost) < 0.0001){
                //     System.out.println("function value is not decreasing. stop");
                //   }
                // }else{
                //   firstTime = false;
                // }
                lastCost = currCost;
                // Vanilla SGD step: theta -= learningRate * grad
                ArrayMath.AddMultInPlace(theta, grad, -1 * op.trainOptions.learningRate);
            }
            break;
        }
        case 3:
        {
            // AdaGrad: per-parameter step sizes scaled by accumulated squared gradients
            double eps = 1e-3;
            double currCost = 0;
            for (int i = 0; i < op.trainOptions.qnIterationsPerBatch; i++)
            {
                double[] gradf = gcFunc.DerivativeAt(theta);
                currCost = gcFunc.ValueAt(theta);
                log.Info("batch cost: " + currCost);
                for (int feature = 0; feature < gradf.Length; feature++)
                {
                    sumGradSquare[feature] = sumGradSquare[feature] + gradf[feature] * gradf[feature];
                    theta[feature] = theta[feature] - (op.trainOptions.learningRate * gradf[feature] / (System.Math.Sqrt(sumGradSquare[feature]) + eps));
                }
            }
            break;
        }
        default:
        {
            throw new ArgumentException("Unsupported minimizer " + Minimizer);
        }
    }
    dvModel.VectorToParams(theta);
}
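// AdaGrad recap (a restatement of case 3 above, not new behavior): for each
// parameter f with gradient g_f,
//   sumGradSquare[f] += g_f * g_f
//   theta[f]         -= learningRate * g_f / (sqrt(sumGradSquare[f]) + eps)
// so parameters with a long history of large gradients take ever smaller steps,
// which suits the sparse updates typical of parsing features.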
private void TrainModel(string serializedPath, Edu.Stanford.Nlp.Tagger.Common.Tagger tagger, Random random, IList<Tree> binarizedTrees, IList<IList<ITransition>> transitionLists, Treebank devTreebank, int nThreads, ICollection<string> allowedFeatures)
{
    double bestScore = 0.0;
    int bestIteration = 0;
    PriorityQueue<ScoredObject<PerceptronModel>> bestModels = null;
    if (op.TrainOptions().averagedModels > 0)
    {
        bestModels = new PriorityQueue<ScoredObject<PerceptronModel>>(op.TrainOptions().averagedModels + 1, ScoredComparator.AscendingComparator);
    }
    IList<int> indices = Generics.NewArrayList();
    for (int i = 0; i < binarizedTrees.Count; ++i)
    {
        indices.Add(i);
    }
    Oracle oracle = null;
    if (op.TrainOptions().trainingMethod == ShiftReduceTrainOptions.TrainingMethod.Oracle)
    {
        oracle = new Oracle(binarizedTrees, op.compoundUnaries, rootStates);
    }
    IList<PerceptronModel.Update> updates = Generics.NewArrayList();
    MulticoreWrapper<int, Pair<int, int>> wrapper = null;
    if (nThreads != 1)
    {
        updates = Java.Util.Collections.SynchronizedList(updates);
        wrapper = new MulticoreWrapper<int, Pair<int, int>>(op.trainOptions.trainingThreads, new PerceptronModel.TrainTreeProcessor(this, binarizedTrees, transitionLists, updates, oracle));
    }
    IntCounter<string> featureFrequencies = null;
    if (op.TrainOptions().featureFrequencyCutoff > 1)
    {
        featureFrequencies = new IntCounter<string>();
    }
    for (int iteration = 1; iteration <= op.trainOptions.trainingIterations; ++iteration)
    {
        Timing trainingTimer = new Timing();
        int numCorrect = 0;
        int numWrong = 0;
        Java.Util.Collections.Shuffle(indices, random);
        for (int start = 0; start < indices.Count; start += op.trainOptions.batchSize)
        {
            int end = Math.Min(start + op.trainOptions.batchSize, indices.Count);
            Triple<IList<PerceptronModel.Update>, int, int> result = TrainBatch(indices.SubList(start, end), binarizedTrees, transitionLists, updates, oracle, wrapper);
            numCorrect += result.second;
            numWrong += result.third;
            foreach (PerceptronModel.Update update in result.first)
            {
                foreach (string feature in update.features)
                {
                    if (allowedFeatures != null && !allowedFeatures.Contains(feature))
                    {
                        continue;
                    }
                    Weight weights = featureWeights[feature];
                    if (weights == null)
                    {
                        weights = new Weight();
                        featureWeights[feature] = weights;
                    }
                    // Perceptron update: reward the gold transition, penalize the predicted one
                    weights.UpdateWeight(update.goldTransition, update.delta);
                    weights.UpdateWeight(update.predictedTransition, -update.delta);
                    if (featureFrequencies != null)
                    {
                        featureFrequencies.IncrementCount(feature, (update.goldTransition >= 0 && update.predictedTransition >= 0) ? 2 : 1);
                    }
                }
            }
            updates.Clear();
        }
        trainingTimer.Done("Iteration " + iteration);
        log.Info("While training, got " + numCorrect + " transitions correct and " + numWrong + " transitions wrong");
        OutputStats();
        double labelF1 = 0.0;
        if (devTreebank != null)
        {
            EvaluateTreebank evaluator = new EvaluateTreebank(op, null, new ShiftReduceParser(op, this), tagger);
            evaluator.TestOnTreebank(devTreebank);
            labelF1 = evaluator.GetLBScore();
            log.Info("Label F1 after " + iteration + " iterations: " + labelF1);
            if (labelF1 > bestScore)
            {
                log.Info("New best dev score (previous best " + bestScore + ")");
                bestScore = labelF1;
                bestIteration = iteration;
            }
            else
            {
                log.Info("Failed to improve for " + (iteration - bestIteration) + " iteration(s) on previous best score of " + bestScore);
                if (op.trainOptions.stalledIterationLimit > 0 && (iteration - bestIteration >= op.trainOptions.stalledIterationLimit))
                {
                    log.Info("Failed to improve for too long, stopping training");
                    break;
                }
            }
            log.Info();
            if (bestModels != null)
            {
                bestModels.Add(new ScoredObject<PerceptronModel>(new PerceptronModel(this), labelF1));
                if (bestModels.Count > op.TrainOptions().averagedModels)
                {
                    bestModels.Poll();
                }
            }
        }
        if (op.TrainOptions().saveIntermediateModels && serializedPath != null && op.trainOptions.debugOutputFrequency > 0)
        {
            string tempName = Sharpen.Runtime.Substring(serializedPath, 0, serializedPath.Length - 7) + "-" + Filename.Format(iteration) + "-" + Nf.Format(labelF1) + ".ser.gz";
            ShiftReduceParser temp = new ShiftReduceParser(op, this);
            temp.SaveModel(tempName);
        }
        // TODO: we could save a cutoff version of the model,
        // especially if we also get a dev set number for it, but that
        // might be overkill
        if (iteration % 10 == 0 && op.TrainOptions().decayLearningRate > 0.0)
        {
            learningRate *= op.TrainOptions().decayLearningRate;
        }
    }
    // end for iterations
    if (wrapper != null)
    {
        wrapper.Join();
    }
    if (bestModels != null)
    {
        if (op.TrainOptions().cvAveragedModels && devTreebank != null)
        {
            IList<ScoredObject<PerceptronModel>> models = Generics.NewArrayList();
            while (bestModels.Count > 0)
            {
                models.Add(bestModels.Poll());
            }
            Java.Util.Collections.Reverse(models);
            double bestF1 = 0.0;
            int bestSize = 0;
            for (int i_1 = 1; i_1 <= models.Count; ++i_1)
            {
                log.Info("Testing with " + i_1 + " models averaged together");
                // TODO: this is kind of ugly, would prefer a separate object
                AverageScoredModels(models.SubList(0, i_1));
                ShiftReduceParser temp = new ShiftReduceParser(op, this);
                EvaluateTreebank evaluator = new EvaluateTreebank(temp.GetOp(), null, temp, tagger);
                evaluator.TestOnTreebank(devTreebank);
                double labelF1 = evaluator.GetLBScore();
                log.Info("Label F1 for " + i_1 + " models: " + labelF1);
                if (labelF1 > bestF1)
                {
                    bestF1 = labelF1;
                    bestSize = i_1;
                }
            }
            AverageScoredModels(models.SubList(0, bestSize));
        }
        else
        {
            AverageScoredModels(bestModels);
        }
    }
    // TODO: perhaps we should filter the features and then get dev
    // set scores. That way we can merge the models which are best
    // after filtering.
    if (featureFrequencies != null)
    {
        FilterFeatures(featureFrequencies.KeysAbove(op.TrainOptions().featureFrequencyCutoff));
    }
    CondenseFeatures();
}
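// Minimal sketch of the per-feature perceptron update performed in the batch loop
// above. All names are illustrative, not the real API (the real Weight class stores
// per-transition scores internally); requires System.Collections.Generic.
static void ApplyUpdate(IDictionary<string, double[]> featureWeights, string feature,
                        int goldTransition, int predictedTransition, float delta,
                        int numTransitions)
{
    double[] w;
    if (!featureWeights.TryGetValue(feature, out w))
    {
        w = new double[numTransitions];    // zero-initialized, one score per transition
        featureWeights[feature] = w;
    }
    if (goldTransition >= 0)
    {
        w[goldTransition] += delta;        // raise the gold transition's score
    }
    if (predictedTransition >= 0)
    {
        w[predictedTransition] -= delta;   // lower the mispredicted transition's score
    }
}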
// fill value & derivative protected internal override void Calculate(double[] theta) { dvModel.VectorToParams(theta); double localValue = 0.0; double[] localDerivative = new double[theta.Length]; TwoDimensionalMap <string, string, SimpleMatrix> binaryW_dfsG; TwoDimensionalMap <string, string, SimpleMatrix> binaryW_dfsB; binaryW_dfsG = TwoDimensionalMap.TreeMap(); binaryW_dfsB = TwoDimensionalMap.TreeMap(); TwoDimensionalMap <string, string, SimpleMatrix> binaryScoreDerivativesG; TwoDimensionalMap <string, string, SimpleMatrix> binaryScoreDerivativesB; binaryScoreDerivativesG = TwoDimensionalMap.TreeMap(); binaryScoreDerivativesB = TwoDimensionalMap.TreeMap(); IDictionary <string, SimpleMatrix> unaryW_dfsG; IDictionary <string, SimpleMatrix> unaryW_dfsB; unaryW_dfsG = new SortedDictionary <string, SimpleMatrix>(); unaryW_dfsB = new SortedDictionary <string, SimpleMatrix>(); IDictionary <string, SimpleMatrix> unaryScoreDerivativesG; IDictionary <string, SimpleMatrix> unaryScoreDerivativesB; unaryScoreDerivativesG = new SortedDictionary <string, SimpleMatrix>(); unaryScoreDerivativesB = new SortedDictionary <string, SimpleMatrix>(); IDictionary <string, SimpleMatrix> wordVectorDerivativesG = new SortedDictionary <string, SimpleMatrix>(); IDictionary <string, SimpleMatrix> wordVectorDerivativesB = new SortedDictionary <string, SimpleMatrix>(); foreach (TwoDimensionalMap.Entry <string, string, SimpleMatrix> entry in dvModel.binaryTransform) { int numRows = entry.GetValue().NumRows(); int numCols = entry.GetValue().NumCols(); binaryW_dfsG.Put(entry.GetFirstKey(), entry.GetSecondKey(), new SimpleMatrix(numRows, numCols)); binaryW_dfsB.Put(entry.GetFirstKey(), entry.GetSecondKey(), new SimpleMatrix(numRows, numCols)); binaryScoreDerivativesG.Put(entry.GetFirstKey(), entry.GetSecondKey(), new SimpleMatrix(1, numRows)); binaryScoreDerivativesB.Put(entry.GetFirstKey(), entry.GetSecondKey(), new SimpleMatrix(1, numRows)); } foreach (KeyValuePair <string, SimpleMatrix> entry_1 in dvModel.unaryTransform) { int numRows = entry_1.Value.NumRows(); int numCols = entry_1.Value.NumCols(); unaryW_dfsG[entry_1.Key] = new SimpleMatrix(numRows, numCols); unaryW_dfsB[entry_1.Key] = new SimpleMatrix(numRows, numCols); unaryScoreDerivativesG[entry_1.Key] = new SimpleMatrix(1, numRows); unaryScoreDerivativesB[entry_1.Key] = new SimpleMatrix(1, numRows); } if (op.trainOptions.trainWordVectors) { foreach (KeyValuePair <string, SimpleMatrix> entry_2 in dvModel.wordVectors) { int numRows = entry_2.Value.NumRows(); int numCols = entry_2.Value.NumCols(); wordVectorDerivativesG[entry_2.Key] = new SimpleMatrix(numRows, numCols); wordVectorDerivativesB[entry_2.Key] = new SimpleMatrix(numRows, numCols); } } // Some optimization methods prints out a line without an end, so our // debugging statements are misaligned Timing scoreTiming = new Timing(); scoreTiming.Doing("Scoring trees"); int treeNum = 0; MulticoreWrapper <Tree, Pair <DeepTree, DeepTree> > wrapper = new MulticoreWrapper <Tree, Pair <DeepTree, DeepTree> >(op.trainOptions.trainingThreads, new DVParserCostAndGradient.ScoringProcessor(this)); foreach (Tree tree in trainingBatch) { wrapper.Put(tree); } wrapper.Join(); scoreTiming.Done(); while (wrapper.Peek()) { Pair <DeepTree, DeepTree> result = wrapper.Poll(); DeepTree goldTree = result.first; DeepTree bestTree = result.second; StringBuilder treeDebugLine = new StringBuilder(); Formatter formatter = new Formatter(treeDebugLine); bool isDone = (Math.Abs(bestTree.GetScore() - goldTree.GetScore()) <= 0.00001 || 
goldTree.GetScore() > bestTree.GetScore()); string done = isDone ? "done" : string.Empty; formatter.Format("Tree %6d Highest tree: %12.4f Correct tree: %12.4f %s", treeNum, bestTree.GetScore(), goldTree.GetScore(), done); log.Info(treeDebugLine.ToString()); if (!isDone) { // if the gold tree is better than the best hypothesis tree by // a large enough margin, then the score difference will be 0 // and we ignore the tree double valueDelta = bestTree.GetScore() - goldTree.GetScore(); //double valueDelta = Math.max(0.0, - scoreGold + bestScore); localValue += valueDelta; // get the context words for this tree - should be the same // for either goldTree or bestTree IList <string> words = GetContextWords(goldTree.GetTree()); // The derivatives affected by this tree are only based on the // nodes present in this tree, eg not all matrix derivatives // will be affected by this tree BackpropDerivative(goldTree.GetTree(), words, goldTree.GetVectors(), binaryW_dfsG, unaryW_dfsG, binaryScoreDerivativesG, unaryScoreDerivativesG, wordVectorDerivativesG); BackpropDerivative(bestTree.GetTree(), words, bestTree.GetVectors(), binaryW_dfsB, unaryW_dfsB, binaryScoreDerivativesB, unaryScoreDerivativesB, wordVectorDerivativesB); } ++treeNum; } double[] localDerivativeGood; double[] localDerivativeB; if (op.trainOptions.trainWordVectors) { localDerivativeGood = NeuralUtils.ParamsToVector(theta.Length, binaryW_dfsG.ValueIterator(), unaryW_dfsG.Values.GetEnumerator(), binaryScoreDerivativesG.ValueIterator(), unaryScoreDerivativesG.Values.GetEnumerator(), wordVectorDerivativesG.Values .GetEnumerator()); localDerivativeB = NeuralUtils.ParamsToVector(theta.Length, binaryW_dfsB.ValueIterator(), unaryW_dfsB.Values.GetEnumerator(), binaryScoreDerivativesB.ValueIterator(), unaryScoreDerivativesB.Values.GetEnumerator(), wordVectorDerivativesB.Values .GetEnumerator()); } else { localDerivativeGood = NeuralUtils.ParamsToVector(theta.Length, binaryW_dfsG.ValueIterator(), unaryW_dfsG.Values.GetEnumerator(), binaryScoreDerivativesG.ValueIterator(), unaryScoreDerivativesG.Values.GetEnumerator()); localDerivativeB = NeuralUtils.ParamsToVector(theta.Length, binaryW_dfsB.ValueIterator(), unaryW_dfsB.Values.GetEnumerator(), binaryScoreDerivativesB.ValueIterator(), unaryScoreDerivativesB.Values.GetEnumerator()); } // correct - highest for (int i = 0; i < localDerivativeGood.Length; i++) { localDerivative[i] = localDerivativeB[i] - localDerivativeGood[i]; } // TODO: this is where we would combine multiple costs if we had parallelized the calculation value = localValue; derivative = localDerivative; // normalizing by training batch size value = (1.0 / trainingBatch.Count) * value; ArrayMath.MultiplyInPlace(derivative, (1.0 / trainingBatch.Count)); // add regularization to cost: double[] currentParams = dvModel.ParamsToVector(); double regCost = 0; foreach (double currentParam in currentParams) { regCost += currentParam * currentParam; } regCost = op.trainOptions.regCost * 0.5 * regCost; value += regCost; // add regularization to gradient ArrayMath.MultiplyInPlace(currentParams, op.trainOptions.regCost); ArrayMath.PairwiseAddInPlace(derivative, currentParams); }
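// Putting Calculate together, the quantity minimized is (a reading of the code
// above, written informally):
//   cost(theta) = (1/|batch|) * sum over trees t of [score_best(t) - score_gold(t)]
//                 + 0.5 * regCost * ||theta||^2
//   grad(theta) = (1/|batch|) * sum over trees t of [dScore_best(t) - dScore_gold(t)]
//                 + regCost * theta
// where the sums skip trees marked "done", i.e. those whose gold tree already
// scores at least as high (within 1e-5) as the best hypothesis tree.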
/// <summary>Test the parser on a treebank.</summary> /// <remarks> /// Test the parser on a treebank. Parses will be written to stdout, and /// various other information will be written to stderr and stdout, /// particularly if <code>op.testOptions.verbose</code> is true. /// </remarks> /// <param name="testTreebank">The treebank to parse</param> /// <returns> /// The labeled precision/recall F<sub>1</sub> (EVALB measure) /// of the parser on the treebank. /// </returns> public virtual double TestOnTreebank(Treebank testTreebank) { log.Info("Testing on treebank"); Timing treebankTotalTimer = new Timing(); TreePrint treePrint = op.testOptions.TreePrint(op.tlpParams); ITreebankLangParserParams tlpParams = op.tlpParams; ITreebankLanguagePack tlp = op.Langpack(); PrintWriter pwOut; PrintWriter pwErr; if (op.testOptions.quietEvaluation) { NullOutputStream quiet = new NullOutputStream(); pwOut = tlpParams.Pw(quiet); pwErr = tlpParams.Pw(quiet); } else { pwOut = tlpParams.Pw(); pwErr = tlpParams.Pw(System.Console.Error); } if (op.testOptions.verbose) { pwErr.Print("Testing "); pwErr.Println(testTreebank.TextualSummary(tlp)); } if (op.testOptions.evalb) { EvalbFormatWriter.InitEVALBfiles(tlpParams); } PrintWriter pwFileOut = null; if (op.testOptions.writeOutputFiles) { string fname = op.testOptions.outputFilesPrefix + "." + op.testOptions.outputFilesExtension; try { pwFileOut = op.tlpParams.Pw(new FileOutputStream(fname)); } catch (IOException ioe) { Sharpen.Runtime.PrintStackTrace(ioe); } } PrintWriter pwStats = null; if (op.testOptions.outputkBestEquivocation != null) { try { pwStats = op.tlpParams.Pw(new FileOutputStream(op.testOptions.outputkBestEquivocation)); } catch (IOException ioe) { Sharpen.Runtime.PrintStackTrace(ioe); } } if (op.testOptions.testingThreads != 1) { MulticoreWrapper <IList <IHasWord>, IParserQuery> wrapper = new MulticoreWrapper <IList <IHasWord>, IParserQuery>(op.testOptions.testingThreads, new ParsingThreadsafeProcessor(pqFactory, pwErr)); LinkedList <Tree> goldTrees = new LinkedList <Tree>(); foreach (Tree goldTree in testTreebank) { IList <IHasWord> sentence = GetInputSentence(goldTree); goldTrees.Add(goldTree); pwErr.Println("Parsing [len. " + sentence.Count + "]: " + SentenceUtils.ListToString(sentence)); wrapper.Put(sentence); while (wrapper.Peek()) { IParserQuery pq = wrapper.Poll(); goldTree = goldTrees.Poll(); ProcessResults(pq, goldTree, pwErr, pwOut, pwFileOut, pwStats, treePrint); } } // for tree iterator wrapper.Join(); while (wrapper.Peek()) { IParserQuery pq = wrapper.Poll(); Tree goldTree_1 = goldTrees.Poll(); ProcessResults(pq, goldTree_1, pwErr, pwOut, pwFileOut, pwStats, treePrint); } } else { IParserQuery pq = pqFactory.ParserQuery(); foreach (Tree goldTree in testTreebank) { IList <CoreLabel> sentence = GetInputSentence(goldTree); pwErr.Println("Parsing [len. 
" + sentence.Count + "]: " + SentenceUtils.ListToString(sentence)); pq.ParseAndReport(sentence, pwErr); ProcessResults(pq, goldTree, pwErr, pwOut, pwFileOut, pwStats, treePrint); } } // for tree iterator //Done parsing...print the results of the evaluations treebankTotalTimer.Done("Testing on treebank"); if (op.testOptions.quietEvaluation) { pwErr = tlpParams.Pw(System.Console.Error); } if (saidMemMessage) { ParserUtils.PrintOutOfMemory(pwErr); } if (op.testOptions.evalb) { EvalbFormatWriter.CloseEVALBfiles(); } if (numSkippedEvals != 0) { pwErr.Printf("Unable to evaluate %d parser hypotheses due to yield mismatch\n", numSkippedEvals); } // only created here so we know what parser types are supported... IParserQuery pq_1 = pqFactory.ParserQuery(); if (summary) { if (pcfgLB != null) { pcfgLB.Display(false, pwErr); } if (pcfgChildSpecific != null) { pcfgChildSpecific.Display(false, pwErr); } if (pcfgLA != null) { pcfgLA.Display(false, pwErr); } if (pcfgCB != null) { pcfgCB.Display(false, pwErr); } if (pcfgDA != null) { pcfgDA.Display(false, pwErr); } if (pcfgTA != null) { pcfgTA.Display(false, pwErr); } if (pcfgLL != null && pq_1.GetPCFGParser() != null) { pcfgLL.Display(false, pwErr); } if (depDA != null) { depDA.Display(false, pwErr); } if (depTA != null) { depTA.Display(false, pwErr); } if (depLL != null && pq_1.GetDependencyParser() != null) { depLL.Display(false, pwErr); } if (factLB != null) { factLB.Display(false, pwErr); } if (factChildSpecific != null) { factChildSpecific.Display(false, pwErr); } if (factLA != null) { factLA.Display(false, pwErr); } if (factCB != null) { factCB.Display(false, pwErr); } if (factDA != null) { factDA.Display(false, pwErr); } if (factTA != null) { factTA.Display(false, pwErr); } if (factLL != null && pq_1.GetFactoredParser() != null) { factLL.Display(false, pwErr); } if (pcfgCatE != null) { pcfgCatE.Display(false, pwErr); } foreach (IEval eval in evals) { eval.Display(false, pwErr); } foreach (BestOfTopKEval eval_1 in topKEvals) { eval_1.Display(false, pwErr); } } // these ones only have a display mode, so display if turned on!! if (pcfgRUO != null) { pcfgRUO.Display(true, pwErr); } if (pcfgCUO != null) { pcfgCUO.Display(true, pwErr); } if (tsv) { NumberFormat nf = new DecimalFormat("0.00"); pwErr.Println("factF1\tfactDA\tfactEx\tpcfgF1\tdepDA\tfactTA\tnum"); if (factLB != null) { pwErr.Print(nf.Format(factLB.GetEvalbF1Percent())); } pwErr.Print("\t"); if (pq_1.GetDependencyParser() != null && factDA != null) { pwErr.Print(nf.Format(factDA.GetEvalbF1Percent())); } pwErr.Print("\t"); if (factLB != null) { pwErr.Print(nf.Format(factLB.GetExactPercent())); } pwErr.Print("\t"); if (pcfgLB != null) { pwErr.Print(nf.Format(pcfgLB.GetEvalbF1Percent())); } pwErr.Print("\t"); if (pq_1.GetDependencyParser() != null && depDA != null) { pwErr.Print(nf.Format(depDA.GetEvalbF1Percent())); } pwErr.Print("\t"); if (pq_1.GetPCFGParser() != null && factTA != null) { pwErr.Print(nf.Format(factTA.GetEvalbF1Percent())); } pwErr.Print("\t"); if (factLB != null) { pwErr.Print(factLB.GetNum()); } pwErr.Println(); } double f1 = 0.0; if (factLB != null) { f1 = factLB.GetEvalbF1(); } //Close files (if necessary) if (pwFileOut != null) { pwFileOut.Close(); } if (pwStats != null) { pwStats.Close(); } if (parserQueryEvals != null) { foreach (IParserQueryEval parserQueryEval in parserQueryEvals) { parserQueryEval.Display(false, pwErr); } } return(f1); }