/// <summary>
/// Run the quality benchmark.
/// </summary>
/// <param name="judge">
/// The judge that can tell if a certain result doc is relevant for a certain quality query.
/// If null, no judgments will be made. Usually null for a submission run.
/// </param>
/// <param name="submitRep">If not null, a submission report is created and written to it.</param>
/// <param name="qualityLog">If not null, quality run data is printed for each query.</param>
/// <returns><see cref="QualityStats"/> of each quality query that was executed.</returns>
/// <exception cref="Exception">If the quality benchmark fails to run.</exception>
public virtual QualityStats[] Execute(IJudge judge, SubmissionReport submitRep,
                                      TextWriter qualityLog)
{
    int nQueries = Math.Min(maxQueries, m_qualityQueries.Length);
    QualityStats[] stats = new QualityStats[nQueries];
    for (int i = 0; i < nQueries; i++)
    {
        QualityQuery qq = m_qualityQueries[i];

        // Generate the Lucene query for this quality query.
        Query q = m_qqParser.Parse(qq);

        // Search with this query, timing the call.
        // LUCENENET: Use NanoTime() rather than CurrentTimeMilliseconds() for more accurate/reliable results.
        long t1 = J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond;
        TopDocs td = m_searcher.Search(q, null, maxResults);
        long searchTime = (J2N.Time.NanoTime() / J2N.Time.MillisecondsPerNanosecond) - t1;

        // Most likely we either submit or judge, but check both.
        if (judge != null)
        {
            stats[i] = AnalyzeQueryResults(qq, q, td, judge, qualityLog, searchTime);
        }
        if (submitRep != null)
        {
            submitRep.Report(qq, td, m_docNameField, m_searcher);
        }
    }
    if (submitRep != null)
    {
        submitRep.Flush();
    }
    return stats;
}
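
// Usage sketch (illustrative, not part of this class): wiring up a judged
// quality run against an existing IndexSearcher. The topics/qrels sources,
// the searcher, and the "docname" field name below are assumptions standing
// in for whatever a particular setup provides.
//
//   QualityQuery[] qqs = ...;      // e.g. parsed from a TREC topics file
//   IQualityQueryParser qqParser = new SimpleQQParser("title", "body");
//   IJudge judge = ...;            // e.g. a TrecJudge built from a qrels file
//   var benchmark = new QualityBenchmark(qqs, qqParser, searcher, "docname");
//   QualityStats[] stats = benchmark.Execute(judge, null, Console.Out);
//   QualityStats avg = QualityStats.Average(stats);
//   avg.Log("SUMMARY", 2, Console.Out, "  ");
//
// Passing null for submitRep (as above) skips the submission report; a
// submission-only run would instead pass a SubmissionReport and a null judge.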