// Example 1
        /// <summary>
        /// Analyze/judge results for a single quality query; optionally log them.
        /// </summary>
        /// <param name="qq">The quality query whose results are being judged.</param>
        /// <param name="q">The parsed query (used only for logging).</param>
        /// <param name="td">The top search hits for the query.</param>
        /// <param name="judge">Relevance judge for the query's documents.</param>
        /// <param name="logger">Optional log destination; may be null.</param>
        /// <param name="searchTime">Elapsed search time, folded into the stats.</param>
        /// <returns>Accumulated quality statistics for this query.</returns>
        private QualityStats AnalyzeQueryResults(QualityQuery qq, Query q, TopDocs td, IJudge judge, TextWriter logger, long searchTime)
        {
            QualityStats stats = new QualityStats(judge.MaxRecall(qq), searchTime);

            ScoreDoc[] hits = td.ScoreDocs;
            // The clock is started before the extractor is built so that its
            // construction cost is charged to the first document's extraction time.
            long lastMark = Support.Time.CurrentTimeMilliseconds();
            DocNameExtractor extractor = new DocNameExtractor(m_docNameField);

            int rank = 0;
            foreach (ScoreDoc hit in hits)
            {
                string docName = extractor.DocName(m_searcher, hit.Doc);
                long extractTime = Support.Time.CurrentTimeMilliseconds() - lastMark;
                lastMark = Support.Time.CurrentTimeMilliseconds();
                rank++;
                stats.AddResult(rank, judge.IsRelevant(docName, qq), extractTime);
            }

            if (logger != null)
            {
                logger.WriteLine(qq.QueryID + "  -  " + q);
                stats.Log(qq.QueryID + " Stats:", 1, logger, "  ");
            }
            return stats;
        }
// Example 2
        /// <summary>
        /// End-to-end quality benchmark test: runs TREC topics against a partial
        /// Reuters index and verifies that the deliberately-altered judgments
        /// hurt (or preserve) precision/recall exactly as arranged per query.
        /// </summary>
        public void TestTrecQuality()
        {
            // first create the partial reuters index
            createReutersIndex();

            int    maxResults   = 1000;
            string docNameField = "doctitle"; // orig docID is in the linedoc format title

            TextWriter logger = VERBOSE ? Console.Out : null;

            // prepare topics
            Stream           topics  = GetType().getResourceAsStream("trecTopics.txt");
            TrecTopicsReader qReader = new TrecTopicsReader();

            QualityQuery[] qqs = qReader.ReadQueries(new StreamReader(topics, Encoding.UTF8));

            // prepare judge
            Stream qrels = GetType().getResourceAsStream("trecQRels.txt");
            IJudge judge = new TrecJudge(new StreamReader(qrels, Encoding.UTF8));

            // validate topics & judgments match each other
            judge.ValidateData(qqs, logger);

            Store.Directory dir    = NewFSDirectory(new DirectoryInfo(System.IO.Path.Combine(getWorkDir().FullName, "index")));
            IndexReader     reader = DirectoryReader.Open(dir);
            try
            {
                IndexSearcher searcher = new IndexSearcher(reader);

                IQualityQueryParser qqParser = new SimpleQQParser("title", "body");
                QualityBenchmark    qrun     = new QualityBenchmark(qqs, qqParser, searcher, docNameField);

                SubmissionReport submitLog = VERBOSE ? new SubmissionReport(logger, "TestRun") : null;

                qrun.MaxResults = maxResults;
                QualityStats[] stats = qrun.Execute(judge, submitLog, logger);

                // --------- verify by the way judgments were altered for this test:
                // for some queries, depending on m = qnum % 8
                // m==0: avg_precision and recall are hurt, by marking fake docs as relevant
                // m==1: precision_at_n and avg_precision are hurt, by unmarking relevant docs
                // m==2: all precision, precision_at_n and recall are hurt.
                // m>=3: these queries remain perfect
                for (int i = 0; i < stats.Length; i++)
                {
                    QualityStats s = stats[i];
                    switch (i % 8)
                    {
                    case 0:
                        assertTrue("avg-p should be hurt: " + s.GetAvp(), 1.0 > s.GetAvp());
                        assertTrue("recall should be hurt: " + s.Recall, 1.0 > s.Recall);
                        for (int j = 1; j <= QualityStats.MAX_POINTS; j++)
                        {
                            assertEquals("p_at_" + j + " should be perfect: " + s.GetPrecisionAt(j), 1.0, s.GetPrecisionAt(j), 1E-2);
                        }
                        break;

                    case 1:
                        assertTrue("avg-p should be hurt", 1.0 > s.GetAvp());
                        assertEquals("recall should be perfect: " + s.Recall, 1.0, s.Recall, 1E-2);
                        for (int j = 1; j <= QualityStats.MAX_POINTS; j++)
                        {
                            assertTrue("p_at_" + j + " should be hurt: " + s.GetPrecisionAt(j), 1.0 > s.GetPrecisionAt(j));
                        }
                        break;

                    case 2:
                        assertTrue("avg-p should be hurt: " + s.GetAvp(), 1.0 > s.GetAvp());
                        assertTrue("recall should be hurt: " + s.Recall, 1.0 > s.Recall);
                        for (int j = 1; j <= QualityStats.MAX_POINTS; j++)
                        {
                            assertTrue("p_at_" + j + " should be hurt: " + s.GetPrecisionAt(j), 1.0 > s.GetPrecisionAt(j));
                        }
                        break;

                    default:
                        assertEquals("avg-p should be perfect: " + s.GetAvp(), 1.0, s.GetAvp(), 1E-2);
                        assertEquals("recall should be perfect: " + s.Recall, 1.0, s.Recall, 1E-2);
                        for (int j = 1; j <= QualityStats.MAX_POINTS; j++)
                        {
                            assertEquals("p_at_" + j + " should be perfect: " + s.GetPrecisionAt(j), 1.0, s.GetPrecisionAt(j), 1E-2);
                        }
                        break;
                    }
                }

                QualityStats avg = QualityStats.Average(stats);

                if (logger != null)
                {
                    // fixed typo: "statistis" -> "statistics"
                    avg.Log("Average statistics:", 1, logger, "  ");
                }

                // Averages over all queries must reflect that some queries were hurt.
                assertTrue("mean avg-p should be hurt: " + avg.GetAvp(), 1.0 > avg.GetAvp());
                assertTrue("avg recall should be hurt: " + avg.Recall, 1.0 > avg.Recall);
                for (int j = 1; j <= QualityStats.MAX_POINTS; j++)
                {
                    assertTrue("avg p_at_" + j + " should be hurt: " + avg.GetPrecisionAt(j), 1.0 > avg.GetPrecisionAt(j));
                }
            }
            finally
            {
                // Dispose even when an assertion throws, so the index files are
                // released and subsequent tests are not affected by locked handles.
                reader.Dispose();
                dir.Dispose();
            }
        }