Example No. 1
        private int maxDocs = 200;                                                                                                      // max to cache

        internal Hits(Searcher s, Query q, Filter f)
        {
            weight   = q.Weight(s);
            searcher = s;
            filter   = f;
            GetMoreDocs(50);             // retrieve 100 initially
        }
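
GetMoreDocs(50) doubles its argument internally, which is why the comment says 100 documents are fetched up front; further hits are paged in lazily as they are requested. A minimal usage sketch, assuming the deprecated Hits API of Lucene.Net 2.x (the index path and field name are hypothetical):

    // Hits is produced by Searcher.Search(Query), which calls this constructor.
    IndexSearcher searcher = new IndexSearcher("/path/to/index"); // hypothetical path
    Query query = new TermQuery(new Term("contents", "lucene"));  // hypothetical field

    Hits hits = searcher.Search(query);   // internally: new Hits(this, query, null)
    for (int i = 0; i < hits.Length(); i++)
    {
        Document doc = hits.Doc(i);       // triggers GetMoreDocs when the cache runs out
        float score = hits.Score(i);
    }
    searcher.Close();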
Example No. 2
        // check that first skip on just created scorers always goes to the right doc
        private static void  CheckFirstSkipTo(Query q, IndexSearcher s)
        {
            //System.out.println("checkFirstSkipTo: "+q);
            float maxDiff = 1e-4f; //{{Lucene.Net-2.9.1}}Intentional diversion from Java Lucene

            int[]         lastDoc    = new int[] { -1 };
            IndexReader[] lastReader = { null };

            s.Search(q, new AnonymousClassCollector1(lastDoc, q, s, maxDiff, lastReader), null);

            if (lastReader[0] != null)
            {
                // confirm that skipping beyond the last doc, on the
                // previous reader, hits NO_MORE_DOCS
                IndexReader previousReader = lastReader[0];
                Weight      w      = q.Weight(new IndexSearcher(previousReader), null);
                Scorer      scorer = w.Scorer(previousReader, true, false, null);

                if (scorer != null)
                {
                    bool more = scorer.Advance(lastDoc[0] + 1, null) != DocIdSetIterator.NO_MORE_DOCS;
                    Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID());
                }
            }
        }
Example No. 3
        /// <summary>check that the query weight is serializable. </summary>
        /// <throws>  IOException if serialization check fails.  </throws>
        private static void  CheckSerialization(Query q, Searcher s)
        {
            Weight w = q.Weight(s, null);

            try
            {
                System.IO.MemoryStream bos = new System.IO.MemoryStream();
                System.IO.BinaryWriter oos = new System.IO.BinaryWriter(bos);
                System.Runtime.Serialization.Formatters.Binary.BinaryFormatter formatter = new System.Runtime.Serialization.Formatters.Binary.BinaryFormatter();
                formatter.Serialize(oos.BaseStream, w);
                oos.Close();
                System.IO.BinaryReader ois = new System.IO.BinaryReader(new System.IO.MemoryStream(bos.ToArray()));
                formatter = new System.Runtime.Serialization.Formatters.Binary.BinaryFormatter();
                formatter.Deserialize(ois.BaseStream);
                ois.Close();

                //skip equals() test for now - most weights don't override equals() and we won't add this just for the tests.
                //TestCase.Assert.AreEqual(w2,w,"writeObject(w) != w.  ("+w+")");
            }
            catch (System.Exception e)
            {
                System.IO.IOException e2 = new System.IO.IOException("Serialization failed for " + w, e);
                throw e2;
            }
        }
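
The try block above is a plain BinaryFormatter round trip. The same check reduced to a generic helper; a sketch assuming classic .NET Framework (BinaryFormatter is obsolete on modern .NET and is shown only to mirror the test):

    using System.IO;
    using System.Runtime.Serialization.Formatters.Binary;

    // Serialize a value to an in-memory buffer and read it back,
    // as the test does for Weight.
    private static T RoundTrip<T>(T value)
    {
        BinaryFormatter formatter = new BinaryFormatter();
        using (MemoryStream buffer = new MemoryStream())
        {
            formatter.Serialize(buffer, value);
            buffer.Position = 0;              // rewind before deserializing
            return (T)formatter.Deserialize(buffer);
        }
    }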
Example No. 4
 internal Hits(Searcher s, Query q, Filter f)
 {
     weight = q.Weight(s);
     searcher = s;
     filter = f;
     GetMoreDocs(50); // retrieve 100 initially
 }
Example No. 5
        /// <summary> Create weight in multiple index scenario.
        ///
        /// Distributed query processing is done in the following steps:
        /// 1. rewrite query
        /// 2. extract necessary terms
        /// 3. collect dfs for these terms from the Searchables
        /// 4. create query weight using aggregate dfs.
        /// 5. distribute that weight to Searchables
        /// 6. merge results
        ///
        /// Steps 1-4 are done here, 5+6 in the search() methods
        ///
        /// </summary>
        /// <returns> the aggregate weight built from the rewritten query
        /// </returns>
        public /*protected internal*/ override Weight CreateWeight(Query original, IState state)
        {
            // step 1
            Query rewrittenQuery = Rewrite(original, state);

            // step 2
            ISet<Term> terms = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<Term>();

            rewrittenQuery.ExtractTerms(terms);

            // step3
            Term[] allTermsArray = terms.ToArray();
            int[]  aggregatedDfs = new int[terms.Count];
            for (int i = 0; i < searchables.Length; i++)
            {
                int[] dfs = searchables[i].DocFreqs(allTermsArray, state);
                for (int j = 0; j < aggregatedDfs.Length; j++)
                {
                    aggregatedDfs[j] += dfs[j];
                }
            }

            var dfMap = new Dictionary<Term, int>();

            for (int i = 0; i < allTermsArray.Length; i++)
            {
                dfMap[allTermsArray[i]] = aggregatedDfs[i];
            }

            // step4
            int            numDocs  = MaxDoc;
            CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, Similarity);

            return rewrittenQuery.Weight(cacheSim, state);
        }
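
Step 3 is element-wise summation: each searchable reports its local document frequency for every extracted term, and the aggregate df is the per-term total, so every shard later scores against the same statistics. A standalone sketch with made-up numbers:

    // Hypothetical df vectors from two shards, for the same three terms.
    int[] shard1Dfs = { 10, 0, 4 };
    int[] shard2Dfs = { 7, 2, 1 };

    int[] aggregatedDfs = new int[3];
    foreach (int[] dfs in new[] { shard1Dfs, shard2Dfs })
    {
        for (int j = 0; j < aggregatedDfs.Length; j++)
        {
            aggregatedDfs[j] += dfs[j];   // per-term sum across shards
        }
    }
    // aggregatedDfs is now { 17, 2, 5 } - the dfs the shared Weight scores with.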
Example No. 6
		public /*internal*/ bool debugCheckedForDeletions = false; // for test purposes.
		
		internal Hits(Searcher s, Query q, Filter f)
		{
			weight = q.Weight(s);
			searcher = s;
			filter = f;
			nDeletions = CountDeletions(s);
			GetMoreDocs(50); // retrieve 100 initially
			lengthAtStart = length;
		}
Example No. 7
        public /*internal*/ bool debugCheckedForDeletions = false;                                                                      // for test purposes.

        internal Hits(Searcher s, Query q, Filter f)
        {
            weight     = q.Weight(s);
            searcher   = s;
            filter     = f;
            nDeletions = CountDeletions(s);
            GetMoreDocs(50);             // retrieve 100 initially
            lengthAtStart = length;
        }
Example No. 8
        /// <summary>Alternates scorer skipTo(), skipTo(), next(), next(), skipTo(), skipTo(), etc.,
        /// and ensures a collector receives the same docs and scores.
        /// </summary>
        public static void  CheckSkipTo(Query q, IndexSearcher s)
        {
            //System.out.println("Checking "+q);

            if (q.Weight(s, null).GetScoresDocsOutOfOrder())
            {
                return;                  // in this case order of skipTo() might differ from that of next().
            }
            int skip_op = 0;
            int next_op = 1;

            int[][] orders = new int[][]
            {
                new int[] { next_op },
                new int[] { skip_op },
                new int[] { skip_op, next_op },
                new int[] { next_op, skip_op },
                new int[] { skip_op, skip_op, next_op, next_op },
                new int[] { next_op, next_op, skip_op, skip_op },
                new int[] { skip_op, skip_op, skip_op, next_op, next_op }
            };
            for (int k = 0; k < orders.Length; k++)
            {
                int[] order = orders[k];
                // System.out.print("Order:");for (int i = 0; i < order.length; i++)
                // System.out.print(order[i]==skip_op ? " skip()":" next()");
                // System.out.println();
                int[] opidx   = new int[] { 0 };
                int[] lastDoc = new[] { -1 };

                // FUTURE: ensure scorer.doc()==-1

                float         maxDiff    = 1e-5f;
                IndexReader[] lastReader = new IndexReader[] { null };

                s.Search(q, new AnonymousClassCollector(order, opidx, skip_op, lastReader, maxDiff, q, s, lastDoc), null);

                if (lastReader[0] != null)
                {
                    // Confirm that skipping beyond the last doc, on the
                    // previous reader, hits NO_MORE_DOCS
                    IndexReader previousReader = lastReader[0];
                    Weight      w      = q.Weight(new IndexSearcher(previousReader), null);
                    Scorer      scorer = w.Scorer(previousReader, true, false, null);
                    if (scorer != null)
                    {
                        bool more = scorer.Advance(lastDoc[0] + 1, null) != DocIdSetIterator.NO_MORE_DOCS;
                        Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID());
                    }
                }
            }
        }
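
The invariant this test exercises is that interleaving Advance() with NextDoc() must surface exactly the documents NextDoc() alone would, in the same order. A toy iterator over a fixed postings list (hypothetical, not a Lucene class) makes the contract concrete:

    // Toy stand-in for DocIdSetIterator semantics.
    class IntArrayIterator
    {
        public const int NO_MORE_DOCS = int.MaxValue;
        private readonly int[] docs;  // sorted doc ids, like a postings list
        private int pos = -1;

        public IntArrayIterator(int[] docs) { this.docs = docs; }

        public int NextDoc()
        {
            return ++pos < docs.Length ? docs[pos] : NO_MORE_DOCS;
        }

        public int Advance(int target)  // first doc >= target
        {
            int doc;
            do { doc = NextDoc(); } while (doc < target);
            return doc;
        }
    }

After visiting doc d, Advance(d + 1) returns the same document the next NextDoc() call would, which is why the test can shuffle the two operations freely and still expect identical docs and scores.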
Example No. 9
        /// <summary> Create weight in multiple index scenario.
        ///
        /// Distributed query processing is done in the following steps:
        /// 1. rewrite query
        /// 2. extract necessary terms
        /// 3. collect dfs for these terms from the Searchables
        /// 4. create query weight using aggregate dfs.
        /// 5. distribute that weight to Searchables
        /// 6. merge results
        ///
        /// Steps 1-4 are done here, 5+6 in the search() methods
        ///
        /// </summary>
        /// <returns> the aggregate weight built from the rewritten query
        /// </returns>
        public /*protected internal*/ override Weight CreateWeight(Query original)
        {
            // step 1
            Query rewrittenQuery = Rewrite(original);

            // step 2
            Support.Set<Lucene.Net.Index.Term> terms = new Support.Set<Term>();
            rewrittenQuery.ExtractTerms(terms);

            // step3
            Term[] allTermsArray = new Term[terms.Count];
            int    index         = 0;

            foreach (Term t in terms)
            {
                allTermsArray[index++] = t;
            }

            int[] aggregatedDfs = new int[terms.Count];
            for (int i = 0; i < searchables.Length; i++)
            {
                int[] dfs = searchables[i].DocFreqs(allTermsArray);
                for (int j = 0; j < aggregatedDfs.Length; j++)
                {
                    aggregatedDfs[j] += dfs[j];
                }
            }

            IDictionary<Term, int> dfMap = new Support.Dictionary<Term, int>();

            for (int i = 0; i < allTermsArray.Length; i++)
            {
                dfMap[allTermsArray[i]] = aggregatedDfs[i];
            }

            // step4
            int            numDocs  = MaxDoc();
            CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, GetSimilarity());

            return rewrittenQuery.Weight(cacheSim);
        }
Example No. 10
        /// <summary> Create weight in multiple index scenario.
        ///
        /// Distributed query processing is done in the following steps:
        /// 1. rewrite query
        /// 2. extract necessary terms
        /// 3. collect dfs for these terms from the Searchables
        /// 4. create query weight using aggregate dfs.
        /// 5. distribute that weight to Searchables
        /// 6. merge results
        ///
        /// Steps 1-4 are done here, 5+6 in the search() methods
        ///
        /// </summary>
        /// <returns> the aggregate weight built from the rewritten query
        /// </returns>
        protected internal override Weight CreateWeight(Query original)
        {
            // step 1
            Query rewrittenQuery = Rewrite(original);

            // step 2
            System.Collections.Hashtable terms = new System.Collections.Hashtable();
            rewrittenQuery.ExtractTerms(terms);

            // step3
            Term[] allTermsArray = new Term[terms.Count];
            int    index         = 0;

            System.Collections.IEnumerator e = terms.Keys.GetEnumerator();
            while (e.MoveNext())
            {
                allTermsArray[index++] = e.Current as Term;
            }
            int[] aggregatedDfs = new int[terms.Count];
            for (int i = 0; i < searchables.Length; i++)
            {
                int[] dfs = searchables[i].DocFreqs(allTermsArray);
                for (int j = 0; j < aggregatedDfs.Length; j++)
                {
                    aggregatedDfs[j] += dfs[j];
                }
            }

            System.Collections.Hashtable dfMap = new System.Collections.Hashtable();
            for (int i = 0; i < allTermsArray.Length; i++)
            {
                dfMap[allTermsArray[i]] = aggregatedDfs[i];
            }

            // step4
            int            numDocs  = MaxDoc();
            CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs);

            return rewrittenQuery.Weight(cacheSim);
        }
Example No. 11
        /// <summary>Alternates scorer skipTo(), skipTo(), next(), next(), skipTo(), skipTo(), etc.,
        /// and ensures a collector receives the same docs and scores.
        /// </summary>
        public static void  CheckSkipTo(Query q, IndexSearcher s)
        {
            //System.out.println("Checking "+q);

            if (BooleanQuery.GetAllowDocsOutOfOrder())
            {
                return;                  // in this case order of skipTo() might differ from that of next().
            }
            int skip_op = 0;
            int next_op = 1;

            int[][] orders = new int[][]
            {
                new int[] { next_op },
                new int[] { skip_op },
                new int[] { skip_op, next_op },
                new int[] { next_op, skip_op },
                new int[] { skip_op, skip_op, next_op, next_op },
                new int[] { next_op, next_op, skip_op, skip_op },
                new int[] { skip_op, skip_op, skip_op, next_op, next_op }
            };
            for (int k = 0; k < orders.Length; k++)
            {
                int[] order = orders[k];
                // System.out.print("Order:");for (int i = 0; i < order.length; i++)
                // System.out.print(order[i]==skip_op ? " skip()":" next()");
                // System.out.println();
                int[] opidx = new int[] { 0 };

                Weight w      = q.Weight(s);
                Scorer scorer = w.Scorer(s.GetIndexReader(), true, false);
                if (scorer == null)
                {
                    continue;
                }

                // FUTURE: ensure scorer.doc()==-1

                int[] sdoc    = new int[] { -1 };
                float maxDiff = 1e-5f;
                s.Search(q, new AnonymousClassCollector(order, opidx, skip_op, scorer, sdoc, maxDiff, q, s));

                // make sure next call to scorer is false.
                int op = order[(opidx[0]++) % order.Length];
                // System.out.println(op==skip_op ? "last: skip()":"last: next()");
                bool more = (op == skip_op ? scorer.Advance(sdoc[0] + 1) : scorer.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS;
                Assert.IsFalse(more);
            }
        }
Example No. 12
        public static List<SearchRecord> HighLightSearch(out Query query)
        {
            List<SearchRecord> recordList = new List<SearchRecord>();            
            query = GetQuery();
            try
            {
                if (searchIndexList.Count > 0)
                {
                    foreach (IndexSet indexSet in searchIndexList)
                    {
                        if (indexSet.Type == IndexTypeEnum.Increment)
                            continue;
                        Source source = indexDict[indexSet];
                        Dictionary<string, IndexField> fpDict = source.FieldDict;
                        //IndexSearcher searcher = new IndexSearcher(indexSet.Path);
                        IndexSearcher presearcher = new IndexSearcher(indexSet.Path);
                        ParallelMultiSearcher searcher = new ParallelMultiSearcher(new IndexSearcher[] { presearcher });
#if DEBUG
                        System.Console.WriteLine(query.ToString());
#endif
                        Highlighter highlighter = new Highlighter(new QueryScorer(query));
                        highlighter.SetTextFragmenter(new SimpleFragmenter(SupportClass.FRAGMENT_SIZE));
                        TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
                        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
                        for (int i = 0; i < scoreDocs.Length; i++)
                        {
                            float score = scoreDocs[i].score;
                            if (score < searchSet.MinScore)
                                continue;
                            Document doc = searcher.Doc(scoreDocs[i].doc);
                            Field[] fields = new Field[doc.GetFields().Count];
                            doc.GetFields().CopyTo(fields, 0);
                            List<SearchField> sfList = new List<SearchField>(); 
                            foreach (Field field in fields)
                            {
                                string key = field.Name();
                                string value = field.StringValue();
                                string output = SupportClass.String.DropHTML(value);
                                TokenStream tokenStream = analyzer.TokenStream(key, new System.IO.StringReader(output));
                                string result = "";
                                result = highlighter.GetBestFragment(tokenStream, output);
                                if (result != null && string.IsNullOrEmpty(result.Trim()) == false)
                                {
                                    if (fpDict.ContainsKey(key))
                                        sfList.Add(new SearchField(key, fpDict[key].Caption, value, result, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
                                    else
                                        sfList.Add(new SearchField(key, key, value, result, field.GetBoost(), false, false, 0));
                                }
                                else
                                {
                                    if (fpDict.ContainsKey(key))
                                        sfList.Add(new SearchField(key, fpDict[key].Caption, value, value, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
                                    else
                                        sfList.Add(new SearchField(key, key, value, value, field.GetBoost(), false, false, 0));
                                }
                            }
                            recordList.Add(new SearchRecord(indexSet, sfList, indexDict[indexSet].PrimaryKey, score));
                        }
                    }
                }
                else
                {
                    foreach (IndexSet indexSet in indexFieldsDict.Keys)
                    {
                        if (indexSet.Type == IndexTypeEnum.Increment)
                            continue;
                        Source source = indexDict[indexSet];
                        Dictionary<string, IndexField> fpDict = source.FieldDict;
                        //IndexSearcher searcher = new IndexSearcher(indexSet.Path);
                        IndexSearcher presearcher = new IndexSearcher(indexSet.Path);
                        ParallelMultiSearcher searcher = new ParallelMultiSearcher(new IndexSearcher[] { presearcher });
#if DEBUG
                        System.Console.WriteLine(query.ToString());
#endif
                        Highlighter highlighter = new Highlighter(new QueryScorer(query));
                        highlighter.SetTextFragmenter(new SimpleFragmenter(SupportClass.FRAGMENT_SIZE));
                        TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
                        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
                        for (int i = 0; i < scoreDocs.Length; i++)
                        {
                            float score = scoreDocs[i].score;
                            if (score < searchSet.MinScore)
                                continue;
                            Document doc = searcher.Doc(scoreDocs[i].doc);
                            Field[] fields = new Field[doc.GetFields().Count];
                            doc.GetFields().CopyTo(fields, 0);
                            List<SearchField> sfList = new List<SearchField>(); 
                            foreach (Field field in fields)
                            {
                                string key = field.Name();
                                string value = field.StringValue();
                                string output = SupportClass.String.DropHTML(value);
                                TokenStream tokenStream = analyzer.TokenStream(key, new System.IO.StringReader(output));
                                string result = "";
                                result = highlighter.GetBestFragment(tokenStream, output);
                                if (result != null && string.IsNullOrEmpty(result.Trim()) == false)
                                {
                                    if (fpDict.ContainsKey(key))
                                        sfList.Add(new SearchField(key, fpDict[key].Caption, value, result, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
                                    else
                                        sfList.Add(new SearchField(key, key, value, result, field.GetBoost(), false, false, 0));
                                }
                                else
                                {
                                    if (fpDict.ContainsKey(key))
                                        sfList.Add(new SearchField(key, fpDict[key].Caption, value, value, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
                                    else
                                        sfList.Add(new SearchField(key, key, value, value, field.GetBoost(), false, false, 0));
                                }
                            }
                            recordList.Add(new SearchRecord(indexSet, sfList, indexDict[indexSet].PrimaryKey, score));
                        }
                    }
                }
            }
            catch (Exception e)
            {
                SupportClass.FileUtil.WriteToLog(SupportClass.LogPath, e.StackTrace);
            }
            return recordList;
        }
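
Stripped of the index plumbing and the fpDict bookkeeping, the highlighting core of this method is only a few lines. A reduced sketch, assuming the Lucene.Net 2.x contrib Highlighter (field name and text are illustrative):

    Highlighter highlighter = new Highlighter(new QueryScorer(query));
    highlighter.SetTextFragmenter(new SimpleFragmenter(100));  // fragment size in chars

    string text = "raw stored field value containing the search terms";
    TokenStream tokens = analyzer.TokenStream("contents", new System.IO.StringReader(text));
    string fragment = highlighter.GetBestFragment(tokens, text);
    // GetBestFragment returns null when no query term matches,
    // hence the fallbacks to the raw value in the method above.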
Example No. 13
 /// <summary> creates a weight for <c>query</c></summary>
 /// <returns> new weight
 /// </returns>
 /*protected internal*/
 public virtual Weight CreateWeight(Query query)
 {
     return query.Weight(this);
 }
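
CreateWeight is the hook every Searcher.Search overload passes through before scoring, and overriding it (as MultiSearcher does in Example No. 5) is how a subclass substitutes different term statistics. A rough sketch of the surrounding call sequence, hedged to the 2.9-era API (searcher and reader are assumed to be in scope):

    Weight weight = searcher.CreateWeight(query);        // -> query.Weight(searcher)
    Scorer scorer = weight.Scorer(reader, true, false);  // scoreDocsInOrder, topScorer
    if (scorer != null)
    {
        int doc;
        while ((doc = scorer.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
        {
            float score = scorer.Score();                // matches arrive in doc-id order
        }
    }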
Example No. 14
 public static List<SearchRecord> SearchPage(out Query query, out Dictionary<string, int> statistics,List<string> filterList,int pageSize, int pageNum,bool fileInclude,bool highLight)
 {
     List<SearchRecord> recordList = new List<SearchRecord>();
     query = GetQuery(fileInclude);
     statistics = new Dictionary<string, int>();
     try
     {
         #region Add Index Dir
         //SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "begin to init searcher.");
         List<IndexSearcher> searcherList = new List<IndexSearcher>();
         if (searchIndexList.Count > 0)
         {
             foreach (IndexSet indexSet in searchIndexList)
             {
                 if (indexSet.Type == IndexTypeEnum.Increment)
                     continue;
                 searcherList.Add(new IndexSearcher(indexSet.Path));
             }
         }
         else
         {
             foreach (IndexSet indexSet in indexFieldsDict.Keys)
             {
                 if (indexSet.Type == IndexTypeEnum.Increment)
                     continue;
                 searcherList.Add(new IndexSearcher(indexSet.Path));
             }
         }
         if (fileInclude)
         {
             searcherList.Add(new IndexSearcher(fileSet.Path));
         }
         #endregion
         //SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "begin to Search.");
         ParallelMultiSearcher searcher = new ParallelMultiSearcher(searcherList.ToArray());
         TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
         ScoreDoc[] scoreDocs = topDocs.scoreDocs;
         Highlighter highlighter = new Highlighter(new QueryScorer(query));
         highlighter.SetTextFragmenter(new SimpleFragmenter(SupportClass.FRAGMENT_SIZE));
         #region Order by Score
         //SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "Add to list.");
         List<ScoreDoc> scoreDocList = new List<ScoreDoc>();
         for (int i = 0; i < scoreDocs.Length; i++)
         {
             float score = scoreDocs[i].score;
             if (score < searchSet.MinScore)
                 continue;
             scoreDocList.Add(scoreDocs[i]);
         }
         //SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "Begin to sort.");
         scoreDocList.Sort(delegate(ScoreDoc x, ScoreDoc y)
         {
             if (x.score > y.score)
                 return -1;
             else if (x.score == y.score)
                 return 0;
             else
                 return 1;
         });
         //SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "End sort.");
         #endregion
         #region Doc Statistic
         int start = 0, end = scoreDocList.Count;
         if (pageSize > 0 && pageNum >= 1)
         {
              start = pageSize * (pageNum - 1) + 1;
             end = pageNum * pageSize;
         }
         int current = 1;
         SpecialFieldSelector sfSelector = new SpecialFieldSelector(SupportClass.TableFileNameField);
         for (int recNum = 0; recNum < scoreDocList.Count; recNum++)
         {
             float score = scoreDocList[recNum].score;
             if (score < searchSet.MinScore)
                 continue;
             Document fDoc = searcher.Doc(scoreDocList[recNum].doc,sfSelector);
             string caption = fDoc.Get(SupportClass.TableFileNameField);
              if (caption.Equals(SupportClass.TFNFieldValue) == false)
             {
                 if (sfpDict.ContainsKey(caption) == false || nameIndexDict.ContainsKey(caption) == false)
                 {
                     continue;
                 }
             }
             if (statistics.ContainsKey(caption))
             {
                 statistics[caption] = statistics[caption] + 1;
             }
             else
             {
                 statistics.Add(caption, 1);
             }
              if (filterList != null && filterList.Count > 0)
             {
                 if (!filterList.Contains(caption))
                     continue;
             }
             #region Add Page
             if (current >= start && current <= end)
             {
                 Document doc = searcher.Doc(scoreDocList[recNum].doc);
                 doc.RemoveField(SupportClass.TableFileNameField);
                 Dictionary<string, IndexField> fpDict = sfpDict[caption];
                 Field[] fields = new Field[doc.GetFields().Count];
                 doc.GetFields().CopyTo(fields, 0);
                 #region SearchField
                 List<SearchField> sfList = new List<SearchField>();
                 foreach (Field field in fields)
                 {
                     string key = field.Name();
                     string value = field.StringValue();
                     string result = "";
                     if (highLight)
                     {
                         string output = SupportClass.String.DropHTML(value);
                         TokenStream tokenStream = analyzer.TokenStream(key, new System.IO.StringReader(output));
                         result = highlighter.GetBestFragment(tokenStream, output);
                         if (result != null && string.IsNullOrEmpty(result.Trim()) == false)
                         {
                             if (fpDict.ContainsKey(key))
                                 sfList.Add(new SearchField(key, fpDict[key].Caption, value, result, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
                             else
                                 sfList.Add(new SearchField(key, key, value, result, field.GetBoost(), false, false, 0));
                         }
                         else
                         {
                             if (fpDict.ContainsKey(key))
                                 sfList.Add(new SearchField(key, fpDict[key].Caption, value, value, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
                             else
                                  sfList.Add(new SearchField(key, key, value, value, field.GetBoost(), false, false, 0));
                         }
                     }
                     else
                     {
                         if (fpDict.ContainsKey(key))
                             sfList.Add(new SearchField(key, fpDict[key].Caption, value, value, field.GetBoost(), fpDict[key].IsTitle, true, fpDict[key].Order));
                         else
                              sfList.Add(new SearchField(key, key, value, value, field.GetBoost(), false, false, 0));
                     }
                 }
                 #endregion
                 if (caption.Equals(SupportClass.TFNFieldValue) == false)
                 {
                     IndexSet indexSet = nameIndexDict[caption];
                     recordList.Add(new SearchRecord(indexSet, sfList, indexDict[indexSet].PrimaryKey, score));
                 }
                 else
                 {
                     recordList.Add(new SearchRecord("文件", "文件", "文件", score, sfList));
                 }
             }
             #endregion
             current++;
         }
         //SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search.log", "End of Search.");
         #endregion
     }
     catch (Exception)
     {
         //SupportClass.FileUtil.WriteToLog(@"D:\Indexer\log\search_log.txt", e.StackTrace.ToString());
     }
     return recordList;
 }
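
The paging window here is one-based on both ends: start = pageSize * (pageNum - 1) + 1 and end = pageNum * pageSize, compared against a counter that only advances for documents that survive the score and filter checks. A worked example:

    int pageSize = 10, pageNum = 3;
    int start = pageSize * (pageNum - 1) + 1;  // 21
    int end = pageNum * pageSize;              // 30
    // A record is emitted while 21 <= current <= 30: the third page of ten records.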
Example No. 15
 /// <summary> creates a weight for <code>query</code></summary>
 /// <returns> new weight
 /// </returns>
 protected internal virtual Weight CreateWeight(Query query)
 {
      return query.Weight(this);
 }
Example No. 16
		// check that first skip on just created scorers always goes to the right doc
		private static void  CheckFirstSkipTo(Query q, IndexSearcher s)
		{
			//System.out.println("checkFirstSkipTo: "+q);
			float maxDiff = 1e-5f;
			int[] lastDoc = new int[] { -1 };
			s.Search(q, new AnonymousClassHitCollector1(lastDoc, q, s, maxDiff));
			Weight w = q.Weight(s);
			Scorer scorer = w.Scorer(s.GetIndexReader());
			bool more = scorer.SkipTo(lastDoc[0] + 1);
			if (more)
				Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.Doc());
		}
Example No. 17
		/// <summary>Alternates scorer skipTo(), skipTo(), next(), next(), skipTo(), skipTo(), etc.,
		/// and ensures a collector receives the same docs and scores.
		/// </summary>
		public static void  CheckSkipTo(Query q, IndexSearcher s)
		{
			//System.out.println("Checking "+q);
			
			if (BooleanQuery.GetAllowDocsOutOfOrder())
				return; // in this case order of skipTo() might differ from that of next().
			
			int skip_op = 0;
			int next_op = 1;
			int[][] orders = new int[][]
			{
				new int[] { next_op },
				new int[] { skip_op },
				new int[] { skip_op, next_op },
				new int[] { next_op, skip_op },
				new int[] { skip_op, skip_op, next_op, next_op },
				new int[] { next_op, next_op, skip_op, skip_op },
				new int[] { skip_op, skip_op, skip_op, next_op, next_op }
			};
			for (int k = 0; k < orders.Length; k++)
			{
				int[] order = orders[k];
				//System.out.print("Order:");for (int i = 0; i < order.length; i++) System.out.print(order[i]==skip_op ? " skip()":" next()"); System.out.println();
				int[] opidx = new int[]{0};
				
				Weight w = q.Weight(s);
				Scorer scorer = w.Scorer(s.GetIndexReader());
				
				// FUTURE: ensure scorer.doc()==-1
				
				int[] sdoc = new int[] { -1 };
				float maxDiff = 1e-5f;
				s.Search(q, new AnonymousClassHitCollector(order, opidx, skip_op, scorer, sdoc, maxDiff, q, s));
				
				// make sure next call to scorer is false.
				int op = order[(opidx[0]++) % order.Length];
				//System.out.println(op==skip_op ? "last: skip()":"last: next()");
				bool more = op == skip_op ? scorer.SkipTo(sdoc[0] + 1) : scorer.Next();
				Assert.IsFalse(more);
			}
		}
Example No. 18
		/// <summary>check that the query weight is serializable. </summary>
		/// <throws>  IOException if serialization check fails.  </throws>
		private static void  CheckSerialization(Query q, Searcher s)
		{
			Weight w = q.Weight(s);
			try
			{
				System.IO.MemoryStream bos = new System.IO.MemoryStream();
				System.IO.BinaryWriter oos = new System.IO.BinaryWriter(bos);
				System.Runtime.Serialization.Formatters.Binary.BinaryFormatter formatter = new System.Runtime.Serialization.Formatters.Binary.BinaryFormatter();
				formatter.Serialize(oos.BaseStream, w);
				oos.Close();
				System.IO.BinaryReader ois = new System.IO.BinaryReader(new System.IO.MemoryStream(bos.ToArray()));
				formatter.Deserialize(ois.BaseStream);
				ois.Close();
				
				//skip equals() test for now - most weights don't override equals() and we won't add this just for the tests.
				//TestCase.assertEquals("writeObject(w) != w.  ("+w+")",w2,w);   
			}
			catch (System.Exception e)
			{
				System.IO.IOException e2 = new System.IO.IOException("Serialization failed for " + w, e);
				throw e2;
			}
		}
Example No. 19
        /// <summary>Alternates scorer skipTo(), skipTo(), next(), next(), skipTo(), skipTo(), etc.,
        /// and ensures a collector receives the same docs and scores.
        /// </summary>
        public static void  CheckSkipTo(Query q, IndexSearcher s)
        {
            //System.out.println("Checking "+q);
            
            if (q.Weight(s).GetScoresDocsOutOfOrder())
                return; // in this case order of skipTo() might differ from that of next().
            
            int skip_op = 0;
            int next_op = 1;
            int[][] orders = new int[][]
            {
                new int[] { next_op },
                new int[] { skip_op },
                new int[] { skip_op, next_op },
                new int[] { next_op, skip_op },
                new int[] { skip_op, skip_op, next_op, next_op },
                new int[] { next_op, next_op, skip_op, skip_op },
                new int[] { skip_op, skip_op, skip_op, next_op, next_op }
            };
            for (int k = 0; k < orders.Length; k++)
            {
                
                int[] order = orders[k];
                // System.out.print("Order:");for (int i = 0; i < order.length; i++)
                // System.out.print(order[i]==skip_op ? " skip()":" next()");
                // System.out.println();
                int[] opidx = new int[]{0};
                int[] lastDoc = new[] {-1};
                
                // FUTURE: ensure scorer.doc()==-1
                
                float maxDiff = 1e-5f;
                IndexReader[] lastReader = new IndexReader[] {null};

                s.Search(q, new AnonymousClassCollector(order, opidx, skip_op, lastReader, maxDiff, q, s, lastDoc));

                if (lastReader[0] != null)
                {
                    // Confirm that skipping beyond the last doc, on the
                    // previous reader, hits NO_MORE_DOCS
                    IndexReader previousReader = lastReader[0];
                    Weight w = q.Weight(new IndexSearcher(previousReader));
                    Scorer scorer = w.Scorer(previousReader, true, false);
                    if (scorer != null)
                    {
                        bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
                        Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID());
                    }
                }
            }
        }
Example No. 20
        public static List<SearchRecord> SearchEx(out Query query)
        {
            List<SearchRecord> recordList = new List<SearchRecord>();
            query = GetQuery();
            try
            {
                if (searchIndexList.Count > 0)
                {
                    foreach (IndexSet indexSet in searchIndexList)
                    {
                        if (indexSet.Type == IndexTypeEnum.Increment)
                            continue;
                        Source source = indexDict[indexSet];
                        Dictionary<string, IndexField> fpDict = source.FieldDict;
                        //IndexSearcher searcher = new IndexSearcher(indexSet.Path);
                        IndexSearcher presearcher = new IndexSearcher(indexSet.Path);
                        ParallelMultiSearcher searcher = new ParallelMultiSearcher(new IndexSearcher[] { presearcher });
#if DEBUG
                        System.Console.WriteLine(query.ToString());
#endif
                        TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
                        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
                        for (int i = 0; i < scoreDocs.Length; i++)
                        {
                            Document doc = searcher.Doc(scoreDocs[i].doc);
                            float score = scoreDocs[i].score;
                            if (score < searchSet.MinScore)
                                continue;
                            Field[] fields = new Field[doc.GetFields().Count];
                            doc.GetFields().CopyTo(fields, 0);
                            List<SearchField> sfList = new List<SearchField>();
                            foreach (Field field in fields)
                            {
                                if(fpDict.ContainsKey(field.Name()))
                                    sfList.Add(new SearchField(field, fpDict[field.Name()]));
                                else
                                    sfList.Add(new SearchField(field));
                            }
                            recordList.Add(new SearchRecord(indexSet, sfList, indexDict[indexSet].PrimaryKey, score));
                        }
                    }
                }
                else
                {
                    foreach (IndexSet indexSet in indexFieldsDict.Keys)
                    {
                        if (indexSet.Type == IndexTypeEnum.Increment)
                            continue;
                        Source source = indexDict[indexSet];
                        Dictionary<string, IndexField> fpDict = source.FieldDict;
                        //IndexSearcher searcher = new IndexSearcher(indexSet.Path);
                        IndexSearcher presearcher = new IndexSearcher(indexSet.Path);
                        ParallelMultiSearcher searcher = new ParallelMultiSearcher(new IndexSearcher[] { presearcher });
#if DEBUG
                        System.Console.WriteLine(query.ToString());
#endif
                        TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
                        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
                        for (int i = 0; i < scoreDocs.Length; i++)
                        {
                            Document doc = searcher.Doc(scoreDocs[i].doc);
                            float score = scoreDocs[i].score;
                            if (score < searchSet.MinScore)
                                continue;
                            Field[] fields = new Field[doc.GetFields().Count];
                            doc.GetFields().CopyTo(fields, 0);
                            List<SearchField> sfList = new List<SearchField>();
                            foreach (Field field in fields)
                            {
                                if (fpDict.ContainsKey(field.Name()))
                                    sfList.Add(new SearchField(field, fpDict[field.Name()]));
                                else
                                    sfList.Add(new SearchField(field));
                            }
                            recordList.Add(new SearchRecord(indexSet, sfList, indexDict[indexSet].PrimaryKey, score));
                        }
                    }
                }
            }
            catch (Exception e)
            {
                SupportClass.FileUtil.WriteToLog(SupportClass.LogPath, e.StackTrace);
            }
            return recordList;
        }
Example No. 21
        // check that first skip on just created scorers always goes to the right doc
        private static void  CheckFirstSkipTo(Query q, IndexSearcher s)
        {
            //System.out.println("checkFirstSkipTo: "+q);
            float maxDiff = 1e-4f; //{{Lucene.Net-2.9.1}}Intentional diversion from Java Lucene
            int[] lastDoc = new int[] { -1 };
            IndexReader[] lastReader = {null};

            s.Search(q, new AnonymousClassCollector1(lastDoc, q, s, maxDiff, lastReader));
            
            if (lastReader[0] != null)
            {
                // confirm that skipping beyond the last doc, on the
                // previous reader, hits NO_MORE_DOCS
                IndexReader previousReader = lastReader[0];
                Weight w = q.Weight(new IndexSearcher(previousReader));
                Scorer scorer = w.Scorer(previousReader, true, false);

                if (scorer != null)
                {
                    bool more = scorer.Advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;                    
                    Assert.IsFalse(more, "query's last doc was " + lastDoc[0] + " but skipTo(" + (lastDoc[0] + 1) + ") got to " + scorer.DocID());
                }
            }
        }
Example No. 22
 /// <summary> creates a weight for <c>query</c></summary>
 /// <returns> new weight
 /// </returns>
 public /*protected internal*/ virtual Weight CreateWeight(Query query, IState state)
 {
      return query.Weight(this, state);
 }
Example No. 23
        public override DocIdSet GetDocIdSet(IndexReader reader)
        {
            Weight weight = query.Weight(new IndexSearcher(reader));

            return new AnonymousClassDocIdSet(weight, reader, this);
        }
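
The anonymous DocIdSet above evaluates the wrapped query lazily through the weight's scorer. The other common pattern materializes the matches eagerly into a bit set; a hedged sketch against the 2.9-era API (OpenBitSet from Lucene.Net.Util implements DocIdSet):

    public override DocIdSet GetDocIdSet(IndexReader reader)
    {
        Weight weight = query.Weight(new IndexSearcher(reader));
        OpenBitSet bits = new OpenBitSet(reader.MaxDoc());

        Scorer scorer = weight.Scorer(reader, true, false);
        if (scorer != null)
        {
            int doc;
            while ((doc = scorer.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
            {
                bits.Set(doc);   // mark each matching document
            }
        }
        return bits;
    }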
Example No. 24
		/// <summary> creates a weight for <code>query</code></summary>
		/// <returns> new weight
		/// </returns>
		protected internal virtual Weight CreateWeight(Query query)
		{
			return query.Weight(this);
		}
Example No. 25
        public static List<SearchRecord> ExactFastSearch(out Query query)
        {
            List<SearchRecord> docList = new List<SearchRecord>();
            query = null;
            try
            {
                List<IndexReader> readerList = new List<IndexReader>();
                foreach (IndexSet indexSet in searchIndexList)
                {
                    if (indexSet.Type == IndexTypeEnum.Increment)
                        continue;
                    readerList.Add(IndexReader.Open(indexSet.Path));
                }
                MultiReader multiReader = new MultiReader(readerList.ToArray());
                IndexSearcher searcher = new IndexSearcher(multiReader);
                query = GetQuery();
#if DEBUG
                System.Console.WriteLine(query.ToString());
#endif
                TopDocs topDocs = searcher.Search(query.Weight(searcher), null, searchSet.MaxMatches);
                ScoreDoc[] scoreDocs = topDocs.scoreDocs;
                for (int i = 0; i < scoreDocs.Length; i++)
                {
                    Document doc = searcher.Doc(scoreDocs[i].doc);
                    float score = scoreDocs[i].score;
                    if (score < searchSet.MinScore)
                        continue;
                    // wrap the matched Document's fields in a SearchRecord so the
                    // element type matches docList; the empty labels are placeholders
                    Field[] fields = new Field[doc.GetFields().Count];
                    doc.GetFields().CopyTo(fields, 0);
                    List<SearchField> sfList = new List<SearchField>();
                    foreach (Field field in fields)
                        sfList.Add(new SearchField(field));
                    docList.Add(new SearchRecord(string.Empty, string.Empty, string.Empty, score, sfList));
                }
            }
            catch (Exception e)
            {
                SupportClass.FileUtil.WriteToLog(SupportClass.LogPath, e.StackTrace);
            }
            return docList;
        }