/// <summary>
/// Creates a <see cref="Weight"/> for a query that spans multiple indexes.
///
/// Distributed query processing happens in the following steps:
///   1. rewrite the query
///   2. extract the terms it needs
///   3. collect document frequencies for those terms from the Searchables
///   4. create the query weight using the aggregated dfs
///   5. distribute that weight to the Searchables
///   6. merge the results
///
/// Steps 1-4 are performed here; steps 5 and 6 happen in the search() methods.
/// </summary>
/// <param name="original">the query to build a weight for</param>
/// <returns>a weight built from the rewritten query and the aggregate dfs</returns>
public /*protected internal*/ override Weight CreateWeight(Query original)
{
    // Step 1: rewrite the query into its primitive form.
    Query rewritten = Rewrite(original);

    // Step 2: gather every term the rewritten query refers to.
    ISet<Term> terms = Lucene.Net.Support.Compatibility.SetFactory.CreateHashSet<Term>();
    rewritten.ExtractTerms(terms);

    // Step 3: sum each term's document frequency across all sub-searchers
    // so the combined weight sees global, not per-index, statistics.
    Term[] termArray = terms.ToArray();
    var mergedDfs = new int[termArray.Length];
    foreach (var searchable in searchables)
    {
        int[] dfs = searchable.DocFreqs(termArray);
        for (int j = 0; j < mergedDfs.Length; j++)
        {
            mergedDfs[j] += dfs[j];
        }
    }

    var docFreqMap = new Dictionary<Term, int>(termArray.Length);
    for (int i = 0; i < termArray.Length; i++)
    {
        docFreqMap[termArray[i]] = mergedDfs[i];
    }

    // Step 4: weight the rewritten query against the aggregated statistics.
    int totalDocs = MaxDoc;
    var dfSource = new CachedDfSource(docFreqMap, totalDocs, Similarity);
    return rewritten.Weight(dfSource);
}
/// <summary>
/// Adds all terms occurring in the wrapped query to <paramref name="terms"/>.
/// </summary>
/// <param name="terms">the set that receives the extracted terms</param>
public override void ExtractTerms(System.Collections.Generic.ISet<Term> terms)
{
    // Pure delegation: the Query member performs the actual extraction.
    Query.ExtractTerms(terms);
}