/// <summary> Create the weight in the multiple-index scenario.
///
/// Distributed query processing is done in the following steps:
/// 1. rewrite the query
/// 2. extract the necessary terms
/// 3. collect dfs for these terms from the Searchables
/// 4. create the query weight using the aggregate dfs
/// 5. distribute that weight to the Searchables
/// 6. merge the results
///
/// Steps 1-4 are done here; steps 5 and 6 are done in the search() methods.
///
/// </summary>
/// <returns> the aggregate Weight, built from document frequencies merged across all Searchables
/// </returns>
public /*protected internal*/ override Weight CreateWeight(Query original)
{
    // step 1
    Query rewrittenQuery = Rewrite(original);
    
    // step 2
    System.Collections.Hashtable terms = new System.Collections.Hashtable();
    rewrittenQuery.ExtractTerms(terms);
    
    // step 3: collect the extracted terms into an array and sum each term's
    // document frequency across all sub-searchers
    Term[] allTermsArray = new Term[terms.Count];
    int index = 0;
    System.Collections.IEnumerator e = terms.Keys.GetEnumerator();
    while (e.MoveNext())
    {
        allTermsArray[index++] = e.Current as Term;
    }
    
    int[] aggregatedDfs = new int[terms.Count];
    for (int i = 0; i < searchables.Length; i++)
    {
        int[] dfs = searchables[i].DocFreqs(allTermsArray);
        for (int j = 0; j < aggregatedDfs.Length; j++)
        {
            aggregatedDfs[j] += dfs[j];
        }
    }
    
    System.Collections.Hashtable dfMap = new System.Collections.Hashtable();
    for (int i = 0; i < allTermsArray.Length; i++)
    {
        dfMap[allTermsArray[i]] = (System.Int32) aggregatedDfs[i];
    }
    
    // step 4: build the weight against a cached-df source so that every
    // sub-searcher scores with the same aggregate term statistics
    int numDocs = MaxDoc();
    CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, GetSimilarity());
    
    return rewrittenQuery.Weight(cacheSim);
}
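
// Illustrative sketch only, not part of the Lucene.Net API: it shows why the
// per-index document frequencies must be summed (step 3) before the weight is
// created (step 4). The helper name AggregateIdfExample, the sample index
// sizes, and the per-index dfs below are assumptions for demonstration; the
// IDF formula mirrors the default Similarity (log(numDocs / (docFreq + 1)) + 1).
// If each sub-searcher used only its local df, the same term would receive a
// different IDF in each index and the merged results would be ranked inconsistently.
private static float AggregateIdfExample()
{
    int[] perIndexDf = { 3, 17 };         // df of one term in each sub-index (assumed values)
    int[] perIndexMaxDoc = { 100, 900 };  // number of documents in each sub-index (assumed values)
    
    int aggregatedDf = 0;                 // analogous to aggregatedDfs[j] above
    int numDocs = 0;                      // analogous to MaxDoc() over all searchables
    for (int i = 0; i < perIndexDf.Length; i++)
    {
        aggregatedDf += perIndexDf[i];
        numDocs += perIndexMaxDoc[i];
    }
    
    // One IDF computed from the aggregate counts, shared by every sub-searcher.
    return (float) (System.Math.Log(numDocs / (double) (aggregatedDf + 1)) + 1.0);
}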