GetFieldNames() public abstract method

Get a list of unique field names that exist in this index and have the specified field option information.
public abstract ICollection GetFieldNames(FieldOption fldOption)
fldOption — the FieldOption specifying which field option must be present on the returned fields
Returns: an ICollection of the matching unique field names
Beispiel #1
0
        /// <summary>Verifies that GetFieldNames returns the expected field sets for a
        /// freshly built index: all fields, indexed-only fields, and unindexed-only fields.</summary>
        public virtual void  TestGetFieldNames()
        {
            RAMDirectory d = new RAMDirectory();
            // set up writer
            IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true);

            AddDocumentWithFields(writer);
            writer.Close();
            // set up reader
            IndexReader reader = IndexReader.Open(d);

            System.Collections.Hashtable fieldNames = (System.Collections.Hashtable)reader.GetFieldNames();
            Assert.IsTrue(fieldNames.Contains("keyword"));
            Assert.IsTrue(fieldNames.Contains("text"));
            Assert.IsTrue(fieldNames.Contains("unindexed"));
            Assert.IsTrue(fieldNames.Contains("unstored"));
            // add more documents
            writer = new IndexWriter(d, new StandardAnalyzer(), false);
            // want to get some more segments here
            for (int i = 0; i < 5 * writer.mergeFactor; i++)
            {
                AddDocumentWithFields(writer);
            }
            // new fields are in some different segments (we hope)
            for (int i = 0; i < 5 * writer.mergeFactor; i++)
            {
                AddDocumentWithDifferentFields(writer);
            }
            writer.Close();
            // verify fields again
            reader     = IndexReader.Open(d);
            fieldNames = (System.Collections.Hashtable)reader.GetFieldNames();
            Assert.AreEqual(9, fieldNames.Count); // the following fields + an empty one (bug?!)
            Assert.IsTrue(fieldNames.Contains("keyword"));
            Assert.IsTrue(fieldNames.Contains("text"));
            Assert.IsTrue(fieldNames.Contains("unindexed"));
            Assert.IsTrue(fieldNames.Contains("unstored"));
            Assert.IsTrue(fieldNames.Contains("keyword2"));
            Assert.IsTrue(fieldNames.Contains("text2"));
            Assert.IsTrue(fieldNames.Contains("unindexed2"));
            Assert.IsTrue(fieldNames.Contains("unstored2"));

            // verify that only indexed fields were returned
            // Fixed: the assertions below previously checked `fieldNames` (the ALL set),
            // so they could never fail; they must check the collection under test.
            System.Collections.Hashtable indexedFieldNames = (System.Collections.Hashtable)reader.GetFieldNames(true);
            Assert.AreEqual(6, indexedFieldNames.Count);
            Assert.IsTrue(indexedFieldNames.Contains("keyword"));
            Assert.IsTrue(indexedFieldNames.Contains("text"));
            Assert.IsTrue(indexedFieldNames.Contains("unstored"));
            Assert.IsTrue(indexedFieldNames.Contains("keyword2"));
            Assert.IsTrue(indexedFieldNames.Contains("text2"));
            Assert.IsTrue(indexedFieldNames.Contains("unstored2"));

            // verify that only unindexed fields were returned
            System.Collections.Hashtable unindexedFieldNames = (System.Collections.Hashtable)reader.GetFieldNames(false);
            Assert.AreEqual(3, unindexedFieldNames.Count); // the following fields + an empty one
            Assert.IsTrue(unindexedFieldNames.Contains("unindexed"));
            Assert.IsTrue(unindexedFieldNames.Contains("unindexed2"));
        }
				/// <summary>
				/// Builds the set of field names to query, skipping the indexer's
				/// reserved/special fields.
				/// </summary>
				/// <param name="reader">reader providing the index's field names</param>
				/// <returns>the searchable field names as an array</returns>
				protected static string[] GetSearchFields(IndexReader reader)
				{
						var searchable = new System.Collections.Generic.List<string>();
						foreach (var name in reader.GetFieldNames(IndexReader.FieldOption.ALL))
						{
								// special index fields carry the reserved prefix and are excluded
								if (!name.StartsWith(LuceneIndexer.SpecialFieldPrefix))
								{
										searchable.Add(name);
								}
						}
						return searchable.ToArray();
				}
 /// <summary>
 /// Returns the unique field names matching <paramref name="fieldNames"/> across
 /// all sub-readers.  Fixed: the previous implementation accumulated into a List,
 /// so a field present in several sub-readers appeared multiple times, violating
 /// the "unique field names" contract honored by the sibling implementations.
 /// </summary>
 public override System.Collections.Generic.ICollection <string> GetFieldNames(IndexReader.FieldOption fieldNames)
 {
     EnsureOpen();
     // Dictionary keys act as a set of unique field names (matches the other ports).
     System.Collections.Generic.Dictionary <string, string> fieldSet = new System.Collections.Generic.Dictionary <string, string>();
     for (int i = 0; i < readers.Count; i++)
     {
         IndexReader reader = ((IndexReader)readers[i]);
         System.Collections.Generic.ICollection <string> names = reader.GetFieldNames(fieldNames);
         foreach (string name in names)
         {
             fieldSet[name] = name;
         }
     }
     return(fieldSet.Keys);
 }
Beispiel #4
0
        /// <summary>Attempts to open the Lucene index at <paramref name="path"/> and cache
        /// its basic statistics (field count, term count, newest file timestamp).</summary>
        /// <param name="path">file-system directory containing the index</param>
        /// <param name="options">load options (read-only flag, forced unlock)</param>
        /// <returns>true when the index was opened and analyzed; false on any failure</returns>
        public bool TryLoad(string path, IndexLoadOptions options)
        {
            try
            {
                _directory = FSDirectory.Open(path);
                _reader = IndexReader.Open(_directory, options.ReadOnly);

                // NOTE(review): the write lock is cleared AFTER the reader is opened; if a
                // stale lock could ever block IndexReader.Open, this should run first — confirm.
                if (options.ForceUnlock && _directory.FileExists(IndexWriter.WRITE_LOCK_NAME))
                {
                    _directory.ClearLock(IndexWriter.WRITE_LOCK_NAME);
                }

                var fields = _reader.GetFieldNames(IndexReader.FieldOption.ALL);
                _numFields = fields.Count;

                CountTerms();

                // Track the most recent modification time across all index files.
                foreach (var file in _directory.ListAll())
                {
                    try
                    {
                        string fpath = Path.Combine(_directory.Directory.ToString(), file);

                        var mod = File.GetLastWriteTimeUtc(fpath);
                        if (_lastModified == null || mod > _lastModified.Value)
                            _lastModified = mod;
                    }
                    catch
                    {
                        // ignore files we cannot stat; the timestamp stays best-effort
                    }
                }

                _loaded = true;
                return true;
            }
            catch
            {
                // Fixed: close anything partially opened so a failed load does not leak
                // file handles (the original simply dropped the references).
                try { if (_reader != null) _reader.Close(); }
                catch { /* best-effort cleanup */ }
                try { if (_directory != null) _directory.Close(); }
                catch { /* best-effort cleanup */ }

                _directory = null;
                _reader = null;
                _numFields = 0;
                _numTerms = 0;
                _loaded = false;
                _lastModified = null;
                return false;
            }
        }
Beispiel #5
0
        /// <summary>
        /// Returns the unique field names matching <paramref name="fieldNames"/> across
        /// all sub-readers.  Fixed: accumulating into a List returned duplicates when a
        /// field existed in more than one sub-reader; a Dictionary-as-set restores the
        /// "unique field names" contract used by the sibling implementations.
        /// </summary>
        public override ICollection <string> GetFieldNames(IndexReader.FieldOption fieldNames)
        {
            EnsureOpen();
            // Dictionary keys serve as the unique set of field names.
            Dictionary <string, string> fieldSet = new Dictionary <string, string>();

            for (int i = 0; i < readers.Count; i++)
            {
                IndexReader reader = readers[i];
                foreach (string name in reader.GetFieldNames(fieldNames))
                {
                    fieldSet[name] = name;
                }
            }
            return(fieldSet.Keys);
        }
 /// <summary>
 /// Collects the distinct field names matching <paramref name="fieldNames"/> from
 /// every reader in <paramref name="subReaders"/>.
 /// </summary>
 internal static System.Collections.Generic.ICollection <string> GetFieldNames(IndexReader.FieldOption fieldNames, IndexReader[] subReaders)
 {
     // The dictionary doubles as a set: keys are the unique field names.
     System.Collections.Generic.Dictionary <string, string> uniqueFields = new System.Collections.Generic.Dictionary <string, string>();
     foreach (IndexReader subReader in subReaders)
     {
         foreach (string fieldName in subReader.GetFieldNames(fieldNames))
         {
             uniqueFields[fieldName] = fieldName;
         }
     }
     return(uniqueFields.Keys);
 }
        /// <summary>Adds an IndexReader whose stored fields will not be returned.  Ignoring
        /// stored fields can accelerate search when only a subset of the readers needs
        /// to supply them.
        /// </summary>
        /// <param name="reader">the reader to add; must match this composite's doc counts</param>
        /// <param name="ignoreStoredFields">when true, this reader's stored fields are never returned</param>
        /// <throws>  IllegalArgumentException if not all indexes contain the same number </throws>
        /// <summary>     of documents
        /// </summary>
        /// <throws>  IllegalArgumentException if not all indexes have the same value </throws>
        /// <summary>     of {@link IndexReader#MaxDoc()}
        /// </summary>
        /// <throws>  IOException if there is a low-level IO error </throws>
        public virtual void  Add(IndexReader reader, bool ignoreStoredFields)
        {
            EnsureOpen();

            // The first reader added fixes the statistics every later reader must match.
            if (readers.Count == 0)
            {
                this.maxDoc       = reader.MaxDoc();
                this.numDocs      = reader.NumDocs();
                this.hasDeletions = reader.HasDeletions();
            }

            // check compatibility: all readers must share the same doc numbering
            if (reader.MaxDoc() != maxDoc)
            {
                throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc());
            }
            if (reader.NumDocs() != numDocs)
            {
                throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
            }

            ICollection <string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
            readerToFields[reader] = fields;

            // The first reader providing a field wins; later readers never override it.
            foreach (string field in fields)
            {
                if (!fieldToReader.ContainsKey(field))
                {
                    fieldToReader[field] = reader;
                }
            }

            if (!ignoreStoredFields)
            {
                storedFieldReaders.Add(reader); // add to storedFieldReaders
            }
            readers.Add(reader);

            if (incRefReaders)
            {
                reader.IncRef();
            }
            decrefOnClose.Add(incRefReaders);
        }
 /// <summary>
 /// Returns the union of field names, without duplicates, reported by every
 /// sub-reader for the given <paramref name="fieldNames"/> option.
 /// </summary>
 public override System.Collections.Generic.ICollection <string> GetFieldNames(IndexReader.FieldOption fieldNames)
 {
     EnsureOpen();
     // Dictionary keys act as a set of unique field names.
     System.Collections.Generic.Dictionary <string, string> uniqueFields = new System.Collections.Generic.Dictionary <string, string>();
     for (int i = 0; i < readers.Count; i++)
     {
         foreach (string name in readers[i].GetFieldNames(fieldNames))
         {
             uniqueFields[name] = name;
         }
     }
     return(uniqueFields.Keys);
 }
Beispiel #9
0
        /// <summary>Adds an IndexReader whose stored fields will not be returned.  Ignoring
        /// stored fields can accelerate search when only a subset of the readers needs
        /// to supply them.
        /// </summary>
        /// <param name="reader">the reader to add; must match this composite's doc counts</param>
        /// <param name="ignoreStoredFields">when true, this reader's stored fields are never returned</param>
        /// <throws>  IllegalArgumentException if not all indexes contain the same number </throws>
        /// <summary>     of documents
        /// </summary>
        /// <throws>  IllegalArgumentException if not all indexes have the same value </throws>
        /// <summary>     of <see cref="IndexReader.MaxDoc" />
        /// </summary>
        /// <throws>  IOException if there is a low-level IO error </throws>
        public virtual void  Add(IndexReader reader, bool ignoreStoredFields)
        {
            EnsureOpen();

            // The first reader added establishes the statistics later readers must match.
            if (readers.Count == 0)
            {
                this.maxDoc       = reader.MaxDoc;
                this.numDocs      = reader.NumDocs();
                this.hasDeletions = reader.HasDeletions;
            }

            // check compatibility: all readers must share the same doc numbering
            if (reader.MaxDoc != maxDoc)
            {
                throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc);
            }
            if (reader.NumDocs() != numDocs)
            {
                throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
            }

            ICollection <string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
            readerToFields[reader] = fields;

            foreach (string field in fields)
            {
                // First reader providing a field wins; the ContainsKey check first
                // mimics the Java HashMap.get(...) == null behavior of the original.
                if (!fieldToReader.ContainsKey(field) || fieldToReader[field] == null)
                {
                    fieldToReader[field] = reader;
                }
            }

            if (!ignoreStoredFields)
            {
                storedFieldReaders.Add(reader); // add to storedFieldReaders
            }
            readers.Add(reader);

            if (incRefReaders)
            {
                reader.IncRef();
            }
            decrefOnClose.Add(incRefReaders);
        }
 /// <summary>
 /// Returns the unique field names matching <paramref name="fieldNames"/> across
 /// all sub-readers, set-style in a Hashtable (key == value == field name).
 /// </summary>
 public override System.Collections.ICollection GetFieldNames(IndexReader.FieldOption fieldNames)
 {
     System.Collections.Hashtable fieldSet = new System.Collections.Hashtable();
     for (int i = 0; i < readers.Count; i++)
     {
         IndexReader reader = ((IndexReader)readers[i]);
         System.Collections.ICollection names = reader.GetFieldNames(fieldNames);
         // NOTE(review): this assumes the sub-reader returns a Hashtable-like
         // collection whose enumerator yields DictionaryEntry items — confirm;
         // a plain list of strings would make the cast below throw.
         for (System.Collections.IEnumerator iterator = names.GetEnumerator(); iterator.MoveNext();)
         {
             System.Collections.DictionaryEntry fi = (System.Collections.DictionaryEntry)iterator.Current;
             System.String s = fi.Key.ToString();
             if (fieldSet.ContainsKey(s) == false)
             {
                 fieldSet.Add(s, s);
             }
         }
     }
     return(fieldSet);
 }
Beispiel #11
0
 /// <seealso cref="IndexReader.GetFieldNames(boolean)">
 /// </seealso>
 /// <summary>
 /// Returns the unique field names across all sub-readers, restricted to indexed
 /// or unindexed fields according to <paramref name="indexed"/>, set-style in a
 /// Hashtable (key == value == field name).
 /// </summary>
 public override System.Collections.ICollection GetFieldNames(bool indexed)
 {
     // maintain a unique set of field names
     System.Collections.Hashtable fieldSet = new System.Collections.Hashtable();
     for (int i = 0; i < subReaders.Length; i++)
     {
         IndexReader reader = subReaders[i];
         System.Collections.ICollection names = reader.GetFieldNames(indexed);
         // NOTE(review): assumes the sub-reader's collection enumerates
         // DictionaryEntry items (i.e. is a Hashtable) — confirm; a plain
         // list of strings would make the cast below throw.
         for (System.Collections.IEnumerator iterator = names.GetEnumerator(); iterator.MoveNext();)
         {
             System.Collections.DictionaryEntry fi = (System.Collections.DictionaryEntry)iterator.Current;
             System.String s = fi.Key.ToString();
             if (fieldSet.ContainsKey(s) == false)
             {
                 fieldSet.Add(s, s);
             }
         }
     }
     return(fieldSet);
 }
        /// <summary>Add an IndexReader whose stored fields will not be returned.  This can
        /// accellerate search when stored fields are only needed from a subset of
        /// the IndexReaders.
        ///
        /// </summary>
        /// <throws>  IllegalArgumentException if not all indexes contain the same number </throws>
        /// <summary>     of documents
        /// </summary>
        /// <throws>  IllegalArgumentException if not all indexes have the same value </throws>
        /// <summary>     of {@link IndexReader#MaxDoc()}
        /// </summary>
        public virtual void  Add(IndexReader reader, bool ignoreStoredFields)
        {
            // The first reader added fixes the statistics later readers must match.
            if (readers.Count == 0)
            {
                this.maxDoc       = reader.MaxDoc();
                this.numDocs      = reader.NumDocs();
                this.hasDeletions = reader.HasDeletions();
            }

            if (reader.MaxDoc() != maxDoc)
            {
                // check compatibility
                throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc());
            }
            if (reader.NumDocs() != numDocs)
            {
                throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
            }

            System.Collections.ICollection fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
            readerToFields[reader] = fields;
            System.Collections.IEnumerator i = fields.GetEnumerator();
            // NOTE(review): the DictionaryEntry cast below assumes GetFieldNames
            // returned a Hashtable-like collection — confirm; a plain list of
            // strings would make the cast throw.
            while (i.MoveNext())
            {
                System.Collections.DictionaryEntry fi = (System.Collections.DictionaryEntry)i.Current;

                // update fieldToReader map
                System.String field = fi.Key.ToString();
                if (fieldToReader[field] == null)
                {
                    fieldToReader[field] = reader;
                }
            }

            if (!ignoreStoredFields)
            {
                storedFieldReaders.Add(reader);                 // add to storedFieldReaders
            }
            readers.Add(reader);
        }
Beispiel #13
0
        /// <summary>Merges the field names/flags of all readers into a single FieldInfos,
        /// writes it, and then copies every non-deleted document's stored fields into
        /// the target segment.</summary>
        /// <returns> The number of documents in all of the readers
        /// </returns>
        /// <throws>  IOException </throws>
        private int MergeFields()
        {
            fieldInfos = new FieldInfos(); // merged field names accumulate here

            // Register every field with the indexing/term-vector flags it needs,
            // most specific term-vector options first.
            foreach (IndexReader reader in readers)
            {
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true);
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION), true, true, false);
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true);
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR), true, false, false);
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.INDEXED), false, false, false);
                fieldInfos.Add(reader.GetFieldNames(IndexReader.FieldOption.UNINDEXED), false);
            }
            fieldInfos.Write(directory, segment + ".fnm");

            FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);

            // for merging we don't want to compress/uncompress the data, so to tell the FieldsReader that we're
            // in  merge mode, we use this FieldSelector
            FieldSelector fieldSelectorMerge = new AnonymousClassFieldSelector(this);

            int docCount = 0;
            try
            {
                foreach (IndexReader reader in readers)
                {
                    int maxDoc = reader.MaxDoc();
                    for (int docNum = 0; docNum < maxDoc; docNum++)
                    {
                        // skip deleted docs
                        if (reader.IsDeleted(docNum))
                            continue;
                        fieldsWriter.AddDocument(reader.Document(docNum, fieldSelectorMerge));
                        docCount++;
                    }
                }
            }
            finally
            {
                fieldsWriter.Close();
            }
            return(docCount);
        }
Beispiel #14
0
        /// <summary>Builds a DocIdSet containing every document matching at least one of
        /// the filter's terms.</summary>
        /// <param name="reader">the index reader to evaluate against</param>
        /// <returns>a FixedBitSet over the reader's documents; empty when the index has
        /// no fields, or when a referenced field has no terms at all</returns>
        public override DocIdSet GetDocIdSet(IndexReader reader)
        {
            var result = new FixedBitSet(reader.MaxDoc);
            var fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);

            if (fields == null || fields.Count == 0)
            {
                return result;
            }

            String lastField = null;
            TermsEnumCompatibility termsEnum = null;
            foreach (Term term in terms)
            {
                // (Re)open a terms enum whenever we move to a different field.
                if (!term.Field.Equals(lastField))
                {
                    var termsC = new TermsEnumCompatibility(reader, term.Field);
                    if (termsC.Term() == null)
                    {
                        // Field has no terms: nothing more can match (original behavior:
                        // bail out entirely rather than continue with remaining terms).
                        return result;
                    }
                    termsEnum = termsC;
                    lastField = term.Field;
                }

                // Fixed: the original tested "terms != null", which is always true inside
                // this foreach (flagged by its own TODO); the guard that matters is the
                // enum for the current field.
                if (termsEnum != null)
                {
                    if (termsEnum.SeekCeil(term.Text) == TermsEnumCompatibility.SeekStatus.FOUND)
                    {
                        termsEnum.Docs(result);
                    }
                }
            }
            return result;
        }
Beispiel #15
0
        /// <summary>Merges the field names/flags of all readers into a single FieldInfos,
        /// writes it, and then copies every non-deleted document's stored fields into
        /// the target segment.</summary>
        /// <returns> The number of documents in all of the readers
        /// </returns>
        /// <throws>  IOException </throws>
        private int MergeFields()
        {
            fieldInfos = new FieldInfos(); // merged field names accumulate here

            // Register every field with the indexing/term-vector flags it needs,
            // most specific term-vector options first.
            foreach (IndexReader reader in readers)
            {
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true);
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION), true, true, false);
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true);
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR), true, false, false);
                AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.INDEXED), false, false, false);
                fieldInfos.Add(reader.GetFieldNames(IndexReader.FieldOption.UNINDEXED), false);
            }
            fieldInfos.Write(directory, segment + ".fnm");

            FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);

            int docCount = 0;
            try
            {
                foreach (IndexReader reader in readers)
                {
                    int maxDoc = reader.MaxDoc();
                    for (int docNum = 0; docNum < maxDoc; docNum++)
                    {
                        // skip deleted docs
                        if (reader.IsDeleted(docNum))
                            continue;
                        fieldsWriter.AddDocument(reader.Document(docNum));
                        docCount++;
                    }
                }
            }
            finally
            {
                fieldsWriter.Close();
            }
            return(docCount);
        }
		/// <summary>Add an IndexReader whose stored fields will not be returned.  Ignoring
		/// stored fields can accelerate search when only a subset of the readers needs
		/// to supply them.
		/// </summary>
		/// <param name="reader">reader to add; must agree with this composite's doc counts</param>
		/// <param name="ignoreStoredFields">when true, stored fields of this reader are never returned</param>
		/// <throws>  IllegalArgumentException if not all indexes contain the same number </throws>
		/// <summary>     of documents
		/// </summary>
		/// <throws>  IllegalArgumentException if not all indexes have the same value </throws>
		/// <summary>     of <see cref="IndexReader.MaxDoc" />
		/// </summary>
		/// <throws>  IOException if there is a low-level IO error </throws>
		public virtual void  Add(IndexReader reader, bool ignoreStoredFields)
		{
			EnsureOpen();
			
			// The first reader added establishes the statistics later readers must match.
			if (readers.Count == 0)
			{
				this.maxDoc = reader.MaxDoc;
				this.numDocs = reader.NumDocs();
				this.hasDeletions = reader.HasDeletions;
			}
			
			// check compatibility: all readers must share the same doc numbering
			if (reader.MaxDoc != maxDoc)
			{
				throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc);
			}
			if (reader.NumDocs() != numDocs)
			{
				throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
			}
			
			ICollection<string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
			readerToFields[reader] = fields;
			foreach (string field in fields)
			{
				// First reader providing a field wins; the ContainsKey check first
				// mimics the Java HashMap.get(...) == null behavior of the original.
				if (!fieldToReader.ContainsKey(field) || fieldToReader[field] == null)
				{
					fieldToReader[field] = reader;
				}
			}
			
			if (!ignoreStoredFields)
			{
				storedFieldReaders.Add(reader); // add to storedFieldReaders
			}
			readers.Add(reader);
			
			if (incRefReaders)
			{
				reader.IncRef();
			}
			decrefOnClose.Add(incRefReaders);
		}
 /// <summary>
 /// Collects the distinct field names matching <paramref name="fieldNames"/> from
 /// every reader in <paramref name="subReaders"/>, set-style in a Hashtable
 /// (key == value == field name).
 /// </summary>
 internal static System.Collections.ICollection GetFieldNames(IndexReader.FieldOption fieldNames, IndexReader[] subReaders)
 {
     // maintain a unique set of field names
     System.Collections.Hashtable fieldSet = new System.Collections.Hashtable();
     for (int i = 0; i < subReaders.Length; i++)
     {
         IndexReader reader = subReaders[i];
         // NOTE(review): assumes each sub-reader's GetFieldNames result actually
         // implements IDictionary (e.g. a Hashtable) — confirm; any other
         // ICollection would make this cast throw.
         System.Collections.IEnumerator names = ((System.Collections.IDictionary)reader.GetFieldNames(fieldNames)).Keys.GetEnumerator();
         while (names.MoveNext())
         {
             if (!fieldSet.ContainsKey(names.Current))
             {
                 fieldSet.Add(names.Current, names.Current);
             }
         }
     }
     return(fieldSet);
 }
Beispiel #18
0
		/// <summary>Asserts that two IndexReaders see exactly the same index: doc counts,
		/// field names, norms, deletions, stored fields, and the full term dictionary
		/// with postings and positions.</summary>
		/// <param name="index1">first reader to compare</param>
		/// <param name="index2">second reader to compare</param>
		public static void  AssertIndexEquals(IndexReader index1, IndexReader index2)
		{
			Assert.AreEqual(index1.NumDocs(), index2.NumDocs(), "IndexReaders have different values for numDocs.");
			Assert.AreEqual(index1.MaxDoc, index2.MaxDoc, "IndexReaders have different values for maxDoc.");
			Assert.AreEqual(index1.HasDeletions, index2.HasDeletions, "Only one IndexReader has deletions.");
			Assert.AreEqual(index1.IsOptimized(), index2.IsOptimized(), "Only one index is optimized.");
			
			// check field names
			System.Collections.Generic.ICollection<string> fieldsNames1 = index1.GetFieldNames(FieldOption.ALL);
			// Fixed: this previously queried index1 twice, so the field-name comparison
			// below could never fail even when the two indexes differed.
			System.Collections.Generic.ICollection<string> fieldsNames2 = index2.GetFieldNames(FieldOption.ALL);

			System.Collections.Generic.ICollection<IFieldable> fields1 = null;
			System.Collections.Generic.ICollection<IFieldable> fields2 = null;

			Assert.AreEqual(fieldsNames1.Count, fieldsNames2.Count, "IndexReaders have different numbers of fields.");
			System.Collections.IEnumerator it1 = fieldsNames1.GetEnumerator();
			System.Collections.IEnumerator it2 = fieldsNames2.GetEnumerator();
			while (it1.MoveNext() && it2.MoveNext())
			{
				Assert.AreEqual((System.String) it1.Current, (System.String) it2.Current, "Different field names.");
			}
			
			// check norms
			it1 = fieldsNames1.GetEnumerator();
			while (it1.MoveNext())
			{
				System.String curField = (System.String) it1.Current;
				byte[] norms1 = index1.Norms(curField);
				byte[] norms2 = index2.Norms(curField);
				if (norms1 != null && norms2 != null)
				{
					Assert.AreEqual(norms1.Length, norms2.Length);
					for (int i = 0; i < norms1.Length; i++)
					{
						Assert.AreEqual(norms1[i], norms2[i], "Norm different for doc " + i + " and field '" + curField + "'.");
					}
				}
				else
				{
					// one (or both) fields have no norms: both must be null
					Assert.AreSame(norms1, norms2);
				}
			}
			
			// check deletions
			for (int i = 0; i < index1.MaxDoc; i++)
			{
				Assert.AreEqual(index1.IsDeleted(i), index2.IsDeleted(i), "Doc " + i + " only deleted in one index.");
			}
			
			// check stored fields
			for (int i = 0; i < index1.MaxDoc; i++)
			{
				if (!index1.IsDeleted(i))
				{
					Document doc1 = index1.Document(i);
					Document doc2 = index2.Document(i);
					fields1 = doc1.GetFields();
					fields2 = doc2.GetFields();
					Assert.AreEqual(fields1.Count, fields2.Count, "Different numbers of fields for doc " + i + ".");
					it1 = fields1.GetEnumerator();
					it2 = fields2.GetEnumerator();
					while (it1.MoveNext() && it2.MoveNext())
					{
						Field curField1 = (Field) it1.Current;
						Field curField2 = (Field) it2.Current;
						Assert.AreEqual(curField1.Name, curField2.Name, "Different fields names for doc " + i + ".");
						Assert.AreEqual(curField1.StringValue, curField2.StringValue, "Different field values for doc " + i + ".");
					}
				}
			}
			
			// check dictionary and posting lists
			TermEnum enum1 = index1.Terms();
			TermEnum enum2 = index2.Terms();
			TermPositions tp1 = index1.TermPositions();
			TermPositions tp2 = index2.TermPositions();
			while (enum1.Next())
			{
				Assert.IsTrue(enum2.Next());
				Assert.AreEqual(enum1.Term, enum2.Term, "Different term in dictionary.");
				tp1.Seek(enum1.Term);
				tp2.Seek(enum1.Term);
				while (tp1.Next())
				{
					Assert.IsTrue(tp2.Next());
					Assert.AreEqual(tp1.Doc, tp2.Doc, "Different doc id in postinglist of term " + enum1.Term + ".");
					Assert.AreEqual(tp1.Freq, tp2.Freq, "Different term frequence in postinglist of term " + enum1.Term + ".");
					for (int i = 0; i < tp1.Freq; i++)
					{
						Assert.AreEqual(tp1.NextPosition(), tp2.NextPosition(), "Different positions in postinglist of term " + enum1.Term + ".");
					}
				}
			}
		}
Beispiel #19
0
        /// <summary> </summary>
        /// <returns> The number of documents in all of the readers
        /// </returns>
        /// <throws>  CorruptIndexException if the index is corrupt </throws>
        /// <throws>  IOException if there is a low-level IO error </throws>
        private int MergeFields()
        {
            if (!mergeDocStores)
            {
                // When we are not merging by doc stores, that means
                // all segments were written as part of a single
                // autoCommit=false IndexWriter session, so their field
                // name -> number mapping are the same.  So, we start
                // with the fieldInfos of the last segment in this
                // case, to keep that numbering.
                SegmentReader sr = (SegmentReader)readers[readers.Count - 1];
                fieldInfos = (FieldInfos)sr.fieldInfos.Clone();
            }
            else
            {
                fieldInfos = new FieldInfos();                 // merge field names
            }

            for (int i = 0; i < readers.Count; i++)
            {
                IndexReader reader = (IndexReader)readers[i];
                if (reader is SegmentReader)
                {
                    SegmentReader segmentReader = (SegmentReader)reader;
                    for (int j = 0; j < segmentReader.GetFieldInfos().Size(); j++)
                    {
                        FieldInfo fi = segmentReader.GetFieldInfos().FieldInfo(j);
                        fieldInfos.Add(fi.name, fi.isIndexed, fi.storeTermVector, fi.storePositionWithTermVector, fi.storeOffsetWithTermVector, !reader.HasNorms(fi.name), fi.storePayloads);
                    }
                }
                else
                {
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION), true, true, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR), true, false, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.STORES_PAYLOADS), false, false, false, true);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.INDEXED), false, false, false, false);
                    fieldInfos.Add(reader.GetFieldNames(IndexReader.FieldOption.UNINDEXED), false);
                }
            }
            fieldInfos.Write(directory, segment + ".fnm");

            int docCount = 0;

            if (mergeDocStores)
            {
                // If the i'th reader is a SegmentReader and has
                // identical fieldName -> number mapping, then this
                // array will be non-null at position i:
                SegmentReader[] matchingSegmentReaders = new SegmentReader[readers.Count];

                // If this reader is a SegmentReader, and all of its
                // field name -> number mappings match the "merged"
                // FieldInfos, then we can do a bulk copy of the
                // stored fields:
                for (int i = 0; i < readers.Count; i++)
                {
                    IndexReader reader = (IndexReader)readers[i];
                    if (reader is SegmentReader)
                    {
                        SegmentReader segmentReader     = (SegmentReader)reader;
                        bool          same              = true;
                        FieldInfos    segmentFieldInfos = segmentReader.GetFieldInfos();
                        for (int j = 0; same && j < segmentFieldInfos.Size(); j++)
                        {
                            same = fieldInfos.FieldName(j).Equals(segmentFieldInfos.FieldName(j));
                        }
                        if (same)
                        {
                            matchingSegmentReaders[i] = segmentReader;
                        }
                    }
                }

                // Used for bulk-reading raw bytes for stored fields
                int[] rawDocLengths = new int[MAX_RAW_MERGE_DOCS];

                // for merging we don't want to compress/uncompress the data, so to tell the FieldsReader that we're
                // in  merge mode, we use this FieldSelector
                FieldSelector fieldSelectorMerge = new AnonymousClassFieldSelector(this);

                // merge field values
                FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);

                try
                {
                    for (int i = 0; i < readers.Count; i++)
                    {
                        IndexReader   reader = (IndexReader)readers[i];
                        SegmentReader matchingSegmentReader = matchingSegmentReaders[i];
                        FieldsReader  matchingFieldsReader;
                        if (matchingSegmentReader != null)
                        {
                            matchingFieldsReader = matchingSegmentReader.GetFieldsReader();
                        }
                        else
                        {
                            matchingFieldsReader = null;
                        }
                        int maxDoc = reader.MaxDoc();
                        for (int j = 0; j < maxDoc;)
                        {
                            if (!reader.IsDeleted(j))
                            {
                                // skip deleted docs
                                if (matchingSegmentReader != null)
                                {
                                    // We can optimize this case (doing a bulk
                                    // byte copy) since the field numbers are
                                    // identical
                                    int start   = j;
                                    int numDocs = 0;
                                    do
                                    {
                                        j++;
                                        numDocs++;
                                    }while (j < maxDoc && !matchingSegmentReader.IsDeleted(j) && numDocs < MAX_RAW_MERGE_DOCS);

                                    IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, start, numDocs);
                                    fieldsWriter.AddRawDocuments(stream, rawDocLengths, numDocs);
                                    docCount += numDocs;
                                    if (checkAbort != null)
                                    {
                                        checkAbort.Work(300 * numDocs);
                                    }
                                }
                                else
                                {
                                    fieldsWriter.AddDocument(reader.Document(j, fieldSelectorMerge));
                                    j++;
                                    docCount++;
                                    if (checkAbort != null)
                                    {
                                        checkAbort.Work(300);
                                    }
                                }
                            }
                            else
                            {
                                j++;
                            }
                        }
                    }
                }
                finally
                {
                    fieldsWriter.Close();
                }
            }
            // If we are skipping the doc stores, that means there
            // are no deletions in any of these segments, so we
            // just sum numDocs() of each segment to get total docCount
            else
            {
                for (int i = 0; i < readers.Count; i++)
                {
                    docCount += ((IndexReader)readers[i]).NumDocs();
                }
            }

            return(docCount);
        }
        /// <summary>Merges the field infos of all readers into a single FieldInfos,
        /// writes the merged field-name file (segment + ".fnm"), and — unless doc
        /// stores are being skipped — copies all stored fields into the new segment.</summary>
        /// <returns> The number of documents in all of the readers
        /// </returns>
        /// <throws>  CorruptIndexException if the index is corrupt </throws>
        /// <throws>  IOException if there is a low-level IO error </throws>
        private int MergeFields()
        {
            if (!mergeDocStores)
            {
                // When we are not merging by doc stores, that means
                // all segments were written as part of a single
                // autoCommit=false IndexWriter session, so their field
                // name -> number mapping are the same.  So, we start
                // with the fieldInfos of the last segment in this
                // case, to keep that numbering.
                SegmentReader sr = (SegmentReader)readers[readers.Count - 1];
                fieldInfos = (FieldInfos)sr.core.fieldInfos.Clone();
            }
            else
            {
                fieldInfos = new FieldInfos();                 // merge field names
            }

            for (System.Collections.IEnumerator iter = readers.GetEnumerator(); iter.MoveNext();)
            {
                IndexReader reader = (IndexReader)iter.Current;
                if (reader is SegmentReader)
                {
                    // A SegmentReader exposes its FieldInfos directly, so every
                    // per-field flag can be merged in one pass.
                    SegmentReader segmentReader       = (SegmentReader)reader;
                    FieldInfos    readerFieldInfos    = segmentReader.FieldInfos();
                    int           numReaderFieldInfos = readerFieldInfos.Size();
                    for (int j = 0; j < numReaderFieldInfos; j++)
                    {
                        FieldInfo fi = readerFieldInfos.FieldInfo(j);
                        fieldInfos.Add(fi.name, fi.isIndexed, fi.storeTermVector, fi.storePositionWithTermVector, fi.storeOffsetWithTermVector, !reader.HasNorms(fi.name), fi.storePayloads, fi.omitTermFreqAndPositions);
                    }
                }
                else
                {
                    // Generic reader: reconstruct the per-field flags by querying
                    // the field-option buckets, most specific option first.
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION), true, true, false, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR), true, false, false, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.OMIT_TERM_FREQ_AND_POSITIONS), false, false, false, false, true);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.STORES_PAYLOADS), false, false, false, true, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.INDEXED), false, false, false, false, false);
                    fieldInfos.Add(reader.GetFieldNames(FieldOption.UNINDEXED), false);
                }
            }
            fieldInfos.Write(directory, segment + ".fnm");

            int docCount = 0;

            SetMatchingSegmentReaders();

            if (mergeDocStores)
            {
                // for merging we don't want to compress/uncompress the data, so to tell the FieldsReader that we're
                // in  merge mode, we use this FieldSelector
                FieldSelector fieldSelectorMerge = new AnonymousClassFieldSelector(this);

                // merge field values
                FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);

                try
                {
                    int idx = 0;
                    for (System.Collections.IEnumerator iter = readers.GetEnumerator(); iter.MoveNext();)
                    {
                        IndexReader   reader = (IndexReader)iter.Current;
                        SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
                        FieldsReader  matchingFieldsReader  = null;
                        if (matchingSegmentReader != null)
                        {
                            // Only use the matching reader for bulk raw-doc copying
                            // when its FieldsReader actually supports raw reads.
                            FieldsReader fieldsReader = matchingSegmentReader.GetFieldsReader();
                            if (fieldsReader != null && fieldsReader.CanReadRawDocs())
                            {
                                matchingFieldsReader = fieldsReader;
                            }
                        }
                        if (reader.HasDeletions())
                        {
                            docCount += CopyFieldsWithDeletions(fieldSelectorMerge, fieldsWriter, reader, matchingFieldsReader);
                        }
                        else
                        {
                            docCount += CopyFieldsNoDeletions(fieldSelectorMerge, fieldsWriter, reader, matchingFieldsReader);
                        }
                    }
                }
                finally
                {
                    fieldsWriter.Close();
                }

                System.String fileName      = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
                long          fdxFileLength = directory.FileLength(fileName);

                // Sanity check: the fields-index file holds a 4-byte header plus
                // one 8-byte pointer per document.
                if (4 + ((long)docCount) * 8 != fdxFileLength)
                {
                    // This is most likely a bug in Sun JRE 1.6.0_04/_05;
                    // we detect that the bug has struck, here, and
                    // throw an exception to prevent the corruption from
                    // entering the index.  See LUCENE-1282 for
                    // details.
                    throw new System.SystemException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.FileExists(fileName) + "; now aborting this merge to prevent index corruption");
                }
            }
            // If we are skipping the doc stores, that means there
            // are no deletions in any of these segments, so we
            // just sum numDocs() of each segment to get total docCount
            else
            {
                for (System.Collections.IEnumerator iter = readers.GetEnumerator(); iter.MoveNext();)
                {
                    docCount += ((IndexReader)iter.Current).NumDocs();
                }
            }

            return(docCount);
        }
		/// <summary>Collects the indexed field names of the given reader, filtering out
		/// the internal document-id and reduce-key fields.</summary>
		private static string[] GetFieldNames(IndexReader indexReader)
		{
			return indexReader
				.GetFieldNames(IndexReader.FieldOption.INDEXED)
				.Where(field => field != Constants.DocumentIdFieldName)
				.Where(field => field != Constants.ReduceKeyFieldName)
				.ToArray();
		}
Beispiel #22
0
        /// <summary>Merges the field infos of all readers into a single FieldInfos,
        /// writes the merged field-name file (segment + ".fnm"), and — unless doc
        /// stores are being skipped — copies all stored fields into the new segment.</summary>
        /// <returns> The number of documents in all of the readers
        /// </returns>
        /// <throws>  CorruptIndexException if the index is corrupt </throws>
        /// <throws>  IOException if there is a low-level IO error </throws>
        private int MergeFields()
        {
            if (!mergeDocStores)
            {
                // When we are not merging by doc stores, that means
                // all segments were written as part of a single
                // autoCommit=false IndexWriter session, so their field
                // name -> number mapping are the same.  So, we start
                // with the fieldInfos of the last segment in this
                // case, to keep that numbering.
                SegmentReader sr = (SegmentReader)readers[readers.Count - 1];
                fieldInfos = (FieldInfos)sr.fieldInfos.Clone();
            }
            else
            {
                fieldInfos = new FieldInfos();                 // merge field names
            }

            for (int i = 0; i < readers.Count; i++)
            {
                IndexReader reader = (IndexReader)readers[i];
                if (reader is SegmentReader)
                {
                    // A SegmentReader exposes its FieldInfos directly, so every
                    // per-field flag can be merged in one pass.
                    SegmentReader segmentReader = (SegmentReader)reader;
                    for (int j = 0; j < segmentReader.GetFieldInfos().Size(); j++)
                    {
                        FieldInfo fi = segmentReader.GetFieldInfos().FieldInfo(j);
                        fieldInfos.Add(fi.name, fi.isIndexed, fi.storeTermVector, fi.storePositionWithTermVector, fi.storeOffsetWithTermVector, !reader.HasNorms(fi.name), fi.storePayloads, fi.omitTf);
                    }
                }
                else
                {
                    // Generic reader: reconstruct the per-field flags by querying
                    // the field-option buckets, most specific option first.
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION), true, true, false, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.TERMVECTOR), true, false, false, false, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.OMIT_TF), false, false, false, false, true);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.STORES_PAYLOADS), false, false, false, true, false);
                    AddIndexed(reader, fieldInfos, reader.GetFieldNames(IndexReader.FieldOption.INDEXED), false, false, false, false, false);
                    fieldInfos.Add(reader.GetFieldNames(IndexReader.FieldOption.UNINDEXED), false);
                }
            }
            fieldInfos.Write(directory, segment + ".fnm");

            int docCount = 0;

            SetMatchingSegmentReaders();

            if (mergeDocStores)
            {
                // for merging we don't want to compress/uncompress the data, so to tell the FieldsReader that we're
                // in  merge mode, we use this FieldSelector
                FieldSelector fieldSelectorMerge = new AnonymousClassFieldSelector(this);

                // merge field values
                FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);

                try
                {
                    for (int i = 0; i < readers.Count; i++)
                    {
                        IndexReader   reader = (IndexReader)readers[i];
                        SegmentReader matchingSegmentReader = matchingSegmentReaders[i];
                        FieldsReader  matchingFieldsReader;
                        bool          hasMatchingReader;
                        if (matchingSegmentReader != null)
                        {
                            // Only use the matching reader for bulk raw-doc copying
                            // when its FieldsReader actually supports raw reads.
                            FieldsReader fieldsReader = matchingSegmentReader.GetFieldsReader();
                            if (fieldsReader != null && !fieldsReader.CanReadRawDocs())
                            {
                                matchingFieldsReader = null;
                                hasMatchingReader    = false;
                            }
                            else
                            {
                                matchingFieldsReader = fieldsReader;
                                hasMatchingReader    = true;
                            }
                        }
                        else
                        {
                            hasMatchingReader    = false;
                            matchingFieldsReader = null;
                        }
                        int  maxDoc       = reader.MaxDoc();
                        bool hasDeletions = reader.HasDeletions();
                        for (int j = 0; j < maxDoc;)
                        {
                            if (!hasDeletions || !reader.IsDeleted(j))
                            { // skip deleted docs
                                if (hasMatchingReader)
                                {
                                    // We can optimize this case (doing a bulk
                                    // byte copy) since the field numbers are
                                    // identical
                                    int start   = j;
                                    int numDocs = 0;
                                    do
                                    {
                                        j++;
                                        numDocs++;
                                        if (j >= maxDoc)
                                        {
                                            break;
                                        }
                                        if (hasDeletions && matchingSegmentReader.IsDeleted(j))
                                        {
                                            j++;
                                            break;
                                        }
                                    } while (numDocs < MAX_RAW_MERGE_DOCS);

                                    IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, start, numDocs);
                                    fieldsWriter.AddRawDocuments(stream, rawDocLengths, numDocs);
                                    docCount += numDocs;
                                    if (checkAbort != null)
                                    {
                                        checkAbort.Work(300 * numDocs);
                                    }
                                }
                                else
                                {
                                    // NOTE: it's very important to first assign
                                    // to doc then pass it to
                                    // termVectorsWriter.addAllDocVectors; see
                                    // LUCENE-1282
                                    Document doc = reader.Document(j, fieldSelectorMerge);
                                    fieldsWriter.AddDocument(doc);
                                    j++;
                                    docCount++;
                                    if (checkAbort != null)
                                    {
                                        checkAbort.Work(300);
                                    }
                                }
                            }
                            else
                            {
                                j++;
                            }
                        }
                    }
                }
                finally
                {
                    fieldsWriter.Close();
                }

                long fdxFileLength = directory.FileLength(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION);

                // {{dougsale-2.4.0}
                // this shouldn't be a problem for us - if it is,
                // then it's not a JRE bug...
                //if (4+docCount*8 != fdxFileLength)
                //  // This is most likely a bug in Sun JRE 1.6.0_04/_05;
                //  // we detect that the bug has struck, here, and
                //  // throw an exception to prevent the corruption from
                //  // entering the index.  See LUCENE-1282 for
                //  // details.
                //  throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + "; now aborting this merge to prevent index corruption");
            }
            else
            {
                // If we are skipping the doc stores, that means there
                // are no deletions in any of these segments, so we
                // just sum numDocs() of each segment to get total docCount
                for (int i = 0; i < readers.Count; i++)
                {
                    docCount += ((IndexReader)readers[i]).NumDocs();
                }
            }

            return(docCount);
        }
Beispiel #23
0
 /// <summary>Verifies this reader is still open, then delegates the field-name
 /// lookup for the given option to the wrapped reader.</summary>
 public override System.Collections.Generic.ICollection <string> GetFieldNames(IndexReader.FieldOption fieldNames)
 {
     EnsureOpen();
     System.Collections.Generic.ICollection<string> names = in_Renamed.GetFieldNames(fieldNames);
     return names;
 }
Beispiel #24
0
		/// <summary>Adds an IndexReader to this parallel reader. When
		/// <c>ignoreStoredFields</c> is true, the reader's stored fields will not be
		/// returned, which can accelerate search when stored fields are only needed
		/// from a subset of the IndexReaders.
		/// </summary>
		/// <throws>  IllegalArgumentException if not all indexes contain the same number </throws>
		/// <summary>     of documents
		/// </summary>
		/// <throws>  IllegalArgumentException if not all indexes have the same value </throws>
		/// <summary>     of {@link IndexReader#MaxDoc()}
		/// </summary>
		/// <throws>  IOException if there is a low-level IO error </throws>
		public virtual void  Add(IndexReader reader, bool ignoreStoredFields)
		{
			EnsureOpen();

			// The first reader added establishes the doc counts all later
			// readers must agree with.
			if (readers.Count == 0)
			{
				this.maxDoc = reader.MaxDoc();
				this.numDocs = reader.NumDocs();
				this.hasDeletions = reader.HasDeletions();
			}

			// check compatibility with the readers already added
			if (reader.MaxDoc() != maxDoc)
			{
				throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc());
			}
			if (reader.NumDocs() != numDocs)
			{
				throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
			}

			System.Collections.Generic.ICollection<string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
			readerToFields[reader] = fields;
			foreach (System.String field in fields)
			{
				// the first reader that declares a field answers queries for it
				if (fieldToReader[field] == null)
				{
					fieldToReader[field] = reader;
				}
			}

			if (!ignoreStoredFields)
			{
				storedFieldReaders.Add(reader); // add to storedFieldReaders
			}
			readers.Add(reader);

			if (incRefReaders)
			{
				reader.IncRef();
			}
			decrefOnClose.Add(incRefReaders);
		}
 /// <summary>Automatically adds stop words for all indexed fields of the reader
 /// whose terms exceed the given document-frequency threshold.</summary>
 /// <param name="reader">The <c>IndexReader</c> consulted to identify potential stop words
 /// that exceed the required document frequency.</param>
 /// <param name="maxPercentDocs">The maximum percentage (between 0.0 and 1.0) of index
 /// documents which contain a term, after which the word is considered a stop word.</param>
 /// <returns>The number of stop words identified across all fields.</returns>
 /// <exception cref="System.IO.IOException">if there is a low-level IO error</exception>
 public int AddStopWords(IndexReader reader, float maxPercentDocs)
 {
     int total = 0;
     foreach (String fieldName in reader.GetFieldNames(IndexReader.FieldOption.INDEXED))
     {
         total += AddStopWords(reader, fieldName, maxPercentDocs);
     }
     return total;
 }
		/// <summary>Add an IndexReader whose stored fields will not be returned.  This can
		/// accelerate search when stored fields are only needed from a subset of
		/// the IndexReaders.
		/// 
		/// </summary>
		/// <throws>  IllegalArgumentException if not all indexes contain the same number  </throws>
		/// <summary>     of documents
		/// </summary>
		/// <throws>  IllegalArgumentException if not all indexes have the same value  </throws>
		/// <summary>     of {@link IndexReader#MaxDoc()}
		/// </summary>
		public virtual void  Add(IndexReader reader, bool ignoreStoredFields)
		{
			
			// The first reader added establishes the doc counts all later
			// readers must agree with.
			if (readers.Count == 0)
			{
				this.maxDoc = reader.MaxDoc();
				this.numDocs = reader.NumDocs();
				this.hasDeletions = reader.HasDeletions();
			}
			
			if (reader.MaxDoc() != maxDoc)
			// check compatibility
				throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc());
			if (reader.NumDocs() != numDocs)
				throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
			
			System.Collections.IEnumerator i = reader.GetFieldNames(IndexReader.FieldOption.ALL).GetEnumerator();
			while (i.MoveNext())
			{
                // NOTE(review): entries are cast to DictionaryEntry, so in this
                // (older) API GetFieldNames apparently yields map-like entries
                // whose Key is the field name — confirm against the matching
                // IndexReader version before changing this loop.
                System.Collections.DictionaryEntry fi = (System.Collections.DictionaryEntry) i.Current;

				// update fieldToReader map; the first reader that declares a
				// field answers queries for it
				System.String field = fi.Key.ToString();
				if (fieldToReader[field] == null)
					fieldToReader[field] = reader;
			}
			
			if (!ignoreStoredFields)
				storedFieldReaders.Add(reader); // add to storedFieldReaders
			readers.Add(reader);
		}
Beispiel #27
0
 /// <summary>Delegates the field-name lookup for the given option to the wrapped reader.</summary>
 public override System.Collections.ICollection  GetFieldNames(IndexReader.FieldOption fieldNames)
 {
     System.Collections.ICollection result = in_Renamed.GetFieldNames(fieldNames);
     return result;
 }
 /// <summary>Delegates the unfiltered field-name lookup to the wrapped reader.</summary>
 public override System.Collections.ICollection GetFieldNames()
 {
     System.Collections.ICollection result = in_Renamed.GetFieldNames();
     return result;
 }