Inheritance: System.Collections.ArrayList
Example #1
        /// <summary> Current version number from segments file.</summary>
        public static long ReadCurrentVersion(Directory directory)
        {
            IndexInput input = directory.OpenInput(IndexFileNames.SEGMENTS);
            int format = 0;
            long version = 0;
            try
            {
                format = input.ReadInt();
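                // A negative leading int is a format marker; in that case the
                // version is stored right after it. Pre-format files begin with
                // a non-negative counter, and the version cannot be reached by a
                // simple seek (see below).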
                if (format < 0)
                {
                    if (format < FORMAT)
                        throw new System.IO.IOException("Unknown format version: " + format);
                    version = input.ReadLong(); // read version
                }
            }
            finally
            {
                input.Close();
            }

            if (format < 0)
                return version;

            // We cannot be sure about the format of the file.
            // Therefore we have to read the whole file and cannot simply seek to the version entry.

            SegmentInfos sis = new SegmentInfos();
            sis.Read(directory);
            return sis.GetVersion();
        }
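A brief usage sketch (directory is assumed to be an open Lucene.Net Directory): the version changes whenever the index is committed, so two reads around a window of activity show whether anything happened in between.

        long before = SegmentInfos.ReadCurrentVersion(directory);
        // ... another process may commit to the index here ...
        long after = SegmentInfos.ReadCurrentVersion(directory);
        bool indexChanged = after != before;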
 protected internal override object DoBody(string segmentFileName)
 {
     var sis = new SegmentInfos();
     sis.Read(directory, segmentFileName);
     var readers = new SegmentReader[sis.Size()];
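     // Open segments in reverse: IndexWriter merges and deletes the newest
     // segments first, so this reduces the chance of hitting FileNotFound
     // (and having to retry).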
     for (int i = sis.Size() - 1; i >= 0; i--)
     {
         System.IO.IOException prior = null;
         bool success = false;
         try
         {
             readers[i] = new SegmentReader(sis.Info(i), termInfosIndexDivisor, IOContext.READ);
             success = true;
         }
         catch (System.IO.IOException ex)
         {
             prior = ex;
         }
         finally
         {
             if (!success)
             {
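                 // Close whatever was opened so far; if prior is non-null,
                 // CloseWhileHandlingException rethrows it after closing.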
                 IOUtils.CloseWhileHandlingException(prior, readers);
             }
         }
     }
     return new StandardDirectoryReader(directory, readers, null, sis, termInfosIndexDivisor, false);
 }
        public virtual void TestAllSegmentsLarge()
        {
            Directory dir = new RAMDirectory();

            IndexWriterConfig conf = NewWriterConfig();
            IndexWriter writer = new IndexWriter(dir, conf);

            AddDocs(writer, 3);
            AddDocs(writer, 3);
            AddDocs(writer, 3);

            writer.Dispose();

            conf = NewWriterConfig();
            LogMergePolicy lmp = new LogDocMergePolicy();
            lmp.MaxMergeDocs = 2;
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            SegmentInfos sis = new SegmentInfos();
            sis.Read(dir);
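            // Every segment holds 3 docs, which exceeds MaxMergeDocs = 2, so
            // ForceMerge(1) was not allowed to merge any of them.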
            Assert.AreEqual(3, sis.Size());
        }
        public virtual void TestByteSizeLimit()
        {
            // tests that the max merge size constraint is applied during forceMerge.
            Directory dir = new RAMDirectory();

            // Prepare an index w/ several small segments and a large one.
            IndexWriterConfig conf = NewWriterConfig();
            IndexWriter writer = new IndexWriter(dir, conf);
            const int numSegments = 15;
            for (int i = 0; i < numSegments; i++)
            {
                int numDocs = i == 7 ? 30 : 1;
                AddDocs(writer, numDocs);
            }
            writer.Dispose();

            SegmentInfos sis = new SegmentInfos();
            sis.Read(dir);
            double min = sis.Info(0).SizeInBytes();

            conf = NewWriterConfig();
            LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
            lmp.MaxMergeMBForForcedMerge = (min + 1) / (1 << 20);
            conf.SetMergePolicy(lmp);

            writer = new IndexWriter(dir, conf);
            writer.ForceMerge(1);
            writer.Dispose();

            // Should only be 3 segments in the index, because one of them exceeds the size limit
            sis = new SegmentInfos();
            sis.Read(dir);
            Assert.AreEqual(3, sis.Size());
        }
Example #5
			public OneMerge(SegmentInfos segments, bool useCompoundFile)
			{
				if (0 == segments.Count)
					throw new System.SystemException("segments must include at least one segment");
				this.segments = segments;
				this.useCompoundFile = useCompoundFile;
			}
        private int[] starts; // 1st docno for each segment

        #endregion Fields

        #region Constructors

        /// <summary>Construct reading the named set of readers. </summary>
        internal MultiSegmentReader(Directory directory, SegmentInfos sis, bool closeDirectory, bool readOnly)
            : base(directory, sis, closeDirectory, readOnly)
        {
            // To reduce the chance of hitting FileNotFound
            // (and having to retry), we open segments in
            // reverse because IndexWriter merges & deletes
            // the newest segments first.

            SegmentReader[] readers = new SegmentReader[sis.Count];
            for (int i = sis.Count - 1; i >= 0; i--)
            {
                try
                {
                    readers[i] = SegmentReader.Get(readOnly, sis.Info(i));
                }
                catch (System.IO.IOException)
                {
                    // Close all readers we had opened:
                    for (i++; i < sis.Count; i++)
                    {
                        try
                        {
                            readers[i].Close();
                        }
                        catch (System.IO.IOException)
                        {
                            // keep going - we want to clean up as much as possible
                        }
                    }
                    throw; // rethrow, preserving the original stack trace
                }
            }

            Initialize(readers);
        }
 /// <summary>
 /// called only from static open() methods </summary>
 internal StandardDirectoryReader(Directory directory, AtomicReader[] readers, IndexWriter writer, SegmentInfos sis, int termInfosIndexDivisor, bool applyAllDeletes)
     : base(directory, readers)
 {
     this.Writer = writer;
     this.SegmentInfos = sis;
     this.TermInfosIndexDivisor = termInfosIndexDivisor;
     this.ApplyAllDeletes = applyAllDeletes;
 }
Example #8
 public /*protected internal*/ override System.Object DoBody(System.String segmentFileName)
 {
     var infos = new SegmentInfos();
     infos.Read(directory, segmentFileName);
     if (readOnly)
         return new ReadOnlyDirectoryReader(directory, infos, deletionPolicy, termInfosIndexDivisor);
     else
         return new DirectoryReader(directory, infos, deletionPolicy, false, termInfosIndexDivisor);
 }
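Every DoBody override in this listing follows the same pattern: a subclass of SegmentInfos.FindSegmentsFile whose Run() method (assuming the port mirrors the Java original's run()) finds the newest segments_N file, hands its name to DoBody, and retries if a concurrent commit swaps the file out mid-read. A minimal hedged sketch with illustrative names (the DoBody access modifier varies between versions, as the examples above show):

 private class SegmentCounter : SegmentInfos.FindSegmentsFile
 {
     private readonly Directory dir;

     public SegmentCounter(Directory dir) : base(dir)
     {
         this.dir = dir;
     }

     public override System.Object DoBody(System.String segmentFileName)
     {
         var infos = new SegmentInfos();
         infos.Read(dir, segmentFileName);
         return infos.Count; // boxed segment count
     }
 }
 // Usage sketch: int segmentCount = (int) new SegmentCounter(dir).Run();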
Example #9
		public override void  SetUp()
		{
			base.SetUp();
			dir = new RAMDirectory();
			doc1 = new Document();
			doc2 = new Document();
			DocHelper.SetupDoc(doc1);
			DocHelper.SetupDoc(doc2);
			DocHelper.WriteDoc(dir, doc1);
			DocHelper.WriteDoc(dir, doc2);
			sis = new SegmentInfos();
			sis.Read(dir);
		}
Example #10
        /// <summary>Construct reading the named set of readers. </summary>
        internal DirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
        {
            this.directory = directory;
            this.readOnly = readOnly;
            this.segmentInfos = sis;
            this.deletionPolicy = deletionPolicy;
            this.termInfosIndexDivisor = termInfosIndexDivisor;

            if (!readOnly)
            {
                // We assume that this segments_N was previously
                // properly sync'd:
                SupportClass.CollectionsHelper.AddAllIfNotContains(synced, sis.Files(directory, true));
            }

            // To reduce the chance of hitting FileNotFound
            // (and having to retry), we open segments in
            // reverse because IndexWriter merges & deletes
            // the newest segments first.

            SegmentReader[] readers = new SegmentReader[sis.Count];
            for (int i = sis.Count - 1; i >= 0; i--)
            {
                bool success = false;
                try
                {
                    readers[i] = SegmentReader.Get(readOnly, sis.Info(i), termInfosIndexDivisor);
                    success = true;
                }
                finally
                {
                    if (!success)
                    {
                        // Close all readers we had opened:
                        for (i++; i < sis.Count; i++)
                        {
                            try
                            {
                                readers[i].Close();
                            }
                            catch (System.Exception)
                            {
                                // keep going - we want to clean up as much as possible
                            }
                        }
                    }
                }
            }

            Initialize(readers);
        }
Example #11
			public override System.Object DoBody()
			{
				SegmentInfos infos = new SegmentInfos();
				infos.Read(directory);
				if (infos.Count == 1)
				{
					// index is optimized
					return SegmentReader.Get(infos, infos.Info(0), closeDirectory);
				}
				IndexReader[] readers = new IndexReader[infos.Count];
				for (int i = 0; i < infos.Count; i++)
					readers[i] = SegmentReader.Get(infos.Info(i));
				return new MultiReader(directory, infos, closeDirectory, readers);
			}
        public virtual void TestAddIndexes()
        {
            Directory dir1 = NewDirectory();
            Directory dir2 = NewDirectory();
            IndexWriter writer = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));

            Document d1 = new Document();
            d1.Add(new TextField("f1", "first field", Field.Store.YES));
            d1.Add(new TextField("f2", "second field", Field.Store.YES));
            writer.AddDocument(d1);

            writer.Dispose();
            writer = new IndexWriter(dir2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));

            Document d2 = new Document();
            FieldType customType2 = new FieldType(TextField.TYPE_STORED);
            customType2.StoreTermVectors = true;
            d2.Add(new TextField("f2", "second field", Field.Store.YES));
            d2.Add(new Field("f1", "first field", customType2));
            d2.Add(new TextField("f3", "third field", Field.Store.YES));
            d2.Add(new TextField("f4", "fourth field", Field.Store.YES));
            writer.AddDocument(d2);

            writer.Dispose();

            writer = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
            writer.AddIndexes(dir2);
            writer.Dispose();

            SegmentInfos sis = new SegmentInfos();
            sis.Read(dir1);
            Assert.AreEqual(2, sis.Size());

            FieldInfos fis1 = SegmentReader.ReadFieldInfos(sis.Info(0));
            FieldInfos fis2 = SegmentReader.ReadFieldInfos(sis.Info(1));

            Assert.AreEqual("f1", fis1.FieldInfo(0).Name);
            Assert.AreEqual("f2", fis1.FieldInfo(1).Name);
            // make sure the ordering of the "external" segment is preserved
            Assert.AreEqual("f2", fis2.FieldInfo(0).Name);
            Assert.AreEqual("f1", fis2.FieldInfo(1).Name);
            Assert.AreEqual("f3", fis2.FieldInfo(2).Name);
            Assert.AreEqual("f4", fis2.FieldInfo(3).Name);

            dir1.Dispose();
            dir2.Dispose();
        }
		internal int docShift; // total # deleted docs that were compacted by this merge
		
		public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
		{
			this.docMaps = docMaps;
			SegmentInfo firstSegment = merge.segments.Info(0);
			int i = 0;
			while (true)
			{
				SegmentInfo info = infos.Info(i);
				if (info.Equals(firstSegment))
					break;
				minDocID += info.docCount;
				i++;
			}
			
			int numDocs = 0;
			for (int j = 0; j < docMaps.Length; i++, j++)
			{
				numDocs += infos.Info(i).docCount;
				System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
			}
			maxDocID = minDocID + numDocs;
			
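			// starts[i] is the first docID of merged segment i before the merge;
			// newStarts[i] is its first docID once deletions are compacted away.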
			starts = new int[docMaps.Length];
			newStarts = new int[docMaps.Length];
			
			starts[0] = minDocID;
			newStarts[0] = minDocID;
			for (i = 1; i < docMaps.Length; i++)
			{
				int lastDocCount = merge.segments.Info(i - 1).docCount;
				starts[i] = starts[i - 1] + lastDocCount;
				newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
			}
			docShift = numDocs - mergedDocCount;
			
			// There are rare cases when docShift is 0.  It happens
			// if you try to delete a docID that's out of bounds,
			// because the SegmentReader still allocates deletedDocs
			// and pretends it has deletions ... so we can't make
			// this assert here
			// assert docShift > 0;
			
			// Make sure it all adds up:
			System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts[docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts[docMaps.Length - 1]));
		}
        public virtual void TestPartialMerge()
        {
            Directory dir = NewDirectory();

            Document doc = new Document();
            doc.Add(NewStringField("content", "aaa", Field.Store.NO));
            int incrMin = TEST_NIGHTLY ? 15 : 40;
            for (int numDocs = 10; numDocs < 500; numDocs += TestUtil.NextInt(Random(), incrMin, 5 * incrMin))
            {
                LogDocMergePolicy ldmp = new LogDocMergePolicy();
                ldmp.MinMergeDocs = 1;
                ldmp.MergeFactor = 5;
                IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(ldmp));
                for (int j = 0; j < numDocs; j++)
                {
                    writer.AddDocument(doc);
                }
                writer.Dispose();

                SegmentInfos sis = new SegmentInfos();
                sis.Read(dir);
                int segCount = sis.Size();

                ldmp = new LogDocMergePolicy();
                ldmp.MergeFactor = 5;
                writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(ldmp));
                writer.ForceMerge(3);
                writer.Dispose();

                sis = new SegmentInfos();
                sis.Read(dir);
                int optSegCount = sis.Size();

                if (segCount < 3)
                {
                    Assert.AreEqual(segCount, optSegCount);
                }
                else
                {
                    Assert.AreEqual(3, optSegCount);
                }
            }
            dir.Dispose();
        }
			protected internal override System.Object DoBody(System.String segmentFileName)
			{
				
				SegmentInfos infos = new SegmentInfos();
				infos.Read(directory, segmentFileName);
				
				DirectoryIndexReader reader;
				
				if (infos.Count == 1)
				{
					// index is optimized
					reader = SegmentReader.Get(infos, infos.Info(0), closeDirectory);
				}
				else
				{
					reader = new MultiSegmentReader(directory, infos, closeDirectory);
				}
				reader.SetDeletionPolicy(deletionPolicy);
				return reader;
			}
        public override MergeSpecification FindForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, IDictionary<SegmentCommitInfo, bool?> segmentsToMerge)
        {
            IList<SegmentCommitInfo> eligibleSegments = new List<SegmentCommitInfo>();
            foreach (SegmentCommitInfo info in segmentInfos.Segments)
            {
                if (segmentsToMerge.ContainsKey(info))
                {
                    eligibleSegments.Add(info);
                }
            }

            //System.out.println("MRMP: findMerges sis=" + segmentInfos + " eligible=" + eligibleSegments);
            MergeSpecification mergeSpec = null;
            if (eligibleSegments.Count > 1 || (eligibleSegments.Count == 1 && eligibleSegments[0].HasDeletions()))
            {
                mergeSpec = new MergeSpecification();
                // Already shuffled having come out of a set but
                // shuffle again for good measure:
                eligibleSegments = CollectionsHelper.Shuffle(eligibleSegments);
                int upto = 0;
                while (upto < eligibleSegments.Count)
                {
                    int max = Math.Min(10, eligibleSegments.Count - upto);
                    int inc = max <= 2 ? max : TestUtil.NextInt(Random, 2, max);
                    mergeSpec.Add(new OneMerge(eligibleSegments.SubList(upto, upto + inc)));
                    upto += inc;
                }
            }

            if (mergeSpec != null)
            {
                foreach (OneMerge merge in mergeSpec.Merges)
                {
                    foreach (SegmentCommitInfo info in merge.Segments)
                    {
                        Debug.Assert(segmentsToMerge.ContainsKey(info));
                    }
                }
            }
            return mergeSpec;
        }
        public virtual void TestBackgroundForceMerge()
        {
            Directory dir = NewDirectory();
            for (int pass = 0; pass < 2; pass++)
            {
                IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.CREATE).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(51)));
                Document doc = new Document();
                doc.Add(NewStringField("field", "aaa", Field.Store.NO));
                for (int i = 0; i < 100; i++)
                {
                    writer.AddDocument(doc);
                }
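                // doWait == false: kick off the merge and return immediately,
                // letting it finish in the background.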
                writer.ForceMerge(1, false);

                if (0 == pass)
                {
                    writer.Dispose();
                    DirectoryReader reader = DirectoryReader.Open(dir);
                    Assert.AreEqual(1, reader.Leaves.Count);
                    reader.Dispose();
                }
                else
                {
                    // Get another segment to flush so we can verify it is
                    // NOT included in the merging
                    writer.AddDocument(doc);
                    writer.AddDocument(doc);
                    writer.Dispose();

                    DirectoryReader reader = DirectoryReader.Open(dir);
                    Assert.IsTrue(reader.Leaves.Count > 1);
                    reader.Dispose();

                    SegmentInfos infos = new SegmentInfos();
                    infos.Read(dir);
                    Assert.AreEqual(2, infos.Size());
                }
            }

            dir.Dispose();
        }
Example #18
			public override System.Object DoBody(System.String segmentFileName)
			{
				
				SegmentInfos infos = new SegmentInfos();
				infos.Read(directory, segmentFileName);
				
				if (infos.Count == 1)
				{
					// index is optimized
					return SegmentReader.Get(infos, infos.Info(0), closeDirectory);
				}
				else
				{
					
					// To reduce the chance of hitting FileNotFound
					// (and having to retry), we open segments in
					// reverse because IndexWriter merges & deletes
					// the newest segments first.
					
					IndexReader[] readers = new IndexReader[infos.Count];
					for (int i = infos.Count - 1; i >= 0; i--)
					{
						try
						{
							readers[i] = SegmentReader.Get(infos.Info(i));
						}
						catch (System.IO.IOException)
						{
							// Close all readers we had opened:
							for (i++; i < infos.Count; i++)
							{
								readers[i].Close();
							}
							throw; // rethrow, preserving the original stack trace
						}
					}
					
					return new MultiReader(directory, infos, closeDirectory, readers);
				}
			}
Example #19
            protected internal override object DoBody(System.String segmentFileName)
            {
                IndexInput input = directory.OpenInput(segmentFileName);

                int format = 0;
                long version = 0;
                try
                {
                    format = input.ReadInt();
                    if (format < 0)
                    {
                        if (format < Lucene.Net.Index.SegmentInfos.CURRENT_FORMAT)
                            throw new CorruptIndexException("Unknown format version: " + format);
                        version = input.ReadLong(); // read version
                    }
                }
                finally
                {
                    input.Close();
                }

                if (format < 0)
                    return (long) version;

                // We cannot be sure about the format of the file.
                // Therefore we have to read the whole file and cannot simply seek to the version entry.
                SegmentInfos sis = new SegmentInfos();
                sis.Read(directory, segmentFileName);
                return (long) sis.GetVersion();
            }
Example #20
 private void InitBlock(SegmentInfos enclosingInstance)
 {
     this.enclosingInstance = enclosingInstance;
 }
 private int GetNumberOfSegments(Directory dir)
 {
     SegmentInfos infos = new SegmentInfos();
     infos.Read(dir);
     return infos.Size();
 }
 private int CheckAllSegmentsUpgraded(Directory dir)
 {
     SegmentInfos infos = new SegmentInfos();
     infos.Read(dir);
     if (VERBOSE)
     {
         Console.WriteLine("checkAllSegmentsUpgraded: " + infos);
     }
     foreach (SegmentCommitInfo si in infos.Segments)
     {
         Assert.AreEqual(Constants.LUCENE_MAIN_VERSION, si.Info.Version);
     }
     return infos.Size();
 }
Example #23
		/// <summary> Returns true if the doc store files should use the
		/// compound file format.
		/// </summary>
		public abstract bool UseCompoundDocStore(SegmentInfos segments);
Example #24
		/// <summary> Returns true if a newly flushed (not from merge)
		/// segment should use the compound file format.
		/// </summary>
		public abstract bool UseCompoundFile(SegmentInfos segments, SegmentInfo newSegment);
Example #25
		/// <summary> Determine what set of merge operations are necessary in
		/// order to optimize the index.  The IndexWriter calls
		/// this when its optimize() method is called.  This call
		/// is always synchronized on the IndexWriter instance so
		/// only one thread at a time will call this method.
		/// 
		/// </summary>
		/// <param name="segmentInfos">the total set of segments in the index
		/// </param>
		/// <param name="writer">IndexWriter instance
		/// </param>
		/// <param name="maxSegmentCount">requested maximum number of
		/// segments in the index (currently this is always 1)
		/// </param>
		/// <param name="segmentsToOptimize">contains the specific
		/// SegmentInfo instances that must be merged away.  This
		/// may be a subset of all SegmentInfos.
		/// </param>
		public abstract MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos, IndexWriter writer, int maxSegmentCount, System.Collections.Hashtable segmentsToOptimize);
Example #26
 // carry over generation numbers from another SegmentInfos
 internal void UpdateGeneration(SegmentInfos other)
 {
     lastGeneration = other.lastGeneration;
     generation = other.generation;
     version = other.version;
 }
Example #27
 /// <summary> Returns a copy of this instance, also copying each
 /// SegmentInfo.
 /// </summary>
 public override object Clone()
 {
     SegmentInfos si = new SegmentInfos();
     for (int i = 0; i < base.Count; i++)
     {
         si.Add(((SegmentInfo)base[i]).Clone());
     }
     si.counter = this.counter;
     si.version = this.version;
     si.generation = this.generation;
     si.lastGeneration = this.lastGeneration;
     return si;
 }
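A short usage sketch (names are illustrative): because each SegmentInfo is copied as well, the clone is a point-in-time snapshot.

 SegmentInfos snapshot = (SegmentInfos) liveInfos.Clone();
 // liveInfos and its SegmentInfo entries may now change freely;
 // snapshot keeps the state as of the Clone() call.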
Example #28
        public virtual void TestSameFieldNumbersAcrossSegments()
        {
            for (int i = 0; i < 2; i++)
            {
                Directory   dir    = NewDirectory();
                IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));

                Document d1 = new Document();
                d1.Add(new StringField("f1", "first field", Field.Store.YES));
                d1.Add(new StringField("f2", "second field", Field.Store.YES));
                writer.AddDocument(d1);

                if (i == 1)
                {
                    writer.Dispose();
                    writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.COMPOUND_FILES));
                }
                else
                {
                    writer.Commit();
                }

                Document  d2          = new Document();
                FieldType customType2 = new FieldType(TextField.TYPE_STORED);
                customType2.StoreTermVectors = true;
                d2.Add(new TextField("f2", "second field", Field.Store.NO));
                d2.Add(new Field("f1", "first field", customType2));
                d2.Add(new TextField("f3", "third field", Field.Store.NO));
                d2.Add(new TextField("f4", "fourth field", Field.Store.NO));
                writer.AddDocument(d2);

                writer.Dispose();

                SegmentInfos sis = new SegmentInfos();
                sis.Read(dir);
                Assert.AreEqual(2, sis.Size());

                FieldInfos fis1 = SegmentReader.ReadFieldInfos(sis.Info(0));
                FieldInfos fis2 = SegmentReader.ReadFieldInfos(sis.Info(1));

                Assert.AreEqual("f1", fis1.FieldInfo(0).Name);
                Assert.AreEqual("f2", fis1.FieldInfo(1).Name);
                Assert.AreEqual("f1", fis2.FieldInfo(0).Name);
                Assert.AreEqual("f2", fis2.FieldInfo(1).Name);
                Assert.AreEqual("f3", fis2.FieldInfo(2).Name);
                Assert.AreEqual("f4", fis2.FieldInfo(3).Name);

                writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
                writer.ForceMerge(1);
                writer.Dispose();

                sis = new SegmentInfos();
                sis.Read(dir);
                Assert.AreEqual(1, sis.Size());

                FieldInfos fis3 = SegmentReader.ReadFieldInfos(sis.Info(0));

                Assert.AreEqual("f1", fis3.FieldInfo(0).Name);
                Assert.AreEqual("f2", fis3.FieldInfo(1).Name);
                Assert.AreEqual("f3", fis3.FieldInfo(2).Name);
                Assert.AreEqual("f4", fis3.FieldInfo(3).Name);

                dir.Dispose();
            }
        }
Example #29
 /// <summary> Returns a new SegmentInfos containing the SegmentInfo
 /// instances in the specified range, first (inclusive) to
 /// last (exclusive), so the total number of segments returned
 /// is last - first.
 /// </summary>
 public SegmentInfos Range(int first, int last)
 {
     SegmentInfos infos = new SegmentInfos();
     infos.AddRange((System.Collections.IList)((System.Collections.ArrayList)this).GetRange(first, last - first));
     return infos;
 }
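For instance (a hedged sketch; infos is assumed to hold at least three segments), first is inclusive and last exclusive:

 SegmentInfos firstThree = infos.Range(0, 3); // segments 0, 1 and 2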
Example #30
		/// <summary> Determine what set of merge operations are now
		/// necessary on the index.  The IndexWriter calls this
		/// whenever there is a change to the segments.  This call
		/// is always synchronized on the IndexWriter instance so
		/// only one thread at a time will call this method.
		/// 
		/// </summary>
		/// <param name="segmentInfos">the total set of segments in the index
		/// </param>
		/// <param name="writer">IndexWriter instance
		/// </param>
		public abstract MergeSpecification FindMerges(SegmentInfos segmentInfos, IndexWriter writer);
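Examples #23, #24, #25 and #30 together show the abstract surface of MergePolicy as far as this listing goes. A hedged sketch of a subclass that disables merging and compound files, wiring those four members together (the class name is illustrative, and this vintage of MergePolicy may declare further abstract members, such as a close hook, that a real subclass would also need to implement):

		public class NoMergesPolicy : MergePolicy
		{
			// Returning null tells IndexWriter there is nothing to merge.
			public override MergeSpecification FindMerges(SegmentInfos segmentInfos, IndexWriter writer)
			{
				return null;
			}

			// Likewise, optimize() finds no merges to run.
			public override MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos, IndexWriter writer, int maxSegmentCount, System.Collections.Hashtable segmentsToOptimize)
			{
				return null;
			}

			// Newly flushed segments keep their individual files.
			public override bool UseCompoundFile(SegmentInfos segments, SegmentInfo newSegment)
			{
				return false;
			}

			// Doc store files stay un-compounded as well.
			public override bool UseCompoundDocStore(SegmentInfos segments)
			{
				return false;
			}
		}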
Example #31
 internal AnonymousClassFindSegmentsFile(SegmentInfos enclosingInstance, Lucene.Net.Store.Directory Param1)
     : base(Param1)
 {
     InitBlock(enclosingInstance);
 }
Example #32
        public virtual void TestFieldNumberGaps()
        {
            int numIters = AtLeast(13);

            for (int i = 0; i < numIters; i++)
            {
                Directory dir = NewDirectory();
                {
                    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(NoMergePolicy.NO_COMPOUND_FILES));
                    Document    d      = new Document();
                    d.Add(new TextField("f1", "d1 first field", Field.Store.YES));
                    d.Add(new TextField("f2", "d1 second field", Field.Store.YES));
                    writer.AddDocument(d);
                    writer.Dispose();
                    SegmentInfos sis = new SegmentInfos();
                    sis.Read(dir);
                    Assert.AreEqual(1, sis.Size());
                    FieldInfos fis1 = SegmentReader.ReadFieldInfos(sis.Info(0));
                    Assert.AreEqual("f1", fis1.FieldInfo(0).Name);
                    Assert.AreEqual("f2", fis1.FieldInfo(1).Name);
                }

                {
                    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(Random().NextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES : NoMergePolicy.COMPOUND_FILES));
                    Document    d      = new Document();
                    d.Add(new TextField("f1", "d2 first field", Field.Store.YES));
                    d.Add(new StoredField("f3", new sbyte[] { 1, 2, 3 }));
                    writer.AddDocument(d);
                    writer.Dispose();
                    SegmentInfos sis = new SegmentInfos();
                    sis.Read(dir);
                    Assert.AreEqual(2, sis.Size());
                    FieldInfos fis1 = SegmentReader.ReadFieldInfos(sis.Info(0));
                    FieldInfos fis2 = SegmentReader.ReadFieldInfos(sis.Info(1));
                    Assert.AreEqual("f1", fis1.FieldInfo(0).Name);
                    Assert.AreEqual("f2", fis1.FieldInfo(1).Name);
                    Assert.AreEqual("f1", fis2.FieldInfo(0).Name);
                    Assert.IsNull(fis2.FieldInfo(1));
                    Assert.AreEqual("f3", fis2.FieldInfo(2).Name);
                }

                {
                    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(Random().NextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES : NoMergePolicy.COMPOUND_FILES));
                    Document    d      = new Document();
                    d.Add(new TextField("f1", "d3 first field", Field.Store.YES));
                    d.Add(new TextField("f2", "d3 second field", Field.Store.YES));
                    d.Add(new StoredField("f3", new sbyte[] { 1, 2, 3, 4, 5 }));
                    writer.AddDocument(d);
                    writer.Dispose();
                    SegmentInfos sis = new SegmentInfos();
                    sis.Read(dir);
                    Assert.AreEqual(3, sis.Size());
                    FieldInfos fis1 = SegmentReader.ReadFieldInfos(sis.Info(0));
                    FieldInfos fis2 = SegmentReader.ReadFieldInfos(sis.Info(1));
                    FieldInfos fis3 = SegmentReader.ReadFieldInfos(sis.Info(2));
                    Assert.AreEqual("f1", fis1.FieldInfo(0).Name);
                    Assert.AreEqual("f2", fis1.FieldInfo(1).Name);
                    Assert.AreEqual("f1", fis2.FieldInfo(0).Name);
                    Assert.IsNull(fis2.FieldInfo(1));
                    Assert.AreEqual("f3", fis2.FieldInfo(2).Name);
                    Assert.AreEqual("f1", fis3.FieldInfo(0).Name);
                    Assert.AreEqual("f2", fis3.FieldInfo(1).Name);
                    Assert.AreEqual("f3", fis3.FieldInfo(2).Name);
                }

                {
                    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(Random().NextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES : NoMergePolicy.COMPOUND_FILES));
                    writer.DeleteDocuments(new Term("f1", "d1"));
                    // nuke the first segment entirely so that the segment with gaps is
                    // loaded first!
                    writer.ForceMergeDeletes();
                    writer.Dispose();
                }

                IndexWriter writer_ = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMergePolicy(new LogByteSizeMergePolicy()).SetInfoStream(new FailOnNonBulkMergesInfoStream()));
                writer_.ForceMerge(1);
                writer_.Dispose();

                SegmentInfos sis_ = new SegmentInfos();
                sis_.Read(dir);
                Assert.AreEqual(1, sis_.Size());
                FieldInfos fis1_ = SegmentReader.ReadFieldInfos(sis_.Info(0));
                Assert.AreEqual("f1", fis1_.FieldInfo(0).Name);
                Assert.AreEqual("f2", fis1_.FieldInfo(1).Name);
                Assert.AreEqual("f3", fis1_.FieldInfo(2).Name);
                dir.Dispose();
            }
        }