Example #1
 // Appends a new packet of buffered deletes to the stream,
 // setting its generation:
 public virtual long Push(FrozenBufferedUpdates packet)
 {
     lock (this)
     {
         /*
          * The insert operation must be atomic. If we let threads increment the gen
          * and push the packet afterwards we risk that packets are out of order.
          * With DWPT this is possible if two or more flushes are racing for pushing
          * updates. If the pushed packets get out of order we would lose documents,
          * since deletes would be applied to the wrong segments.
          */
         packet.DelGen = NextGen_Renamed++;
         Debug.Assert(packet.Any());
         Debug.Assert(CheckDeleteStats());
         Debug.Assert(packet.DelGen < NextGen_Renamed);
         Debug.Assert(Updates.Count == 0 || Updates[Updates.Count - 1].DelGen < packet.DelGen, "Delete packets must be in order");
         Updates.Add(packet);
         numTerms.AddAndGet(packet.NumTermDeletes);
         bytesUsed.AddAndGet(packet.BytesUsed);
         if (InfoStream.IsEnabled("BD"))
         {
             InfoStream.Message("BD", "push deletes " + packet + " delGen=" + packet.DelGen + " packetCount=" + Updates.Count + " totBytesUsed=" + bytesUsed.Get());
         }
         Debug.Assert(CheckDeleteStats());
         return(packet.DelGen);
     }
 }
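The comment inside Push is the key point of this example: the generation counter is incremented and the packet is appended inside the same lock, so concurrent flushes can never publish packets out of generation order. Below is a minimal, self-contained sketch of that pattern; MiniDeleteStream and Packet are hypothetical stand-ins, not the Lucene.NET BufferedUpdatesStream and FrozenBufferedUpdates types.

using System.Collections.Generic;

// Hypothetical illustration of the push-under-lock pattern shown above.
public class Packet
{
    public long DelGen { get; set; }
}

public class MiniDeleteStream
{
    private readonly List<Packet> updates = new List<Packet>();
    private long nextGen = 1;
    private readonly object syncLock = new object();

    public long Push(Packet packet)
    {
        lock (syncLock)
        {
            // Assign the generation and append in one critical section so that
            // concurrent pushers cannot interleave and break the ordering.
            packet.DelGen = nextGen++;
            updates.Add(packet);
            return packet.DelGen;
        }
    }
}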
Example #2
 private void Prune(int count)
 {
     lock (this)
     {
         if (count > 0)
         {
             if (infoStream.IsEnabled("BD"))
             {
                 infoStream.Message("BD", "pruneDeletes: prune " + count + " packets; " + (updates.Count - count) + " packets remain");
             }
             for (int delIDX = 0; delIDX < count; delIDX++)
             {
                 FrozenBufferedUpdates packet = updates[delIDX];
                 numTerms.AddAndGet(-packet.numTermDeletes);
                 if (Debugging.AssertsEnabled)
                 {
                     Debugging.Assert(numTerms >= 0);
                 }
                 bytesUsed.AddAndGet(-packet.bytesUsed);
                 if (Debugging.AssertsEnabled)
                 {
                     Debugging.Assert(bytesUsed >= 0);
                 }
             }
             updates.SubList(0, count).Clear();
         }
     }
 }
 private void Prune(int count)
 {
     lock (this)
     {
         if (count > 0)
         {
             if (infoStream.IsEnabled("BD"))
             {
                 infoStream.Message("BD", "pruneDeletes: prune " + count + " packets; " + (updates.Count - count) + " packets remain");
             }
             for (int delIDX = 0; delIDX < count; delIDX++)
             {
                 FrozenBufferedUpdates packet = updates[delIDX];
                 numTerms.AddAndGet(-packet.numTermDeletes);
                 if (Debugging.AssertsEnabled)
                 {
                     Debugging.Assert(numTerms >= 0);
                 }
                 bytesUsed.AddAndGet(-packet.bytesUsed);
                 if (Debugging.AssertsEnabled)
                 {
                     Debugging.Assert(bytesUsed >= 0);
                 }
             }
             updates.RemoveRange(0, count); // LUCENENET: Checked count parameter for correctness
         }
     }
 }
Example #4
        public virtual void TestAnyChanges()
        {
            DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();
            int size               = 200 + Random.Next(500) * RANDOM_MULTIPLIER;
            int termsSinceFreeze   = 0;
            int queriesSinceFreeze = 0;

            for (int i = 0; i < size; i++)
            {
                Term term = new Term("id", "" + i);
                if (Random.Next(10) == 0)
                {
                    queue.AddDelete(new TermQuery(term));
                    queriesSinceFreeze++;
                }
                else
                {
                    queue.AddDelete(term);
                    termsSinceFreeze++;
                }
                Assert.IsTrue(queue.AnyChanges());
                if (Random.Next(5) == 0)
                {
                    FrozenBufferedUpdates freezeGlobalBuffer = queue.FreezeGlobalBuffer(null);
                    Assert.AreEqual(termsSinceFreeze, freezeGlobalBuffer.termCount);
                    Assert.AreEqual(queriesSinceFreeze, ((Query[])freezeGlobalBuffer.queries.Clone()).Length);
                    queriesSinceFreeze = 0;
                    termsSinceFreeze   = 0;
                    Assert.IsFalse(queue.AnyChanges());
                }
            }
        }
        internal FrozenBufferedUpdates FreezeGlobalBuffer(DeleteSlice callerSlice)
        {
            globalBufferLock.@Lock();

            /*
             * Here we freeze the global buffer so we need to lock it, apply all
             * deletes in the queue and reset the global slice to let the GC prune the
             * queue.
             */
            // Take the current tail and make it local; any changes after this
            // call are applied later and are not relevant here.
            Node currentTail = tail;

            if (callerSlice != null)
            {
                // Update the caller's slice so we are on the same page
                callerSlice.sliceTail = currentTail;
            }
            try
            {
                if (globalSlice.sliceTail != currentTail)
                {
                    globalSlice.sliceTail = currentTail;
                    globalSlice.Apply(globalBufferedUpdates, BufferedUpdates.MAX_INT32);
                }

                FrozenBufferedUpdates packet = new FrozenBufferedUpdates(globalBufferedUpdates, false);
                globalBufferedUpdates.Clear();
                return(packet);
            }
            finally
            {
                globalBufferLock.Unlock();
            }
        }
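FreezeGlobalBuffer captures the tail once, brings the global slice up to that tail, snapshots the buffered deletes into an immutable packet, and only then clears the mutable buffer, all while holding the global buffer lock. The following condensed sketch shows just that freeze-and-clear sequence; MiniBuffer and MiniFrozenUpdates are hypothetical illustrations, not the real internals.

using System.Collections.Generic;
using System.Threading;

// Hypothetical freeze-and-clear pattern: snapshot the mutable buffer into an
// immutable packet while holding the lock, then reset the buffer.
public class MiniFrozenUpdates
{
    public IReadOnlyList<string> Terms { get; }
    public MiniFrozenUpdates(IEnumerable<string> terms) => Terms = new List<string>(terms);
}

public class MiniBuffer
{
    private readonly List<string> bufferedTerms = new List<string>();
    private readonly object bufferLock = new object();

    public void Add(string term)
    {
        lock (bufferLock) bufferedTerms.Add(term);
    }

    public MiniFrozenUpdates Freeze()
    {
        Monitor.Enter(bufferLock);
        try
        {
            // Copy the current contents into an immutable packet...
            var packet = new MiniFrozenUpdates(bufferedTerms);
            // ...and reset the mutable buffer so later adds go into the next packet.
            bufferedTerms.Clear();
            return packet;
        }
        finally
        {
            Monitor.Exit(bufferLock);
        }
    }
}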
 protected FlushTicket(FrozenBufferedUpdates frozenUpdates)
 {
     if (Debugging.AssertsEnabled)
     {
         Debugging.Assert(frozenUpdates != null);
     }
     this.m_frozenUpdates = frozenUpdates;
 }
Example #7
 internal FlushedSegment(SegmentCommitInfo segmentInfo, FieldInfos fieldInfos, BufferedUpdates segmentUpdates, IMutableBits liveDocs, int delCount)
 {
     this.segmentInfo    = segmentInfo;
     this.fieldInfos     = fieldInfos;
     this.segmentUpdates = segmentUpdates != null && segmentUpdates.Any() ? new FrozenBufferedUpdates(segmentUpdates, true) : null;
     this.liveDocs       = liveDocs;
     this.delCount       = delCount;
 }
        public virtual int NumDocsInRAM => numDocsInRAM; // public for FlushPolicy

        /// <summary>
        /// Prepares this DWPT for flushing. This method will freeze and return the
        /// <see cref="DocumentsWriterDeleteQueue"/>'s global buffer and apply all pending
        /// deletes to this DWPT.
        /// </summary>
        internal virtual FrozenBufferedUpdates PrepareFlush()
        {
            Debug.Assert(numDocsInRAM > 0);
            FrozenBufferedUpdates globalUpdates = deleteQueue.FreezeGlobalBuffer(deleteSlice);

            /* deleteSlice can possibly be null if we have hit non-aborting exceptions during indexing and never succeeded
             * adding a document. */
            if (deleteSlice != null)
            {
                // apply all deletes before we flush and release the delete slice
                deleteSlice.Apply(pendingUpdates, numDocsInRAM);
                Debug.Assert(deleteSlice.IsEmpty);
                deleteSlice.Reset();
            }
            return(globalUpdates);
        }
            /// <summary>
            /// Publishes the flushed segment, segment private deletes (if any) and its
            /// associated global delete (if present) to <see cref="IndexWriter"/>.  The actual
            /// publishing operation is synced on IW -> BDS so that the <see cref="SegmentInfo"/>'s
            /// delete generation is always <see cref="FrozenBufferedUpdates.DelGen"/> (<paramref name="globalPacket"/>) + 1
            /// </summary>
            protected void PublishFlushedSegment(IndexWriter indexWriter, FlushedSegment newSegment, FrozenBufferedUpdates globalPacket)
            {
                Debug.Assert(newSegment != null);
                Debug.Assert(newSegment.segmentInfo != null);
                FrozenBufferedUpdates segmentUpdates = newSegment.segmentUpdates;

                //System.out.println("FLUSH: " + newSegment.segmentInfo.info.name);
                if (indexWriter.infoStream.IsEnabled("DW"))
                {
                    indexWriter.infoStream.Message("DW", "publishFlushedSegment seg-private updates=" + segmentUpdates);
                }

                if (segmentUpdates != null && indexWriter.infoStream.IsEnabled("DW"))
                {
                    indexWriter.infoStream.Message("DW", "flush: push buffered seg private updates: " + segmentUpdates);
                }
                // now publish!
                indexWriter.PublishFlushedSegment(newSegment.segmentInfo, segmentUpdates, globalPacket);
            }
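The ordering guarantee from the doc comment, that a segment's delete generation is always the global packet's generation plus one, falls out of pushing both packets under a single lock. Below is a toy sketch of that idea with a hypothetical MiniPublisher that only tracks generations; it is a simplified illustration, not the real IndexWriter logic.

// Hypothetical illustration of the publish ordering described above: the global
// packet and the flushed segment are assigned generations under one lock, so the
// segment's generation is always ordered after the global packet's.
public class MiniPublisher
{
    private long nextGen = 1;
    private readonly object publishLock = new object();

    // Returns the generation stamped onto the flushed segment.
    public long PublishFlushedSegment(bool hasGlobalDeletes)
    {
        lock (publishLock)
        {
            if (hasGlobalDeletes)
            {
                nextGen++; // the global packet consumes one generation first
            }
            // The segment-private packet (or the segment itself, if it has no private
            // deletes) always takes the next generation, so it is ordered after every
            // global delete that was visible at flush time.
            return nextGen++;
        }
    }
}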
Example #10
 /// <summary>
 /// Appends a new packet of buffered deletes to the stream,
 /// setting its generation:
 /// </summary>
 public virtual long Push(FrozenBufferedUpdates packet)
 {
     UninterruptableMonitor.Enter(this);
     try
     {
         /*
          * The insert operation must be atomic. If we let threads increment the gen
          * and push the packet afterwards we risk that packets are out of order.
          * With DWPT this is possible if two or more flushes are racing for pushing
          * updates. If the pushed packets get out of order we would lose documents,
          * since deletes would be applied to the wrong segments.
          */
         packet.DelGen = nextGen++;
         if (Debugging.AssertsEnabled)
         {
             Debugging.Assert(packet.Any());
             Debugging.Assert(CheckDeleteStats());
             Debugging.Assert(packet.DelGen < nextGen);
             Debugging.Assert(updates.Count == 0 || updates[updates.Count - 1].DelGen < packet.DelGen, "Delete packets must be in order");
         }
         updates.Add(packet);
         numTerms.AddAndGet(packet.numTermDeletes);
         bytesUsed.AddAndGet(packet.bytesUsed);
         if (infoStream.IsEnabled("BD"))
         {
             infoStream.Message("BD", "push deletes " + packet + " delGen=" + packet.DelGen + " packetCount=" + updates.Count + " totBytesUsed=" + bytesUsed);
         }
         if (Debugging.AssertsEnabled)
         {
             Debugging.Assert(CheckDeleteStats());
         }
         return(packet.DelGen);
     }
     finally
     {
         UninterruptableMonitor.Exit(this);
     }
 }
Example #11
        public virtual void TestPartiallyAppliedGlobalSlice()
        {
            DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue();

            System.Reflection.FieldInfo field = typeof(DocumentsWriterDeleteQueue).GetField("globalBufferLock",
                                                                                            BindingFlags.NonPublic | BindingFlags.GetField | BindingFlags.Instance);
            ReentrantLock @lock = (ReentrantLock)field.GetValue(queue);

            @lock.Lock();
            var t = new ThreadAnonymousInnerClassHelper(this, queue);

            t.Start();
            t.Join();
            @lock.Unlock();
            Assert.IsTrue(queue.AnyChanges(), "changes in del queue but not in slice yet");
            queue.TryApplyGlobalSlice();
            Assert.IsTrue(queue.AnyChanges(), "changes in global buffer");
            FrozenBufferedUpdates freezeGlobalBuffer = queue.FreezeGlobalBuffer(null);

            Assert.IsTrue(freezeGlobalBuffer.Any());
            Assert.AreEqual(1, freezeGlobalBuffer.termCount);
            Assert.IsFalse(queue.AnyChanges(), "all changes applied");
        }
Example #12
        internal virtual void Update(FrozenBufferedUpdates @in)
        {
            iterables.Add(@in.GetTermsEnumerable());

            for (int queryIdx = 0; queryIdx < @in.queries.Length; queryIdx++)
            {
                Query query = @in.queries[queryIdx];
                queries[query] = BufferedUpdates.MAX_INT32;
            }

            foreach (NumericDocValuesUpdate nu in @in.numericDVUpdates)
            {
                NumericDocValuesUpdate clone = new NumericDocValuesUpdate(nu.term, nu.field, (long?)nu.value);
                clone.docIDUpto = int.MaxValue;
                numericDVUpdates.Add(clone);
            }

            foreach (BinaryDocValuesUpdate bu in @in.binaryDVUpdates)
            {
                BinaryDocValuesUpdate clone = new BinaryDocValuesUpdate(bu.term, bu.field, (BytesRef)bu.value);
                clone.docIDUpto = int.MaxValue;
                binaryDVUpdates.Add(clone);
            }
        }
Example #13
        internal virtual void Update(FrozenBufferedUpdates @in)
        {
            Iterables.Add(@in.TermsIterable());

            for (int queryIdx = 0; queryIdx < @in.Queries.Length; queryIdx++)
            {
                Query query = @in.Queries[queryIdx];
                Queries[query] = BufferedUpdates.MAX_INT;
            }

            foreach (NumericDocValuesUpdate nu in @in.NumericDVUpdates)
            {
                NumericDocValuesUpdate clone = new NumericDocValuesUpdate(nu.Term, nu.Field, (long?)nu.Value);
                clone.DocIDUpto = int.MaxValue;
                NumericDVUpdates.Add(clone);
            }

            foreach (BinaryDocValuesUpdate bu in @in.BinaryDVUpdates)
            {
                BinaryDocValuesUpdate clone = new BinaryDocValuesUpdate(bu.Term, bu.Field, (BytesRef)bu.Value);
                clone.DocIDUpto = int.MaxValue;
                BinaryDVUpdates.Add(clone);
            }
        }
 internal GlobalDeletesTicket(FrozenBufferedUpdates frozenUpdates) // LUCENENET NOTE: Made internal rather than protected because class is sealed
     : base(frozenUpdates)
 {
 }
 public IterableAnonymousClass2(FrozenBufferedUpdates outerInstance)
 {
     this.outerInstance = outerInstance;
 }
Example #16
        /// <summary>
        /// Resolves the buffered deleted Term/Query/docIDs into
        ///  actual deleted docIDs in the liveDocs MutableBits for
        ///  each SegmentReader.
        /// </summary>
        public virtual ApplyDeletesResult ApplyDeletesAndUpdates(IndexWriter.ReaderPool readerPool, IList <SegmentCommitInfo> infos)
        {
            lock (this)
            {
                // Use a Stopwatch to measure elapsed time; DateTime.Now.Millisecond only
                // returns the millisecond component of the current time, not a duration.
                System.Diagnostics.Stopwatch stopwatch = System.Diagnostics.Stopwatch.StartNew();

                if (infos.Count == 0)
                {
                    return(new ApplyDeletesResult(false, NextGen_Renamed++, null));
                }

                Debug.Assert(CheckDeleteStats());

                if (!Any())
                {
                    if (InfoStream.IsEnabled("BD"))
                    {
                        InfoStream.Message("BD", "applyDeletes: no deletes; skipping");
                    }
                    return(new ApplyDeletesResult(false, NextGen_Renamed++, null));
                }

                if (InfoStream.IsEnabled("BD"))
                {
                    InfoStream.Message("BD", "applyDeletes: infos=" + infos + " packetCount=" + Updates.Count);
                }

                long gen = NextGen_Renamed++;

                List <SegmentCommitInfo> infos2 = new List <SegmentCommitInfo>();
                infos2.AddRange(infos);
                infos2.Sort(sortSegInfoByDelGen);

                CoalescedUpdates coalescedUpdates = null;
                bool             anyNewDeletes    = false;

                int infosIDX = infos2.Count - 1;
                int delIDX   = Updates.Count - 1;

                IList <SegmentCommitInfo> allDeleted = null;

                while (infosIDX >= 0)
                {
                    //System.out.println("BD: cycle delIDX=" + delIDX + " infoIDX=" + infosIDX);

                    FrozenBufferedUpdates packet = delIDX >= 0 ? Updates[delIDX] : null;
                    SegmentCommitInfo     info   = infos2[infosIDX];
                    long segGen = info.BufferedDeletesGen;

                    if (packet != null && segGen < packet.DelGen)
                    {
                        //        System.out.println("  coalesce");
                        if (coalescedUpdates == null)
                        {
                            coalescedUpdates = new CoalescedUpdates();
                        }
                        if (!packet.IsSegmentPrivate)
                        {
                            /*
                             * Only coalesce if we are NOT on a segment private del packet: the segment private del packet
                             * must only be applied to segments with the same delGen.  Yet, if a segment is already deleted
                             * from the SI since it had no more documents remaining after some del packets younger than
                             * its segPrivate packet (higher delGen) have been applied, the segPrivate packet has not been
                             * removed.
                             */
                            coalescedUpdates.Update(packet);
                        }

                        delIDX--;
                    }
                    else if (packet != null && segGen == packet.DelGen)
                    {
                        Debug.Assert(packet.IsSegmentPrivate, "Packet and Segments deletegen can only match on a segment private del packet gen=" + segGen);
                        //System.out.println("  eq");

                        // Lock order: IW -> BD -> RP
                        Debug.Assert(readerPool.InfoIsLive(info));
                        ReadersAndUpdates rld    = readerPool.Get(info, true);
                        SegmentReader     reader = rld.GetReader(IOContext.READ);
                        int  delCount            = 0;
                        bool segAllDeletes;
                        try
                        {
                            DocValuesFieldUpdates.Container dvUpdates = new DocValuesFieldUpdates.Container();
                            if (coalescedUpdates != null)
                            {
                                //System.out.println("    del coalesced");
                                delCount += (int)ApplyTermDeletes(coalescedUpdates.TermsIterable(), rld, reader);
                                delCount += (int)ApplyQueryDeletes(coalescedUpdates.QueriesIterable(), rld, reader);
                                ApplyDocValuesUpdates(coalescedUpdates.NumericDVUpdates, rld, reader, dvUpdates);
                                ApplyDocValuesUpdates(coalescedUpdates.BinaryDVUpdates, rld, reader, dvUpdates);
                            }
                            //System.out.println("    del exact");
                            // Don't delete by Term here; DocumentsWriterPerThread
                            // already did that on flush:
                            delCount += (int)ApplyQueryDeletes(packet.QueriesIterable(), rld, reader);
                            ApplyDocValuesUpdates(Arrays.AsList(packet.NumericDVUpdates), rld, reader, dvUpdates);
                            ApplyDocValuesUpdates(Arrays.AsList(packet.BinaryDVUpdates), rld, reader, dvUpdates);
                            if (dvUpdates.Any())
                            {
                                rld.WriteFieldUpdates(info.Info.Dir, dvUpdates);
                            }
                            int fullDelCount = rld.Info.DelCount + rld.PendingDeleteCount;
                            Debug.Assert(fullDelCount <= rld.Info.Info.DocCount);
                            segAllDeletes = fullDelCount == rld.Info.Info.DocCount;
                        }
                        finally
                        {
                            rld.Release(reader);
                            readerPool.Release(rld);
                        }
                        anyNewDeletes |= delCount > 0;

                        if (segAllDeletes)
                        {
                            if (allDeleted == null)
                            {
                                allDeleted = new List <SegmentCommitInfo>();
                            }
                            allDeleted.Add(info);
                        }

                        if (InfoStream.IsEnabled("BD"))
                        {
                            InfoStream.Message("BD", "seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedUpdates == null ? "null" : coalescedUpdates.ToString()) + "] newDelCount=" + delCount + (segAllDeletes ? " 100% deleted" : ""));
                        }

                        if (coalescedUpdates == null)
                        {
                            coalescedUpdates = new CoalescedUpdates();
                        }

                        /*
                         * Since we are on a segment private del packet we must not
                         * update the coalescedDeletes here! We can simply advance to the
                         * next packet and seginfo.
                         */
                        delIDX--;
                        infosIDX--;
                        info.BufferedDeletesGen = gen;
                    }
                    else
                    {
                        //System.out.println("  gt");

                        if (coalescedUpdates != null)
                        {
                            // Lock order: IW -> BD -> RP
                            Debug.Assert(readerPool.InfoIsLive(info));
                            ReadersAndUpdates rld    = readerPool.Get(info, true);
                            SegmentReader     reader = rld.GetReader(IOContext.READ);
                            int  delCount            = 0;
                            bool segAllDeletes;
                            try
                            {
                                delCount += (int)ApplyTermDeletes(coalescedUpdates.TermsIterable(), rld, reader);
                                delCount += (int)ApplyQueryDeletes(coalescedUpdates.QueriesIterable(), rld, reader);
                                DocValuesFieldUpdates.Container dvUpdates = new DocValuesFieldUpdates.Container();
                                ApplyDocValuesUpdates(coalescedUpdates.NumericDVUpdates, rld, reader, dvUpdates);
                                ApplyDocValuesUpdates(coalescedUpdates.BinaryDVUpdates, rld, reader, dvUpdates);
                                if (dvUpdates.Any())
                                {
                                    rld.WriteFieldUpdates(info.Info.Dir, dvUpdates);
                                }
                                int fullDelCount = rld.Info.DelCount + rld.PendingDeleteCount;
                                Debug.Assert(fullDelCount <= rld.Info.Info.DocCount);
                                segAllDeletes = fullDelCount == rld.Info.Info.DocCount;
                            }
                            finally
                            {
                                rld.Release(reader);
                                readerPool.Release(rld);
                            }
                            anyNewDeletes |= delCount > 0;

                            if (segAllDeletes)
                            {
                                if (allDeleted == null)
                                {
                                    allDeleted = new List <SegmentCommitInfo>();
                                }
                                allDeleted.Add(info);
                            }

                            if (InfoStream.IsEnabled("BD"))
                            {
                                InfoStream.Message("BD", "seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + coalescedUpdates + "] newDelCount=" + delCount + (segAllDeletes ? " 100% deleted" : ""));
                            }
                        }
                        info.BufferedDeletesGen = gen;

                        infosIDX--;
                    }
                }

                Debug.Assert(CheckDeleteStats());
                if (InfoStream.IsEnabled("BD"))
                {
                    InfoStream.Message("BD", "applyDeletes took " + (DateTime.Now.Millisecond - t0) + " msec");
                }
                // assert infos != segmentInfos || !any() : "infos=" + infos + " segmentInfos=" + segmentInfos + " any=" + any;

                return(new ApplyDeletesResult(anyNewDeletes, gen, allDeleted));
            }
        }
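At its core, ApplyDeletesAndUpdates is a reverse merge walk over two generation-sorted lists: the segments (by buffered-deletes generation) and the delete packets. Packets newer than the current segment are folded into a running coalesced set, while a packet whose generation equals the segment's is that segment's private packet. The sketch below reproduces only that walk, using hypothetical Seg and Pkt records in place of SegmentCommitInfo and FrozenBufferedUpdates.

using System;
using System.Collections.Generic;
using System.Linq;

// Hypothetical skeleton of the generation-ordered walk in ApplyDeletesAndUpdates.
public record Pkt(long DelGen, bool IsSegmentPrivate);
public record Seg(string Name, long BufferedDeletesGen);

public static class CoalescingWalk
{
    public static void Apply(IList<Seg> segments, IList<Pkt> packets)
    {
        // In Lucene the packet list is already in generation order and the segments are
        // sorted by their buffered-deletes generation; we sort here to stay self-contained.
        var infos = segments.OrderBy(s => s.BufferedDeletesGen).ToList();
        var dels = packets.OrderBy(p => p.DelGen).ToList();
        var coalesced = new List<Pkt>(); // stands in for CoalescedUpdates

        int infosIdx = infos.Count - 1;
        int delIdx = dels.Count - 1;

        while (infosIdx >= 0)
        {
            Pkt packet = delIdx >= 0 ? dels[delIdx] : null;
            Seg info = infos[infosIdx];

            if (packet != null && info.BufferedDeletesGen < packet.DelGen)
            {
                // The packet is newer than the segment: fold it into the running coalesced
                // set (segment-private packets are never coalesced) and keep walking packets.
                if (!packet.IsSegmentPrivate)
                {
                    coalesced.Add(packet);
                }
                delIdx--;
            }
            else if (packet != null && info.BufferedDeletesGen == packet.DelGen)
            {
                // The segment's own private packet: apply the coalesced deletes plus the
                // private packet to this segment only, then advance both cursors.
                Console.WriteLine($"{info.Name}: apply {coalesced.Count} coalesced + private packet gen {packet.DelGen}");
                delIdx--;
                infosIdx--;
            }
            else
            {
                // No remaining packet is newer than this segment: apply whatever has been
                // coalesced so far and move on to the next (older) segment.
                Console.WriteLine($"{info.Name}: apply {coalesced.Count} coalesced packets");
                infosIdx--;
            }
        }
    }
}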
Example #17
 protected internal SegmentFlushTicket(FrozenBufferedUpdates frozenDeletes)
     : base(frozenDeletes)
 {
 }
Example #18
 public IterableAnonymousInnerClassHelper2(FrozenBufferedUpdates outerInstance)
 {
     this.outerInstance = outerInstance;
 }
 protected void FinishFlush(IndexWriter indexWriter, FlushedSegment newSegment, FrozenBufferedUpdates bufferedUpdates)
 {
     // Finish the flushed segment and publish it to IndexWriter
     if (newSegment == null)
     {
         Debug.Assert(bufferedUpdates != null);
         if (bufferedUpdates != null && bufferedUpdates.Any())
         {
             indexWriter.PublishFrozenUpdates(bufferedUpdates);
             if (indexWriter.infoStream.IsEnabled("DW"))
             {
                 indexWriter.infoStream.Message("DW", "flush: push buffered updates: " + bufferedUpdates);
             }
         }
     }
     else
     {
         PublishFlushedSegment(indexWriter, newSegment, bufferedUpdates);
     }
 }
 protected FlushTicket(FrozenBufferedUpdates frozenUpdates)
 {
     Debug.Assert(frozenUpdates != null);
     this.m_frozenUpdates = frozenUpdates;
 }
 internal SegmentFlushTicket(FrozenBufferedUpdates frozenDeletes) // LUCENENET NOTE: Made internal rather than protected because class is sealed
     : base(frozenDeletes)
 {
 }
Example #22
 protected internal GlobalDeletesTicket(FrozenBufferedUpdates frozenUpdates)
     : base(frozenUpdates)
 {
 }