Example #1
 public DataInputAnonymousInnerClassHelper(CompressingStoredFieldsReader outerInstance, int offset, int length)
 {
     this.outerInstance = outerInstance;
     this.offset        = offset;
     this.length        = length;
     decompressed       = outerInstance.bytes.Length;
 }
Example #2
        public override int Merge(MergeState mergeState)
        {
            int docCount = 0;
            int idx      = 0;

            foreach (AtomicReader reader in mergeState.Readers)
            {
                SegmentReader matchingSegmentReader = mergeState.MatchingSegmentReaders[idx++];
                CompressingStoredFieldsReader matchingFieldsReader = null;
                if (matchingSegmentReader != null)
                {
                    StoredFieldsReader fieldsReader = matchingSegmentReader.FieldsReader;
                    // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
                    if (fieldsReader != null && fieldsReader is CompressingStoredFieldsReader compressingStoredFieldsReader)
                    {
                        matchingFieldsReader = compressingStoredFieldsReader;
                    }
                }

                int   maxDoc   = reader.MaxDoc;
                IBits liveDocs = reader.LiveDocs;

                if (matchingFieldsReader is null || matchingFieldsReader.Version != VERSION_CURRENT || matchingFieldsReader.CompressionMode != compressionMode || matchingFieldsReader.ChunkSize != chunkSize) // cannot bulk-copy: the reader's version differs from the writer's, or its compression mode or chunk size differs (the way data is decompressed depends on the chunk size)
                {
                    // naive merge...
                    for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = NextLiveDoc(i + 1, liveDocs, maxDoc))
                    {
                        Document doc = reader.Document(i);
                        AddDocument(doc, mergeState.FieldInfos);
                        ++docCount;
                        mergeState.CheckAbort.Work(300);
                    }
                }
                // else: bulk-copy the compressed chunks directly; the full
                // implementation of this branch appears in Example #4 below
            }
            Finish(mergeState.FieldInfos, docCount);
            return docCount;
        }
Example #3
 // used by clone
 private CompressingStoredFieldsReader(CompressingStoredFieldsReader reader)
 {
     this.version           = reader.version;
     this.fieldInfos        = reader.fieldInfos;
     this.fieldsStream      = (IndexInput)reader.fieldsStream.Clone();
     this.indexReader       = (CompressingStoredFieldsIndexReader)reader.indexReader.Clone();
     this.maxPointer        = reader.maxPointer;
     this.chunkSize         = reader.chunkSize;
     this.packedIntsVersion = reader.packedIntsVersion;
     this.compressionMode   = reader.compressionMode;
     this.decompressor      = (Decompressor)reader.decompressor.Clone();
     this.numDocs           = reader.numDocs;
     this.bytes             = new BytesRef(reader.bytes.Bytes.Length);
     this.closed            = false;
 }
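
The copy constructor above is reachable only through cloning. Below is a minimal sketch of the Clone() override that would call it; the override signature and the EnsureOpen() guard are assumptions, not taken from this page.

 // Hypothetical sketch of the cloning entry point that uses the private copy
 // constructor above; the signature and EnsureOpen() are assumed here.
 public override object Clone()
 {
     EnsureOpen();                                   // refuse to clone a reader that was already closed
     return new CompressingStoredFieldsReader(this); // streams are cloned, immutable state is shared
 }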
Example #4
            internal ChunkIterator(CompressingStoredFieldsReader outerInstance, int startDocId)
            {
                this.outerInstance = outerInstance;
                this.docBase       = -1;
                bytes           = new BytesRef();
                spare           = new BytesRef();
                numStoredFields = new int[1];
                lengths         = new int[1];

                IndexInput @in = outerInstance.fieldsStream;

                @in.Seek(0);
                fieldsStream = new BufferedChecksumIndexInput(@in);
                fieldsStream.Seek(outerInstance.indexReader.GetStartPointer(startDocId));
            }
 // used by clone
 private CompressingStoredFieldsReader(CompressingStoredFieldsReader reader)
 {
     this.Version_Renamed         = reader.Version_Renamed;
     this.FieldInfos              = reader.FieldInfos;
     this.FieldsStream            = (IndexInput)reader.FieldsStream.Clone();
     this.IndexReader             = (CompressingStoredFieldsIndexReader)reader.IndexReader.Clone();
     this.MaxPointer              = reader.MaxPointer;
     this.ChunkSize_Renamed       = reader.ChunkSize_Renamed;
     this.PackedIntsVersion       = reader.PackedIntsVersion;
     this.CompressionMode_Renamed = reader.CompressionMode_Renamed;
     this.Decompressor            = (Decompressor)reader.Decompressor.Clone();
     this.NumDocs = reader.NumDocs;
     this.Bytes   = new BytesRef(reader.Bytes.Bytes.Length);
     this.Closed  = false;
 }
        public override int Merge(MergeState mergeState)
        {
            int docCount = 0;
            int idx      = 0;

            foreach (AtomicReader reader in mergeState.Readers)
            {
                SegmentReader matchingSegmentReader = mergeState.MatchingSegmentReaders[idx++];
                CompressingStoredFieldsReader matchingFieldsReader = null;
                if (matchingSegmentReader != null)
                {
                    StoredFieldsReader fieldsReader = matchingSegmentReader.FieldsReader;
                    // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
                    if (fieldsReader != null && fieldsReader is CompressingStoredFieldsReader)
                    {
                        matchingFieldsReader = (CompressingStoredFieldsReader)fieldsReader;
                    }
                }

                int  maxDoc   = reader.MaxDoc;
                Bits liveDocs = reader.LiveDocs;

                if (matchingFieldsReader == null || matchingFieldsReader.Version != VERSION_CURRENT || matchingFieldsReader.CompressionMode != CompressionMode || matchingFieldsReader.ChunkSize != ChunkSize) // cannot bulk-copy: the reader's version differs from the writer's, or its compression mode or chunk size differs (the way data is decompressed depends on the chunk size)
                {
                    // naive merge...
                    for (int i = NextLiveDoc(0, liveDocs, maxDoc); i < maxDoc; i = NextLiveDoc(i + 1, liveDocs, maxDoc))
                    {
                        Document doc = reader.Document(i);
                        AddDocument(doc, mergeState.FieldInfos);
                        ++docCount;
                        mergeState.checkAbort.Work(300);
                    }
                }
                else
                {
                    int docID = NextLiveDoc(0, liveDocs, maxDoc);
                    if (docID < maxDoc)
                    {
                        // not all docs were deleted
                        CompressingStoredFieldsReader.ChunkIterator it = matchingFieldsReader.GetChunkIterator(docID);
                        int[] startOffsets = new int[0];
                        do
                        {
                            // go to the next chunk that contains docID
                            it.Next(docID);
                            // transform lengths into offsets
                            if (startOffsets.Length < it.ChunkDocs)
                            {
                                startOffsets = new int[ArrayUtil.Oversize(it.ChunkDocs, 4)];
                            }
                            for (int i = 1; i < it.ChunkDocs; ++i)
                            {
                                startOffsets[i] = startOffsets[i - 1] + it.Lengths[i - 1];
                            }

                            if (NumBufferedDocs == 0 && startOffsets[it.ChunkDocs - 1] < ChunkSize && startOffsets[it.ChunkDocs - 1] + it.Lengths[it.ChunkDocs - 1] >= ChunkSize && NextDeletedDoc(it.DocBase, liveDocs, it.DocBase + it.ChunkDocs) == it.DocBase + it.ChunkDocs) // starting a new chunk, the chunk is small enough and large enough, and it contains no deletions -> copy it without decompressing
                            {
                                Debug.Assert(docID == it.DocBase);

                                // no need to decompress, just copy data
                                IndexWriter.WriteIndex(it.ChunkDocs, FieldsStream.FilePointer);
                                WriteHeader(this.DocBase, it.ChunkDocs, it.NumStoredFields, it.Lengths);
                                it.CopyCompressedData(FieldsStream);
                                this.DocBase += it.ChunkDocs;
                                docID         = NextLiveDoc(it.DocBase + it.ChunkDocs, liveDocs, maxDoc);
                                docCount     += it.ChunkDocs;
                                mergeState.checkAbort.Work(300 * it.ChunkDocs);
                            }
                            else
                            {
                                // decompress
                                it.Decompress();
                                if (startOffsets[it.ChunkDocs - 1] + it.Lengths[it.ChunkDocs - 1] != it.Bytes.Length)
                                {
                                    throw new CorruptIndexException("Corrupted: expected chunk size=" + (startOffsets[it.ChunkDocs - 1] + it.Lengths[it.ChunkDocs - 1]) + ", got " + it.Bytes.Length);
                                }
                                // copy non-deleted docs
                                for (; docID < it.DocBase + it.ChunkDocs; docID = NextLiveDoc(docID + 1, liveDocs, maxDoc))
                                {
                                    int diff = docID - it.DocBase;
                                    StartDocument(it.NumStoredFields[diff]);
                                    BufferedDocs.WriteBytes(it.Bytes.Bytes, it.Bytes.Offset + startOffsets[diff], it.Lengths[diff]);
                                    FinishDocument();
                                    ++docCount;
                                    mergeState.checkAbort.Work(300);
                                }
                            }
                        } while (docID < maxDoc);

                        it.CheckIntegrity();
                    }
                }
            }
            Finish(mergeState.FieldInfos, docCount);
            return docCount;
        }
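
The Merge implementations above lean on two private helpers, NextLiveDoc and NextDeletedDoc, which are not shown on this page. Here is a minimal sketch of what they are expected to do, assuming the live-docs bitset exposes a Get(int) accessor as used elsewhere in Lucene.NET.

        // Sketch (assumed, not copied from this page): skip to the next live or
        // deleted document, treating a null liveDocs as "no deletions".
        private static int NextLiveDoc(int doc, Bits liveDocs, int maxDoc)
        {
            if (liveDocs == null)
            {
                return doc; // no deletions, every document is live
            }
            while (doc < maxDoc && !liveDocs.Get(doc))
            {
                ++doc;
            }
            return doc;
        }

        private static int NextDeletedDoc(int doc, Bits liveDocs, int maxDoc)
        {
            if (liveDocs == null)
            {
                return maxDoc; // no deletions at all
            }
            while (doc < maxDoc && liveDocs.Get(doc))
            {
                ++doc;
            }
            return doc;
        }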
Example #8
 public DataInputAnonymousClass(CompressingStoredFieldsReader outerInstance, int length)
 {
     this.outerInstance = outerInstance;
     this.length        = length;
     decompressed       = outerInstance.bytes.Length;
 }
            internal ChunkIterator(CompressingStoredFieldsReader outerInstance, int startDocId)
            {
                this.OuterInstance = outerInstance;
                this.DocBase = -1;
                Bytes = new BytesRef();
                Spare = new BytesRef();
                NumStoredFields = new int[1];
                Lengths = new int[1];

                IndexInput @in = outerInstance.FieldsStream;
                @in.Seek(0);
                FieldsStream = new BufferedChecksumIndexInput(@in);
                FieldsStream.Seek(outerInstance.IndexReader.GetStartPointer(startDocId));
            }
 public DataInputAnonymousInnerClassHelper(CompressingStoredFieldsReader outerInstance, int offset, int length)
 {
     this.OuterInstance = outerInstance;
     this.Offset = offset;
     this.Length = length;
     decompressed = outerInstance.Bytes.Length;
 }
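
These anonymous DataInput helpers wrap the reader's decompression buffer so that a single stored document can be read without materializing the whole chunk at once. A rough sketch of the kind of ReadByte override such a helper usually carries follows; FillBuffer() and the buffer field are assumptions used for illustration only.

 // Hypothetical sketch: serve bytes from the shared decompression buffer,
 // refilling it on demand. 'bytes' stands for the outer reader's BytesRef
 // buffer and FillBuffer() for an assumed helper that decompresses more data.
 public override byte ReadByte()
 {
     if (bytes.Length == 0)
     {
         FillBuffer(); // decompress the next block into 'bytes'
     }
     --bytes.Length;
     return bytes.Bytes[bytes.Offset++];
 }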