public void Read(IndexInput input, FieldInfos fieldInfos)
{
    this.term = null; // invalidate cache
    int start = input.ReadVInt();
    int length = input.ReadVInt();
    int totalLength = start + length;
    if (preUTF8Strings)
    {
        text.SetLength(totalLength);
        input.ReadChars(text.result, start, length);
    }
    else
    {
        if (dirty)
        {
            // Fully convert all bytes since bytes is dirty
            UnicodeUtil.UTF16toUTF8(text.result, 0, text.length, bytes);
            bytes.SetLength(totalLength);
            input.ReadBytes(bytes.result, start, length);
            UnicodeUtil.UTF8toUTF16(bytes.result, 0, totalLength, text);
            dirty = false;
        }
        else
        {
            // Incrementally convert only the UTF8 bytes that are new:
            bytes.SetLength(totalLength);
            input.ReadBytes(bytes.result, start, length);
            UnicodeUtil.UTF8toUTF16(bytes.result, start, length, text);
        }
    }
    this.field = fieldInfos.FieldName(input.ReadVInt());
}
public void Read(IndexInput input, FieldInfos fieldInfos)
{
    this.Term = null; // invalidate cache
    NewSuffixStart = input.ReadVInt();
    int length = input.ReadVInt();
    int totalLength = NewSuffixStart + length;
    Debug.Assert(totalLength <= ByteBlockPool.BYTE_BLOCK_SIZE - 2, "termLength=" + totalLength + ",resource=" + input);
    if (Bytes.Bytes.Length < totalLength)
    {
        Bytes.Grow(totalLength);
    }
    Bytes.Length = totalLength;
    input.ReadBytes(Bytes.Bytes, NewSuffixStart, length);
    int fieldNumber = input.ReadVInt();
    if (fieldNumber != CurrentFieldNumber)
    {
        CurrentFieldNumber = fieldNumber;
        // NOTE: too much sneakiness here, seriously this is a negative vint?!
        if (CurrentFieldNumber == -1)
        {
            Field = "";
        }
        else
        {
            Debug.Assert(fieldInfos.FieldInfo(CurrentFieldNumber) != null, CurrentFieldNumber.ToString());
            Field = String.Intern(fieldInfos.FieldInfo(CurrentFieldNumber).Name);
        }
    }
    else
    {
        Debug.Assert(Field.Equals(fieldInfos.FieldInfo(fieldNumber).Name),
            "currentFieldNumber=" + CurrentFieldNumber + " field=" + Field + " vs " +
            (fieldInfos.FieldInfo(fieldNumber) == null ? "null" : fieldInfos.FieldInfo(fieldNumber).Name));
    }
}
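To make the prefix compression being decoded here concrete: if the previous term's bytes were `foobar` and the stream delivers NewSuffixStart = 3, length = 3, and the suffix bytes `zle`, the first three bytes are reused and the buffer becomes `foozle` (totalLength = 6). Only the suffix that differs from the previous term is ever stored on disk.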
public Norm(SegmentReader enclosingInstance, IndexInput in_Renamed, int number, long normSeek)
{
    InitBlock(enclosingInstance);
    this.in_Renamed = in_Renamed;
    this.number = number;
    this.normSeek = normSeek;
}
public Lucene41SkipReader(IndexInput skipStream, int maxSkipLevels, int blockSize, bool hasPos, bool hasOffsets, bool hasPayloads)
    : base(skipStream, maxSkipLevels, blockSize, 8)
{
    this.BlockSize = blockSize;
    DocPointer_Renamed = new long[maxSkipLevels];
    if (hasPos)
    {
        PosPointer_Renamed = new long[maxSkipLevels];
        PosBufferUpto_Renamed = new int[maxSkipLevels];
        if (hasPayloads)
        {
            PayloadByteUpto_Renamed = new int[maxSkipLevels];
        }
        else
        {
            PayloadByteUpto_Renamed = null;
        }
        if (hasOffsets || hasPayloads)
        {
            PayPointer_Renamed = new long[maxSkipLevels];
        }
        else
        {
            PayPointer_Renamed = null;
        }
    }
    else
    {
        PosPointer_Renamed = null;
    }
}
public Lucene3xSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval)
    : base(skipStream, maxSkipLevels, skipInterval)
{
    FreqPointer_Renamed = new long[maxSkipLevels];
    ProxPointer_Renamed = new long[maxSkipLevels];
    PayloadLength_Renamed = new int[maxSkipLevels];
}
internal DefaultSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval)
    : base(skipStream, maxSkipLevels, skipInterval)
{
    freqPointer = new long[maxSkipLevels];
    proxPointer = new long[maxSkipLevels];
    payloadLength = new int[maxSkipLevels];
}
public SegmentTermDocs(SegmentReader parent)
{
    this.parent = parent;
    this.freqStream = (IndexInput) parent.freqStream.Clone();
    this.deletedDocs = parent.deletedDocs;
    this.skipInterval = parent.tis.GetSkipInterval();
}
internal SegmentTermEnum(IndexInput i, FieldInfos fis, bool isi)
{
    input = i;
    fieldInfos = fis;
    isIndex = isi;
    maxSkipLevels = 1; // use single-level skip lists for formats > -3

    int firstInt = input.ReadInt();
    if (firstInt >= 0)
    {
        // original-format file, without explicit format version number
        format = 0;
        size = firstInt;

        // back-compatible settings
        indexInterval = 128;
        skipInterval = System.Int32.MaxValue; // switch off skipTo optimization
    }
    else
    {
        // we have a format version number
        format = firstInt;

        // check that it is a format we can understand
        if (format < TermInfosWriter.FORMAT_CURRENT)
            throw new CorruptIndexException("Unknown format version: " + format + " expected " + TermInfosWriter.FORMAT_CURRENT + " or higher");

        size = input.ReadLong(); // read the size

        if (format == -1)
        {
            if (!isIndex)
            {
                indexInterval = input.ReadInt();
                formatM1SkipInterval = input.ReadInt();
            }
            // switch off skipTo optimization for file formats prior to 1.4rc2,
            // in order to avoid a bug in the skipTo implementation of these versions
            skipInterval = System.Int32.MaxValue;
        }
        else
        {
            indexInterval = input.ReadInt();
            skipInterval = input.ReadInt();
            if (format <= TermInfosWriter.FORMAT)
            {
                // this new format introduces multi-level skipping
                maxSkipLevels = input.ReadInt();
            }
        }
        System.Diagnostics.Debug.Assert(indexInterval > 0, "indexInterval=" + indexInterval + " is negative; must be > 0");
        System.Diagnostics.Debug.Assert(skipInterval > 0, "skipInterval=" + skipInterval + " is negative; must be > 0");
    }
    if (format > TermInfosWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
    {
        termBuffer.SetPreUTF8Strings();
        scanBuffer.SetPreUTF8Strings();
        prevBuffer.SetPreUTF8Strings();
    }
}
public SegmentTermDocs(IndexInput freqStream, TermInfosReader tis, FieldInfos fieldInfos)
{
    this.FreqStream = (IndexInput) freqStream.Clone();
    this.Tis = tis;
    this.FieldInfos = fieldInfos;
    SkipInterval = tis.SkipInterval;
    MaxSkipLevels = tis.MaxSkipLevels;
}
internal DirectPacked64SingleBlockReader(int bitsPerValue, int valueCount, IndexInput @in)
    : base(valueCount, bitsPerValue)
{
    this.@in = @in;
    StartPointer = @in.FilePointer;
    ValuesPerBlock = 64 / bitsPerValue;
    Mask = ~(~0L << bitsPerValue);
}
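For orientation, a minimal sketch (an assumption, not code from this section) of how such a reader could resolve a value on demand from the fields initialized above; BitsPerValue is assumed to be exposed by the packed-ints base class:

// Sketch only: resolve one value by seeking to its 64-bit block.
public override long Get(int index)
{
    int blockOffset = index / ValuesPerBlock;  // which 64-bit block holds the value
    long skip = ((long) blockOffset) << 3;     // 8 bytes per block
    try
    {
        @in.Seek(StartPointer + skip);
        long block = @in.ReadLong();
        int offsetInBlock = index % ValuesPerBlock; // position inside the block
        // unsigned shift, then keep only the low BitsPerValue bits
        return ((long) ((ulong) block >> (offsetInBlock * BitsPerValue))) & Mask;
    }
    catch (System.IO.IOException e)
    {
        throw new System.SystemException("failed to read value at index " + index, e);
    }
}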
/*internal*/
public FieldsReader(Directory d, System.String segment, FieldInfos fn)
{
    fieldInfos = fn;
    fieldsStream = d.OpenInput(segment + ".fdt");
    indexStream = d.OpenInput(segment + ".fdx");
    size = (int) (indexStream.Length() / 8);
}
private int CheckValidFormat(IndexInput in_Renamed)
{
    int format = in_Renamed.ReadInt();
    if (format > TermVectorsWriter.FORMAT_VERSION)
    {
        throw new System.IO.IOException("Incompatible format version: " + format + " expected " + TermVectorsWriter.FORMAT_VERSION + " or less");
    }
    return format;
}
public void Read(IndexInput input, FieldInfos fieldInfos)
{
    this.term = null; // invalidate cache
    int start = input.ReadVInt();
    int length = input.ReadVInt();
    int totalLength = start + length;
    SetTextLength(totalLength);
    input.ReadChars(this.text, start, length);
    this.field = fieldInfos.FieldName(input.ReadVInt());
}
/*protected internal*/
public SegmentTermDocs(SegmentReader parent)
{
    this.parent = parent;
    this.freqStream = (IndexInput) parent.core.freqStream.Clone();
    lock (parent)
    {
        this.deletedDocs = parent.deletedDocs;
    }
    this.skipInterval = parent.core.GetTermsReader().SkipInterval;
    this.maxSkipLevels = parent.core.GetTermsReader().MaxSkipLevels;
}
public /*protected internal*/ SegmentTermDocs(SegmentReader parent)
{
    this.parent = parent;
    this.freqStream = (IndexInput) parent.core.freqStream.Clone();
    lock (parent)
    {
        this.deletedDocs = parent.deletedDocs;
    }
    this.skipInterval = parent.core.GetTermsReader().GetSkipInterval();
    this.maxSkipLevels = parent.core.GetTermsReader().GetMaxSkipLevels();
}
public CompoundFileReader(Directory dir, System.String name, int readBufferSize)
{
    directory = dir;
    fileName = name;
    this.readBufferSize = readBufferSize;

    bool success = false;
    try
    {
        stream = dir.OpenInput(name, readBufferSize);

        // read the directory and init files
        int count = stream.ReadVInt();
        FileEntry entry = null;
        for (int i = 0; i < count; i++)
        {
            long offset = stream.ReadLong();
            System.String id = stream.ReadString();

            if (entry != null)
            {
                // set length of the previous entry
                entry.length = offset - entry.offset;
            }

            entry = new FileEntry();
            entry.offset = offset;
            entries[id] = entry;
        }

        // set the length of the final entry
        if (entry != null)
        {
            entry.length = stream.Length() - entry.offset;
        }

        success = true;
    }
    finally
    {
        if (!success && (stream != null))
        {
            try
            {
                stream.Close();
            }
            catch (System.IO.IOException)
            {
                // ignore, so the original failure propagates
            }
        }
    }
}
// Used only by clone
private FieldsReader(FieldInfos fieldInfos, int numTotalDocs, int size, int format, int formatSize, int docStoreOffset, IndexInput cloneableFieldsStream, IndexInput cloneableIndexStream)
{
    this.fieldInfos = fieldInfos;
    this.numTotalDocs = numTotalDocs;
    this.size = size;
    this.format = format;
    this.formatSize = formatSize;
    this.docStoreOffset = docStoreOffset;
    this.cloneableFieldsStream = cloneableFieldsStream;
    this.cloneableIndexStream = cloneableIndexStream;
    fieldsStream = (IndexInput) cloneableFieldsStream.Clone();
    indexStream = (IndexInput) cloneableIndexStream.Clone();
}
public DirectPackedReader(int bitsPerValue, int valueCount, IndexInput @in)
    : base(valueCount, bitsPerValue)
{
    this.@in = @in;
    StartPointer = @in.FilePointer;
    if (bitsPerValue == 64)
    {
        ValueMask = -1L;
    }
    else
    {
        ValueMask = (1L << bitsPerValue) - 1;
    }
}
/*internal*/
public TermVectorsReader(Directory d, System.String segment, FieldInfos fieldInfos)
{
    if (d.FileExists(segment + TermVectorsWriter.TVX_EXTENSION))
    {
        tvx = d.OpenInput(segment + TermVectorsWriter.TVX_EXTENSION);
        CheckValidFormat(tvx);
        tvd = d.OpenInput(segment + TermVectorsWriter.TVD_EXTENSION);
        tvdFormat = CheckValidFormat(tvd);
        tvf = d.OpenInput(segment + TermVectorsWriter.TVF_EXTENSION);
        tvfFormat = CheckValidFormat(tvf);
        size = (int) tvx.Length() / 8;
    }

    this.fieldInfos = fieldInfos;
}
internal SegmentTermEnum(IndexInput i, FieldInfos fis, bool isi)
{
    input = i;
    fieldInfos = fis;
    isIndex = isi;

    int firstInt = input.ReadInt();
    if (firstInt >= 0)
    {
        // original-format file, without explicit format version number
        format = 0;
        size = firstInt;

        // back-compatible settings
        indexInterval = 128;
        skipInterval = System.Int32.MaxValue; // switch off skipTo optimization
    }
    else
    {
        // we have a format version number
        format = firstInt;

        // check that it is a format we can understand
        if (format < TermInfosWriter.FORMAT)
            throw new System.IO.IOException("Unknown format version: " + format);

        size = input.ReadLong(); // read the size

        if (format == -1)
        {
            if (!isIndex)
            {
                indexInterval = input.ReadInt();
                formatM1SkipInterval = input.ReadInt();
            }
            // switch off skipTo optimization for file formats prior to 1.4rc2,
            // in order to avoid a bug in the skipTo implementation of these versions
            skipInterval = System.Int32.MaxValue;
        }
        else
        {
            indexInterval = input.ReadInt();
            skipInterval = input.ReadInt();
        }
    }
}
internal TermVectorsReader(Directory d, System.String segment, FieldInfos fieldInfos, int readBufferSize, int docStoreOffset, int size)
{
    bool success = false;
    try
    {
        if (d.FileExists(segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION))
        {
            tvx = d.OpenInput(segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION, readBufferSize);
            CheckValidFormat(tvx);
            tvd = d.OpenInput(segment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION, readBufferSize);
            tvdFormat = CheckValidFormat(tvd);
            tvf = d.OpenInput(segment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION, readBufferSize);
            tvfFormat = CheckValidFormat(tvf);
            if (-1 == docStoreOffset)
            {
                this.docStoreOffset = 0;
                this.size = (int) (tvx.Length() >> 3);
            }
            else
            {
                this.docStoreOffset = docStoreOffset;
                this.size = size;
                // Verify the file is long enough to hold all of our docs
                System.Diagnostics.Debug.Assert(((int) (tvx.Length() / 8)) >= size + docStoreOffset);
            }
        }

        this.fieldInfos = fieldInfos;
        success = true;
    }
    finally
    {
        // With lock-less commits, it's entirely possible (and
        // fine) to hit a FileNotFound exception above. In
        // this case, we want to explicitly close any subset
        // of things that were opened so that we don't have to
        // wait for a GC to do so.
        if (!success)
        {
            Close();
        }
    }
}
private IndexInput[] skipStream; // skipStream for each level

#endregion Fields

#region Constructors

public MultiLevelSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval)
{
    this.skipStream = new IndexInput[maxSkipLevels];
    this.skipPointer = new long[maxSkipLevels];
    this.childPointer = new long[maxSkipLevels];
    this.numSkipped = new int[maxSkipLevels];
    this.maxNumberOfSkipLevels = maxSkipLevels;
    this.skipInterval = new int[maxSkipLevels];
    this.skipStream[0] = skipStream;
    this.inputIsBuffered = (skipStream is BufferedIndexInput);
    this.skipInterval[0] = skipInterval;
    for (int i = 1; i < maxSkipLevels; i++)
    {
        // cache skip intervals
        this.skipInterval[i] = this.skipInterval[i - 1] * skipInterval;
    }
    skipDoc = new int[maxSkipLevels];
}
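A worked example of the interval caching in this constructor: with skipInterval = 16 and maxSkipLevels = 3, skipInterval[] comes out as {16, 256, 4096}, so a single entry on level 2 spans 4096 documents while level 0 still addresses every 16th document.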
/// <summary>Sole constructor.</summary>
public Lucene40StoredFieldsReader(Directory d, SegmentInfo si, FieldInfos fn, IOContext context)
{
    string segment = si.Name;
    bool success = false;
    FieldInfos = fn;
    try
    {
        FieldsStream = d.OpenInput(IndexFileNames.SegmentFileName(segment, "", Lucene40StoredFieldsWriter.FIELDS_EXTENSION), context);
        string indexStreamFN = IndexFileNames.SegmentFileName(segment, "", Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION);
        IndexStream = d.OpenInput(indexStreamFN, context);

        CodecUtil.CheckHeader(IndexStream, Lucene40StoredFieldsWriter.CODEC_NAME_IDX, Lucene40StoredFieldsWriter.VERSION_START, Lucene40StoredFieldsWriter.VERSION_CURRENT);
        CodecUtil.CheckHeader(FieldsStream, Lucene40StoredFieldsWriter.CODEC_NAME_DAT, Lucene40StoredFieldsWriter.VERSION_START, Lucene40StoredFieldsWriter.VERSION_CURRENT);
        Debug.Assert(Lucene40StoredFieldsWriter.HEADER_LENGTH_DAT == FieldsStream.FilePointer);
        Debug.Assert(Lucene40StoredFieldsWriter.HEADER_LENGTH_IDX == IndexStream.FilePointer);

        long indexSize = IndexStream.Length() - Lucene40StoredFieldsWriter.HEADER_LENGTH_IDX;
        this.Size_Renamed = (int) (indexSize >> 3);

        // Verify two sources of "maxDoc" agree:
        if (this.Size_Renamed != si.DocCount)
        {
            throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + this.Size_Renamed + " but segmentInfo shows " + si.DocCount);
        }

        NumTotalDocs = (int) (indexSize >> 3);
        success = true;
    }
    finally
    {
        // With lock-less commits, it's entirely possible (and
        // fine) to hit a FileNotFound exception above. In
        // this case, we want to explicitly close any subset
        // of things that were opened so that we don't have to
        // wait for a GC to do so.
        if (!success)
        {
            try
            {
                Dispose();
            } // ensure we throw our original exception
            catch (Exception)
            {
            }
        }
    }
}
internal FieldsReader(Directory d, System.String segment, FieldInfos fn, int readBufferSize, int docStoreOffset, int size)
{
    bool success = false;
    try
    {
        fieldInfos = fn;

        cloneableFieldsStream = d.OpenInput(segment + ".fdt", readBufferSize);
        fieldsStream = (IndexInput) cloneableFieldsStream.Clone();
        indexStream = d.OpenInput(segment + ".fdx", readBufferSize);

        if (docStoreOffset != -1)
        {
            // We read only a slice out of this shared fields file
            this.docStoreOffset = docStoreOffset;
            this.size = size;

            // Verify the file is long enough to hold all of our docs
            System.Diagnostics.Debug.Assert(((int) (indexStream.Length() / 8)) >= size + this.docStoreOffset);
        }
        else
        {
            this.docStoreOffset = 0;
            this.size = (int) (indexStream.Length() >> 3);
        }

        numTotalDocs = (int) (indexStream.Length() >> 3);
        success = true;
    }
    finally
    {
        // With lock-less commits, it's entirely possible (and
        // fine) to hit a FileNotFound exception above. In
        // this case, we want to explicitly close any subset
        // of things that were opened so that we don't have to
        // wait for a GC to do so.
        if (!success)
        {
            Close();
        }
    }
}
private void AssertSameStreams(System.String msg, IndexInput expected, IndexInput test)
{
    Assert.IsNotNull(expected, msg + " null expected");
    Assert.IsNotNull(test, msg + " null test");
    Assert.AreEqual(expected.Length(), test.Length(), msg + " length");
    Assert.AreEqual(expected.FilePointer, test.FilePointer, msg + " position");

    byte[] expectedBuffer = new byte[512];
    byte[] testBuffer = new byte[expectedBuffer.Length];

    long remainder = expected.Length() - expected.FilePointer;
    while (remainder > 0)
    {
        int readLen = (int) System.Math.Min(remainder, expectedBuffer.Length);
        expected.ReadBytes(expectedBuffer, 0, readLen);
        test.ReadBytes(testBuffer, 0, readLen);
        AssertEqualArrays(msg + ", remainder " + remainder, expectedBuffer, testBuffer, 0, readLen);
        remainder -= readLen;
    }
}
public virtual void TestSingleFile()
{
    int[] data = new int[] { 0, 1, 10, 100 };
    for (int i = 0; i < data.Length; i++)
    {
        System.String name = "t" + data[i];
        CreateSequenceFile(dir, name, (byte) 0, data[i]);
        CompoundFileWriter csw = new CompoundFileWriter(dir, name + ".cfs");
        csw.AddFile(name);
        csw.Close();

        CompoundFileReader csr = new CompoundFileReader(dir, name + ".cfs");
        IndexInput expected = dir.OpenInput(name);
        IndexInput actual = csr.OpenInput(name);
        AssertSameStreams(name, expected, actual);
        AssertSameSeekBehavior(name, expected, actual);
        expected.Close();
        actual.Close();
        csr.Close();
    }
}
/// <summary>Constructs a bit vector from the file <c>name</c> in Directory
/// <c>d</c>, as written by the <see cref="Write"/> method.
/// </summary>
public BitVector(Directory d, System.String name)
{
    IndexInput input = d.OpenInput(name);
    try
    {
        size = input.ReadInt(); // read size
        if (size == -1)
        {
            ReadDgaps(input);
        }
        else
        {
            ReadBits(input);
        }
    }
    finally
    {
        input.Close();
    }
}
/// <summary>Copy numBytes from srcIn to destIn.</summary>
void copyBytes(IndexInput srcIn, IndexOutput destIn, long numBytes)
{
    // TODO: we could do this more efficiently (save a copy)
    // because it's always from a ByteSliceReader -> IndexOutput
    while (numBytes > 0)
    {
        int chunk;
        if (numBytes > 4096)
        {
            chunk = 4096;
        }
        else
        {
            chunk = (int) numBytes;
        }
        srcIn.ReadBytes(copyByteBuffer, 0, chunk);
        destIn.WriteBytes(copyByteBuffer, 0, chunk);
        numBytes -= chunk;
    }
}
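A hypothetical call site for the copy loop above (dir, freqStart, and freqEnd are illustrative names, not identifiers from this section), splicing a byte range out of one file into another:

IndexInput src = dir.OpenInput("_0.frq");             // hypothetical source file
IndexOutput dst = dir.CreateOutput("_0_spliced.frq"); // hypothetical destination
try
{
    src.Seek(freqStart);                      // hypothetical start offset
    copyBytes(src, dst, freqEnd - freqStart); // hypothetical slice length
}
finally
{
    dst.Close();
    src.Close();
}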
/// <summary>Copy the contents of the file with specified extension into the
/// provided output stream. Use the provided buffer for moving data
/// to reduce memory allocation.
/// </summary>
private void CopyFile(FileEntry source, IndexOutput os, byte[] buffer)
{
    IndexInput isRenamed = null;
    try
    {
        long startPtr = os.FilePointer;

        isRenamed = directory.OpenInput(source.file);
        long length = isRenamed.Length();
        long remainder = length;
        int chunk = buffer.Length;

        while (remainder > 0)
        {
            var len = (int) Math.Min(chunk, remainder);
            isRenamed.ReadBytes(buffer, 0, len, false);
            os.WriteBytes(buffer, len);
            remainder -= len;

            // Roughly every 2 MB we will check if it's time to abort
            if (checkAbort != null)
                checkAbort.Work(80);
        }

        // Verify that remainder is 0
        if (remainder != 0)
            throw new System.IO.IOException("Non-zero remainder length after copying: " + remainder + " (id: " + source.file + ", length: " + length + ", buffer size: " + chunk + ")");

        // Verify that the output length diff is equal to the original file length
        long endPtr = os.FilePointer;
        long diff = endPtr - startPtr;
        if (diff != length)
            throw new System.IO.IOException("Difference in the output file offsets " + diff + " does not match the original file length " + length);
    }
    finally
    {
        if (isRenamed != null)
            isRenamed.Close();
    }
}
private void Demo_FSIndexInputBug(Directory fsdir, System.String file)
{
    // Setup the test file - we need more than 1024 bytes
    IndexOutput os = fsdir.CreateOutput(file);
    for (int i = 0; i < 2000; i++)
    {
        os.WriteByte((byte) i);
    }
    os.Close();

    IndexInput in_Renamed = fsdir.OpenInput(file);

    // This read primes the buffer in IndexInput
    byte b = in_Renamed.ReadByte();

    // Close the file
    in_Renamed.Close();

    // ERROR: this call should fail, but succeeds because the buffer is still filled
    b = in_Renamed.ReadByte();

    // ERROR: this call should fail, but succeeds for some reason as well
    in_Renamed.Seek(1099);

    try
    {
        // OK: this call correctly fails. We are now past the 1024 internal
        // buffer, so an actual IO is attempted, which fails
        b = in_Renamed.ReadByte();
        Assert.Fail("expected readByte() to throw exception");
    }
    catch (System.Exception)
    {
        // expected exception
    }
}
private void Initialize(SegmentInfo si)
{
    segment = si.name;

    // Use compound file directory for some files, if it exists
    Directory cfsDir = Directory();
    if (Directory().FileExists(segment + ".cfs"))
    {
        cfsReader = new CompoundFileReader(Directory(), segment + ".cfs");
        cfsDir = cfsReader;
    }

    // No compound file exists - use the multi-file format
    fieldInfos = new FieldInfos(cfsDir, segment + ".fnm");
    fieldsReader = new FieldsReader(cfsDir, segment, fieldInfos);

    tis = new TermInfosReader(cfsDir, segment, fieldInfos);

    // NOTE: the bitvector is stored using the regular directory, not cfs
    if (HasDeletions(si))
    {
        deletedDocs = new BitVector(Directory(), segment + ".del");
    }

    // make sure that all index files have been read or are kept open
    // so that if an index update removes them we'll still have them
    freqStream = cfsDir.OpenInput(segment + ".frq");
    proxStream = cfsDir.OpenInput(segment + ".prx");
    OpenNorms(cfsDir);

    if (fieldInfos.HasVectors())
    {
        // open term vector files only as needed
        termVectorsReaderOrig = new TermVectorsReader(cfsDir, segment, fieldInfos);
    }
}
// It is not always necessary to move the prox pointer
// to a new document after the freq pointer has been moved.
// Consider for example a phrase query with two terms:
// the freq pointer for term 1 has to move to document x
// to answer the question of whether the term occurs in that document.
// But the positions only have to be read if term 2 also matches
// document x, to figure out if term 1 and term 2 appear next
// to each other in document x and thus satisfy the query.
// So we move the prox pointer lazily to the document,
// as soon as positions are requested.
private void LazySkip()
{
    if (proxStream == null)
    {
        // clone lazily
        proxStream = (IndexInput) parent.core.proxStream.Clone();
    }

    // we might have to skip the current payload if it was not read yet
    SkipPayload();

    if (lazySkipPointer != -1)
    {
        proxStream.Seek(lazySkipPointer);
        lazySkipPointer = -1;
    }

    if (lazySkipProxCount != 0)
    {
        SkipPositions(lazySkipProxCount);
        lazySkipProxCount = 0;
    }
}
public virtual void TestReadPastEOF()
{
    SetUp_2();
    CompoundFileReader cr = new CompoundFileReader(dir, "f.comp");
    IndexInput is_Renamed = cr.OpenInput("f2");
    is_Renamed.Seek(is_Renamed.Length() - 10);
    byte[] b = new byte[100];
    is_Renamed.ReadBytes(b, 0, 10);

    try
    {
        byte test = is_Renamed.ReadByte();
        Assert.Fail("Single byte read past end of file");
    }
    catch (System.IO.IOException)
    {
        /* success */
        //System.out.println("SUCCESS: single byte read past end of file: " + e);
    }

    is_Renamed.Seek(is_Renamed.Length() - 10);
    try
    {
        is_Renamed.ReadBytes(b, 0, 50);
        Assert.Fail("Block read past end of file");
    }
    catch (System.IO.IOException)
    {
        /* success */
        //System.out.println("SUCCESS: block read past end of file: " + e);
    }

    is_Renamed.Close();
    cr.Close();
}
public override System.Object DoBody(System.String segmentFileName)
{
    IndexInput input = directory.OpenInput(segmentFileName);

    int format = 0;
    long version = 0;
    try
    {
        format = input.ReadInt();
        if (format < 0)
        {
            if (format < Lucene.Net.Index.SegmentInfos.FORMAT_SINGLE_NORM_FILE)
            {
                throw new System.IO.IOException("Unknown format version: " + format);
            }
            version = input.ReadLong(); // read version
        }
    }
    finally
    {
        input.Close();
    }

    if (format < 0)
    {
        return (long) version;
    }

    // We cannot be sure about the format of the file.
    // Therefore we have to read the whole file and cannot simply seek to the version entry.
    SegmentInfos sis = new SegmentInfos();
    sis.Read(directory, segmentFileName);
    return (long) sis.GetVersion();
}
/// <summary>Current version number from segments file.</summary>
public static long ReadCurrentVersion(Directory directory)
{
    IndexInput input = directory.OpenInput(IndexFileNames.SEGMENTS);

    int format = 0;
    long version = 0;
    try
    {
        format = input.ReadInt();
        if (format < 0)
        {
            if (format < FORMAT)
            {
                throw new System.IO.IOException("Unknown format version: " + format);
            }
            version = input.ReadLong(); // read version
        }
    }
    finally
    {
        input.Close();
    }

    if (format < 0)
    {
        return version;
    }

    // We cannot be sure about the format of the file.
    // Therefore we have to read the whole file and cannot simply seek to the version entry.
    SegmentInfos sis = new SegmentInfos();
    sis.Read(directory);
    return sis.GetVersion();
}
private System.Collections.ArrayList ReadDeleteableFiles()
{
    System.Collections.ArrayList result = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10));

    if (!directory.FileExists(IndexFileNames.DELETABLE))
    {
        return result;
    }

    IndexInput input = directory.OpenInput(IndexFileNames.DELETABLE);
    try
    {
        for (int i = input.ReadInt(); i > 0; i--)
        {
            // read file names
            result.Add(input.ReadString());
        }
    }
    finally
    {
        input.Close();
    }

    return result;
}
private void Initialize(SegmentInfo si)
{
    segment = si.name;
    this.si = si;

    bool success = false;

    try
    {
        // Use compound file directory for some files, if it exists
        Directory cfsDir = Directory();
        if (si.GetUseCompoundFile())
        {
            cfsReader = new CompoundFileReader(Directory(), segment + ".cfs");
            cfsDir = cfsReader;
        }

        // No compound file exists - use the multi-file format
        fieldInfos = new FieldInfos(cfsDir, segment + ".fnm");
        fieldsReader = new FieldsReader(cfsDir, segment, fieldInfos);

        // Verify two sources of "maxDoc" agree:
        if (fieldsReader.Size() != si.docCount)
        {
            throw new System.SystemException("doc counts differ for segment " + si.name + ": fieldsReader shows " + fieldsReader.Size() + " but segmentInfo shows " + si.docCount);
        }

        tis = new TermInfosReader(cfsDir, segment, fieldInfos);

        // NOTE: the bitvector is stored using the regular directory, not cfs
        if (HasDeletions(si))
        {
            deletedDocs = new BitVector(Directory(), si.GetDelFileName());

            // Verify # deletes does not exceed maxDoc for this segment:
            if (deletedDocs.Count() > MaxDoc())
            {
                throw new System.SystemException("number of deletes (" + deletedDocs.Count() + ") exceeds max doc (" + MaxDoc() + ") for segment " + si.name);
            }
        }

        // make sure that all index files have been read or are kept open
        // so that if an index update removes them we'll still have them
        freqStream = cfsDir.OpenInput(segment + ".frq");
        proxStream = cfsDir.OpenInput(segment + ".prx");
        OpenNorms(cfsDir);

        if (fieldInfos.HasVectors())
        {
            // open term vector files only as needed
            termVectorsReaderOrig = new TermVectorsReader(cfsDir, segment, fieldInfos);
        }

        success = true;
    }
    finally
    {
        // With lock-less commits, it's entirely possible (and
        // fine) to hit a FileNotFound exception above. In
        // this case, we want to explicitly close any subset
        // of things that were opened so that we don't have to
        // wait for a GC to do so.
        if (!success)
        {
            DoClose();
        }
    }
}
internal CSIndexInput(IndexInput base_Renamed, long fileOffset, long length)
{
    this.base_Renamed = base_Renamed;
    this.fileOffset = fileOffset;
    this.length = length;
}
internal CountingStream(TestMultiLevelSkipList enclosingInstance, IndexInput input)
{
    InitBlock(enclosingInstance);
    this.input = input;
}
internal SegmentTermPositions(SegmentReader p)
    : base(p)
{
    this.proxStream = (IndexInput) parent.proxStream.Clone();
}
/// <summary>Read as a bit set.</summary>
private void ReadBits(IndexInput input)
{
    count = input.ReadInt();          // read count
    bits = new byte[(size >> 3) + 1]; // allocate bits
    input.ReadBytes(bits, 0, bits.Length);
}
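The ReadDgaps branch referenced by the BitVector constructor is not part of this section; a sketch consistent with the d-gap layout implied there (size stored as -1, then the real size), assuming BYTE_COUNTS is a per-byte popcount table:

// Sketch only: read sparse set bits stored as (gap, byte) pairs.
private void ReadDgaps(IndexInput input)
{
    size = input.ReadInt();           // (re)read the real size after the -1 marker
    count = input.ReadInt();          // read count
    bits = new byte[(size >> 3) + 1]; // allocate bits
    int last = 0;
    int n = count;
    while (n > 0)
    {
        last += input.ReadVInt();            // gap to the next non-zero byte
        bits[last] = input.ReadByte();       // the byte itself
        n -= BYTE_COUNTS[bits[last] & 0xFF]; // assumed popcount lookup
    }
}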
/// <summary>Construct a new SegmentInfo instance by reading a
/// previously saved SegmentInfo from input.
/// </summary>
/// <param name="dir">directory to load from</param>
/// <param name="format">format of the segments info file</param>
/// <param name="input">input handle to read segment info from</param>
internal SegmentInfo(Directory dir, int format, IndexInput input)
{
    this.dir = dir;
    name = input.ReadString();
    docCount = input.ReadInt();
    if (format <= SegmentInfos.FORMAT_LOCKLESS)
    {
        delGen = input.ReadLong();
        if (format <= SegmentInfos.FORMAT_SHARED_DOC_STORE)
        {
            docStoreOffset = input.ReadInt();
            if (docStoreOffset != -1)
            {
                docStoreSegment = input.ReadString();
                docStoreIsCompoundFile = (1 == input.ReadByte());
            }
            else
            {
                docStoreSegment = name;
                docStoreIsCompoundFile = false;
            }
        }
        else
        {
            docStoreOffset = -1;
            docStoreSegment = name;
            docStoreIsCompoundFile = false;
        }
        if (format <= SegmentInfos.FORMAT_SINGLE_NORM_FILE)
        {
            hasSingleNormFile = (1 == input.ReadByte());
        }
        else
        {
            hasSingleNormFile = false;
        }
        int numNormGen = input.ReadInt();
        if (numNormGen == NO)
        {
            normGen = null;
        }
        else
        {
            normGen = new long[numNormGen];
            for (int j = 0; j < numNormGen; j++)
            {
                normGen[j] = input.ReadLong();
            }
        }
        isCompoundFile = (sbyte) input.ReadByte();
        preLockless = (isCompoundFile == CHECK_DIR);
        if (format <= SegmentInfos.FORMAT_DEL_COUNT)
        {
            delCount = input.ReadInt();
            System.Diagnostics.Debug.Assert(delCount <= docCount);
        }
        else
        {
            delCount = -1;
        }
        if (format <= SegmentInfos.FORMAT_HAS_PROX)
        {
            hasProx = input.ReadByte() == 1;
        }
        else
        {
            hasProx = true;
        }
        if (format <= SegmentInfos.FORMAT_DIAGNOSTICS)
        {
            diagnostics = input.ReadStringStringMap();
        }
        else
        {
            diagnostics = new Dictionary<string, string>();
        }
    }
    else
    {
        delGen = CHECK_DIR;
        normGen = null;
        isCompoundFile = (sbyte) CHECK_DIR;
        preLockless = true;
        hasSingleNormFile = false;
        docStoreOffset = -1;
        docStoreIsCompoundFile = false;
        docStoreSegment = null;
        delCount = -1;
        hasProx = true;
        diagnostics = new Dictionary<string, string>();
    }
}
public System.Object run()
{
    System.String segmentFileName = null;
    long lastGen = -1;
    long gen = 0;
    int genLookaheadCount = 0;
    System.IO.IOException exc = null;
    bool retry = false;

    int method = 0;

    // Loop until we succeed in calling doBody() without
    // hitting an IOException. An IOException most likely
    // means a commit was in process and has finished, in
    // the time it took us to load the now-old infos files
    // (and segments files). It's also possible it's a
    // true error (corrupt index). To distinguish these,
    // on each retry we must see "forward progress" on
    // which generation we are trying to load. If we
    // don't, then the original error is real and we throw it.

    // We have three methods for determining the current
    // generation. We try each in sequence.

    while (true)
    {
        // Method 1: list the directory and use the highest
        // segments_N file. This method works well as long
        // as there is no stale caching on the directory contents:
        System.String[] files = null;
        if (0 == method)
        {
            if (directory != null)
            {
                files = directory.List();
            }
            else
            {
                files = System.IO.Directory.GetFileSystemEntries(fileDirectory.FullName);
                for (int i = 0; i < files.Length; i++)
                {
                    files[i] = System.IO.Path.GetFileName(files[i]);
                }
            }

            gen = Lucene.Net.Index.SegmentInfos.GetCurrentSegmentGeneration(files);

            if (gen == -1)
            {
                System.String s = "";
                for (int i = 0; i < files.Length; i++)
                {
                    s += (" " + files[i]);
                }
                throw new System.IO.FileNotFoundException("no segments* file found: files:" + s);
            }
        }

        // Method 2 (fallback if Method 1 isn't reliable):
        // if the directory listing seems to be stale, then
        // try loading the "segments.gen" file.
        if (1 == method || (0 == method && lastGen == gen && retry))
        {
            method = 1;

            for (int i = 0; i < Lucene.Net.Index.SegmentInfos.defaultGenFileRetryCount; i++)
            {
                IndexInput genInput = null;
                try
                {
                    genInput = directory.OpenInput(IndexFileNames.SEGMENTS_GEN);
                }
                catch (System.IO.IOException e)
                {
                    Lucene.Net.Index.SegmentInfos.Message("segments.gen open: IOException " + e);
                }

                if (genInput != null)
                {
                    try
                    {
                        int version = genInput.ReadInt();
                        if (version == Lucene.Net.Index.SegmentInfos.FORMAT_LOCKLESS)
                        {
                            long gen0 = genInput.ReadLong();
                            long gen1 = genInput.ReadLong();
                            Lucene.Net.Index.SegmentInfos.Message("fallback check: " + gen0 + "; " + gen1);
                            if (gen0 == gen1)
                            {
                                // The file is consistent.
                                if (gen0 > gen)
                                {
                                    Lucene.Net.Index.SegmentInfos.Message("fallback to '" + IndexFileNames.SEGMENTS_GEN + "' check: now try generation " + gen0 + " > " + gen);
                                    gen = gen0;
                                }
                                break;
                            }
                        }
                    }
                    catch (System.IO.IOException)
                    {
                        // will retry
                    }
                    finally
                    {
                        genInput.Close();
                    }
                }

                try
                {
                    System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * Lucene.Net.Index.SegmentInfos.defaultGenFileRetryPauseMsec));
                }
                catch (System.Threading.ThreadInterruptedException)
                {
                    // will retry
                }
            }
        }

        // Method 3 (fallback if Methods 1 & 2 are not
        // reliable): since both the directory cache and the
        // file contents cache seem to be stale, just advance
        // the generation.
        if (2 == method || (1 == method && lastGen == gen && retry))
        {
            method = 2;
            if (genLookaheadCount < Lucene.Net.Index.SegmentInfos.defaultGenLookaheadCount)
            {
                gen++;
                genLookaheadCount++;
                Lucene.Net.Index.SegmentInfos.Message("look ahead increment gen to " + gen);
            }
        }

        if (lastGen == gen)
        {
            // This means we're about to try the same
            // segments_N we last tried. This is allowed,
            // exactly once, because the writer could have been
            // in the process of writing segments_N last time.
            if (retry)
            {
                // OK, we've tried the same segments_N file
                // twice in a row, so this must be a real
                // error. We throw the original exception we got.
                throw exc;
            }
            else
            {
                retry = true;
            }
        }
        else
        {
            // Segment file has advanced since our last loop, so reset retry:
            retry = false;
        }

        lastGen = gen;

        segmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen);

        try
        {
            System.Object v = DoBody(segmentFileName);
            if (exc != null)
            {
                Lucene.Net.Index.SegmentInfos.Message("success on " + segmentFileName);
            }
            return v;
        }
        catch (System.IO.IOException err)
        {
            // Save the original root cause:
            if (exc == null)
            {
                exc = err;
            }

            Lucene.Net.Index.SegmentInfos.Message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: retry=" + retry + "; gen = " + gen);

            if (!retry && gen > 1)
            {
                // This is our first time trying this segments
                // file (because retry is false), and, there is
                // possibly a segments_(N-1) (because gen > 1).
                // So, check if the segments_(N-1) exists and try it if so:
                System.String prevSegmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen - 1);

                if (directory.FileExists(prevSegmentFileName))
                {
                    Lucene.Net.Index.SegmentInfos.Message("fallback to prior segment file '" + prevSegmentFileName + "'");
                    try
                    {
                        System.Object v = DoBody(prevSegmentFileName);
                        if (exc != null)
                        {
                            Lucene.Net.Index.SegmentInfos.Message("success on fallback " + prevSegmentFileName);
                        }
                        return v;
                    }
                    catch (System.IO.IOException err2)
                    {
                        Lucene.Net.Index.SegmentInfos.Message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry");
                    }
                }
            }
        }
    }
}
internal static bool IsCSIndexInput(IndexInput is_Renamed)
{
    return is_Renamed is CompoundFileReader.CSIndexInput;
}
internal CSIndexInput(IndexInput base_Renamed, long fileOffset, long length, int readBufferSize)
    : base(readBufferSize)
{
    this.base_Renamed = (IndexInput) base_Renamed.Clone();
    this.fileOffset = fileOffset;
    this.length = length;
}
internal CSIndexInput(IndexInput base_Renamed, long fileOffset, long length)
    : this(base_Renamed, fileOffset, length, BufferedIndexInput.BUFFER_SIZE)
{
}
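Given the CSIndexInput constructors above, the read path presumably only has to translate logical positions by fileOffset and bound them by length; a sketch under that assumption (the exact override signatures in BufferedIndexInput may differ):

// Sketch only: serve reads for one sub-file out of the shared compound-file stream.
public override void ReadInternal(byte[] b, int offset, int len)
{
    long start = GetFilePointer(); // logical position within this sub-file
    if (start + len > length)
        throw new System.IO.IOException("read past EOF");
    base_Renamed.Seek(fileOffset + start); // translate to the physical position
    base_Renamed.ReadBytes(b, offset, len, false);
}

public override void SeekInternal(long pos)
{
    // nothing to do: ReadInternal seeks the underlying stream on every call
}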
internal SkipBuffer(IndexInput input, int length)
{
    data = new byte[length];
    pointer = input.GetFilePointer();
    input.ReadBytes(data, 0, length);
}
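A sketch (assumed, as the natural counterpart of this constructor) of how the buffered skip data would be served back; pos is assumed to be an int cursor into data:

public override byte ReadByte()
{
    return data[pos++];
}

public override void ReadBytes(byte[] b, int offset, int len)
{
    System.Array.Copy(data, pos, b, offset, len);
    pos += len;
}

public override long GetFilePointer()
{
    // report positions relative to the original stream
    return pointer + pos;
}

public override void Seek(long pos)
{
    this.pos = (int) (pos - pointer);
}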
/// <summary>Subclasses must implement the actual skip data encoding in this method.</summary>
/// <param name="level">the level the skip data shall be read from</param>
/// <param name="skipStream">the skip stream to read from</param>
protected internal abstract int ReadSkipData(int level, IndexInput skipStream);
internal FaultyIndexInput(IndexInput delegate_Renamed)
{
    this.delegate_Renamed = delegate_Renamed;
}
/// <summary>Read a particular segmentFileName. Note that this may
/// throw an IOException if a commit is in process.
/// </summary>
/// <param name="directory">-- directory containing the segments file</param>
/// <param name="segmentFileName">-- segment file to load</param>
public void Read(Directory directory, System.String segmentFileName)
{
    bool success = false;

    IndexInput input = directory.OpenInput(segmentFileName);

    if (segmentFileName.Equals(IndexFileNames.SEGMENTS))
    {
        generation = 0;
    }
    else
    {
#if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
        generation = Lucene.Net.Documents.NumberTools.ToLong(segmentFileName.Substring(1 + IndexFileNames.SEGMENTS.Length));
#else
        generation = System.Convert.ToInt64(segmentFileName.Substring(1 + IndexFileNames.SEGMENTS.Length), 16);
#endif
    }

    lastGeneration = generation;

    try
    {
        int format = input.ReadInt();
        if (format < 0)
        {
            // file contains explicit format info
            // check that it is a format we can understand
            if (format < FORMAT_SINGLE_NORM_FILE)
            {
                throw new System.IO.IOException("Unknown format version: " + format);
            }
            version = input.ReadLong(); // read version
            counter = input.ReadInt();  // read counter
        }
        else
        {
            // file is in old format without explicit format info
            counter = format;
        }

        for (int i = input.ReadInt(); i > 0; i--)
        {
            // read segmentInfos
            Add(new SegmentInfo(directory, format, input));
        }

        if (format >= 0)
        {
            // in old format the version number may be at the end of the file
            if (input.GetFilePointer() >= input.Length())
            {
                // old file format without version number: synthesize one from the
                // current time in milliseconds since the epoch, matching Java's
                // System.currentTimeMillis() (DateTime.Now.Millisecond alone
                // would only yield a value in 0-999)
                version = (System.DateTime.UtcNow - new System.DateTime(1970, 1, 1)).Ticks / System.TimeSpan.TicksPerMillisecond;
            }
            else
            {
                version = input.ReadLong(); // read version
            }
        }

        success = true;
    }
    finally
    {
        input.Close();
        if (!success)
        {
            // Clear any segment infos we had loaded so we
            // have a clean slate on retry:
            Clear();
        }
    }
}
/// <summary>Optimized implementation.</summary>
public virtual bool SkipTo(int target)
{
    if (df >= skipInterval)
    {
        // optimized case
        if (skipStream == null)
        {
            skipStream = (IndexInput) freqStream.Clone(); // lazily clone
        }

        if (!haveSkipped)
        {
            // lazily seek skip stream
            skipStream.Seek(skipPointer);
            haveSkipped = true;
        }

        // scan skip data
        int lastSkipDoc = skipDoc;
        long lastFreqPointer = freqStream.GetFilePointer();
        long lastProxPointer = -1;
        int numSkipped = -1 - (count % skipInterval);

        while (target > skipDoc)
        {
            lastSkipDoc = skipDoc;
            lastFreqPointer = freqPointer;
            lastProxPointer = proxPointer;

            if (skipDoc != 0 && skipDoc >= doc)
            {
                numSkipped += skipInterval;
            }

            if (skipCount >= numSkips)
            {
                break;
            }

            skipDoc += skipStream.ReadVInt();
            freqPointer += skipStream.ReadVInt();
            proxPointer += skipStream.ReadVInt();

            skipCount++;
        }

        // if we found something to skip, then skip it
        if (lastFreqPointer > freqStream.GetFilePointer())
        {
            freqStream.Seek(lastFreqPointer);
            SkipProx(lastProxPointer);

            doc = lastSkipDoc;
            count += numSkipped;
        }
    }

    // done skipping, now just scan
    do
    {
        if (!Next())
        {
            return false;
        }
    }
    while (target > doc);
    return true;
}
private void Read(IndexInput input, System.String fileName)
{
    int firstInt = input.ReadVInt();

    if (firstInt < 0)
    {
        // This is a real format
        format = firstInt;
    }
    else
    {
        format = FORMAT_PRE;
    }

    if (format != FORMAT_PRE && format != FORMAT_START)
    {
        throw new CorruptIndexException("unrecognized format " + format + " in file \"" + fileName + "\"");
    }

    int size;
    if (format == FORMAT_PRE)
    {
        size = firstInt;
    }
    else
    {
        size = input.ReadVInt(); // read in the size
    }

    for (int i = 0; i < size; i++)
    {
        System.String name = StringHelper.Intern(input.ReadString());
        byte bits = input.ReadByte();
        bool isIndexed = (bits & IS_INDEXED) != 0;
        bool storeTermVector = (bits & STORE_TERMVECTOR) != 0;
        bool storePositionsWithTermVector = (bits & STORE_POSITIONS_WITH_TERMVECTOR) != 0;
        bool storeOffsetWithTermVector = (bits & STORE_OFFSET_WITH_TERMVECTOR) != 0;
        bool omitNorms = (bits & OMIT_NORMS) != 0;
        bool storePayloads = (bits & STORE_PAYLOADS) != 0;
        bool omitTermFreqAndPositions = (bits & OMIT_TERM_FREQ_AND_POSITIONS) != 0;

        AddInternal(name, isIndexed, storeTermVector, storePositionsWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
    }

    if (input.GetFilePointer() != input.Length())
    {
        throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read " + input.GetFilePointer() + " vs size " + input.Length());
    }
}
public virtual void TestRandomAccessClones()
{
    SetUp_2();
    CompoundFileReader cr = new CompoundFileReader(dir, "f.comp");

    // Open two files
    IndexInput e1 = cr.OpenInput("f11");
    IndexInput e2 = cr.OpenInput("f3");

    IndexInput a1 = (IndexInput) e1.Clone();
    IndexInput a2 = (IndexInput) e2.Clone();

    // Seek the first pair
    e1.Seek(100);
    a1.Seek(100);
    Assert.AreEqual(100, e1.GetFilePointer());
    Assert.AreEqual(100, a1.GetFilePointer());
    byte be1 = e1.ReadByte();
    byte ba1 = a1.ReadByte();
    Assert.AreEqual(be1, ba1);

    // Now seek the second pair
    e2.Seek(1027);
    a2.Seek(1027);
    Assert.AreEqual(1027, e2.GetFilePointer());
    Assert.AreEqual(1027, a2.GetFilePointer());
    byte be2 = e2.ReadByte();
    byte ba2 = a2.ReadByte();
    Assert.AreEqual(be2, ba2);

    // Now make sure the first one didn't move
    Assert.AreEqual(101, e1.GetFilePointer());
    Assert.AreEqual(101, a1.GetFilePointer());
    be1 = e1.ReadByte();
    ba1 = a1.ReadByte();
    Assert.AreEqual(be1, ba1);

    // Now move the first one again, past the buffer length
    e1.Seek(1910);
    a1.Seek(1910);
    Assert.AreEqual(1910, e1.GetFilePointer());
    Assert.AreEqual(1910, a1.GetFilePointer());
    be1 = e1.ReadByte();
    ba1 = a1.ReadByte();
    Assert.AreEqual(be1, ba1);

    // Now make sure the second set didn't move
    Assert.AreEqual(1028, e2.GetFilePointer());
    Assert.AreEqual(1028, a2.GetFilePointer());
    be2 = e2.ReadByte();
    ba2 = a2.ReadByte();
    Assert.AreEqual(be2, ba2);

    // Move the second set back, again crossing the buffer size
    e2.Seek(17);
    a2.Seek(17);
    Assert.AreEqual(17, e2.GetFilePointer());
    Assert.AreEqual(17, a2.GetFilePointer());
    be2 = e2.ReadByte();
    ba2 = a2.ReadByte();
    Assert.AreEqual(be2, ba2);

    // Finally, make sure the first set didn't move
    Assert.AreEqual(1911, e1.GetFilePointer());
    Assert.AreEqual(1911, a1.GetFilePointer());
    be1 = e1.ReadByte();
    ba1 = a1.ReadByte();
    Assert.AreEqual(be1, ba1);

    e1.Close();
    e2.Close();
    a1.Close();
    a2.Close();
    cr.Close();
}
protected override void Dispose(bool disposing)
{
    if (isDisposed)
        return;

    if (disposing)
    {
        if (delegate_Renamed != null)
        {
            delegate_Renamed.Close();
        }
    }

    delegate_Renamed = null;
    isDisposed = true;
}
internal FieldsReader(Directory d, System.String segment, FieldInfos fn, int readBufferSize, int docStoreOffset, int size)
{
    bool success = false;
    isOriginal = true;
    try
    {
        fieldInfos = fn;

        cloneableFieldsStream = d.OpenInput(segment + "." + IndexFileNames.FIELDS_EXTENSION, readBufferSize);
        cloneableIndexStream = d.OpenInput(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION, readBufferSize);

        // First version of fdx did not include a format
        // header, but, the first int will always be 0 in that case
        int firstInt = cloneableIndexStream.ReadInt();
        format = firstInt == 0 ? 0 : firstInt;

        if (format > FieldsWriter.FORMAT_CURRENT)
        {
            throw new CorruptIndexException("Incompatible format version: " + format + " expected " + FieldsWriter.FORMAT_CURRENT + " or lower");
        }

        formatSize = format > FieldsWriter.FORMAT ? 4 : 0;

        if (format < FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
        {
            cloneableFieldsStream.SetModifiedUTF8StringsMode();
        }

        fieldsStream = (IndexInput) cloneableFieldsStream.Clone();

        long indexSize = cloneableIndexStream.Length() - formatSize;

        if (docStoreOffset != -1)
        {
            // We read only a slice out of this shared fields file
            this.docStoreOffset = docStoreOffset;
            this.size = size;

            // Verify the file is long enough to hold all of our docs
            System.Diagnostics.Debug.Assert(((int) (indexSize / 8)) >= size + this.docStoreOffset, "indexSize=" + indexSize + " size=" + size + " docStoreOffset=" + docStoreOffset);
        }
        else
        {
            this.docStoreOffset = 0;
            this.size = (int) (indexSize >> 3);
        }

        indexStream = (IndexInput) cloneableIndexStream.Clone();
        numTotalDocs = (int) (indexSize >> 3);
        success = true;
    }
    finally
    {
        // With lock-less commits, it's entirely possible (and
        // fine) to hit a FileNotFound exception above. In
        // this case, we want to explicitly close any subset
        // of things that were opened so that we don't have to
        // wait for a GC to do so.
        if (!success)
        {
            Dispose();
        }
    }
}
internal SegmentTermEnum(IndexInput i, FieldInfos fis, bool isi)
{
    input = i;
    fieldInfos = fis;
    isIndex = isi;
    maxSkipLevels = 1; // use single-level skip lists for formats > -3

    int firstInt = input.ReadInt();
    if (firstInt >= 0)
    {
        // original-format file, without explicit format version number
        format = 0;
        size = firstInt;

        // back-compatible settings
        indexInterval = 128;
        skipInterval = System.Int32.MaxValue; // switch off skipTo optimization
    }
    else
    {
        // we have a format version number
        format = firstInt;

        // check that it is a format we can understand
        if (format < TermInfosWriter.FORMAT_CURRENT)
        {
            throw new CorruptIndexException("Unknown format version: " + format + " expected " + TermInfosWriter.FORMAT_CURRENT + " or higher");
        }

        size = input.ReadLong(); // read the size

        if (format == -1)
        {
            if (!isIndex)
            {
                indexInterval = input.ReadInt();
                formatM1SkipInterval = input.ReadInt();
            }
            // switch off skipTo optimization for file formats prior to 1.4rc2,
            // in order to avoid a bug in the skipTo implementation of these versions
            skipInterval = System.Int32.MaxValue;
        }
        else
        {
            indexInterval = input.ReadInt();
            skipInterval = input.ReadInt();
            if (format <= TermInfosWriter.FORMAT)
            {
                // this new format introduces multi-level skipping
                maxSkipLevels = input.ReadInt();
            }
        }
    }

    if (format > TermInfosWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
    {
        termBuffer.SetPreUTF8Strings();
        scanBuffer.SetPreUTF8Strings();
        prevBuffer.SetPreUTF8Strings();
    }
}