public TermVectorsWriter(Directory directory, System.String segment, FieldInfos fieldInfos, IState state) { // Open files for TermVector storage tvx = directory.CreateOutput(segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION, state); tvx.WriteInt(TermVectorsReader.FORMAT_CURRENT); tvd = directory.CreateOutput(segment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION, state); tvd.WriteInt(TermVectorsReader.FORMAT_CURRENT); tvf = directory.CreateOutput(segment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION, state); tvf.WriteInt(TermVectorsReader.FORMAT_CURRENT); this.fieldInfos = fieldInfos; }
/// <summary>Downloads a blob into the given directory, inflating it first when it was stored compressed.</summary>
public void SyncFile(Lucene.Net.Store.Directory directory, string fileName, bool CompressBlobs)
{
    var blob = _blobContainer.GetBlobClient(_rootFolderName + fileName);
    Trace.WriteLine($"INFO Syncing file {fileName} for {_rootFolderName}");
    // then we will get it fresh into local deflatedName
    // StreamOutput deflatedStream = new StreamOutput(CacheDirectory.CreateOutput(deflatedName));
    if (ShouldCompressFile(fileName, CompressBlobs))
    {
        using (var deflatedStream = new MemoryStream())
        {
            // get the deflated blob
            blob.DownloadTo(deflatedStream);
#if FULLDEBUG
            Trace.WriteLine($"GET {fileName} RETRIEVED {deflatedStream.Length} bytes");
#endif
            // seek back to beginning
            deflatedStream.Seek(0, SeekOrigin.Begin);

            // open output file for uncompressed contents
            using (var fileStream = new StreamOutput(directory.CreateOutput(fileName)))
            using (var decompressor = new DeflateStream(deflatedStream, CompressionMode.Decompress))
            {
                var bytes = new byte[65535];
                var nRead = 0;
                do
                {
                    nRead = decompressor.Read(bytes, 0, 65535);
                    if (nRead > 0)
                    {
                        fileStream.Write(bytes, 0, nRead);
                    }
                } while (nRead == 65535);
            }
        }
    }
    else
    {
        using (var fileStream = new StreamOutput(directory.CreateOutput(fileName)))
        {
            // get the blob
            blob.DownloadTo(fileStream);
            fileStream.Flush();
#if FULLDEBUG
            Trace.WriteLine($"GET {fileName} RETRIEVED {fileStream.Length} bytes");
#endif
        }
    }
}
private void WriteIndexVersion(LuceneDirectory directory) { using (var indexOutput = directory.CreateOutput(IndexVersionFilename)) { indexOutput.WriteString(IndexVersion); indexOutput.Flush(); } }
/// <summary> /// Creates a new, empty file in the directory with the given name. /// Returns a stream writing this file. /// </summary> public override IndexOutput CreateOutput(string name) { //write to both indexes return(new MultiIndexOutput( base.CreateOutput(name), _realDirectory.CreateOutput(name))); }
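MultiIndexOutput itself is not included in these examples. A minimal sketch of such a tee-style output, assuming the Lucene.Net 3.x IndexOutput surface (WriteByte, WriteBytes, Flush, Seek, FilePointer, Length, and the Dispose(bool) pattern); the real class may differ:

// Sketch only: forwards every write to two underlying outputs so the two
// indexes stay byte-for-byte identical. Member signatures assume Lucene.Net 3.x.
internal sealed class MultiIndexOutput : IndexOutput
{
    private readonly IndexOutput _primary;
    private readonly IndexOutput _secondary;

    public MultiIndexOutput(IndexOutput primary, IndexOutput secondary)
    {
        _primary = primary;
        _secondary = secondary;
    }

    public override void WriteByte(byte b)
    {
        _primary.WriteByte(b);
        _secondary.WriteByte(b);
    }

    public override void WriteBytes(byte[] b, int offset, int length)
    {
        _primary.WriteBytes(b, offset, length);
        _secondary.WriteBytes(b, offset, length);
    }

    public override void Flush()
    {
        _primary.Flush();
        _secondary.Flush();
    }

    protected override void Dispose(bool disposing)
    {
        if (!disposing) return;
        try { _primary.Dispose(); }
        finally { _secondary.Dispose(); }
    }

    public override void Seek(long pos)
    {
        _primary.Seek(pos);
        _secondary.Seek(pos);
    }

    // both outputs advance in lockstep, so either one's position is valid
    public override long FilePointer
    {
        get { return _primary.FilePointer; }
    }

    public override long Length
    {
        get { return _primary.Length; }
    }
}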
private static void UnidirectionalSync(AzureDirectory sourceDirectory, Directory destinationDirectory) { var sourceFiles = sourceDirectory.ListAll(); var fileNameFilter = IndexFileNameFilter.Filter; byte[] buffer = new byte[16384]; foreach (string sourceFile in sourceFiles) { // only copy file if it is accepted by Lucene's default filter // and it does not already exist (except for segment map files, we always want those) if (fileNameFilter.Accept(null, sourceFile) && (!destinationDirectory.FileExists(sourceFile) || sourceFile.StartsWith("segment"))) { IndexOutput indexOutput = null; IndexInput indexInput = null; try { indexOutput = destinationDirectory.CreateOutput(sourceFile); indexInput = sourceDirectory.OpenInput(sourceFile); long length = indexInput.Length(); long position = 0; while (position < length) { int bytesToRead = position + 16384L > length ? (int)(length - position) : 16384; indexInput.ReadBytes(buffer, 0, bytesToRead); indexOutput.WriteBytes(buffer, bytesToRead); position += bytesToRead; } } finally { try { indexOutput?.Dispose(); } finally { indexInput?.Dispose(); } } } } // we'll remove old files from both AzureDirectory's cache directory, as well as our destination directory // (only when older than 45 minutes - old files may still have active searches on them so we need a margin) var referenceTimestamp = LuceneTimestampFromDateTime(DateTime.UtcNow.AddMinutes(-45)); // remove old files from AzureDirectory cache directory RemoveOldFiles(sourceDirectory.CacheDirectory, sourceFiles, referenceTimestamp); // remove old files from destination directory RemoveOldFiles(destinationDirectory, sourceFiles, referenceTimestamp); }
/// <summary> /// Creates a file of the specified size with sequential data. The first /// byte is written as the start byte provided. All subsequent bytes are /// computed as start + offset where offset is the number of the byte. /// </summary> private void CreateSequenceFile(Directory dir, string name, sbyte start, int size) { IndexOutput os = dir.CreateOutput(name, NewIOContext(Random())); for (int i = 0; i < size; i++) { os.WriteByte((byte)start); start++; } os.Dispose(); }
/// <summary>
/// Creates a file of the specified size with random data. </summary>
private void CreateRandomFile(Directory dir, string name, int size)
{
    IndexOutput os = dir.CreateOutput(name, NewIOContext(Random()));
    // seed once so the sequence is deterministic yet varies from byte to byte
    var random = new Random(1);
    for (int i = 0; i < size; i++)
    {
        var b = unchecked((sbyte)(random.NextDouble() * 256));
        os.WriteByte((byte)b);
    }
    os.Dispose();
}
private static void UnidirectionalSync(AzureDirectory sourceDirectory, Directory destinationDirectory) { var sourceFiles = sourceDirectory.ListAll(); var fileNameFilter = IndexFileNameFilter.Filter; byte[] buffer = new byte[16384]; foreach (string sourceFile in sourceFiles) { // only copy file if it is accepted by Lucene's default filter // and it does not already exist (except for segment map files, we always want those) if (fileNameFilter.Accept(null, sourceFile) && (!destinationDirectory.FileExists(sourceFile) || sourceFile.StartsWith("segment"))) { IndexOutput indexOutput = null; IndexInput indexInput = null; try { indexOutput = destinationDirectory.CreateOutput(sourceFile); indexInput = sourceDirectory.OpenInput(sourceFile); long length = indexInput.Length(); long position = 0; while (position < length) { int bytesToRead = position + 16384L > length ? (int)(length - position) : 16384; indexInput.ReadBytes(buffer, 0, bytesToRead); indexOutput.WriteBytes(buffer, bytesToRead); position += bytesToRead; } } finally { try { indexOutput?.Close(); } finally { indexInput?.Close(); } } } } // we'll remove old files from both AzureDirectory's cache directory, as well as our destination directory // (only when older than 45 minutes - old files may still have active searches on them so we need a margin) var referenceTimestamp = LuceneTimestampFromDateTime(DateTime.UtcNow.AddMinutes(-45)); // remove old files from AzureDirectory cache directory RemoveOldFiles(sourceDirectory.CacheDirectory, sourceFiles, referenceTimestamp); // remove old files from destination directory RemoveOldFiles(destinationDirectory, sourceFiles, referenceTimestamp); }
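Neither sync variant above includes RemoveOldFiles. A plausible sketch, purely an assumption about its behavior: delete any file that is no longer part of the source listing and whose modification timestamp falls behind the 45-minute reference.

// Assumed helper, not the original implementation. FileModified is taken to
// return timestamps comparable to LuceneTimestampFromDateTime in the caller.
private static void RemoveOldFiles(Directory directory, string[] currentFiles, long referenceTimestamp)
{
    var keep = new HashSet<string>(currentFiles);
    foreach (string file in directory.ListAll())
    {
        // never touch files still present in the source listing
        if (keep.Contains(file))
            continue;
        if (directory.FileModified(file) < referenceTimestamp)
        {
            directory.DeleteFile(file);
        }
    }
}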
public void Write(Directory d, System.String name, IState state) { IndexOutput output = d.CreateOutput(name, state); try { Write(output); } finally { output.Close(); } }
private void Initialize(Directory directory, System.String segment, FieldInfos fis, int interval, bool isi, IState state) { indexInterval = interval; fieldInfos = fis; isIndex = isi; output = directory.CreateOutput(segment + (isIndex?".tii":".tis"), state); output.WriteInt(FORMAT_CURRENT); // write format output.WriteLong(0); // leave space for size output.WriteInt(indexInterval); // write indexInterval output.WriteInt(skipInterval); // write skipInterval output.WriteInt(maxSkipLevels); // write maxSkipLevels System.Diagnostics.Debug.Assert(InitUTF16Results()); }
public MockSingleIntIndexOutput(Directory dir, string fileName, IOContext context) { @out = dir.CreateOutput(fileName, context); bool success = false; try { CodecUtil.WriteHeader(@out, CODEC, VERSION_CURRENT); success = true; } finally { if (!success) { IOUtils.CloseWhileHandlingException(@out); } } }
public SecureStoreIndexOutput(Directory cache, string cachePath, Func<DataWithMetadata, Task> saveTask)
{
    _cache = cache;
    _name = cachePath;
    _saveTask = saveTask;

    _fileMutex = BlobMutexManager.GrabMutex(_name);
    _fileMutex.WaitOne();
    try
    {
        // create the local cache file we will operate against...
        _indexOutput = _cache.CreateOutput(_name);
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}
public AzureIndexOutput(AzureDirectory azureDirectory, ICloudBlob blob)
{
    _azureDirectory = azureDirectory;
    _blobContainer = _azureDirectory.BlobContainer;
    _blob = blob;
    _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];

    // the mutex key depends on _name, so compute the name before grabbing it
    _fileMutex = BlobMutexManager.GrabMutex(_name);
    _fileMutex.WaitOne();
    try
    {
        // create the local cache file we will operate against...
        _indexOutput = CacheDirectory.CreateOutput(_name);
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}
/// <summary>Writes this vector to the file <c>name</c> in Directory /// <c>d</c>, in a format that can be read by the constructor /// <see cref="BitVector(Directory, String)" />. /// </summary> public void Write(Directory d, System.String name, IState state) { IndexOutput output = d.CreateOutput(name, state); try { if (IsSparse()) { WriteDgaps(output); // sparse bit-set more efficiently saved as d-gaps. } else { WriteBits(output); } } finally { output.Close(); } }
private void Initialize(Directory directory, string segment, FieldInfos fis, int interval, bool isi) { indexInterval = interval; fieldInfos = fis; isIndex = isi; output = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, "", (isIndex ? Lucene3xPostingsFormat.TERMS_INDEX_EXTENSION : Lucene3xPostingsFormat.TERMS_EXTENSION)), IOContext.DEFAULT); bool success = false; try { output.WriteInt32(FORMAT_CURRENT); // write format output.WriteInt64(0); // leave space for size output.WriteInt32(indexInterval); // write indexInterval output.WriteInt32(skipInterval); // write skipInterval output.WriteInt32(maxSkipLevels); // write maxSkipLevels if (Debugging.AssertsEnabled) { Debugging.Assert(InitUTF16Results()); } success = true; } finally { if (!success) { IOUtils.DisposeWhileHandlingException(output); try { directory.DeleteFile(IndexFileNames.SegmentFileName(segment, "", (isIndex ? Lucene3xPostingsFormat.TERMS_INDEX_EXTENSION : Lucene3xPostingsFormat.TERMS_EXTENSION))); } #pragma warning disable 168 catch (IOException ignored) #pragma warning restore 168 { } } } }
private IndexOutput GetOutput() { lock (this) { if (DataOut == null) { bool success = false; try { DataOut = Directory.CreateOutput(DataFileName, IOContext.DEFAULT); CodecUtil.WriteHeader(DataOut, DATA_CODEC, VERSION_CURRENT); success = true; } finally { if (!success) { IOUtils.CloseWhileHandlingException((IDisposable)DataOut); } } } return(DataOut); } }
/// <summary> Copy contents of a directory src to a directory dest.
/// If a file in src already exists in dest then the
/// one in dest will be blindly overwritten.
///
/// <p/><b>NOTE:</b> the source directory cannot change
/// while this method is running. Otherwise the results
/// are undefined and you could easily hit a
/// FileNotFoundException.
///
/// <p/><b>NOTE:</b> this method only copies files that look
/// like index files (i.e., have extensions matching the
/// known extensions of index files).
/// </summary>
/// <param name="src">source directory
/// </param>
/// <param name="dest">destination directory
/// </param>
/// <param name="closeDirSrc">if <c>true</c>, call the <see cref="Directory.Close()"/> method on the source directory
/// </param>
/// <throws> IOException </throws>
public static void Copy(Directory src, Directory dest, bool closeDirSrc)
{
    System.String[] files = src.ListAll();
    IndexFileNameFilter filter = IndexFileNameFilter.GetFilter();
    byte[] buf = new byte[BufferedIndexOutput.BUFFER_SIZE];
    for (int i = 0; i < files.Length; i++)
    {
        if (!filter.Accept(null, files[i]))
            continue;

        IndexOutput os = null;
        IndexInput is_Renamed = null;
        try
        {
            // create file in dest directory
            os = dest.CreateOutput(files[i]);
            // read current file
            is_Renamed = src.OpenInput(files[i]);
            // and copy to dest directory
            long len = is_Renamed.Length();
            long readCount = 0;
            while (readCount < len)
            {
                int toRead = readCount + BufferedIndexOutput.BUFFER_SIZE > len ? (int)(len - readCount) : BufferedIndexOutput.BUFFER_SIZE;
                is_Renamed.ReadBytes(buf, 0, toRead);
                os.WriteBytes(buf, toRead);
                readCount += toRead;
            }
        }
        finally
        {
            // graceful cleanup
            try
            {
                if (os != null)
                    os.Close();
            }
            finally
            {
                if (is_Renamed != null)
                    is_Renamed.Close();
            }
        }
    }
    if (closeDirSrc)
        src.Close();
}
public override IndexOutput CreateOutput(string name, IOContext context) { return(Dir.CreateOutput(name, context)); }
private int NumBufferedDocs; // docBase + numBufferedDocs == current doc ID /// <summary> /// Sole constructor. </summary> public CompressingStoredFieldsWriter(Directory directory, SegmentInfo si, string segmentSuffix, IOContext context, string formatName, CompressionMode compressionMode, int chunkSize) { Debug.Assert(directory != null); this.Directory = directory; this.Segment = si.Name; this.SegmentSuffix = segmentSuffix; this.CompressionMode = compressionMode; this.Compressor = compressionMode.NewCompressor(); this.ChunkSize = chunkSize; this.DocBase = 0; this.BufferedDocs = new GrowableByteArrayDataOutput(chunkSize); this.NumStoredFields = new int[16]; this.EndOffsets = new int[16]; this.NumBufferedDocs = 0; bool success = false; IndexOutput indexStream = directory.CreateOutput(IndexFileNames.SegmentFileName(Segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION), context); try { FieldsStream = directory.CreateOutput(IndexFileNames.SegmentFileName(Segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_EXTENSION), context); string codecNameIdx = formatName + CODEC_SFX_IDX; string codecNameDat = formatName + CODEC_SFX_DAT; CodecUtil.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT); CodecUtil.WriteHeader(FieldsStream, codecNameDat, VERSION_CURRENT); Debug.Assert(CodecUtil.HeaderLength(codecNameDat) == FieldsStream.FilePointer); Debug.Assert(CodecUtil.HeaderLength(codecNameIdx) == indexStream.FilePointer); IndexWriter = new CompressingStoredFieldsIndexWriter(indexStream); indexStream = null; FieldsStream.WriteVInt(chunkSize); FieldsStream.WriteVInt(PackedInts.VERSION_CURRENT); success = true; } finally { if (!success) { IOUtils.CloseWhileHandlingException(indexStream); Abort(); } } }
/// <summary>Merge files with the extensions added up to now.
/// All files with these extensions are combined sequentially into the
/// compound stream. After successful merge, the source files
/// are deleted.
/// </summary>
/// <exception cref="SystemException">if this method has already been called,
/// or if no file has been added to this object</exception>
public void Dispose()
{
    // Extract into protected method if class ever becomes unsealed
    // TODO: Dispose shouldn't throw exceptions!
    if (merged)
    {
        throw new SystemException("Merge already performed");
    }
    if (entries.Count == 0)
    {
        throw new SystemException("No entries to merge have been defined");
    }
    merged = true;

    // open the compound stream
    IndexOutput os = null;
    try
    {
        var state = StateHolder.Current.Value;
        os = directory.CreateOutput(fileName, state);

        // Write the number of entries
        os.WriteVInt(entries.Count);

        // Write the directory with all offsets at 0.
        // Remember the positions of directory entries so that we can
        // adjust the offsets later
        long totalSize = 0;
        foreach (FileEntry fe in entries)
        {
            fe.directoryOffset = os.FilePointer;
            os.WriteLong(0); // for now
            os.WriteString(fe.file);
            totalSize += directory.FileLength(fe.file, state);
        }

        // Pre-allocate size of file as optimization --
        // this can potentially help IO performance as
        // we write the file and also later during
        // searching. It also uncovers a disk-full
        // situation earlier and hopefully without
        // actually filling disk to 100%:
        long finalLength = totalSize + os.FilePointer;
        os.SetLength(finalLength);

        // Open the files and copy their data into the stream.
        // Remember the locations of each file's data section.
        var buffer = new byte[16384];
        foreach (FileEntry fe in entries)
        {
            fe.dataOffset = os.FilePointer;
            CopyFile(fe, os, buffer, state);
        }

        // Write the data offsets into the directory of the compound stream
        foreach (FileEntry fe in entries)
        {
            os.Seek(fe.directoryOffset);
            os.WriteLong(fe.dataOffset);
        }

        System.Diagnostics.Debug.Assert(finalLength == os.Length);

        // Close the output stream. Set the os to null before trying to
        // close so that if an exception occurs during the close, the
        // finally clause below will not attempt to close the stream
        // the second time.
        IndexOutput tmp = os;
        os = null;
        tmp.Close();
    }
    finally
    {
        if (os != null)
        {
            try
            {
                os.Close();
            }
            catch (System.IO.IOException)
            {
            }
        }
    }
}
/// <summary>
/// Copies the file <i>src</i> to <seealso cref="Directory"/> <i>to</i> under the new
/// file name <i>dest</i>.
/// <p>
/// If you want to copy the entire source directory to the destination one, you
/// can do so like this:
///
/// <pre class="prettyprint">
/// Directory to; // the directory to copy to
/// for (String file : dir.listAll()) {
///   dir.copy(to, file, file, IOContext.DEFAULT); // the destination name can be either file, or a new name
/// }
/// </pre>
/// <p>
/// <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
/// overwrite it if it does.
/// </summary>
public virtual void Copy(Directory to, string src, string dest, IOContext context)
{
    IndexOutput os = null;
    IndexInput @is = null;
    System.IO.IOException priorException = null;
    try
    {
        os = to.CreateOutput(dest, context);
        @is = OpenInput(src, context);
        os.CopyBytes(@is, @is.Length());
    }
    catch (System.IO.IOException ioe)
    {
        priorException = ioe;
    }
    finally
    {
        bool success = false;
        try
        {
            IOUtils.CloseWhileHandlingException(priorException, os, @is);
            success = true;
        }
        finally
        {
            if (!success)
            {
                try
                {
                    to.DeleteFile(dest);
                }
                catch (Exception)
                {
                }
            }
        }
    }
}
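A usage sketch of the whole-directory copy described in the remarks above (the paths are illustrative):

Directory source = FSDirectory.Open(new DirectoryInfo("old-index"));
Directory destination = FSDirectory.Open(new DirectoryInfo("new-index"));
foreach (string file in source.ListAll())
{
    // the destination name may also differ from the source name
    source.Copy(destination, file, file, IOContext.DEFAULT);
}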
internal void FinishCommit(Directory dir, IState state) { if (pendingSegnOutput == null) { throw new System.SystemException("prepareCommit was not called"); } bool success = false; try { pendingSegnOutput.FinishCommit(); pendingSegnOutput.Close(); pendingSegnOutput = null; success = true; } finally { if (!success) { RollbackCommit(dir, state); } } // NOTE: if we crash here, we have left a segments_N // file in the directory in a possibly corrupt state (if // some bytes made it to stable storage and others // didn't). But, the segments_N file includes checksum // at the end, which should catch this case. So when a // reader tries to read it, it will throw a // CorruptIndexException, which should cause the retry // logic in SegmentInfos to kick in and load the last // good (previous) segments_N-1 file. System.String fileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation); success = false; try { dir.Sync(fileName); success = true; } finally { if (!success) { try { dir.DeleteFile(fileName, state); } catch (System.Exception) { // Suppress so we keep throwing the original exception } } } lastGeneration = generation; try { IndexOutput genOutput = dir.CreateOutput(IndexFileNames.SEGMENTS_GEN, state); try { genOutput.WriteInt(FORMAT_LOCKLESS); genOutput.WriteLong(generation); genOutput.WriteLong(generation); } finally { genOutput.Close(); } } catch (System.Exception) { // It's OK if we fail to write this file since it's // used only as one of the retry fallbacks. } }
/// <summary> /// A utility for writing the <seealso cref="IndexFileNames#SEGMENTS_GEN"/> file to a /// <seealso cref="Directory"/>. /// /// <p> /// <b>NOTE:</b> this is an internal utility which is kept public so that it's /// accessible by code from other packages. You should avoid calling this /// method unless you're absolutely sure what you're doing! /// /// @lucene.internal /// </summary> public static void WriteSegmentsGen(Directory dir, long generation) { try { IndexOutput genOutput = dir.CreateOutput(IndexFileNames.SEGMENTS_GEN, IOContext.READONCE); try { genOutput.WriteInt(FORMAT_SEGMENTS_GEN_CURRENT); genOutput.WriteLong(generation); genOutput.WriteLong(generation); CodecUtil.WriteFooter(genOutput); } finally { genOutput.Dispose(); dir.Sync(Collections.Singleton(IndexFileNames.SEGMENTS_GEN)); } } catch (Exception) { // It's OK if we fail to write this file since it's // used only as one of the retry fallbacks. try { dir.DeleteFile(IndexFileNames.SEGMENTS_GEN); } catch (Exception) { // Ignore; this file is only used in a retry // fallback on init. } } }
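An illustrative call site, assuming the utility lives on SegmentInfos as in Lucene and that a dir and a committed infos are in scope: after a successful commit, record the generation so readers have a fallback hint.

SegmentInfos.WriteSegmentsGen(dir, infos.Generation);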
internal FieldsWriter(Directory d, System.String segment, FieldInfos fn, IState state) { fieldInfos = fn; bool success = false; String fieldsName = segment + "." + IndexFileNames.FIELDS_EXTENSION; try { fieldsStream = d.CreateOutput(fieldsName, state); fieldsStream.WriteInt(FORMAT_CURRENT); success = true; } finally { if (!success) { try { Dispose(); } catch (System.Exception) { // Suppress so we keep throwing the original exception } try { d.DeleteFile(fieldsName, state); } catch (System.Exception) { // Suppress so we keep throwing the original exception } } } success = false; String indexName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION; try { indexStream = d.CreateOutput(indexName, state); indexStream.WriteInt(FORMAT_CURRENT); success = true; } finally { if (!success) { try { Dispose(); } catch (System.IO.IOException) { } try { d.DeleteFile(fieldsName, state); } catch (System.Exception) { // Suppress so we keep throwing the original exception } try { d.DeleteFile(indexName, state); } catch (System.Exception) { // Suppress so we keep throwing the original exception } } } doClose = true; }
public override IntIndexOutput CreateOutput(Directory dir, string fileName, IOContext context) { IndexOutput output = dir.CreateOutput(fileName, context); bool success = false; try { output.WriteInt(baseBlockSize); VariableIntBlockIndexOutput ret = new VariableIntBlockIndexOutputAnonymousHelper(output, 2 * baseBlockSize); success = true; return ret; } finally { if (!success) { IOUtils.CloseWhileHandlingException(output); } } }
private void Write(Directory directory)
{
    string segmentsFileName = NextSegmentFileName;

    // Always advance the generation on write:
    if (_generation == -1)
    {
        _generation = 1;
    }
    else
    {
        _generation++;
    }

    IndexOutput segnOutput = null;
    bool success = false;
    var upgradedSIFiles = new HashSet<string>();
    try
    {
        segnOutput = directory.CreateOutput(segmentsFileName, IOContext.DEFAULT);
        CodecUtil.WriteHeader(segnOutput, "segments", VERSION_48);
        segnOutput.WriteLong(Version);
        segnOutput.WriteInt(Counter); // write counter
        segnOutput.WriteInt(Size()); // write infos
        foreach (SegmentCommitInfo siPerCommit in segments)
        {
            SegmentInfo si = siPerCommit.Info;
            segnOutput.WriteString(si.Name);
            segnOutput.WriteString(si.Codec.Name);
            segnOutput.WriteLong(siPerCommit.DelGen);
            int delCount = siPerCommit.DelCount;
            if (delCount < 0 || delCount > si.DocCount)
            {
                throw new InvalidOperationException("cannot write segment: invalid docCount segment=" + si.Name + " docCount=" + si.DocCount + " delCount=" + delCount);
            }
            segnOutput.WriteInt(delCount);
            segnOutput.WriteLong(siPerCommit.FieldInfosGen);
            IDictionary<long, ISet<string>> genUpdatesFiles = siPerCommit.UpdatesFiles;
            segnOutput.WriteInt(genUpdatesFiles.Count);
            foreach (KeyValuePair<long, ISet<string>> e in genUpdatesFiles)
            {
                segnOutput.WriteLong(e.Key);
                segnOutput.WriteStringSet(e.Value);
            }
            Debug.Assert(si.Dir == directory);

            // If this segment is pre-4.x, perform a one-time
            // "upgrade" to write the .si file for it:
            string version = si.Version;
            if (version == null || StringHelper.VersionComparator.Compare(version, "4.0") < 0)
            {
                if (!SegmentWasUpgraded(directory, si))
                {
                    string markerFileName = IndexFileNames.SegmentFileName(si.Name, "upgraded", Lucene3xSegmentInfoFormat.UPGRADED_SI_EXTENSION);
                    si.AddFile(markerFileName);
                    string segmentFileName = Write3xInfo(directory, si, IOContext.DEFAULT);
                    upgradedSIFiles.Add(segmentFileName);
                    directory.Sync(new[] { segmentFileName });

                    // Write separate marker file indicating upgrade
                    // is completed. this way, if there is a JVM
                    // kill/crash, OS crash, power loss, etc. while
                    // writing the upgraded file, the marker file
                    // will be missing:
                    IndexOutput @out = directory.CreateOutput(markerFileName, IOContext.DEFAULT);
                    try
                    {
                        CodecUtil.WriteHeader(@out, SEGMENT_INFO_UPGRADE_CODEC, SEGMENT_INFO_UPGRADE_VERSION);
                    }
                    finally
                    {
                        @out.Dispose();
                    }
                    upgradedSIFiles.Add(markerFileName);
                    directory.Sync(new[] { markerFileName });
                }
            }
        }
        segnOutput.WriteStringStringMap(_userData);
        PendingSegnOutput = segnOutput;
        success = true;
    }
    finally
    {
        if (!success)
        {
            // We hit an exception above; try to close the file
            // but suppress any exception:
            IOUtils.CloseWhileHandlingException(segnOutput);
            foreach (string fileName in upgradedSIFiles)
            {
                try
                {
                    directory.DeleteFile(fileName);
                }
                catch (Exception)
                {
                    // Suppress so we keep throwing the original exception
                }
            }
            try
            {
                // Try not to leave a truncated segments_N file in
                // the index:
                directory.DeleteFile(segmentsFileName);
            }
            catch (Exception)
            {
                // Suppress so we keep throwing the original exception
            }
        }
    }
}
public override IndexOutput CreateOutput(System.String name, IState state) { return dir.CreateOutput(name, state); }
public static string Write3xInfo(Directory dir, SegmentInfo si, IOContext context) { // NOTE: this is NOT how 3.x is really written... string fileName = IndexFileNames.SegmentFileName(si.Name, "", Lucene3xSegmentInfoFormat.UPGRADED_SI_EXTENSION); si.AddFile(fileName); //System.out.println("UPGRADE write " + fileName); bool success = false; IndexOutput output = dir.CreateOutput(fileName, context); try { // we are about to write this SI in 3.x format, dropping all codec information, etc. // so it had better be a 3.x segment or you will get very confusing errors later. if ((si.Codec is Lucene3xCodec) == false) { throw new InvalidOperationException("cannot write 3x SegmentInfo unless codec is Lucene3x (got: " + si.Codec + ")"); } CodecUtil.WriteHeader(output, Lucene3xSegmentInfoFormat.UPGRADED_SI_CODEC_NAME, Lucene3xSegmentInfoFormat.UPGRADED_SI_VERSION_CURRENT); // Write the Lucene version that created this segment, since 3.1 output.WriteString(si.Version); output.WriteInt(si.DocCount); output.WriteStringStringMap(si.Attributes()); output.WriteByte((byte)(sbyte)(si.UseCompoundFile ? SegmentInfo.YES : SegmentInfo.NO)); output.WriteStringStringMap(si.Diagnostics); output.WriteStringSet(si.Files); output.Dispose(); success = true; } finally { if (!success) { IOUtils.CloseWhileHandlingException(output); try { si.Dir.DeleteFile(fileName); } catch (Exception) { // Suppress so we keep throwing the original exception } } } return fileName; }
public virtual void TestCopyBytesMem() { int num = AtLeast(10); for (int iter = 0; iter < num; iter++) { Directory dir = NewDirectory(); if (VERBOSE) { Console.WriteLine("TEST: iter=" + iter + " dir=" + dir); } // make random file IndexOutput @out = dir.CreateOutput("test", NewIOContext(Random())); var bytes = new byte[TestUtil.NextInt(Random(), 1, 77777)]; int size = TestUtil.NextInt(Random(), 1, 1777777); int upto = 0; int byteUpto = 0; while (upto < size) { bytes[byteUpto++] = Value(upto); upto++; if (byteUpto == bytes.Length) { @out.WriteBytes(bytes, 0, bytes.Length); byteUpto = 0; } } @out.WriteBytes(bytes, 0, byteUpto); Assert.AreEqual(size, @out.GetFilePointer()); @out.Dispose(); Assert.AreEqual(size, dir.FileLength("test")); // copy from test -> test2 IndexInput @in = dir.OpenInput("test", NewIOContext(Random())); @out = dir.CreateOutput("test2", NewIOContext(Random())); upto = 0; while (upto < size) { if (Random().NextBoolean()) { @out.WriteByte(@in.ReadByte()); upto++; } else { int chunk = Math.Min(TestUtil.NextInt(Random(), 1, bytes.Length), size - upto); @out.CopyBytes(@in, chunk); upto += chunk; } } Assert.AreEqual(size, upto); @out.Dispose(); @in.Dispose(); // verify IndexInput in2 = dir.OpenInput("test2", NewIOContext(Random())); upto = 0; while (upto < size) { if (Random().NextBoolean()) { var v = in2.ReadByte(); Assert.AreEqual(Value(upto), v); upto++; } else { int limit = Math.Min(TestUtil.NextInt(Random(), 1, bytes.Length), size - upto); in2.ReadBytes(bytes, 0, limit); for (int byteIdx = 0; byteIdx < limit; byteIdx++) { Assert.AreEqual(Value(upto), bytes[byteIdx]); upto++; } } } in2.Dispose(); dir.DeleteFile("test"); dir.DeleteFile("test2"); dir.Dispose(); } }
/// <exception cref="IOException"></exception> private void DoUpdate() { SessionToken session = null; Dictionary<string, Directory> sourceDirectory = new Dictionary<string, Directory>(); Dictionary<string, IList<string>> copiedFiles = new Dictionary<string, IList<string>>(); bool notify = false; try { string version = handler.CurrentVersion; session = replicator.CheckForUpdate(version); WriteToInfoStream(string.Format("doUpdate(): handlerVersion={0} session={1}", version, session)); if (session == null) return; IDictionary<string, IList<RevisionFile>> requiredFiles = RequiredFiles(session.SourceFiles); WriteToInfoStream(string.Format("doUpdate(): handlerVersion={0} session={1}", version, session)); foreach (KeyValuePair<string, IList<RevisionFile>> pair in requiredFiles) { string source = pair.Key; Directory directory = factory.GetDirectory(session.Id, source); sourceDirectory.Add(source, directory); List<string> cpFiles = new List<string>(); copiedFiles.Add(source, cpFiles); foreach (RevisionFile file in pair.Value) { if (disposed) { // if we're closed, abort file copy WriteToInfoStream("doUpdate(): detected client was closed); abort file copy"); return; } Stream input = null; IndexOutput output = null; try { input = replicator.ObtainFile(session.Id, source, file.FileName); output = directory.CreateOutput(file.FileName, IOContext.DEFAULT); CopyBytes(output, input); cpFiles.Add(file.FileName); // TODO add some validation, on size / checksum } finally { IOUtils.Dispose(input, output); } } // only notify if all required files were successfully obtained. notify = true; } } finally { if (session != null) { try { replicator.Release(session.Id); } finally { if (!notify) { // cleanup after ourselves IOUtils.Dispose(sourceDirectory.Values); factory.CleanupSession(session.Id); } } } } // notify outside the try-finally above, so the session is released sooner. // the handler may take time to finish acting on the copied files, but the // session itself is no longer needed. try { if (notify && !disposed) { // no use to notify if we are closed already handler.RevisionReady(session.Version, session.SourceFiles, new ReadOnlyDictionary<string, IList<string>>(copiedFiles), sourceDirectory); } } finally { IOUtils.Dispose(sourceDirectory.Values); //TODO: Resharper Message, Expression is always true -> Verify and if so then we can remove the null check. if (session != null) { factory.CleanupSession(session.Id); } } }
internal virtual FST<T> DoTest(int prune1, int prune2, bool allowRandomSuffixSharing)
{
    if (LuceneTestCase.VERBOSE)
    {
        Console.WriteLine("\nTEST: prune1=" + prune1 + " prune2=" + prune2);
    }
    bool willRewrite = random.NextBoolean();
    Builder<T> builder = new Builder<T>(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4,
        prune1, prune2,
        prune1 == 0 && prune2 == 0,
        allowRandomSuffixSharing ? random.NextBoolean() : true,
        allowRandomSuffixSharing ? TestUtil.NextInt32(random, 1, 10) : int.MaxValue,
        outputs, null, willRewrite, PackedInt32s.DEFAULT, true, 15);
    if (LuceneTestCase.VERBOSE)
    {
        if (willRewrite)
        {
            Console.WriteLine("TEST: packed FST");
        }
        else
        {
            Console.WriteLine("TEST: non-packed FST");
        }
    }
    foreach (InputOutput<T> pair in pairs)
    {
        if (pair.Output is IEnumerable)
        {
            Builder<object> builderObject = builder as Builder<object>;
            var values = pair.Output as IEnumerable;
            foreach (object value in values)
            {
                builderObject.Add(pair.Input, value);
            }
        }
        else
        {
            builder.Add(pair.Input, pair.Output);
        }
    }
    FST<T> fst = builder.Finish();
    if (random.NextBoolean() && fst != null && !willRewrite)
    {
        IOContext context = LuceneTestCase.NewIOContext(random);
        using (IndexOutput @out = dir.CreateOutput("fst.bin", context))
        {
            fst.Save(@out);
        }
        IndexInput @in = dir.OpenInput("fst.bin", context);
        try
        {
            fst = new FST<T>(@in, outputs);
        }
        finally
        {
            @in.Dispose();
            dir.DeleteFile("fst.bin");
        }
    }
    if (LuceneTestCase.VERBOSE && pairs.Count <= 20 && fst != null)
    {
        using (TextWriter w = new StreamWriter(new FileStream("out.dot", FileMode.OpenOrCreate), Encoding.UTF8))
        {
            Util.ToDot(fst, w, false, false);
        }
        Console.WriteLine("SAVED out.dot");
    }
    if (LuceneTestCase.VERBOSE)
    {
        if (fst == null)
        {
            Console.WriteLine(" fst has 0 nodes (fully pruned)");
        }
        else
        {
            Console.WriteLine(" fst has " + fst.NodeCount + " nodes and " + fst.ArcCount + " arcs");
        }
    }
    if (prune1 == 0 && prune2 == 0)
    {
        VerifyUnPruned(inputMode, fst);
    }
    else
    {
        VerifyPruned(inputMode, fst, prune1, prune2);
    }
    return fst;
}
/// <summary> Copy contents of a directory src to a directory dest. /// If a file in src already exists in dest then the /// one in dest will be blindly overwritten. /// /// </summary> /// <param name="src">source directory /// </param> /// <param name="dest">destination directory /// </param> /// <param name="closeDirSrc">if <code>true</code>, call {@link #close()} method on source directory /// </param> /// <throws> IOException </throws> public static void Copy(Directory src, Directory dest, bool closeDirSrc) { System.String[] files = src.List(); if (files == null) { throw new System.IO.IOException("cannot read directory " + src + ": list() returned null"); } byte[] buf = new byte[BufferedIndexOutput.BUFFER_SIZE]; for (int i = 0; i < files.Length; i++) { IndexOutput os = null; IndexInput is_Renamed = null; try { // create file in dest directory os = dest.CreateOutput(files[i]); // read current file is_Renamed = src.OpenInput(files[i]); // and copy to dest directory long len = is_Renamed.Length(); long readCount = 0; while (readCount < len) { int toRead = readCount + BufferedIndexOutput.BUFFER_SIZE > len ? (int) (len - readCount) : BufferedIndexOutput.BUFFER_SIZE; is_Renamed.ReadBytes(buf, 0, toRead); os.WriteBytes(buf, toRead); readCount += toRead; } } finally { // graceful cleanup try { if (os != null) os.Close(); } finally { if (is_Renamed != null) is_Renamed.Close(); } } } if (closeDirSrc) src.Close(); }
// LUCENE-1468 private void CheckDirectoryFilter(Directory dir) { System.String name = "file"; try { dir.CreateOutput(name).Close(); Assert.IsTrue(dir.FileExists(name)); Assert.IsTrue(new System.Collections.ArrayList(dir.ListAll()).Contains(name)); } finally { dir.Close(); } }
public static void WriteIndexVersion(Directory directory, IndexDefinition indexDefinition) { var version = IndexVersion; if (indexDefinition.IsMapReduce) { version = MapReduceIndexVersion; } using (var indexOutput = directory.CreateOutput(IndexVersionFileName(indexDefinition))) { indexOutput.WriteString(version); indexOutput.Flush(); } }
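The matching read side is not part of these examples; a minimal sketch, assuming the same IndexVersionFileName helper is available:

// Assumed counterpart to WriteIndexVersion above: open the version file
// and read back the string written by WriteString.
public static string ReadIndexVersion(Directory directory, IndexDefinition indexDefinition)
{
    using (var indexInput = directory.OpenInput(IndexVersionFileName(indexDefinition)))
    {
        return indexInput.ReadString();
    }
}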
private void Write(Directory directory, IState state) { System.String segmentFileName = GetNextSegmentFileName(); // Always advance the generation on write: if (generation == -1) { generation = 1; } else { generation++; } var segnOutput = new ChecksumIndexOutput(directory.CreateOutput(segmentFileName, state)); bool success = false; try { segnOutput.WriteInt(CURRENT_FORMAT); // write FORMAT segnOutput.WriteLong(++version); // every write changes // the index segnOutput.WriteInt(counter); // write counter segnOutput.WriteInt(Count); // write infos for (int i = 0; i < Count; i++) { Info(i).Write(segnOutput); } segnOutput.WriteStringStringMap(userData); segnOutput.PrepareCommit(); success = true; pendingSegnOutput = segnOutput; } finally { if (!success) { // We hit an exception above; try to close the file // but suppress any exception: try { segnOutput.Close(); } catch (System.Exception) { // Suppress so we keep throwing the original exception } try { // Try not to leave a truncated segments_N file in // the index: directory.DeleteFile(segmentFileName, state); } catch (System.Exception) { // Suppress so we keep throwing the original exception } } } }
public override IndexOutput CreateOutput(System.String name) { return(dir.CreateOutput(name)); }
public virtual void TestDirectInstantiation() { System.IO.DirectoryInfo path = new System.IO.DirectoryInfo(AppSettings.Get("tempDir", System.IO.Path.GetTempPath())); int sz = 2; Directory[] dirs = new Directory[sz]; dirs[0] = new SimpleFSDirectory(path, null); // dirs[1] = new NIOFSDirectory(path, null); System.Console.WriteLine("Skipping NIOFSDirectory() test under Lucene.Net"); dirs[1] = new MMapDirectory(path, null); for (int i = 0; i < sz; i++) { Directory dir = dirs[i]; dir.EnsureOpen(); System.String fname = "foo." + i; System.String lockname = "foo" + i + ".lck"; IndexOutput out_Renamed = dir.CreateOutput(fname, null); out_Renamed.WriteByte((byte)i); out_Renamed.Close(); for (int j = 0; j < sz; j++) { Directory d2 = dirs[j]; d2.EnsureOpen(); Assert.IsTrue(d2.FileExists(fname, null)); Assert.AreEqual(1, d2.FileLength(fname, null)); // don't test read on MMapDirectory, since it can't really be // closed and will cause a failure to delete the file. if (d2 is MMapDirectory) { continue; } IndexInput input = d2.OpenInput(fname, null); Assert.AreEqual((byte)i, input.ReadByte(null)); input.Close(); } // delete with a different dir dirs[(i + 1) % sz].DeleteFile(fname, null); for (int j = 0; j < sz; j++) { Directory d2 = dirs[j]; Assert.IsFalse(d2.FileExists(fname, null)); } Lock lock_Renamed = dir.MakeLock(lockname); Assert.IsTrue(lock_Renamed.Obtain()); for (int j = 0; j < sz; j++) { Directory d2 = dirs[j]; Lock lock2 = d2.MakeLock(lockname); try { Assert.IsFalse(lock2.Obtain(1)); } catch (LockObtainFailedException) { // OK } } lock_Renamed.Release(); // now lock with different dir lock_Renamed = dirs[(i + 1) % sz].MakeLock(lockname); Assert.IsTrue(lock_Renamed.Obtain()); lock_Renamed.Release(); } for (int i = 0; i < sz; i++) { Directory dir = dirs[i]; dir.EnsureOpen(); dir.Close(); Assert.IsFalse(dir.isOpen_ForNUnit); } }
private void Demo_FSIndexInputBug(Directory fsdir, string file)
{
    // Setup the test file - we need more than 1024 bytes
    IndexOutput os = fsdir.CreateOutput(file, IOContext.DEFAULT);
    for (int i = 0; i < 2000; i++)
    {
        os.WriteByte((byte)(sbyte)i);
    }
    os.Dispose();

    IndexInput @in = fsdir.OpenInput(file, IOContext.DEFAULT);

    // this read primes the buffer in IndexInput
    @in.ReadByte();

    // Close the file
    @in.Dispose();

    // ERROR: this call should fail, but succeeds because the buffer
    // is still filled
    @in.ReadByte();

    // ERROR: this call should fail, but succeeds for some reason as well
    @in.Seek(1099);

    try
    {
        // OK: this call correctly fails. We are now past the 1024 internal
        // buffer, so an actual IO is attempted, which fails
        @in.ReadByte();
        Assert.Fail("expected readByte() to throw exception");
    }
    catch (IOException)
    {
        // expected exception
    }
}
// LUCENE-1468 private void CheckDirectoryFilter(Directory dir) { string name = "file"; try { dir.CreateOutput(name, NewIOContext(Random())).Dispose(); Assert.IsTrue(SlowFileExists(dir, name)); Assert.IsTrue(Arrays.AsList(dir.ListAll()).Contains(name)); } finally { dir.Dispose(); } }
private bool TryReusePreviousCommitPointsToRecoverIndex(Directory directory, IndexDefinition indexDefinition, string indexStoragePath, out IndexCommitPoint indexCommit, out string[] keysToDelete)
{
    indexCommit = null;
    keysToDelete = null;

    if (indexDefinition.IsMapReduce)
        return false;

    var indexFullPath = Path.Combine(indexStoragePath, indexDefinition.IndexId.ToString());

    var allCommitPointsFullPath = IndexCommitPointDirectory.GetAllCommitPointsFullPath(indexFullPath);

    if (System.IO.Directory.Exists(allCommitPointsFullPath) == false)
        return false;

    var filesInIndexDirectory = System.IO.Directory.GetFiles(indexFullPath).Select(Path.GetFileName);

    var existingCommitPoints = IndexCommitPointDirectory.ScanAllCommitPointsDirectory(indexFullPath);

    Array.Reverse(existingCommitPoints); // start from the highest generation

    foreach (var commitPointDirectoryName in existingCommitPoints)
    {
        try
        {
            var commitPointDirectory = new IndexCommitPointDirectory(indexStoragePath, indexDefinition.IndexId.ToString(), commitPointDirectoryName);

            if (TryGetCommitPoint(commitPointDirectory, out indexCommit) == false)
            {
                IOExtensions.DeleteDirectory(commitPointDirectory.FullPath);
                continue; // checksum is invalid, try another commit point
            }

            var missingFile = indexCommit.SegmentsInfo.ReferencedFiles.Any(referencedFile => filesInIndexDirectory.Contains(referencedFile) == false);

            if (missingFile)
            {
                IOExtensions.DeleteDirectory(commitPointDirectory.FullPath);
                continue; // there are some missing files, try another commit point
            }

            var storedSegmentsFile = indexCommit.SegmentsInfo.SegmentsFileName;

            // here there should be only one segments_N file, however remove all if there are more
            foreach (var currentSegmentsFile in System.IO.Directory.GetFiles(commitPointDirectory.IndexFullPath, "segments_*"))
            {
                File.Delete(currentSegmentsFile);
            }

            // copy old segments_N file
            File.Copy(Path.Combine(commitPointDirectory.FullPath, storedSegmentsFile), Path.Combine(commitPointDirectory.IndexFullPath, storedSegmentsFile), true);

            try
            {
                // update segments.gen file
                using (var genOutput = directory.CreateOutput(IndexFileNames.SEGMENTS_GEN))
                {
                    genOutput.WriteInt(SegmentInfos.FORMAT_LOCKLESS);
                    genOutput.WriteLong(indexCommit.SegmentsInfo.Generation);
                    genOutput.WriteLong(indexCommit.SegmentsInfo.Generation);
                }
            }
            catch (Exception)
            {
                // here we can ignore, segments.gen is used only as fallback
            }

            if (File.Exists(commitPointDirectory.DeletedKeysFile))
                keysToDelete = File.ReadLines(commitPointDirectory.DeletedKeysFile).ToArray();

            return true;
        }
        catch (Exception ex)
        {
            startupLog.WarnException("Could not recover an index named '" + indexDefinition.IndexId + "' from segments of generation " + commitPointDirectoryName, ex);
        }
    }

    return false;
}
/// <summary>Downloads a blob into the given directory, inflating it first when it was stored compressed, and reports success.</summary>
public bool SyncFile(Lucene.Net.Store.Directory directory, string fileName, bool CompressBlobs)
{
    var success = false;
    try
    {
        var blob = _blobContainer.GetBlobClient(_rootFolderName + fileName);
        _loggingService.Log(new LogEntry(LogLevel.Info, null, $"Syncing file {fileName} for {_rootFolderName}"));
        // then we will get it fresh into local deflatedName
        // StreamOutput deflatedStream = new StreamOutput(CacheDirectory.CreateOutput(deflatedName));
        if (ShouldCompressFile(fileName, CompressBlobs))
        {
            using (var deflatedStream = new MemoryStream())
            {
                // get the deflated blob
                blob.DownloadTo(deflatedStream);
#if FULLDEBUG
                _loggingService.Log(new LogEntry(LogLevel.Info, null, $"GET {fileName} RETRIEVED {deflatedStream.Length} bytes"));
#endif
                // seek back to beginning
                deflatedStream.Seek(0, SeekOrigin.Begin);

                // open output file for uncompressed contents
                using (var fileStream = new StreamOutput(directory.CreateOutput(fileName)))
                using (var decompressor = new DeflateStream(deflatedStream, CompressionMode.Decompress))
                {
                    var bytes = new byte[65535];
                    var nRead = 0;
                    do
                    {
                        nRead = decompressor.Read(bytes, 0, 65535);
                        if (nRead > 0)
                        {
                            fileStream.Write(bytes, 0, nRead);
                        }
                    } while (nRead == 65535);
                }
            }
        }
        else
        {
            using (var fileStream = new StreamOutput(directory.CreateOutput(fileName)))
            {
                // get the blob directly, no decompression needed
                blob.DownloadTo(fileStream);
                fileStream.Flush();
#if FULLDEBUG
                _loggingService.Log(new LogEntry(LogLevel.Info, null, $"GET {fileName} RETRIEVED {fileStream.Length} bytes"));
#endif
            }
        }
        success = true;
    }
    catch (Exception e)
    {
        _loggingService.Log(new LogEntry(LogLevel.Error, e, $"GET {fileName} RETRIEVAL failed"));
    }
    return success;
}
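ShouldCompressFile is called throughout these sync examples but never defined. A plausible implementation, strictly an assumption about this codebase: honor the CompressBlobs flag and skip deflate for extensions unlikely to shrink.

// Assumed helper, not the library's actual code.
private bool ShouldCompressFile(string fileName, bool compressBlobs)
{
    if (!compressBlobs)
        return false;

    var extension = Path.GetExtension(fileName).ToLowerInvariant();
    switch (extension)
    {
        // stored-fields data may already be block-compressed by the codec,
        // so deflate gains little there (an assumption about this index)
        case ".fdt":
            return false;
        default:
            return true;
    }
}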