/// <summary>
/// Deletes every file currently present in the cache directory.
/// </summary>
public void ClearCache()
{
    string[] cachedFiles = _cacheDirectory.ListAll();
    for (int i = 0; i < cachedFiles.Length; i++)
    {
        _cacheDirectory.DeleteFile(cachedFiles[i]);
    }
}
/// <summary>
/// Deletes <paramref name="name"/> from the cache directory if it lives
/// there, otherwise from the delegate directory. Synchronized on this
/// instance via UninterruptableMonitor.
/// </summary>
public override void DeleteFile(string name)
{
    UninterruptableMonitor.Enter(this);
    try
    {
        if (VERBOSE)
        {
            Console.WriteLine("nrtdir.deleteFile name=" + name);
        }
        // FileExists is marked obsolete upstream; suppression is deliberate.
#pragma warning disable 612, 618
        if (cache.FileExists(name))
#pragma warning restore 612, 618
        {
            cache.DeleteFile(name);
        }
        else
        {
            @delegate.DeleteFile(name);
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(this);
    }
}
/// <summary>Removes an existing file in the directory.</summary>
/// <remarks>
/// Cache-first ordering is deliberate: the IndexFileDeleter retries deletion
/// while a reader/searcher still holds the file open, so the local (cache)
/// copy must be removed first. If it is locked we rethrow the IOException so
/// Lucene retries later. If the master copy were removed first, FileExists
/// would return false on the retry and the cached file would never be
/// cleaned out of local storage.
/// </remarks>
public override void DeleteFile(string name)
{
    try
    {
        if (_cacheDirectory.FileExists(name))
        {
            _cacheDirectory.DeleteFile(name);
            SetDirty();
        }
    }
    catch (IOException) // was `catch (IOException ex)`: ex was unused (compiler warning CS0168)
    {
        // The file is locked. Don't delete it from the master either, or this
        // file would never get removed from the cache folder (per the deletion
        // policy the IndexFileDeleter uses). Rethrow so Lucene retries when it
        // can; once deletion succeeds we'll also clear it from the master.
        throw;
    }

    // The cache-directory file was removed successfully, so now remove the master copy.
    _masterDirectory.DeleteFile(name);
    SetDirty();
}
/// <summary>
/// Rolls back a pending commit: closes the pending segments output (if any)
/// and best-effort deletes the partially written segments_N file.
/// </summary>
internal void RollbackCommit(Directory dir, IState state)
{
    if (pendingSegnOutput == null)
    {
        return;
    }

    try
    {
        pendingSegnOutput.Close();
    }
    catch (System.Exception)
    {
        // Suppress so we keep throwing the original exception in our caller.
    }

    // Must carefully compute the file name from "generation" since
    // lastGeneration isn't incremented at this point.
    try
    {
        dir.DeleteFile(IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation), state);
    }
    catch (System.Exception)
    {
        // Suppress so we keep throwing the original exception in our caller.
    }

    pendingSegnOutput = null;
}
/// <summary>
/// Creates the paired terms/terms-index writers: this instance writes the
/// terms file while a companion instance writes the terms index, each holding
/// a reference to the other via <c>other</c>.
/// </summary>
internal TermInfosWriter(Directory directory, string segment, FieldInfos fis, int interval)
{
    Initialize(directory, segment, fis, interval, false);
    bool success = false;
    try
    {
        other = new TermInfosWriter(directory, segment, fis, interval, true);
        other.other = this; // back-link so either writer can reach its twin
        success = true;
    }
    finally
    {
        if (!success)
        {
            // Initialize() already created our output file; close it and
            // best-effort delete it so a partial file isn't leaked.
            IOUtils.DisposeWhileHandlingException(output);

            try
            {
                directory.DeleteFile(IndexFileNames.SegmentFileName(segment, "", (isIndex ? Lucene3xPostingsFormat.TERMS_INDEX_EXTENSION : Lucene3xPostingsFormat.TERMS_EXTENSION)));
            }
            catch (Exception ignored) when (ignored.IsIOException())
            {
                // Best-effort cleanup; IO failures here are ignored.
            }
        }
    }
}
/// <summary>
/// Deletes <paramref name="name"/> from the cache if present there,
/// otherwise from the delegate directory.
/// </summary>
// NOTE(review): lock (this) is generally discouraged (external code can lock
// the same object); kept as-is since sibling members presumably synchronize
// on this instance too — confirm before changing the lock target.
public override void DeleteFile(string name)
{
    lock (this)
    {
        if (VERBOSE)
        {
            Console.WriteLine("nrtdir.deleteFile name=" + name);
        }
        if (Cache.FileExists(name))
        {
            Cache.DeleteFile(name);
        }
        else
        {
            @delegate.DeleteFile(name);
        }
    }
}
/// <summary>
/// Deletes <paramref name="name"/> from the cache if present there,
/// otherwise from the delegate directory. Synchronized on this instance.
/// </summary>
public override void DeleteFile(string name)
{
    lock (this)
    {
        if (VERBOSE)
        {
            Console.WriteLine("nrtdir.deleteFile name=" + name);
        }
        // FileExists is marked obsolete upstream; suppression is deliberate.
#pragma warning disable 612, 618
        if (cache.FileExists(name))
#pragma warning restore 612, 618
        {
            cache.DeleteFile(name);
        }
        else
        {
            @delegate.DeleteFile(name);
        }
    }
}
/// <summary>
/// Deletes every file in <paramref name="directory"/> that is not listed in
/// <paramref name="skipFiles"/> and was last modified before
/// <paramref name="referenceTimestamp"/>.
/// </summary>
private static void RemoveOldFiles(Directory directory, string[] skipFiles, long referenceTimestamp)
{
    var staleCandidates = directory.ListAll().Except(skipFiles);
    foreach (var candidate in staleCandidates)
    {
        if (directory.FileModified(candidate) >= referenceTimestamp)
        {
            continue; // recent enough — keep it
        }
        directory.DeleteFile(candidate);
    }
}
/// <summary>Removes an existing file in the directory.</summary>
// NOTE(review): the master copy is deleted before the cached copies here —
// the opposite order of the cache-first variant used elsewhere. If deleting
// a locked cache file can throw, the cached copy could be orphaned after the
// master is already gone; confirm this ordering is intentional for this
// directory implementation.
public override void DeleteFile(System.String name)
{
    _masterDirectory.DeleteFile(name);
    // Remove the cached blob companion file, if any.
    if (_cacheDirectory.FileExists(name + ".blob"))
    {
        _cacheDirectory.DeleteFile(name + ".blob");
    }
    // Remove the cached copy of the file itself, if any.
    if (_cacheDirectory.FileExists(name))
    {
        _cacheDirectory.DeleteFile(name);
    }
}
/// <summary>
/// Cleanup the index directory by deleting all given files. Called when file
/// copy or sync failed.
/// </summary>
public static void CleanupFilesOnFailure(Directory directory, IList<string> files)
{
    foreach (string fileName in files)
    {
        try
        {
            directory.DeleteFile(fileName);
        }
        catch
        {
            // Deliberately swallowed: we are already cleaning up after a
            // failed copy/sync, so best-effort deletion is all we can do.
        }
    }
}
/// <summary>
/// Writes <see cref="IndexFileNames.SEGMENTS_GEN"/> file to the directory, reading
/// the generation from the given <paramref name="segmentsFile"/>. If it is <c>null</c>,
/// this method deletes segments.gen from the directory.
/// </summary>
public static void WriteSegmentsGen(string segmentsFile, Directory directory)
{
    if (segmentsFile == null)
    {
        // No segments file: best-effort removal of segments.gen.
        try
        {
            directory.DeleteFile(IndexFileNames.SEGMENTS_GEN);
        }
        catch
        {
            // suppress any errors while deleting this file.
        }
        return;
    }

    SegmentInfos.WriteSegmentsGen(directory, SegmentInfos.GenerationFromSegmentsFileName(segmentsFile));
}
/// <summary>
/// Cleans up the index directory from old index files. This method uses the
/// last commit found by <see cref="GetLastCommit(Directory)"/>. If it matches the
/// expected <paramref name="segmentsFile"/>, then all files not referenced by this commit point
/// are deleted.
/// </summary>
/// <remarks>
/// <b>NOTE:</b> This method does a best effort attempt to clean the index
/// directory. It suppresses any exceptions that occur, as this can be retried
/// the next time.
/// </remarks>
public static void CleanupOldIndexFiles(Directory directory, string segmentsFile)
{
    try
    {
        IndexCommit commit = GetLastCommit(directory);
        // commit == null means weird IO errors occurred, ignore them
        // if there were any IO errors reading the expected commit point (i.e.
        // segments files mismatch), then ignore that commit either.
        if (commit != null && commit.SegmentsFileName.Equals(segmentsFile, StringComparison.Ordinal))
        {
            // Files referenced by the commit — plus segments.gen — must survive.
            ISet<string> commitFiles = new JCG.HashSet<string>(commit.FileNames)
            {
                IndexFileNames.SEGMENTS_GEN
            };
            Regex matcher = IndexFileNames.CODEC_FILE_PATTERN;
            foreach (string file in directory.ListAll())
            {
                // Delete only unreferenced files that look like index files
                // (codec-pattern files or segments_N files).
                if (!commitFiles.Contains(file) && (matcher.IsMatch(file) || file.StartsWith(IndexFileNames.SEGMENTS, StringComparison.Ordinal)))
                {
                    try
                    {
                        directory.DeleteFile(file);
                    }
                    catch
                    {
                        // suppress, it's just a best effort
                    }
                }
            }
        }
    }
    catch
    {
        // ignore any errors that happens during this state and only log it. this
        // cleanup will have a chance to succeed the next time we get a new
        // revision.
    }
}
/// <summary>
/// Opens the terms (or terms-index, when <paramref name="isi"/> is true)
/// output file for <paramref name="segment"/> and writes its fixed header
/// fields; on failure the partial file is closed and best-effort deleted.
/// </summary>
private void Initialize(Directory directory, string segment, FieldInfos fis, int interval, bool isi)
{
    indexInterval = interval;
    fieldInfos = fis;
    isIndex = isi;
    output = directory.CreateOutput(IndexFileNames.SegmentFileName(segment, "", (isIndex ? Lucene3xPostingsFormat.TERMS_INDEX_EXTENSION : Lucene3xPostingsFormat.TERMS_EXTENSION)), IOContext.DEFAULT);
    bool success = false;
    try
    {
        output.WriteInt32(FORMAT_CURRENT); // write format
        output.WriteInt64(0); // leave space for size
        output.WriteInt32(indexInterval); // write indexInterval
        output.WriteInt32(skipInterval); // write skipInterval
        output.WriteInt32(maxSkipLevels); // write maxSkipLevels
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(InitUTF16Results());
        }
        success = true;
    }
    finally
    {
        if (!success)
        {
            // Header write failed: close the stream and best-effort delete
            // the partial file so it isn't leaked.
            IOUtils.DisposeWhileHandlingException(output);
            try
            {
                directory.DeleteFile(IndexFileNames.SegmentFileName(segment, "", (isIndex ? Lucene3xPostingsFormat.TERMS_INDEX_EXTENSION : Lucene3xPostingsFormat.TERMS_EXTENSION)));
            }
            catch (IOException)
            {
                // Cleanup is best-effort. Catching without declaring a
                // variable removes the need for the CS0168 pragma pair the
                // previous version carried.
            }
        }
    }
}
/// <summary>
/// Copies the file <paramref name="src"/> to <seealso cref="Directory"/> <paramref name="to"/> under the new
/// file name <paramref name="dest"/>.
/// <para/>
/// If you want to copy the entire source directory to the destination one, you
/// can do so like this:
///
/// <code>
/// Directory to; // the directory to copy to
/// foreach (string file in dir.ListAll()) {
///     dir.Copy(to, file, newFile, IOContext.DEFAULT); // newFile can be either file, or a new name
/// }
/// </code>
/// <para/>
/// <b>NOTE:</b> this method does not check whether <paramref name="dest"/> exist and will
/// overwrite it if it does.
/// </summary>
public virtual void Copy(Directory to, string src, string dest, IOContext context)
{
    IndexOutput os = null;
    IndexInput @is = null;
    IOException priorException = null;
    try
    {
        os = to.CreateOutput(dest, context);
        @is = OpenInput(src, context);
        os.CopyBytes(@is, @is.Length);
    }
    catch (IOException ioe)
    {
        // Remember the copy failure; DisposeWhileHandlingException below
        // rethrows it after both streams are closed.
        priorException = ioe;
    }
    finally
    {
        bool success = false;
        try
        {
            // Closes both streams and rethrows priorException (if set),
            // suppressing secondary close failures.
            IOUtils.DisposeWhileHandlingException(priorException, os, @is);
            success = true;
        }
        finally
        {
            if (!success)
            {
                // Copy or close failed: best-effort removal of the partial
                // destination file.
                try
                {
                    to.DeleteFile(dest);
                }
                catch (Exception)
                {
                }
            }
        }
    }
}
/// <summary>
/// Builds an FST from <c>pairs</c> with the given pruning settings, optionally
/// round-trips it through a saved file and/or a dot dump, then verifies it
/// (unpruned or pruned verification depending on the prune settings).
/// </summary>
internal virtual FST<T> DoTest(int prune1, int prune2, bool allowRandomSuffixSharing)
{
    if (LuceneTestCase.VERBOSE)
    {
        Console.WriteLine("\nTEST: prune1=" + prune1 + " prune2=" + prune2);
    }

    // Randomly exercise the packed ("rewritten") FST representation.
    bool willRewrite = random.NextBoolean();

    Builder<T> builder = new Builder<T>(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4,
        prune1, prune2,
        prune1 == 0 && prune2 == 0,
        allowRandomSuffixSharing ? random.NextBoolean() : true,
        allowRandomSuffixSharing ? TestUtil.NextInt32(random, 1, 10) : int.MaxValue,
        outputs, null, willRewrite, PackedInt32s.DEFAULT, true, 15);

    if (LuceneTestCase.VERBOSE)
    {
        if (willRewrite)
        {
            Console.WriteLine("TEST: packed FST");
        }
        else
        {
            Console.WriteLine("TEST: non-packed FST");
        }
    }

    foreach (InputOutput<T> pair in pairs)
    {
        if (pair.Output is IEnumerable)
        {
            // Multi-output pair: add each value separately.
            // NOTE(review): the as-cast yields null unless T is object —
            // presumably IEnumerable outputs only occur in that
            // configuration; confirm.
            Builder<object> builderObject = builder as Builder<object>;
            var values = pair.Output as IEnumerable;
            foreach (object value in values)
            {
                builderObject.Add(pair.Input, value);
            }
        }
        else
        {
            builder.Add(pair.Input, pair.Output);
        }
    }
    FST<T> fst = builder.Finish();

    // Randomly round-trip the FST through a file to exercise serialization;
    // skipped for packed FSTs.
    if (random.NextBoolean() && fst != null && !willRewrite)
    {
        IOContext context = LuceneTestCase.NewIOContext(random);
        using (IndexOutput @out = dir.CreateOutput("fst.bin", context))
        {
            fst.Save(@out);
        }
        IndexInput @in = dir.OpenInput("fst.bin", context);
        try
        {
            fst = new FST<T>(@in, outputs);
        }
        finally
        {
            @in.Dispose();
            dir.DeleteFile("fst.bin");
        }
    }

    // For small inputs, dump the FST as graphviz dot for manual inspection.
    if (LuceneTestCase.VERBOSE && pairs.Count <= 20 && fst != null)
    {
        using (TextWriter w = new StreamWriter(new FileStream("out.dot", FileMode.OpenOrCreate), Encoding.UTF8))
        {
            Util.ToDot(fst, w, false, false);
        }
        Console.WriteLine("SAVED out.dot");
    }

    if (LuceneTestCase.VERBOSE)
    {
        if (fst == null)
        {
            Console.WriteLine(" fst has 0 nodes (fully pruned)");
        }
        else
        {
            Console.WriteLine(" fst has " + fst.NodeCount + " nodes and " + fst.ArcCount + " arcs");
        }
    }

    if (prune1 == 0 && prune2 == 0)
    {
        VerifyUnPruned(inputMode, fst);
    }
    else
    {
        VerifyPruned(inputMode, fst, prune1, prune2);
    }

    return (fst);
}
/// <summary>
/// Writes a random-sized file through a byte buffer, copies it with a random
/// mix of single-byte writes and CopyBytes chunks, then verifies the copy
/// byte-for-byte.
/// </summary>
public virtual void TestCopyBytesMem()
{
    int num = AtLeast(10);
    for (int iter = 0; iter < num; iter++)
    {
        Directory dir = NewDirectory();
        if (VERBOSE)
        {
            Console.WriteLine("TEST: iter=" + iter + " dir=" + dir);
        }

        // make random file
        IndexOutput @out = dir.CreateOutput("test", NewIOContext(Random()));
        var bytes = new byte[TestUtil.NextInt(Random(), 1, 77777)];
        int size = TestUtil.NextInt(Random(), 1, 1777777);
        int upto = 0;
        int byteUpto = 0;
        while (upto < size)
        {
            bytes[byteUpto++] = Value(upto);
            upto++;
            if (byteUpto == bytes.Length)
            {
                // Buffer full: flush it with a single WriteBytes call.
                @out.WriteBytes(bytes, 0, bytes.Length);
                byteUpto = 0;
            }
        }

        @out.WriteBytes(bytes, 0, byteUpto);
        Assert.AreEqual(size, @out.GetFilePointer());
        @out.Dispose();
        Assert.AreEqual(size, dir.FileLength("test"));

        // copy from test -> test2
        IndexInput @in = dir.OpenInput("test", NewIOContext(Random()));
        @out = dir.CreateOutput("test2", NewIOContext(Random()));
        upto = 0;
        while (upto < size)
        {
            if (Random().NextBoolean())
            {
                // single byte copy
                @out.WriteByte(@in.ReadByte());
                upto++;
            }
            else
            {
                // chunked copy of a random size
                int chunk = Math.Min(TestUtil.NextInt(Random(), 1, bytes.Length), size - upto);
                @out.CopyBytes(@in, chunk);
                upto += chunk;
            }
        }
        Assert.AreEqual(size, upto);
        @out.Dispose();
        @in.Dispose();

        // verify
        IndexInput in2 = dir.OpenInput("test2", NewIOContext(Random()));
        upto = 0;
        while (upto < size)
        {
            if (Random().NextBoolean())
            {
                // verify one byte at a time
                var v = in2.ReadByte();
                Assert.AreEqual(Value(upto), v);
                upto++;
            }
            else
            {
                // verify a random-sized chunk
                int limit = Math.Min(TestUtil.NextInt(Random(), 1, bytes.Length), size - upto);
                in2.ReadBytes(bytes, 0, limit);
                for (int byteIdx = 0; byteIdx < limit; byteIdx++)
                {
                    Assert.AreEqual(Value(upto), bytes[byteIdx]);
                    upto++;
                }
            }
        }
        in2.Dispose();

        dir.DeleteFile("test");
        dir.DeleteFile("test2");

        dir.Dispose();
    }
}
/// <summary>
/// Opens the stored-fields data and index outputs for the segment and writes
/// the current format header into each; on any failure the partially created
/// files are closed and best-effort deleted.
/// </summary>
internal FieldsWriter(Directory d, System.String segment, FieldInfos fn, IState state)
{
    fieldInfos = fn;

    bool success = false;
    String fieldsName = segment + "." + IndexFileNames.FIELDS_EXTENSION;
    try
    {
        fieldsStream = d.CreateOutput(fieldsName, state);
        fieldsStream.WriteInt(FORMAT_CURRENT);
        success = true;
    }
    finally
    {
        if (!success)
        {
            // Creating/writing the fields file failed: close what we opened
            // and best-effort delete the partial file.
            try
            {
                Dispose();
            }
            catch (System.Exception)
            {
                // Suppress so we keep throwing the original exception
            }
            try
            {
                d.DeleteFile(fieldsName, state);
            }
            catch (System.Exception)
            {
                // Suppress so we keep throwing the original exception
            }
        }
    }

    success = false;
    String indexName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
    try
    {
        indexStream = d.CreateOutput(indexName, state);
        indexStream.WriteInt(FORMAT_CURRENT);
        success = true;
    }
    finally
    {
        if (!success)
        {
            // Index file failed: clean up both files, since the writer is
            // unusable without the pair.
            // NOTE(review): this Dispose catch is narrower (IOException) than
            // the System.Exception catch in the first cleanup block above —
            // confirm the inconsistency is intended.
            try
            {
                Dispose();
            }
            catch (System.IO.IOException)
            {
            }
            try
            {
                d.DeleteFile(fieldsName, state);
            }
            catch (System.Exception)
            {
                // Suppress so we keep throwing the original exception
            }
            try
            {
                d.DeleteFile(indexName, state);
            }
            catch (System.Exception)
            {
                // Suppress so we keep throwing the original exception
            }
        }
    }

    doClose = true;
}
/// <summary>
/// Completes a two-phase commit: finishes and closes the pending segments_N
/// output, syncs the file, then best-effort writes segments.gen.
/// </summary>
internal void FinishCommit(Directory dir, IState state)
{
    if (pendingSegnOutput == null)
    {
        throw new System.SystemException("prepareCommit was not called");
    }
    bool success = false;
    try
    {
        pendingSegnOutput.FinishCommit();
        pendingSegnOutput.Close();
        pendingSegnOutput = null;
        success = true;
    }
    finally
    {
        if (!success)
        {
            // Roll back: closes the output and deletes the partial segments_N.
            RollbackCommit(dir, state);
        }
    }

    // NOTE: if we crash here, we have left a segments_N
    // file in the directory in a possibly corrupt state (if
    // some bytes made it to stable storage and others
    // didn't). But, the segments_N file includes checksum
    // at the end, which should catch this case. So when a
    // reader tries to read it, it will throw a
    // CorruptIndexException, which should cause the retry
    // logic in SegmentInfos to kick in and load the last
    // good (previous) segments_N-1 file.

    System.String fileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
    success = false;
    try
    {
        dir.Sync(fileName);
        success = true;
    }
    finally
    {
        if (!success)
        {
            // Sync failed: best-effort delete of the possibly corrupt file.
            try
            {
                dir.DeleteFile(fileName, state);
            }
            catch (System.Exception)
            {
                // Suppress so we keep throwing the original exception
            }
        }
    }

    lastGeneration = generation;

    // Record the generation in segments.gen; failures here are tolerated
    // because this file is only a retry fallback.
    try
    {
        IndexOutput genOutput = dir.CreateOutput(IndexFileNames.SEGMENTS_GEN, state);
        try
        {
            genOutput.WriteInt(FORMAT_LOCKLESS);
            genOutput.WriteLong(generation);
            genOutput.WriteLong(generation);
        }
        finally
        {
            genOutput.Close();
        }
    }
    catch (System.Exception)
    {
        // It's OK if we fail to write this file since it's
        // used only as one of the retry fallbacks.
    }
}
/// <summary>Pure pass-through: forwards deletion of <paramref name="name"/> to the wrapped directory.</summary>
public override void DeleteFile(string name) => Dir.DeleteFile(name);
/// <summary>
/// Copies the file <i>src</i> to <seealso cref="Directory"/> <i>to</i> under the new
/// file name <i>dest</i>.
/// <p>
/// If you want to copy the entire source directory to the destination one, you
/// can do so like this:
///
/// <pre class="prettyprint">
/// Directory to; // the directory to copy to
/// for (String file : dir.listAll()) {
///     dir.copy(to, file, newFile, IOContext.DEFAULT); // newFile can be either file, or a new name
/// }
/// </pre>
/// <p>
/// <b>NOTE:</b> this method does not check whether <i>dest</i> exist and will
/// overwrite it if it does.
/// </summary>
public virtual void Copy(Directory to, string src, string dest, IOContext context)
{
    IndexOutput os = null;
    IndexInput @is = null;
    System.IO.IOException priorException = null;
    try
    {
        os = to.CreateOutput(dest, context);
        @is = OpenInput(src, context);
        os.CopyBytes(@is, @is.Length());
    }
    catch (System.IO.IOException ioe)
    {
        // Remember the copy failure; CloseWhileHandlingException below
        // rethrows it after both streams are closed.
        priorException = ioe;
    }
    finally
    {
        bool success = false;
        try
        {
            // Closes both streams and rethrows priorException (if set),
            // suppressing secondary close failures.
            IOUtils.CloseWhileHandlingException(priorException, os, @is);
            success = true;
        }
        finally
        {
            if (!success)
            {
                // Copy or close failed: best-effort removal of the partial
                // destination file.
                try
                {
                    to.DeleteFile(dest);
                }
                catch (Exception)
                {
                }
            }
        }
    }
}
/// <summary>
/// Completes a two-phase commit: writes the footer to the pending segments_N
/// output, disposes it, syncs the file, then writes segments.gen. Any failure
/// before the file is complete triggers a rollback that deletes the partial
/// segments_N.
/// </summary>
internal void FinishCommit(Directory dir)
{
    if (PendingSegnOutput == null)
    {
        throw new InvalidOperationException("prepareCommit was not called");
    }
    bool success = false;
    try
    {
        CodecUtil.WriteFooter(PendingSegnOutput);
        success = true;
    }
    finally
    {
        if (!success)
        {
            // Closes pendingSegnOutput & deletes partial segments_N:
            RollbackCommit(dir);
        }
        else
        {
            // Footer written; now the Dispose itself can still fail, so
            // track it with its own success flag.
            success = false;
            try
            {
                PendingSegnOutput.Dispose();
                success = true;
            }
            finally
            {
                if (!success)
                {
                    // Closes pendingSegnOutput & deletes partial segments_N:
                    RollbackCommit(dir);
                }
                else
                {
                    PendingSegnOutput = null;
                }
            }
        }
    }

    // NOTE: if we crash here, we have left a segments_N
    // file in the directory in a possibly corrupt state (if
    // some bytes made it to stable storage and others
    // didn't). But, the segments_N file includes checksum
    // at the end, which should catch this case. So when a
    // reader tries to read it, it will throw a
    // CorruptIndexException, which should cause the retry
    // logic in SegmentInfos to kick in and load the last
    // good (previous) segments_N-1 file.

    var fileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", _generation);
    success = false;
    try
    {
        dir.Sync(Collections.Singleton(fileName));
        success = true;
    }
    finally
    {
        if (!success)
        {
            // Sync failed: best-effort delete of the possibly corrupt file.
            try
            {
                dir.DeleteFile(fileName);
            }
            catch (Exception)
            {
                // Suppress so we keep throwing the original exception
            }
        }
    }

    _lastGeneration = _generation;
    WriteSegmentsGen(dir, _generation);
}
/// <summary>
/// Writes a new segments_N file (the prepare phase of a two-phase commit).
/// On success the checksummed output is kept open as <c>pendingSegnOutput</c>
/// until FinishCommit/RollbackCommit; on failure the partial file is closed
/// and best-effort deleted.
/// </summary>
private void Write(Directory directory, IState state)
{
    System.String segmentFileName = GetNextSegmentFileName();

    // Always advance the generation on write:
    if (generation == -1)
    {
        generation = 1;
    }
    else
    {
        generation++;
    }

    var segnOutput = new ChecksumIndexOutput(directory.CreateOutput(segmentFileName, state));

    bool success = false;
    try
    {
        segnOutput.WriteInt(CURRENT_FORMAT); // write FORMAT
        segnOutput.WriteLong(++version); // every write changes the index
        segnOutput.WriteInt(counter); // write counter
        segnOutput.WriteInt(Count); // write infos
        for (int i = 0; i < Count; i++)
        {
            Info(i).Write(segnOutput);
        }
        segnOutput.WriteStringStringMap(userData);
        segnOutput.PrepareCommit();
        success = true;
        pendingSegnOutput = segnOutput;
    }
    finally
    {
        if (!success)
        {
            // We hit an exception above; try to close the file
            // but suppress any exception:
            try
            {
                segnOutput.Close();
            }
            catch (System.Exception)
            {
                // Suppress so we keep throwing the original exception
            }
            try
            {
                // Try not to leave a truncated segments_N file in
                // the index:
                directory.DeleteFile(segmentFileName, state);
            }
            catch (System.Exception)
            {
                // Suppress so we keep throwing the original exception
            }
        }
    }
}
/// <summary>
/// A utility for writing the <seealso cref="IndexFileNames#SEGMENTS_GEN"/> file to a
/// <seealso cref="Directory"/>.
///
/// <p>
/// <b>NOTE:</b> this is an internal utility which is kept public so that it's
/// accessible by code from other packages. You should avoid calling this
/// method unless you're absolutely sure what you're doing!
///
/// @lucene.internal
/// </summary>
public static void WriteSegmentsGen(Directory dir, long generation)
{
    try
    {
        IndexOutput genOutput = dir.CreateOutput(IndexFileNames.SEGMENTS_GEN, IOContext.READONCE);
        try
        {
            genOutput.WriteInt(FORMAT_SEGMENTS_GEN_CURRENT);
            // The generation is written twice — presumably so readers can
            // sanity-check consistency of the file; confirm against the
            // corresponding reader.
            genOutput.WriteLong(generation);
            genOutput.WriteLong(generation);
            CodecUtil.WriteFooter(genOutput);
        }
        finally
        {
            genOutput.Dispose();
            dir.Sync(Collections.Singleton(IndexFileNames.SEGMENTS_GEN));
        }
    }
    catch (Exception)
    {
        // It's OK if we fail to write this file since it's
        // used only as one of the retry fallbacks.
        try
        {
            dir.DeleteFile(IndexFileNames.SEGMENTS_GEN);
        }
        catch (Exception)
        {
            // Ignore; this file is only used in a retry
            // fallback on init.
        }
    }
}
/// <summary>
/// Writes a new segments_N file (the prepare phase of a two-phase commit).
/// Also performs a one-time "upgrade" of any pre-4.x segment by writing its
/// .si file plus an upgrade-marker file. On success the output is kept open
/// as <c>PendingSegnOutput</c>; on failure all files written by this call are
/// best-effort deleted.
/// </summary>
private void Write(Directory directory)
{
    string segmentsFileName = NextSegmentFileName;

    // Always advance the generation on write:
    if (_generation == -1)
    {
        _generation = 1;
    }
    else
    {
        _generation++;
    }

    IndexOutput segnOutput = null;
    bool success = false;

    // Tracks upgrade files written during this call so they can be removed
    // if the overall write fails.
    var upgradedSIFiles = new HashSet<string>();

    try
    {
        segnOutput = directory.CreateOutput(segmentsFileName, IOContext.DEFAULT);
        CodecUtil.WriteHeader(segnOutput, "segments", VERSION_48);
        segnOutput.WriteLong(Version);
        segnOutput.WriteInt(Counter); // write counter
        segnOutput.WriteInt(Size()); // write infos
        foreach (SegmentCommitInfo siPerCommit in segments)
        {
            SegmentInfo si = siPerCommit.Info;
            segnOutput.WriteString(si.Name);
            segnOutput.WriteString(si.Codec.Name);
            segnOutput.WriteLong(siPerCommit.DelGen);
            int delCount = siPerCommit.DelCount;
            if (delCount < 0 || delCount > si.DocCount)
            {
                throw new InvalidOperationException("cannot write segment: invalid docCount segment=" + si.Name + " docCount=" + si.DocCount + " delCount=" + delCount);
            }
            segnOutput.WriteInt(delCount);
            segnOutput.WriteLong(siPerCommit.FieldInfosGen);
            IDictionary<long, ISet<string>> genUpdatesFiles = siPerCommit.UpdatesFiles;
            segnOutput.WriteInt(genUpdatesFiles.Count);
            foreach (KeyValuePair<long, ISet<string>> e in genUpdatesFiles)
            {
                segnOutput.WriteLong(e.Key);
                segnOutput.WriteStringSet(e.Value);
            }

            Debug.Assert(si.Dir == directory);

            // If this segment is pre-4.x, perform a one-time
            // "upgrade" to write the .si file for it:
            string version = si.Version;
            if (version == null || StringHelper.VersionComparator.Compare(version, "4.0") < 0)
            {
                if (!SegmentWasUpgraded(directory, si))
                {
                    string markerFileName = IndexFileNames.SegmentFileName(si.Name, "upgraded", Lucene3xSegmentInfoFormat.UPGRADED_SI_EXTENSION);
                    si.AddFile(markerFileName);

                    string segmentFileName = Write3xInfo(directory, si, IOContext.DEFAULT);
                    upgradedSIFiles.Add(segmentFileName);
                    directory.Sync(/*Collections.singletonList(*/new[] { segmentFileName }/*)*/);

                    // Write separate marker file indicating upgrade
                    // is completed. This way, if there is a JVM
                    // kill/crash, OS crash, power loss, etc. while
                    // writing the upgraded file, the marker file
                    // will be missing:
                    IndexOutput @out = directory.CreateOutput(markerFileName, IOContext.DEFAULT);
                    try
                    {
                        CodecUtil.WriteHeader(@out, SEGMENT_INFO_UPGRADE_CODEC, SEGMENT_INFO_UPGRADE_VERSION);
                    }
                    finally
                    {
                        @out.Dispose();
                    }
                    upgradedSIFiles.Add(markerFileName);
                    directory.Sync(/*Collections.SingletonList(*/new[] { markerFileName }/*)*/);
                }
            }
        }
        segnOutput.WriteStringStringMap(_userData);
        PendingSegnOutput = segnOutput;
        success = true;
    }
    finally
    {
        if (!success)
        {
            // We hit an exception above; try to close the file
            // but suppress any exception:
            IOUtils.CloseWhileHandlingException(segnOutput);

            // Remove any upgrade files written during this failed attempt.
            foreach (string fileName in upgradedSIFiles)
            {
                try
                {
                    directory.DeleteFile(fileName);
                }
                catch (Exception)
                {
                    // Suppress so we keep throwing the original exception
                }
            }

            try
            {
                // Try not to leave a truncated segments_N file in
                // the index:
                directory.DeleteFile(segmentsFileName);
            }
            catch (Exception)
            {
                // Suppress so we keep throwing the original exception
            }
        }
    }
}
/// <summary>
/// Deletes <paramref name="name"/> from the wrapped directory, forwarding the
/// caller's <paramref name="state"/>.
/// </summary>
public override void DeleteFile(System.String name, IState state)
{
    // Fix: previously `null` was passed instead of `state`, silently dropping
    // the caller's state — unlike the other delegating overrides in this
    // codebase, which forward it through.
    dir.DeleteFile(name, state);
}
/// <summary>Pure pass-through: forwards deletion of <paramref name="name"/> to the wrapped input directory.</summary>
public override void DeleteFile(string name) => m_input.DeleteFile(name);
/// <summary>Pure pass-through: forwards deletion of <paramref name="name"/> to the wrapped directory.</summary>
public override void DeleteFile(System.String name) => dir.DeleteFile(name);
/// <summary>Removes an existing file in the directory.</summary>
// Deletes from the real directory first, then from the base directory, to
// keep the two in sync.
public override void DeleteFile(string name)
{
    //perform on both dirs
    _realDirectory.DeleteFile(name);
    base.DeleteFile(name);
}