public void TestDiskFull()
{
    // test writeBytes
    var dir = NewMockDirectory();
    dir.MaxSizeInBytes = 3;
    var bytes = new byte[] { 1, 2 };
    IndexOutput @out = dir.CreateOutput("foo", IOContext.DEFAULT);
    @out.WriteBytes(bytes, bytes.Length); // first write should succeed
    // flush() to ensure the written bytes are not buffered and counted
    // against the directory size
    @out.Flush();
    try
    {
        @out.WriteBytes(bytes, bytes.Length);
        Assert.Fail("should have failed on disk full");
    }
#pragma warning disable 168
    catch (IOException e)
#pragma warning restore 168
    {
        // expected
    }
    @out.Dispose();
    dir.Dispose();

    // test copyBytes
    dir = NewMockDirectory();
    dir.MaxSizeInBytes = 3;
    @out = dir.CreateOutput("foo", IOContext.DEFAULT);
    @out.CopyBytes(new ByteArrayDataInput(bytes), bytes.Length); // first copy should succeed
    // flush() to ensure the written bytes are not buffered and counted
    // against the directory size
    @out.Flush();
    try
    {
        @out.CopyBytes(new ByteArrayDataInput(bytes), bytes.Length);
        Assert.Fail("should have failed on disk full");
    }
#pragma warning disable 168
    catch (IOException e)
#pragma warning restore 168
    {
        // expected
    }
    @out.Dispose();
    dir.Dispose();
}
public override void Flush()
{
    try
    {
        base.Flush();
    }
    finally
    {
        // Always flush the wrapped output, even if base.Flush() throws.
        @delegate.Flush();
    }
}
public override void Flush(IDictionary<TermsHashConsumerPerThread, ICollection<TermsHashConsumerPerField>> threadsAndFields, SegmentWriteState state, IState s)
{
    lock (this)
    {
        // NOTE: it's possible that all documents seen in this segment
        // hit non-aborting exceptions, in which case we will
        // not have yet init'd the TermVectorsWriter. This is
        // actually OK (unlike in the stored fields case)
        // because, although FieldInfos.hasVectors() will return
        // true, the TermVectorsReader gracefully handles
        // non-existence of the term vectors files.
        if (tvx != null)
        {
            if (state.numDocsInStore > 0)
            {
                // In case there are some final documents that we
                // didn't see (because they hit a non-aborting exception):
                Fill(state.numDocsInStore - docWriter.DocStoreOffset);
            }

            tvx.Flush();
            tvd.Flush();
            tvf.Flush();
        }

        foreach (var entry in threadsAndFields)
        {
            foreach (var field in entry.Value)
            {
                TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField)field;
                perField.termsHashPerField.Reset();
                perField.ShrinkHash();
            }

            TermVectorsTermsWriterPerThread perThread = (TermVectorsTermsWriterPerThread)entry.Key;
            perThread.termsHashPerThread.Reset(true);
        }
    }
}
public override void Flush()
{
    // Give the mock directory a chance to inject a deterministic test
    // failure before flushing the wrapped output.
    dir.MaybeThrowDeterministicException();
    @delegate.Flush();
}
internal void Flush()
{
    indexStream.Flush();
    fieldsStream.Flush();
}
public override void Flush()
{
    @delegate.Flush();
}
public override void Flush()
{
    main.Flush();
}