/// <summary>
/// Verifies that both <c>WriteBytes</c> and <c>CopyBytes</c> fail with an
/// <see cref="IOException"/> ("fake disk full") once the mock directory's
/// <c>MaxSizeInBytes</c> quota (3 bytes here) is exceeded.
/// </summary>
public void TestDiskFull()
{
    // test writeBytes
    var dir = NewMockDirectory();
    dir.MaxSizeInBytes = 3;
    var bytes = new byte[] { 1, 2 };
    IndexOutput @out = dir.CreateOutput("foo", IOContext.DEFAULT);
    @out.WriteBytes(bytes, bytes.Length); // first write should succeed
    // flush() to ensure the written bytes are not buffered and counted
    // against the directory size
    @out.Flush();
    try
    {
        @out.WriteBytes(bytes, bytes.Length);
        Assert.Fail("should have failed on disk full");
    }
    catch (IOException) // no variable needed: removes the CS0168 warning and the pragmas
    {
        // expected
    }
    @out.Dispose();
    dir.Dispose();

    // test copyBytes
    dir = NewMockDirectory();
    dir.MaxSizeInBytes = 3;
    @out = dir.CreateOutput("foo", IOContext.DEFAULT);
    @out.CopyBytes(new ByteArrayDataInput(bytes), bytes.Length); // first copy should succeed
    // flush() to ensure the written bytes are not buffered and counted
    // against the directory size
    @out.Flush();
    try
    {
        @out.CopyBytes(new ByteArrayDataInput(bytes), bytes.Length);
        Assert.Fail("should have failed on disk full");
    }
    catch (IOException)
    {
        // expected
    }
    @out.Dispose();
    dir.Dispose();
}
/// <summary>
/// Simulates a full disk: when the tracked usage plus <paramref name="len"/> would
/// exceed <c>dir.maxSize</c>, writes whatever partial data still fits into the
/// delegate and then throws an <see cref="System.IO.IOException"/> describing the
/// fake disk-full condition. Exactly one of <paramref name="b"/> (a byte buffer)
/// or <paramref name="in"/> (a DataInput) supplies the pending bytes.
/// </summary>
private void CheckDiskFull(byte[] b, int offset, DataInput @in, long len)
{
    long bytesFree = dir.maxSize == 0 ? 0 : dir.maxSize - dir.GetSizeInBytes();
    long actualUsage = 0;

    // Enforce disk full:
    if (dir.maxSize != 0 && bytesFree <= len)
    {
        // Compute the real disk free. This will greatly slow
        // down our test but makes it more accurate:
        actualUsage = dir.GetRecomputedActualSizeInBytes();
        bytesFree = dir.maxSize - actualUsage;
    }

    if (dir.maxSize == 0 || bytesFree > len)
    {
        return; // still enough room (or quota disabled) -- nothing to do
    }

    if (bytesFree > 0)
    {
        // Write out the portion that still fits before failing.
        actualUsage += bytesFree;
        if (b is null)
        {
            @delegate.CopyBytes(@in, len);
        }
        else
        {
            @delegate.WriteBytes(b, offset, (int)bytesFree);
        }
    }
    if (actualUsage > dir.maxUsedSize)
    {
        dir.maxUsedSize = actualUsage;
    }

    string msg = $"fake disk full at {dir.GetRecomputedActualSizeInBytes()} bytes when writing {name} (file length={@delegate.Length}";
    if (bytesFree > 0)
    {
        msg += $"; wrote {bytesFree} of {len} bytes";
    }
    msg += ")";
    // LUCENENET TODO: Finish implementation -- the Java version prints a
    // stack trace here when LuceneTestCase.VERBOSE is enabled.
    throw new System.IO.IOException(msg);
}
/// <summary>
/// Copies the contents of <paramref name="fileEntry"/>'s file into
/// <paramref name="dataOut"/>, records the entry's starting offset, and
/// deletes the source file once the copy succeeds.
/// </summary>
/// <returns>The number of bytes copied.</returns>
/// <exception cref="IOException">
/// If the output did not advance by exactly the source file's length.
/// </exception>
private static long CopyFileEntry(IndexOutput dataOut, FileEntry fileEntry) // LUCENENET: CA1822: Mark members as static
{
    IndexInput input = fileEntry.Dir.OpenInput(fileEntry.File, IOContext.READ_ONCE);
    bool copied = false;
    try
    {
        long offset = dataOut.GetFilePointer();
        long expectedLength = fileEntry.Length;
        dataOut.CopyBytes(input, expectedLength);

        // Verify that the output length diff is equal to original file
        long written = dataOut.GetFilePointer() - offset;
        if (written != expectedLength)
        {
            throw new IOException("Difference in the output file offsets " + written + " does not match the original file length " + expectedLength);
        }
        fileEntry.Offset = offset;
        copied = true;
        return expectedLength;
    }
    finally
    {
        if (copied)
        {
            IOUtils.Dispose(input);
            // copy successful - delete file
            fileEntry.Dir.DeleteFile(fileEntry.File);
        }
        else
        {
            // Don't mask the original exception with a close failure.
            IOUtils.DisposeWhileHandlingException(input);
        }
    }
}
/// <summary>
/// Verifies that both <c>WriteBytes</c> and <c>CopyBytes</c> throw an
/// <see cref="IOException"/> (fake disk full) once the mock directory's
/// <c>MaxSizeInBytes</c> quota (3 bytes here) is exceeded.
/// </summary>
public void TestDiskFull()
{
    byte[] bytes = new byte[] { 1, 2 };

    // test writeBytes
    using (MockDirectoryWrapper dir = NewMockDirectory())
    {
        dir.MaxSizeInBytes = 3;
        using (IndexOutput @out = dir.CreateOutput("foo", IOContext.DEFAULT))
        {
            @out.WriteBytes(bytes, bytes.Length); // first write should succeed
            // close() to ensure the written bytes are not buffered and counted
            // against the directory size
        } // @out.close();

        using (IndexOutput @out = dir.CreateOutput("bar", IOContext.DEFAULT))
        {
            try
            {
                @out.WriteBytes(bytes, bytes.Length);
                fail("should have failed on disk full");
            }
            // BUGFIX: must catch IOException only. Catching Exception here also
            // swallowed the assertion thrown by fail() above, so this test could
            // never report a failure.
            catch (IOException)
            {
                // expected
            }
        } // @out.close();
    } // dir.close();

    // test copyBytes
    using (MockDirectoryWrapper dir = NewMockDirectory())
    {
        dir.MaxSizeInBytes = 3;
        using (IndexOutput @out = dir.CreateOutput("foo", IOContext.DEFAULT))
        {
            @out.CopyBytes(new ByteArrayDataInput(bytes), bytes.Length); // first copy should succeed
            // close() to ensure the written bytes are not buffered and counted
            // against the directory size
        } // @out.close();

        using (IndexOutput @out = dir.CreateOutput("bar", IOContext.DEFAULT))
        {
            try
            {
                @out.CopyBytes(new ByteArrayDataInput(bytes), bytes.Length);
                fail("should have failed on disk full");
            }
            catch (IOException)
            {
                // expected
            }
        } // @out.close();
    } // dir.close();
}
// Copies everything except the final 100 bytes of Src into Dst, then
// disposes Dst. IO errors are rethrown as unchecked exceptions because
// Run() has no way to surface them directly.
public override void Run()
{
    try
    {
        long numBytes = Src.Length - 100;
        Dst.CopyBytes(Src, numBytes);
        Dst.Dispose();
    }
    catch (IOException ioe)
    {
        throw new Exception(ioe.ToString(), ioe);
    }
}
// Copies everything except the final 100 bytes of Src into Dst, then
// disposes Dst. IO errors are rethrown as unchecked exceptions because
// Run() has no way to surface them directly.
public override void Run()
{
    try
    {
        long numBytes = Src.Length() - 100;
        Dst.CopyBytes(Src, numBytes);
        Dst.Dispose();
    }
    catch (IOException ioe)
    {
        throw new Exception(ioe.Message, ioe);
    }
}
// Copies everything except the final 100 bytes of src into dst, then
// disposes dst. IO errors are wrapped into a RuntimeException because
// Run() has no way to surface them directly.
public override void Run()
{
    try
    {
        long numBytes = src.Length - 100;
        dst.CopyBytes(src, numBytes);
        dst.Dispose();
    }
    catch (Exception ioe) when (ioe.IsIOException())
    {
        throw RuntimeException.Create(ioe);
    }
}
/// <summary>Bulk write a contiguous series of documents. The
/// lengths array is the length (in bytes) of each raw
/// document. The stream IndexInput is the
/// fieldsStream from which we should bulk-copy all
/// bytes.
/// </summary>
internal void AddRawDocuments(IndexInput stream, int[] lengths, int numDocs, IState state)
{
    long start = fieldsStream.FilePointer;
    long end = start;
    // Record each document's starting offset in the index stream while
    // accumulating the total number of bytes to copy.
    for (int docNum = 0; docNum < numDocs; docNum++)
    {
        indexStream.WriteLong(end);
        end += lengths[docNum];
    }
    // Bulk-copy all document bytes in a single call.
    fieldsStream.CopyBytes(stream, end - start, state);
    System.Diagnostics.Debug.Assert(fieldsStream.FilePointer == end);
}
/// <summary>
/// Writes random bytes to a file, copies a 100-byte header, then has ten
/// threads concurrently <c>CopyBytes</c> the remainder from clones of the
/// same <c>IndexInput</c>, finally verifying every copy matches the source.
/// </summary>
public virtual void TestCopyBytesWithThreads()
{
    int datalen = TestUtil.NextInt(Random(), 101, 10000);
    byte[] data = new byte[datalen];
    Random().NextBytes(data);

    Directory d = NewDirectory();
    IndexOutput output = d.CreateOutput("data", IOContext.DEFAULT);
    output.WriteBytes(data, 0, datalen);
    output.Dispose();

    IndexInput input = d.OpenInput("data", IOContext.DEFAULT);
    IndexOutput outputHeader = d.CreateOutput("header", IOContext.DEFAULT);
    // copy our 100-byte header
    outputHeader.CopyBytes(input, 100);
    outputHeader.Dispose();

    // now make N copies of the remaining bytes
    var copies = new CopyThread[10];
    for (int t = 0; t < copies.Length; t++)
    {
        // Each thread gets its own clone so the shared input's position is untouched.
        copies[t] = new CopyThread((IndexInput)input.Clone(), d.CreateOutput("copy" + t, IOContext.DEFAULT));
    }
    foreach (CopyThread copier in copies)
    {
        copier.Start();
    }
    foreach (CopyThread copier in copies)
    {
        copier.Join();
    }

    for (int t = 0; t < copies.Length; t++)
    {
        IndexInput copiedData = d.OpenInput("copy" + t, IOContext.DEFAULT);
        byte[] dataCopy = new byte[datalen];
        System.Buffer.BlockCopy(data, 0, dataCopy, 0, 100); // copy the header for easy testing
        copiedData.ReadBytes(dataCopy, 100, datalen - 100);
        Assert.AreEqual(data, dataCopy);
        copiedData.Dispose();
    }
    input.Dispose();
    d.Dispose();
}
/// <summary>
/// Moves a file out of the in-memory cache into the delegate directory:
/// copies all of its bytes across, then deletes the cached copy. Serialized
/// on <c>uncacheLock</c> so only one uncache runs at a time.
/// </summary>
private void UnCache(string fileName)
{
    // Only let one thread uncache at a time; this only
    // happens during commit() or close():
    UninterruptableMonitor.Enter(uncacheLock);
    try
    {
        if (VERBOSE)
        {
            Console.WriteLine("nrtdir.unCache name=" + fileName);
        }
#pragma warning disable 612, 618
        if (!cache.FileExists(fileName))
#pragma warning restore 612, 618
        {
            // Another thread beat us...
            return;
        }
        IOContext context = IOContext.DEFAULT;
        IndexOutput @out = @delegate.CreateOutput(fileName, context);
        IndexInput @in = null;
        try
        {
            // Copy the entire cached file into the delegate directory.
            @in = cache.OpenInput(fileName, context);
            @out.CopyBytes(@in, @in.Length);
        }
        finally
        {
            IOUtils.Dispose(@in, @out);
        }

        // Lock order: uncacheLock -> this
        UninterruptableMonitor.Enter(this);
        try
        {
            // Must sync here because other sync methods have
            // if (cache.fileExists(name)) { ... } else { ... }:
            cache.DeleteFile(fileName);
        }
        finally
        {
            UninterruptableMonitor.Exit(this);
        }
    }
    finally
    {
        UninterruptableMonitor.Exit(uncacheLock);
    }
}
/// <summary> Do a bulk copy of numDocs documents from reader to our
/// streams. This is used to expedite merging, if the
/// field numbers are congruent.
/// </summary>
internal void AddRawDocuments(TermVectorsReader reader, int[] tvdLengths, int[] tvfLengths, int numDocs, IState state)
{
    long tvdStart = tvd.FilePointer;
    long tvfStart = tvf.FilePointer;
    long tvdEnd = tvdStart;
    long tvfEnd = tvfStart;
    // Write the per-document tvd/tvf offsets into the index stream (tvx)
    // while accumulating the total byte counts to copy from each source.
    for (int docNum = 0; docNum < numDocs; docNum++)
    {
        tvx.WriteLong(tvdEnd);
        tvdEnd += tvdLengths[docNum];
        tvx.WriteLong(tvfEnd);
        tvfEnd += tvfLengths[docNum];
    }
    // Bulk-copy the raw document and field bytes as two contiguous chunks.
    tvd.CopyBytes(reader.GetTvdStream(), tvdEnd - tvdStart, state);
    tvf.CopyBytes(reader.GetTvfStream(), tvfEnd - tvfStart, state);
    System.Diagnostics.Debug.Assert(tvd.FilePointer == tvdEnd);
    System.Diagnostics.Debug.Assert(tvf.FilePointer == tvfEnd);
}
/// <summary>
/// Copies the file <paramref name="src"/> to <seealso cref="Directory"/> <paramref name="to"/> under the new
/// file name <paramref name="dest"/>.
/// <para/>
/// If you want to copy the entire source directory to the destination one, you
/// can do so like this:
///
/// <code>
/// Directory to; // the directory to copy to
/// foreach (string file in dir.ListAll()) {
///     dir.Copy(to, file, newFile, IOContext.DEFAULT); // newFile can be either file, or a new name
/// }
/// </code>
/// <para/>
/// <b>NOTE:</b> this method does not check whether <paramref name="dest"/> exist and will
/// overwrite it if it does.
/// </summary>
public virtual void Copy(Directory to, string src, string dest, IOContext context)
{
    IndexOutput output = null;
    IndexInput input = null;
    IOException priorEx = null;
    try
    {
        output = to.CreateOutput(dest, context);
        input = OpenInput(src, context);
        output.CopyBytes(input, input.Length);
    }
    catch (IOException ioe)
    {
        // Remember the copy failure so cleanup errors don't mask it.
        priorEx = ioe;
    }
    finally
    {
        bool closedOk = false;
        try
        {
            // Rethrows priorEx (if any) after closing both streams.
            IOUtils.DisposeWhileHandlingException(priorEx, output, input);
            closedOk = true;
        }
        finally
        {
            if (!closedOk)
            {
                // Best effort: remove the (possibly partial) destination file.
                try
                {
                    to.DeleteFile(dest);
                }
                catch (Exception)
                {
                }
            }
        }
    }
}
/// <summary>
/// Moves a file out of the in-memory cache into the delegate directory:
/// copies all of its bytes across, then deletes the cached copy. Serialized
/// on <c>UncacheLock</c> so only one uncache runs at a time.
/// </summary>
private void UnCache(string fileName)
{
    // Only let one thread uncache at a time; this only
    // happens during commit() or close():
    lock (UncacheLock)
    {
        if (VERBOSE)
        {
            Console.WriteLine("nrtdir.unCache name=" + fileName);
        }
        if (!Cache.FileExists(fileName))
        {
            // Another thread beat us...
            return;
        }
        IOContext context = IOContext.DEFAULT;
        IndexOutput @out = @delegate.CreateOutput(fileName, context);
        IndexInput @in = null;
        try
        {
            // Copy the entire cached file into the delegate directory.
            @in = Cache.OpenInput(fileName, context);
            @out.CopyBytes(@in, @in.Length());
        }
        finally
        {
            IOUtils.Close(@in, @out);
        }

        // Lock order: uncacheLock -> this
        lock (this)
        {
            // Must sync here because other sync methods have
            // if (cache.fileExists(name)) { ... } else { ... }:
            Cache.DeleteFile(fileName);
        }
    }
}
/// <summary>
/// Randomized round-trip test for <c>IndexOutput.CopyBytes</c>: writes a file
/// of predictable bytes (via <c>Value(upto)</c>), copies it to a second file
/// using a random mix of single-byte writes and CopyBytes chunks, then reads
/// the copy back with a random mix of single-byte and bulk reads, verifying
/// every byte.
/// </summary>
public virtual void TestCopyBytesMem()
{
    int num = AtLeast(10);
    for (int iter = 0; iter < num; iter++)
    {
        Directory dir = NewDirectory();
        if (VERBOSE)
        {
            Console.WriteLine("TEST: iter=" + iter + " dir=" + dir);
        }

        // make random file
        IndexOutput @out = dir.CreateOutput("test", NewIOContext(Random()));
        var bytes = new byte[TestUtil.NextInt(Random(), 1, 77777)];
        int size = TestUtil.NextInt(Random(), 1, 1777777);
        int upto = 0;
        int byteUpto = 0;
        while (upto < size)
        {
            // Fill the scratch buffer with the deterministic byte for each position,
            // flushing it whenever it fills up.
            bytes[byteUpto++] = Value(upto);
            upto++;
            if (byteUpto == bytes.Length)
            {
                @out.WriteBytes(bytes, 0, bytes.Length);
                byteUpto = 0;
            }
        }

        // Flush any remaining partial buffer.
        @out.WriteBytes(bytes, 0, byteUpto);
        Assert.AreEqual(size, @out.GetFilePointer());
        @out.Dispose();
        Assert.AreEqual(size, dir.FileLength("test"));

        // copy from test -> test2
        IndexInput @in = dir.OpenInput("test", NewIOContext(Random()));

        @out = dir.CreateOutput("test2", NewIOContext(Random()));

        upto = 0;
        while (upto < size)
        {
            if (Random().NextBoolean())
            {
                // Randomly alternate between single-byte copies...
                @out.WriteByte(@in.ReadByte());
                upto++;
            }
            else
            {
                // ...and bulk CopyBytes of a random-sized chunk.
                int chunk = Math.Min(TestUtil.NextInt(Random(), 1, bytes.Length), size - upto);
                @out.CopyBytes(@in, chunk);
                upto += chunk;
            }
        }
        Assert.AreEqual(size, upto);
        @out.Dispose();
        @in.Dispose();

        // verify
        IndexInput in2 = dir.OpenInput("test2", NewIOContext(Random()));
        upto = 0;
        while (upto < size)
        {
            if (Random().NextBoolean())
            {
                // Verify with a single-byte read...
                var v = in2.ReadByte();
                Assert.AreEqual(Value(upto), v);
                upto++;
            }
            else
            {
                // ...or with a bulk read of a random-sized chunk.
                int limit = Math.Min(TestUtil.NextInt(Random(), 1, bytes.Length), size - upto);
                in2.ReadBytes(bytes, 0, limit);
                for (int byteIdx = 0; byteIdx < limit; byteIdx++)
                {
                    Assert.AreEqual(Value(upto), bytes[byteIdx]);
                    upto++;
                }
            }
        }
        in2.Dispose();

        dir.DeleteFile("test");
        dir.DeleteFile("test2");

        dir.Dispose();
    }
}
/// <summary>
/// Copies the contents of <paramref name="fileEntry"/>'s file into
/// <paramref name="dataOut"/>, records the entry's starting offset, and
/// deletes the source file once the copy succeeds.
/// </summary>
/// <returns>The number of bytes copied.</returns>
private long CopyFileEntry(IndexOutput dataOut, FileEntry fileEntry)
{
    IndexInput input = fileEntry.Dir.OpenInput(fileEntry.File, IOContext.READONCE);
    bool copied = false;
    try
    {
        long offset = dataOut.FilePointer;
        long expectedLength = fileEntry.Length;
        dataOut.CopyBytes(input, expectedLength);

        // Verify that the output length diff is equal to original file
        long written = dataOut.FilePointer - offset;
        if (written != expectedLength)
        {
            throw new System.IO.IOException("Difference in the output file offsets " + written + " does not match the original file length " + expectedLength);
        }
        fileEntry.Offset = offset;
        copied = true;
        return expectedLength;
    }
    finally
    {
        if (copied)
        {
            IOUtils.Close(input);
            // copy successful - delete file
            fileEntry.Dir.DeleteFile(fileEntry.File);
        }
        else
        {
            // Don't mask the original exception with a close failure.
            IOUtils.CloseWhileHandlingException(input);
        }
    }
}