/// <summary>Copy the current file content into the temporary file.</summary>
/// <remarks>
/// Copy the current file content into the temporary file.
/// <p>
/// This method saves the current file content by inserting it into the
/// temporary file, so that the caller can safely append rather than replace
/// the primary file.
/// <p>
/// This method does nothing if the current file does not exist, or exists
/// but is empty.
/// </remarks>
/// <exception cref="System.IO.IOException">
/// the temporary file could not be written, or a read error
/// occurred while reading from the current file. The lock is
/// released before throwing the underlying IO exception to the
/// caller.
/// </exception>
/// <exception cref="Sharpen.RuntimeException">
/// the temporary file could not be written. The lock is released
/// before throwing the underlying exception to the caller.
/// </exception>
public virtual void CopyCurrentContent()
{
	RequireLock();
	try
	{
		FileInputStream fis = new FileInputStream(@ref);
		try
		{
			if (fsync)
			{
				// Channel-to-channel transfer; loop because TransferFrom may
				// move fewer bytes than requested in a single call.
				FileChannel @in = fis.GetChannel();
				long pos = 0;
				long cnt = @in.Size();
				while (0 < cnt)
				{
					long r = os.GetChannel().TransferFrom(@in, pos, cnt);
					pos += r;
					cnt -= r;
				}
			}
			else
			{
				// Plain buffered copy when fsync is not requested.
				byte[] buf = new byte[2048];
				int r;
				while ((r = fis.Read(buf)) >= 0)
				{
					os.Write(buf, 0, r);
				}
			}
		}
		finally
		{
			fis.Close();
		}
	}
	catch (FileNotFoundException)
	{
		// Don't worry about a file that doesn't exist yet, it
		// conceptually has no current content to copy.
	}
	catch (IOException)
	{
		// Release the lock before propagating, per the documented contract.
		Unlock();
		throw;
	}
	catch (RuntimeException)
	{
		Unlock();
		throw;
	}
	catch (Error)
	{
		Unlock();
		throw;
	}
}
/// <exception cref="System.IO.IOException"></exception>
/// <exception cref="System.IO.FileNotFoundException"></exception>
/// <exception cref="Sharpen.Error"></exception>
private FilePath ToTemp(MessageDigest md, int type, long len, InputStream @is)
{
	// Stream len bytes of object content into a fresh temporary file,
	// compressing and digesting as we go. On any failure the temporary
	// file is removed before the exception propagates.
	FilePath tmp = NewTempFile();
	bool keep = false;
	try
	{
		FileOutputStream fOut = new FileOutputStream(tmp);
		try
		{
			OutputStream sink = fOut;
			if (config.GetFSyncObjectFiles())
			{
				// Route through the channel so Force(true) below is effective.
				sink = Channels.NewOutputStream(fOut.GetChannel());
			}
			DeflaterOutputStream compressed = Compress(sink);
			DigestOutputStream digesting = new DigestOutputStream(compressed, md);
			WriteHeader(digesting, type, len);
			byte[] copyBuf = Buffer();
			long remaining = len;
			while (remaining > 0)
			{
				int n = @is.Read(copyBuf, 0, (int)Math.Min(remaining, copyBuf.Length));
				if (n <= 0)
				{
					// Source ended before the advertised length was consumed.
					throw ShortInput(remaining);
				}
				digesting.Write(copyBuf, 0, n);
				remaining -= n;
			}
			digesting.Flush();
			compressed.Finish();
		}
		finally
		{
			if (config.GetFSyncObjectFiles())
			{
				fOut.GetChannel().Force(true);
			}
			fOut.Close();
		}
		keep = true;
		return tmp;
	}
	finally
	{
		if (!keep)
		{
			FileUtils.Delete(tmp);
		}
	}
}
/// <summary>
/// Verifies that a truncated (corrupted) edits file can only be parsed with
/// recovery mode enabled, and that an XML round trip of the recovered edits
/// is lossless.
/// </summary>
public virtual void TestRecoveryMode()
{
	// edits generated by nnHelper (MiniDFSCluster), should have all op codes
	// binary, XML, reparsed binary
	string edits = nnHelper.GenerateEdits();
	FileOutputStream os = new FileOutputStream(edits, true);
	try
	{
		// Corrupt the file by truncating the end
		FileChannel editsFile = os.GetChannel();
		editsFile.Truncate(editsFile.Size() - 5);
	}
	finally
	{
		// Close before re-reading: the original closed only at the end of the
		// method, leaking the handle whenever an assertion failed and keeping
		// the file open across all of the RunOev reads below.
		os.Close();
	}
	string editsParsedXml = folder.NewFile("editsRecoveredParsed.xml").GetAbsolutePath();
	string editsReparsed = folder.NewFile("editsRecoveredReparsed").GetAbsolutePath();
	string editsParsedXml2 = folder.NewFile("editsRecoveredParsed2.xml").GetAbsolutePath();
	// Can't read the corrupted file without recovery mode
	NUnit.Framework.Assert.AreEqual(-1, RunOev(edits, editsParsedXml, "xml", false));
	// parse to XML then back to binary
	NUnit.Framework.Assert.AreEqual(0, RunOev(edits, editsParsedXml, "xml", true));
	NUnit.Framework.Assert.AreEqual(0, RunOev(editsParsedXml, editsReparsed, "binary", false));
	NUnit.Framework.Assert.AreEqual(0, RunOev(editsReparsed, editsParsedXml2, "xml", false));
	// judgment time
	NUnit.Framework.Assert.IsTrue("Test round trip", FileUtils.ContentEqualsIgnoreEOL(new FilePath(editsParsedXml), new FilePath(editsParsedXml2), "UTF-8"));
}
/// <summary>Truncate a block file</summary>
/// <returns>the id of the replica whose block file was truncated, or 0 if none qualified</returns>
/// <exception cref="System.IO.IOException"/>
private long TruncateBlockFile()
{
	lock (fds)
	{
		foreach (ReplicaInfo replica in FsDatasetTestUtil.GetReplicas(fds, bpid))
		{
			FilePath blockFile = replica.GetBlockFile();
			FilePath metaFile = replica.GetMetaFile();
			// Only truncate a non-empty block file that has a corresponding
			// metadata file; skip everything else.
			if (!blockFile.Exists() || blockFile.Length() == 0 || !metaFile.Exists())
			{
				continue;
			}
			FileOutputStream stream = null;
			FileChannel ch = null;
			try
			{
				stream = new FileOutputStream(blockFile);
				ch = stream.GetChannel();
				ch.Truncate(0);
				Log.Info("Truncated block file " + blockFile.GetAbsolutePath());
				return replica.GetBlockId();
			}
			finally
			{
				IOUtils.Cleanup(Log, ch, stream);
			}
		}
	}
	return 0;
}
/// <summary>
/// Writes a known byte pattern to a file, memory-maps it, mlocks the
/// mapping, and verifies the mapped bytes sum to the expected total before
/// munmapping (which also implicitly unlocks).
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestMlock()
{
	Assume.AssumeTrue(NativeIO.IsAvailable());
	FilePath testFile = new FilePath(new FilePath(Runtime.GetProperty("test.build.data", "build/test/data")), "testMlockFile");
	int bufLen = 12289;
	byte[] pattern = new byte[bufLen];
	int expectedSum = 0;
	for (int idx = 0; idx < pattern.Length; idx++)
	{
		pattern[idx] = unchecked((byte)(idx % 60));
		expectedSum += pattern[idx];
	}
	FileOutputStream fos = new FileOutputStream(testFile);
	try
	{
		fos.Write(pattern);
		fos.GetChannel().Force(true);
	}
	finally
	{
		fos.Close();
	}
	FileInputStream fis = null;
	FileChannel channel = null;
	try
	{
		// Map file into memory
		fis = new FileInputStream(testFile);
		channel = fis.GetChannel();
		long fileSize = channel.Size();
		MappedByteBuffer mapped = channel.Map(FileChannel.MapMode.ReadOnly, 0, fileSize);
		// mlock the buffer
		NativeIO.POSIX.Mlock(mapped, fileSize);
		// Read the buffer back and compare checksums.
		int actualSum = 0;
		for (int pos = 0; pos < fileSize; pos++)
		{
			actualSum += mapped.Get(pos);
		}
		Assert.Equal("Expected sums to be equal", expectedSum, actualSum);
		// munmap the buffer, which also implicitly unlocks it
		NativeIO.POSIX.Munmap(mapped);
	}
	finally
	{
		if (channel != null)
		{
			channel.Close();
		}
		if (fis != null)
		{
			fis.Close();
		}
	}
}
/// <summary>
/// Truncates the file at <paramref name="f"/> to <paramref name="newLength"/>
/// bytes. Fails for directories, missing files, or a target length larger
/// than the current size.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public override bool Truncate(Path f, long newLength)
{
	FileStatus status = GetFileStatus(f);
	if (status == null)
	{
		throw new FileNotFoundException("File " + f + " not found");
	}
	if (status.IsDirectory())
	{
		throw new IOException("Cannot truncate a directory (=" + f + ")");
	}
	long oldLength = status.GetLen();
	if (newLength > oldLength)
	{
		// Growing a file is not what truncate means; reject it.
		throw new ArgumentException("Cannot truncate to a larger file size. Current size: " + oldLength + ", truncate size: " + newLength + ".");
	}
	using (FileOutputStream handle = new FileOutputStream(PathToFile(f), true))
	{
		try
		{
			handle.GetChannel().Truncate(newLength);
		}
		catch (IOException e)
		{
			// Surface local-filesystem failures as FSError, per FileSystem contract.
			throw new FSError(e);
		}
	}
	return true;
}
/// <summary>
/// Appends one reflog record to the log file for <paramref name="refName"/>,
/// creating the parent directory on demand and optionally fsyncing per the
/// repository's write configuration. No-op when logging is disabled for the
/// ref and no log file already exists.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private NGit.Storage.File.ReflogWriter Log(string refName, byte[] rec)
{
	FilePath logFile = LogFor(refName);
	bool shouldWrite = forceWrite || (IsLogAllRefUpdates() && ShouldAutoCreateLog(refName)) || logFile.IsFile();
	if (!shouldWrite)
	{
		return this;
	}
	WriteConfig wc = GetRepository().GetConfig().Get(WriteConfig.KEY);
	FileOutputStream stream;
	try
	{
		stream = new FileOutputStream(logFile, true);
	}
	catch (FileNotFoundException)
	{
		// Parent directory is most likely missing: if it already exists the
		// failure is something else, so rethrow; otherwise create it and retry once.
		FilePath dir = logFile.GetParentFile();
		if (dir.Exists())
		{
			throw;
		}
		if (!dir.Mkdirs() && !dir.IsDirectory())
		{
			throw new IOException(MessageFormat.Format(JGitText.Get().cannotCreateDirectory, dir));
		}
		stream = new FileOutputStream(logFile, true);
	}
	try
	{
		if (!wc.GetFSyncRefFiles())
		{
			stream.Write(rec);
		}
		else
		{
			// Channel write so the record can be forced to stable storage;
			// loop because a single Write may be partial.
			FileChannel fc = stream.GetChannel();
			ByteBuffer buf = ByteBuffer.Wrap(rec);
			while (buf.Remaining() > 0)
			{
				fc.Write(buf);
			}
			fc.Force(true);
		}
	}
	finally
	{
		stream.Close();
	}
	return this;
}
/// <exception cref="System.IO.IOException"></exception>
/// <exception cref="System.IO.FileNotFoundException"></exception>
private FilePath ToTemp(int type, byte[] buf, int pos, int len)
{
	// Write an in-memory object payload into a fresh temporary file,
	// compressed with an object header. On failure the temp file is deleted
	// before the exception escapes.
	FilePath tmp = NewTempFile();
	bool keep = false;
	try
	{
		FileOutputStream fOut = new FileOutputStream(tmp);
		try
		{
			OutputStream sink = fOut;
			if (config.GetFSyncObjectFiles())
			{
				// Route through the channel so Force(true) below is effective.
				sink = Channels.NewOutputStream(fOut.GetChannel());
			}
			DeflaterOutputStream compressed = Compress(sink);
			WriteHeader(compressed, type, len);
			compressed.Write(buf, pos, len);
			compressed.Finish();
		}
		finally
		{
			if (config.GetFSyncObjectFiles())
			{
				fOut.GetChannel().Force(true);
			}
			fOut.Close();
		}
		keep = true;
		return tmp;
	}
	finally
	{
		if (!keep)
		{
			FileUtils.Delete(tmp);
		}
	}
}
/// <summary>
/// Serializes the complete FSImage (name system, inodes, snapshots, secret
/// manager, cache pools, string table and trailing file summary) to the
/// given stream, reporting startup progress per section and recording an
/// MD5 digest of everything written.
/// </summary>
/// <param name="fout">destination stream for the image file</param>
/// <param name="compression">supplies the codec (if any) used to compress section bodies</param>
/// <param name="filePath">image file path; used only to label progress steps</param>
/// <exception cref="System.IO.IOException"/>
private void SaveInternal(FileOutputStream fout, FSImageCompression compression, string filePath)
{
	StartupProgress prog = NameNode.GetStartupProgress();
	MessageDigest digester = MD5Hash.GetDigester();
	// Every byte goes through the digest stream so the final MD5 covers the
	// whole file, including the uncompressed magic header and file summary.
	underlyingOutputStream = new DigestOutputStream(new BufferedOutputStream(fout), digester);
	underlyingOutputStream.Write(FSImageUtil.MagicHeader);
	fileChannel = fout.GetChannel();
	FsImageProto.FileSummary.Builder b = FsImageProto.FileSummary.NewBuilder().SetOndiskVersion(FSImageUtil.FileVersion).SetLayoutVersion(NameNodeLayoutVersion.CurrentLayoutVersion);
	codec = compression.GetImageCodec();
	if (codec != null)
	{
		// Section bodies are compressed; the codec name is recorded in the
		// summary so readers can decompress.
		b.SetCodec(codec.GetType().GetCanonicalName());
		sectionOutputStream = codec.CreateOutputStream(underlyingOutputStream);
	}
	else
	{
		sectionOutputStream = underlyingOutputStream;
	}
	SaveNameSystemSection(b);
	// Check for cancellation right after serializing the name system section.
	// Some unit tests, such as TestSaveNamespace#testCancelSaveNameSpace
	// depends on this behavior.
	context.CheckCancelled();
	Step step = new Step(StepType.Inodes, filePath);
	prog.BeginStep(Phase.SavingCheckpoint, step);
	SaveInodes(b);
	SaveSnapshots(b);
	prog.EndStep(Phase.SavingCheckpoint, step);
	step = new Step(StepType.DelegationTokens, filePath);
	prog.BeginStep(Phase.SavingCheckpoint, step);
	SaveSecretManagerSection(b);
	prog.EndStep(Phase.SavingCheckpoint, step);
	step = new Step(StepType.CachePools, filePath);
	prog.BeginStep(Phase.SavingCheckpoint, step);
	SaveCacheManagerSection(b);
	prog.EndStep(Phase.SavingCheckpoint, step);
	SaveStringTableSection(b);
	// We use the underlyingOutputStream to write the header. Therefore flush
	// the buffered stream (which is potentially compressed) first.
	FlushSectionOutputStream();
	FsImageProto.FileSummary summary = ((FsImageProto.FileSummary)b.Build());
	SaveFileSummary(underlyingOutputStream, summary);
	underlyingOutputStream.Close();
	// Digest is only final after the stream is closed and fully flushed.
	savedDigest = new MD5Hash(digester.Digest());
}
/// <summary>
/// Opens <paramref name="file"/> for writing and delegates the actual image
/// serialization to SaveInternal, ensuring the stream is closed afterwards.
/// </summary>
/// <exception cref="System.IO.IOException"/>
internal void Save(FilePath file, FSImageCompression compression)
{
	FileOutputStream imageOut = new FileOutputStream(file);
	fileChannel = imageOut.GetChannel();
	try
	{
		SaveInternal(imageOut, compression, file.GetAbsolutePath());
	}
	finally
	{
		imageOut.Close();
	}
}
/// <summary>
/// Copies the first 700 bytes of <paramref name="src"/> into
/// <paramref name="dest"/> via a channel transfer.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private void CopyPartOfFile(FilePath src, FilePath dest)
{
	int maxBytes = 700;
	FileInputStream source = null;
	FileOutputStream target = null;
	try
	{
		source = new FileInputStream(src);
		target = new FileOutputStream(dest);
		source.GetChannel().TransferTo(0, maxBytes, target.GetChannel());
	}
	finally
	{
		IOUtils.Cleanup(null, source);
		IOUtils.Cleanup(null, target);
	}
}
/// <summary>Writing the data into a local file.</summary>
/// <remarks>
/// Writing the data into a local file. After the writing, if
/// <see cref="dataState"/>
/// is still ALLOW_DUMP, set
/// <see cref="data"/>
/// to null and set
/// <see cref="dataState"/>
/// to DUMPED.
/// </remarks>
/// <param name="dumpOut">stream positioned at the end of the dump file</param>
/// <param name="raf">handle retained for reading the dumped bytes back later</param>
/// <returns>number of bytes dumped, or 0 when nothing was dumped</returns>
/// <exception cref="System.IO.IOException"/>
internal virtual long DumpData(FileOutputStream dumpOut, RandomAccessFile raf)
{
	if (dataState != WriteCtx.DataState.AllowDump)
	{
		if (Log.IsTraceEnabled())
		{
			Log.Trace("No need to dump with status(replied,dataState):" + "(" + replied + "," + dataState + ")");
		}
		return(0);
	}
	// Resized write should not allow dump
	Preconditions.CheckState(originalCount == InvalidOriginalCount);
	this.raf = raf;
	// Remember where this record starts so it can be read back from the dump file.
	dumpFileOffset = dumpOut.GetChannel().Position();
	dumpOut.Write(((byte[])data.Array()), 0, count);
	if (Log.IsDebugEnabled())
	{
		Log.Debug("After dump, new dumpFileOffset:" + dumpFileOffset);
	}
	// it is possible that while we dump the data, the data is also being
	// written back to HDFS. After dump, if the writing back has not finished
	// yet, we change its flag to DUMPED and set the data to null. Otherwise
	// this WriteCtx instance should have been removed from the buffer.
	// NOTE: double-checked pattern — the unsynchronized read is a fast path;
	// the state transition itself happens under the lock.
	if (dataState == WriteCtx.DataState.AllowDump)
	{
		lock (this)
		{
			if (dataState == WriteCtx.DataState.AllowDump)
			{
				data = null;
				dataState = WriteCtx.DataState.Dumped;
				return(count);
			}
		}
	}
	// State changed concurrently (e.g. write-back completed); report nothing dumped.
	return(0);
}
/// <summary> /// Unbuffered file copy from src to dst without tainting OS buffer cache /// In POSIX platform: /// It uses FileChannel#transferTo() which internally attempts /// unbuffered IO on OS with native sendfile64() support and falls back to /// buffered IO otherwise. /// </summary> /// <remarks> /// Unbuffered file copy from src to dst without tainting OS buffer cache /// In POSIX platform: /// It uses FileChannel#transferTo() which internally attempts /// unbuffered IO on OS with native sendfile64() support and falls back to /// buffered IO otherwise. /// It minimizes the number of FileChannel#transferTo call by passing the the /// src file size directly instead of a smaller size as the 3rd parameter. /// This saves the number of sendfile64() system call when native sendfile64() /// is supported. In the two fall back cases where sendfile is not supported, /// FileChannle#transferTo already has its own batching of size 8 MB and 8 KB, /// respectively. /// In Windows Platform: /// It uses its own native wrapper of CopyFileEx with COPY_FILE_NO_BUFFERING /// flag, which is supported on Windows Server 2008 and above. /// Ideally, we should use FileChannel#transferTo() across both POSIX and Windows /// platform. Unfortunately, the wrapper(Java_sun_nio_ch_FileChannelImpl_transferTo0) /// used by FileChannel#transferTo for unbuffered IO is not implemented on Windows. /// Based on OpenJDK 6/7/8 source code, Java_sun_nio_ch_FileChannelImpl_transferTo0 /// on Windows simply returns IOS_UNSUPPORTED. /// Note: This simple native wrapper does minimal parameter checking before copy and /// consistency check (e.g., size) after copy. /// It is recommended to use wrapper function like /// the Storage#nativeCopyFileUnbuffered() function in hadoop-hdfs with pre/post copy /// checks. 
/// </remarks>
/// <param name="src">The source path</param>
/// <param name="dst">The destination path</param>
/// <exception cref="System.IO.IOException"/>
public static void CopyFileUnbuffered(FilePath src, FilePath dst)
{
	if (nativeLoaded && Shell.Windows)
	{
		// Native path: CopyFileEx with the no-buffering flag.
		CopyFileUnbuffered0(src.GetAbsolutePath(), dst.GetAbsolutePath());
		return;
	}
	FileInputStream fis = null;
	FileOutputStream fos = null;
	FileChannel input = null;
	FileChannel output = null;
	try
	{
		fis = new FileInputStream(src);
		fos = new FileOutputStream(dst);
		input = fis.GetChannel();
		output = fos.GetChannel();
		long position = 0;
		long remaining = input.Size();
		// TransferTo may move fewer bytes than requested; loop until done.
		while (remaining > 0)
		{
			long moved = input.TransferTo(position, remaining, output);
			position += moved;
			remaining -= moved;
		}
	}
	finally
	{
		IOUtils.Cleanup(Log, output);
		IOUtils.Cleanup(Log, fos);
		IOUtils.Cleanup(Log, input);
		IOUtils.Cleanup(Log, fis);
	}
}
/// <summary>
/// Writes the pack index for the received objects to the temporary index
/// file and forces it to stable storage before closing.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private void WriteIdx()
{
	IList<PackedObjectInfo> sorted = GetSortedObjectList(null);
	FileOutputStream idxOut = new FileOutputStream(tmpIdx);
	try
	{
		// Non-positive indexVersion means "pick the oldest format that can
		// represent this object list"; otherwise honor the configured version.
		PackIndexWriter writer = indexVersion <= 0
			? PackIndexWriter.CreateOldestPossible(idxOut, sorted)
			: PackIndexWriter.CreateVersion(idxOut, indexVersion);
		writer.Write(sorted, packHash);
		idxOut.GetChannel().Force(true);
	}
	finally
	{
		idxOut.Close();
	}
}
/// <summary>Write content of index to disk.</summary>
/// <remarks>
/// Write content of index to disk. The index is written to a ".tmp" sibling
/// first (guarded by a ".lock" file) and then renamed over the real index,
/// with a trailing SHA-1 checksum of the index content.
/// </remarks>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
public virtual void Write()
{
	CheckWriteOk();
	FilePath tmpIndex = new FilePath(cacheFile.GetAbsoluteFile() + ".tmp");
	FilePath Lock = new FilePath(cacheFile.GetAbsoluteFile() + ".lock");
	if (!Lock.CreateNewFile())
	{
		throw new IOException(JGitText.Get().indexFileIsInUse);
	}
	try
	{
		FileOutputStream fileOutputStream = new FileOutputStream(tmpIndex);
		// FIX: the original never closed fileOutputStream when a write or
		// digest step threw, leaking the handle and preventing the tmpIndex
		// cleanup below from deleting the file on Windows.
		try
		{
			FileChannel fc = fileOutputStream.GetChannel();
			ByteBuffer buf = ByteBuffer.Allocate(4096);
			MessageDigest newMessageDigest = Constants.NewMessageDigest();
			// Header first, folded into the running digest.
			header = new GitIndex.Header(entries);
			header.Write(buf);
			buf.Flip();
			newMessageDigest.Update(((byte[])buf.Array()), buf.ArrayOffset(), buf.Limit());
			fc.Write(buf);
			buf.Flip();
			buf.Clear();
			// Then each entry, each folded into the digest the same way.
			for (Iterator i = entries.Values.Iterator(); i.HasNext();)
			{
				GitIndex.Entry e = (GitIndex.Entry)i.Next();
				e.Write(buf);
				buf.Flip();
				newMessageDigest.Update(((byte[])buf.Array()), buf.ArrayOffset(), buf.Limit());
				fc.Write(buf);
				buf.Flip();
				buf.Clear();
			}
			// Trailing checksum of everything written so far.
			buf.Put(newMessageDigest.Digest());
			buf.Flip();
			fc.Write(buf);
			fc.Close();
		}
		finally
		{
			// Safe double-close on the success path; the real work is the
			// exception path, where this releases the handle.
			fileOutputStream.Close();
		}
		if (cacheFile.Exists())
		{
			if (db.FileSystem.RetryFailedLockFileCommit())
			{
				// file deletion fails on windows if another
				// thread is reading the file concurrently
				// So let's try 10 times...
				bool deleted = false;
				for (int i_1 = 0; i_1 < 10; i_1++)
				{
					if (cacheFile.Delete())
					{
						deleted = true;
						break;
					}
					try
					{
						Sharpen.Thread.Sleep(100);
					}
					catch (Exception)
					{
					}
				}
				// ignore
				if (!deleted)
				{
					throw new IOException(JGitText.Get().couldNotRenameDeleteOldIndex);
				}
			}
			else
			{
				if (!cacheFile.Delete())
				{
					throw new IOException(JGitText.Get().couldNotRenameDeleteOldIndex);
				}
			}
		}
		if (!tmpIndex.RenameTo(cacheFile))
		{
			throw new IOException(JGitText.Get().couldNotRenameTemporaryIndexFileToIndex);
		}
		changed = false;
		statDirty = false;
		lastCacheTime = cacheFile.LastModified();
		db.FireEvent(new IndexChangedEvent());
	}
	finally
	{
		if (!Lock.Delete())
		{
			throw new IOException(JGitText.Get().couldNotDeleteLockFileShouldNotHappen);
		}
		if (tmpIndex.Exists() && !tmpIndex.Delete())
		{
			throw new IOException(JGitText.Get().couldNotDeleteTemporaryIndexFileShouldNotHappen);
		}
	}
}
/// <summary>
/// Opens output streams for the block data file and its checksum (meta)
/// file. For append/recovery (<c>isCreate == false</c>) the existing
/// on-disk checksum is enforced and both streams are positioned past the
/// bytes already on disk; for create, the requested checksum is used as-is.
/// </summary>
/// <param name="isCreate">true for a brand-new replica; false for append/recovery</param>
/// <param name="requestedChecksum">checksum the client asked for; may be overridden for appends</param>
/// <exception cref="System.IO.IOException"/>
public virtual ReplicaOutputStreams CreateStreams(bool isCreate, DataChecksum requestedChecksum)
{
	// ReplicaInPipelineInterface
	FilePath blockFile = GetBlockFile();
	FilePath metaFile = GetMetaFile();
	if (DataNode.Log.IsDebugEnabled())
	{
		DataNode.Log.Debug("writeTo blockfile is " + blockFile + " of size " + blockFile.Length());
		DataNode.Log.Debug("writeTo metafile is " + metaFile + " of size " + metaFile.Length());
	}
	long blockDiskSize = 0L;
	long crcDiskSize = 0L;
	// the checksum that should actually be used -- this
	// may differ from requestedChecksum for appends.
	DataChecksum checksum;
	RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
	if (!isCreate)
	{
		// For append or recovery, we must enforce the existing checksum.
		// Also, verify that the file has correct lengths, etc.
		bool checkedMeta = false;
		try
		{
			BlockMetadataHeader header = BlockMetadataHeader.ReadHeader(metaRAF);
			checksum = header.GetChecksum();
			if (checksum.GetBytesPerChecksum() != requestedChecksum.GetBytesPerChecksum())
			{
				throw new IOException("Client requested checksum " + requestedChecksum + " when appending to an existing block " + "with different chunk size: " + checksum);
			}
			int bytesPerChunk = checksum.GetBytesPerChecksum();
			int checksumSize = checksum.GetChecksumSize();
			blockDiskSize = bytesOnDisk;
			// Meta size = header + one checksum per (possibly partial) chunk.
			crcDiskSize = BlockMetadataHeader.GetHeaderSize() + (blockDiskSize + bytesPerChunk - 1) / bytesPerChunk * checksumSize;
			if (blockDiskSize > 0 && (blockDiskSize > blockFile.Length() || crcDiskSize > metaFile.Length()))
			{
				throw new IOException("Corrupted block: " + this);
			}
			checkedMeta = true;
		}
		finally
		{
			if (!checkedMeta)
			{
				// clean up in case of exceptions.
				IOUtils.CloseStream(metaRAF);
			}
		}
	}
	else
	{
		// for create, we can use the requested checksum
		checksum = requestedChecksum;
	}
	FileOutputStream blockOut = null;
	FileOutputStream crcOut = null;
	try
	{
		blockOut = new FileOutputStream(new RandomAccessFile(blockFile, "rw").GetFD());
		crcOut = new FileOutputStream(metaRAF.GetFD());
		if (!isCreate)
		{
			// Position both streams at the end of existing data so the
			// caller appends rather than overwrites.
			blockOut.GetChannel().Position(blockDiskSize);
			crcOut.GetChannel().Position(crcDiskSize);
		}
		return(new ReplicaOutputStreams(blockOut, crcOut, checksum, GetVolume().IsTransientStorage()));
	}
	catch (IOException e)
	{
		// NOTE(review): crcOut is not closed here explicitly — presumably
		// closing metaRAF releases the shared file descriptor; confirm.
		IOUtils.CloseStream(blockOut);
		IOUtils.CloseStream(metaRAF);
		throw;
	}
}