/// <summary>Reads bytes into a buffer until EOF or the buffer's limit is reached.</summary>
/// <exception cref="System.IO.IOException"/>
private int FillBuffer(FileInputStream stream, ByteBuffer buf)
{
    TraceScope scope = Trace.StartSpan("BlockReaderLocalLegacy#fillBuffer(" + blockId + ")", Sampler.Never);
    try
    {
        int bytesRead = stream.GetChannel().Read(buf);
        if (bytesRead < 0)
        {
            // EOF
            return bytesRead;
        }
        while (buf.Remaining() > 0)
        {
            int n = stream.GetChannel().Read(buf);
            if (n < 0)
            {
                // EOF
                return bytesRead;
            }
            bytesRead += n;
        }
        return bytesRead;
    }
    finally
    {
        scope.Close();
    }
}
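// For illustration, a minimal standalone sketch of the same read-until-full
// pattern over a plain System.IO.Stream (a hypothetical helper, not part of the
// original class): like the method above, it reports -1 only when EOF is hit
// before anything is read, and otherwise returns the total bytes read.
private static int FillBufferSketch(System.IO.Stream stream, byte[] buf)
{
    int total = 0;
    while (total < buf.Length)
    {
        int n = stream.Read(buf, total, buf.Length - total);
        if (n <= 0)
        {
            // EOF: mirror the method above and report -1 on an immediate EOF.
            return total == 0 ? -1 : total;
        }
        total += n;
    }
    return total;
}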
/// <exception cref="System.Exception"/> public virtual void TestMlock() { Assume.AssumeTrue(NativeIO.IsAvailable()); FilePath TestFile = new FilePath(new FilePath(Runtime.GetProperty("test.build.data" , "build/test/data")), "testMlockFile"); int BufLen = 12289; byte[] buf = new byte[BufLen]; int bufSum = 0; for (int i = 0; i < buf.Length; i++) { buf[i] = unchecked ((byte)(i % 60)); bufSum += buf[i]; } FileOutputStream fos = new FileOutputStream(TestFile); try { fos.Write(buf); fos.GetChannel().Force(true); } finally { fos.Close(); } FileInputStream fis = null; FileChannel channel = null; try { // Map file into memory fis = new FileInputStream(TestFile); channel = fis.GetChannel(); long fileSize = channel.Size(); MappedByteBuffer mapbuf = channel.Map(FileChannel.MapMode.ReadOnly, 0, fileSize); // mlock the buffer NativeIO.POSIX.Mlock(mapbuf, fileSize); // Read the buffer int sum = 0; for (int i_1 = 0; i_1 < fileSize; i_1++) { sum += mapbuf.Get(i_1); } Assert.Equal("Expected sums to be equal", bufSum, sum); // munmap the buffer, which also implicitly unlocks it NativeIO.POSIX.Munmap(mapbuf); } finally { if (channel != null) { channel.Close(); } if (fis != null) { fis.Close(); } } }
/// <summary>
/// Return the length of bytes in the given file after subtracting
/// the trailer of 0xFF (OP_INVALID)s.
/// </summary>
/// <remarks>
/// Return the length of bytes in the given file after subtracting
/// the trailer of 0xFF (OP_INVALID)s.
/// This seeks to the end of the file and reads chunks backwards until
/// it finds a non-0xFF byte.
/// </remarks>
/// <exception cref="System.IO.IOException">if the file cannot be read</exception>
private static long GetNonTrailerLength(FilePath f)
{
    int chunkSizeToRead = 256 * 1024;
    FileInputStream fis = new FileInputStream(f);
    try
    {
        byte[] buf = new byte[chunkSizeToRead];
        FileChannel fc = fis.GetChannel();
        long size = fc.Size();
        long pos = size - (size % chunkSizeToRead);
        while (pos >= 0)
        {
            fc.Position(pos);
            int readLen = (int)Math.Min(size - pos, chunkSizeToRead);
            IOUtils.ReadFully(fis, buf, 0, readLen);
            for (int i = readLen - 1; i >= 0; i--)
            {
                if (buf[i] != FSEditLogOpCodes.OpInvalid.GetOpCode())
                {
                    // + 1 since we count this byte!
                    return pos + i + 1;
                }
            }
            pos -= chunkSizeToRead;
        }
        return 0;
    }
    finally
    {
        fis.Close();
    }
}
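// For illustration, the same backward trailer scan reduced to a plain byte
// array (a hypothetical helper, not part of the original class):
private static int NonTrailerLength(byte[] data, byte pad)
{
    for (int i = data.Length - 1; i >= 0; i--)
    {
        if (data[i] != pad)
        {
            return i + 1; // + 1 since we count this byte
        }
    }
    return 0; // the whole array is trailer
}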
/// <summary>Read an entire local file into memory as a byte array.</summary>
/// <remarks>Read an entire local file into memory as a byte array.</remarks>
/// <param name="path">location of the file to read.</param>
/// <param name="max">
/// maximum number of bytes to read; if the file is larger than
/// this limit an IOException is thrown.
/// </param>
/// <returns>complete contents of the requested local file.</returns>
/// <exception cref="System.IO.FileNotFoundException">the file does not exist.</exception>
/// <exception cref="System.IO.IOException">the file exists, but its contents cannot be read.</exception>
public static byte[] ReadFully(FilePath path, int max)
{
    FileInputStream @in = new FileInputStream(path);
    try
    {
        long sz = @in.GetChannel().Size();
        if (sz > max)
        {
            throw new IOException(MessageFormat.Format(JGitText.Get().fileIsTooLarge, path));
        }
        byte[] buf = new byte[(int)sz];
        IOUtil.ReadFully(@in, buf, 0, buf.Length);
        return buf;
    }
    finally
    {
        try
        {
            @in.Close();
        }
        catch (IOException)
        {
            // Ignore close failures; the read has already completed.
        }
    }
}
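// Hypothetical call site (the path and the 1 MB cap are illustrative only, and
// the unqualified call assumes the method above is in scope):
byte[] raw = ReadFully(new FilePath(".git/config"), 1024 * 1024);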
Load(long length, FileInputStream blockIn, FileInputStream metaIn, string blockFileName)
{
    Org.Apache.Hadoop.Hdfs.Server.Datanode.Fsdataset.Impl.MappableBlock mappableBlock = null;
    MappedByteBuffer mmap = null;
    FileChannel blockChannel = null;
    try
    {
        blockChannel = blockIn.GetChannel();
        if (blockChannel == null)
        {
            throw new IOException("Block InputStream has no FileChannel.");
        }
        mmap = blockChannel.Map(FileChannel.MapMode.ReadOnly, 0, length);
        NativeIO.POSIX.GetCacheManipulator().Mlock(blockFileName, mmap, length);
        VerifyChecksum(length, metaIn, blockChannel, blockFileName);
        mappableBlock = new Org.Apache.Hadoop.Hdfs.Server.Datanode.Fsdataset.Impl.MappableBlock(mmap, length);
    }
    finally
    {
        IOUtils.CloseQuietly(blockChannel);
        if (mappableBlock == null)
        {
            if (mmap != null)
            {
                // unmapping also unlocks
                NativeIO.POSIX.Munmap(mmap);
            }
        }
    }
    return mappableBlock;
}
/// <exception cref="NGit.Errors.MissingObjectException"></exception> /// <exception cref="System.IO.IOException"></exception> public override ObjectStream OpenStream() { FileInputStream @in = new FileInputStream(p); long sz = @in.GetChannel().Size(); int type = this.GetType(); BufferedInputStream b = new BufferedInputStream(@in); return(new ObjectStream.Filter(type, sz, b)); }
/// <summary>Copy the current file content into the temporary file.</summary>
/// <remarks>
/// Copy the current file content into the temporary file.
/// <p>
/// This method saves the current file content by inserting it into the
/// temporary file, so that the caller can safely append rather than replace
/// the primary file.
/// <p>
/// This method does nothing if the current file does not exist, or exists
/// but is empty.
/// </remarks>
/// <exception cref="System.IO.IOException">
/// the temporary file could not be written, or a read error
/// occurred while reading from the current file. The lock is
/// released before throwing the underlying IO exception to the
/// caller.
/// </exception>
/// <exception cref="Sharpen.RuntimeException">
/// the temporary file could not be written. The lock is released
/// before throwing the underlying exception to the caller.
/// </exception>
public virtual void CopyCurrentContent()
{
    RequireLock();
    try
    {
        FileInputStream fis = new FileInputStream(@ref);
        try
        {
            if (fsync)
            {
                FileChannel @in = fis.GetChannel();
                long pos = 0;
                long cnt = @in.Size();
                while (0 < cnt)
                {
                    long r = os.GetChannel().TransferFrom(@in, pos, cnt);
                    pos += r;
                    cnt -= r;
                }
            }
            else
            {
                byte[] buf = new byte[2048];
                int r;
                while ((r = fis.Read(buf)) >= 0)
                {
                    os.Write(buf, 0, r);
                }
            }
        }
        finally
        {
            fis.Close();
        }
    }
    catch (FileNotFoundException)
    {
        // Don't worry about a file that doesn't exist yet; it
        // conceptually has no current content to copy.
    }
    catch (IOException)
    {
        Unlock();
        throw;
    }
    catch (RuntimeException)
    {
        Unlock();
        throw;
    }
    catch (Error)
    {
        Unlock();
        throw;
    }
}
/// <summary>Calculate the usable size of a shared memory segment.</summary>
/// <remarks>
/// Calculate the usable size of a shared memory segment.
/// We round down to a multiple of the slot size and do some validation.
/// </remarks>
/// <param name="stream">The stream we're using.</param>
/// <returns>The usable size of the shared memory segment.</returns>
/// <exception cref="System.IO.IOException"/>
private static int GetUsableLength(FileInputStream stream)
{
    int intSize = Ints.CheckedCast(stream.GetChannel().Size());
    int slots = intSize / BytesPerSlot;
    if (slots == 0)
    {
        throw new IOException("size of shared memory segment was " + intSize + ", but that is not enough to hold even one slot.");
    }
    return slots * BytesPerSlot;
}
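// Worked example of the rounding (BytesPerSlot is assumed to be 64 here purely
// for illustration):
//   segment size 8200 -> slots = 8200 / 64 = 128 -> usable length = 128 * 64 = 8192
// The trailing 8 bytes are ignored, and any size smaller than one slot throws.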
/// <summary>Verifies the block's checksum.</summary>
/// <remarks>Verifies the block's checksum. This is an I/O intensive operation.</remarks>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="Org.Apache.Hadoop.FS.ChecksumException"/>
private static void VerifyChecksum(long length, FileInputStream metaIn, FileChannel blockChannel, string blockFileName)
{
    // Verify the checksum from the block's meta file
    // Get the DataChecksum from the meta file header
    BlockMetadataHeader header = BlockMetadataHeader.ReadHeader(new DataInputStream(new BufferedInputStream(metaIn, BlockMetadataHeader.GetHeaderSize())));
    FileChannel metaChannel = null;
    try
    {
        metaChannel = metaIn.GetChannel();
        if (metaChannel == null)
        {
            throw new IOException("Block InputStream meta file has no FileChannel.");
        }
        DataChecksum checksum = header.GetChecksum();
        int bytesPerChecksum = checksum.GetBytesPerChecksum();
        int checksumSize = checksum.GetChecksumSize();
        int numChunks = (8 * 1024 * 1024) / bytesPerChecksum;
        ByteBuffer blockBuf = ByteBuffer.Allocate(numChunks * bytesPerChecksum);
        ByteBuffer checksumBuf = ByteBuffer.Allocate(numChunks * checksumSize);
        // Verify the checksum
        int bytesVerified = 0;
        while (bytesVerified < length)
        {
            Preconditions.CheckState(bytesVerified % bytesPerChecksum == 0, "Unexpected partial chunk before EOF");
            System.Diagnostics.Debug.Assert(bytesVerified % bytesPerChecksum == 0);
            int bytesRead = FillBuffer(blockChannel, blockBuf);
            if (bytesRead == -1)
            {
                throw new IOException("checksum verification failed: premature EOF");
            }
            blockBuf.Flip();
            // Number of read chunks, including partial chunk at end
            int chunks = (bytesRead + bytesPerChecksum - 1) / bytesPerChecksum;
            checksumBuf.Limit(chunks * checksumSize);
            FillBuffer(metaChannel, checksumBuf);
            checksumBuf.Flip();
            checksum.VerifyChunkedSums(blockBuf, checksumBuf, blockFileName, bytesVerified);
            // Success
            bytesVerified += bytesRead;
            blockBuf.Clear();
            checksumBuf.Clear();
        }
    }
    finally
    {
        IOUtils.CloseQuietly(metaChannel);
    }
}
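// For illustration, the chunked layout the loop above walks: each
// bytesPerChecksum-sized span of block data is paired with one checksum entry
// in the meta file, and the final chunk may be partial. A minimal sketch over
// plain arrays, with a caller-supplied checksum function standing in for
// DataChecksum (all names here are hypothetical, not the Hadoop API):
private static void VerifyChunkedSketch(byte[] data, uint[] sums, int bytesPerChecksum, Func<byte[], int, int, uint> crc)
{
    int chunks = (data.Length + bytesPerChecksum - 1) / bytesPerChecksum;
    for (int c = 0; c < chunks; c++)
    {
        int off = c * bytesPerChecksum;
        int len = Math.Min(bytesPerChecksum, data.Length - off); // last chunk may be partial
        if (crc(data, off, len) != sums[c])
        {
            throw new IOException("checksum mismatch at chunk " + c);
        }
    }
}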
/// <exception cref="System.Exception"/> public virtual void TestReadAndWrite() { FilePath path = new FilePath(TestBase, "testReadAndWrite"); path.Mkdirs(); SharedFileDescriptorFactory factory = SharedFileDescriptorFactory.Create("woot_", new string[] { path.GetAbsolutePath() }); FileInputStream inStream = factory.CreateDescriptor("testReadAndWrite", 4096); FileOutputStream outStream = new FileOutputStream(inStream.GetFD()); outStream.Write(101); inStream.GetChannel().Position(0); Assert.Equal(101, inStream.Read()); inStream.Close(); outStream.Close(); FileUtil.FullyDelete(path); }
/// <exception cref="System.IO.IOException"/> public ShortCircuitReplica(ExtendedBlockId key, FileInputStream dataStream, FileInputStream metaStream, ShortCircuitCache cache, long creationTimeMs, ShortCircuitShm.Slot slot) { this.key = key; this.dataStream = dataStream; this.metaStream = metaStream; this.metaHeader = BlockMetadataHeader.PreadHeader(metaStream.GetChannel()); if (metaHeader.GetVersion() != 1) { throw new IOException("invalid metadata header version " + metaHeader.GetVersion( ) + ". Can only handle version 1."); } this.cache = cache; this.creationTimeMs = creationTimeMs; this.slot = slot; }
/// <exception cref="System.IO.IOException"/> private void CopyPartOfFile(FilePath src, FilePath dest) { FileInputStream @in = null; FileOutputStream @out = null; int MaxBytes = 700; try { @in = new FileInputStream(src); @out = new FileOutputStream(dest); @in.GetChannel().TransferTo(0, MaxBytes, @out.GetChannel()); } finally { IOUtils.Cleanup(null, @in); IOUtils.Cleanup(null, @out); } }
/// <exception cref="System.IO.IOException"/> private void LoadINodeDirSection(FileInputStream fin, IList <FsImageProto.FileSummary.Section > sections, FsImageProto.FileSummary summary, Configuration conf) { Log.Info("Loading INode directory section."); long startTime = Time.MonotonicNow(); foreach (FsImageProto.FileSummary.Section section in sections) { if (FSImageFormatProtobuf.SectionName.FromString(section.GetName()) == FSImageFormatProtobuf.SectionName .InodeDir) { fin.GetChannel().Position(section.GetOffset()); InputStream @is = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec (), new BufferedInputStream(new LimitInputStream(fin, section.GetLength()))); BuildNamespace(@is); } } long timeTaken = Time.MonotonicNow() - startTime; Log.Info("Finished loading INode directory section in {}ms", timeTaken); }
/// <exception cref="System.IO.IOException"/> private void Output(Configuration conf, FsImageProto.FileSummary summary, FileInputStream fin, AList <FsImageProto.FileSummary.Section> sections) { InputStream @is; long startTime = Time.MonotonicNow(); foreach (FsImageProto.FileSummary.Section section in sections) { if (FSImageFormatProtobuf.SectionName.FromString(section.GetName()) == FSImageFormatProtobuf.SectionName .Inode) { fin.GetChannel().Position(section.GetOffset()); @is = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec(), new BufferedInputStream (new LimitInputStream(fin, section.GetLength()))); OutputINodes(@is); } } long timeTaken = Time.MonotonicNow() - startTime; Log.Debug("Time to output inodes: {}ms", timeTaken); }
/// <exception cref="System.IO.IOException"/> public virtual void Visit(RandomAccessFile file) { Configuration conf = new Configuration(); if (!FSImageUtil.CheckFileFormat(file)) { throw new IOException("Unrecognized FSImage"); } FsImageProto.FileSummary summary = FSImageUtil.LoadSummary(file); using (FileInputStream fin = new FileInputStream(file.GetFD())) { InputStream @is; AList <FsImageProto.FileSummary.Section> sections = Lists.NewArrayList(summary.GetSectionsList ()); sections.Sort(new _IComparer_427()); foreach (FsImageProto.FileSummary.Section section in sections) { fin.GetChannel().Position(section.GetOffset()); @is = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec(), new BufferedInputStream (new LimitInputStream(fin, section.GetLength()))); switch (FSImageFormatProtobuf.SectionName.FromString(section.GetName())) { case FSImageFormatProtobuf.SectionName.StringTable: { stringTable = FSImageLoader.LoadStringTable(@is); break; } default: { break; } } } LoadDirectories(fin, sections, summary, conf); LoadINodeDirSection(fin, sections, summary, conf); metadataMap.Sync(); Output(conf, summary, fin, sections); } }
/// <summary>
/// Unbuffered file copy from src to dst without tainting the OS buffer cache.
/// On POSIX platforms:
/// It uses FileChannel#transferTo(), which internally attempts
/// unbuffered IO on OSes with native sendfile64() support and falls back to
/// buffered IO otherwise.
/// </summary>
/// <remarks>
/// Unbuffered file copy from src to dst without tainting the OS buffer cache.
/// On POSIX platforms:
/// It uses FileChannel#transferTo(), which internally attempts
/// unbuffered IO on OSes with native sendfile64() support and falls back to
/// buffered IO otherwise.
/// It minimizes the number of FileChannel#transferTo() calls by passing the
/// src file size directly as the third parameter instead of a smaller size.
/// This reduces the number of sendfile64() system calls when native sendfile64()
/// is supported. In the two fallback cases where sendfile is not supported,
/// FileChannel#transferTo already has its own batching of size 8 MB and 8 KB,
/// respectively.
/// On Windows:
/// It uses its own native wrapper of CopyFileEx with the COPY_FILE_NO_BUFFERING
/// flag, which is supported on Windows Server 2008 and above.
/// Ideally, we should use FileChannel#transferTo() across both POSIX and Windows
/// platforms. Unfortunately, the wrapper (Java_sun_nio_ch_FileChannelImpl_transferTo0)
/// used by FileChannel#transferTo for unbuffered IO is not implemented on Windows.
/// Based on OpenJDK 6/7/8 source code, Java_sun_nio_ch_FileChannelImpl_transferTo0
/// on Windows simply returns IOS_UNSUPPORTED.
/// Note: This simple native wrapper does minimal parameter checking before the copy
/// and consistency checking (e.g., size) after the copy.
/// It is recommended to use a wrapper function like
/// the Storage#nativeCopyFileUnbuffered() function in hadoop-hdfs with pre/post copy
/// checks.
/// </remarks>
/// <param name="src">The source path</param>
/// <param name="dst">The destination path</param>
/// <exception cref="System.IO.IOException"/>
public static void CopyFileUnbuffered(FilePath src, FilePath dst)
{
    if (nativeLoaded && Shell.Windows)
    {
        CopyFileUnbuffered0(src.GetAbsolutePath(), dst.GetAbsolutePath());
    }
    else
    {
        FileInputStream fis = null;
        FileOutputStream fos = null;
        FileChannel input = null;
        FileChannel output = null;
        try
        {
            fis = new FileInputStream(src);
            fos = new FileOutputStream(dst);
            input = fis.GetChannel();
            output = fos.GetChannel();
            long remaining = input.Size();
            long position = 0;
            long transferred = 0;
            while (remaining > 0)
            {
                // TransferTo may copy fewer bytes than requested, so loop until done.
                transferred = input.TransferTo(position, remaining, output);
                remaining -= transferred;
                position += transferred;
            }
        }
        finally
        {
            IOUtils.Cleanup(Log, output);
            IOUtils.Cleanup(Log, fos);
            IOUtils.Cleanup(Log, input);
            IOUtils.Cleanup(Log, fis);
        }
    }
}
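// Hypothetical call site (the paths are illustrative only, and the unqualified
// call assumes the method above is in scope; the excerpt does not name its
// enclosing class):
CopyFileUnbuffered(new FilePath("/data/blk_1073741825"), new FilePath("/backup/blk_1073741825"));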
/// <exception cref="System.IO.IOException"/> internal void Visit(RandomAccessFile file) { if (!FSImageUtil.CheckFileFormat(file)) { throw new IOException("Unrecognized FSImage"); } FsImageProto.FileSummary summary = FSImageUtil.LoadSummary(file); using (FileInputStream @in = new FileInputStream(file.GetFD())) { foreach (FsImageProto.FileSummary.Section s in summary.GetSectionsList()) { if (FSImageFormatProtobuf.SectionName.FromString(s.GetName()) != FSImageFormatProtobuf.SectionName .Inode) { continue; } @in.GetChannel().Position(s.GetOffset()); InputStream @is = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec (), new BufferedInputStream(new LimitInputStream(@in, s.GetLength()))); Run(@is); Output(); } } }
/// <exception cref="System.Exception"/> private static long[] GetBlockSizes(HdfsBlockLocation[] locs) { long[] sizes = new long[locs.Length]; for (int i = 0; i < locs.Length; i++) { HdfsBlockLocation loc = locs[i]; string bpid = loc.GetLocatedBlock().GetBlock().GetBlockPoolId(); Block block = loc.GetLocatedBlock().GetBlock().GetLocalBlock(); ExtendedBlock extBlock = new ExtendedBlock(bpid, block); FileInputStream blockInputStream = null; FileChannel blockChannel = null; try { blockInputStream = (FileInputStream)fsd.GetBlockInputStream(extBlock, 0); blockChannel = blockInputStream.GetChannel(); sizes[i] = blockChannel.Size(); } finally { IOUtils.Cleanup(Log, blockChannel, blockInputStream); } } return(sizes); }
internal virtual MappedByteBuffer LoadMmapInternal()
{
    try
    {
        FileChannel channel = dataStream.GetChannel();
        MappedByteBuffer mmap = channel.Map(FileChannel.MapMode.ReadOnly, 0, Math.Min(int.MaxValue, channel.Size()));
        if (Log.IsTraceEnabled())
        {
            Log.Trace(this + ": created mmap of size " + channel.Size());
        }
        return mmap;
    }
    catch (IOException e)
    {
        Log.Warn(this + ": mmap error", e);
        return null;
    }
    catch (RuntimeException e)
    {
        Log.Warn(this + ": mmap error", e);
        return null;
    }
}
/// <exception cref="System.IO.IOException"/> internal override int Run(IList <string> args) { if (args.Count == 0) { System.Console.Out.WriteLine(this.usageText); System.Console.Out.WriteLine(this.helpText + "\n"); return(1); } string blockFile = StringUtils.PopOptionWithArgument("-block", args); string metaFile = StringUtils.PopOptionWithArgument("-meta", args); if (metaFile == null) { System.Console.Error.WriteLine("You must specify a meta file with -meta"); return(1); } FileInputStream metaStream = null; FileInputStream dataStream = null; FileChannel metaChannel = null; FileChannel dataChannel = null; DataInputStream checksumStream = null; try { BlockMetadataHeader header; try { metaStream = new FileInputStream(metaFile); checksumStream = new DataInputStream(metaStream); header = BlockMetadataHeader.ReadHeader(checksumStream); metaChannel = metaStream.GetChannel(); metaChannel.Position(DebugAdmin.HeaderLen); } catch (RuntimeException e) { System.Console.Error.WriteLine("Failed to read HDFS metadata file header for " + metaFile + ": " + StringUtils.StringifyException(e)); return(1); } catch (IOException e) { System.Console.Error.WriteLine("Failed to read HDFS metadata file header for " + metaFile + ": " + StringUtils.StringifyException(e)); return(1); } DataChecksum checksum = header.GetChecksum(); System.Console.Out.WriteLine("Checksum type: " + checksum.ToString()); if (blockFile == null) { return(0); } ByteBuffer metaBuf; ByteBuffer dataBuf; try { dataStream = new FileInputStream(blockFile); dataChannel = dataStream.GetChannel(); int ChecksumsPerBuf = 1024 * 32; metaBuf = ByteBuffer.Allocate(checksum.GetChecksumSize() * ChecksumsPerBuf); dataBuf = ByteBuffer.Allocate(checksum.GetBytesPerChecksum() * ChecksumsPerBuf); } catch (IOException e) { System.Console.Error.WriteLine("Failed to open HDFS block file for " + blockFile + ": " + StringUtils.StringifyException(e)); return(1); } long offset = 0; while (true) { dataBuf.Clear(); int dataRead = -1; try { dataRead = dataChannel.Read(dataBuf); if (dataRead < 0) { break; } } catch (IOException e) { System.Console.Error.WriteLine("Got I/O error reading block file " + blockFile + "from disk at offset " + dataChannel.Position() + ": " + StringUtils.StringifyException (e)); return(1); } try { int csumToRead = (((checksum.GetBytesPerChecksum() - 1) + dataRead) / checksum.GetBytesPerChecksum ()) * checksum.GetChecksumSize(); metaBuf.Clear(); metaBuf.Limit(csumToRead); metaChannel.Read(metaBuf); dataBuf.Flip(); metaBuf.Flip(); } catch (IOException e) { System.Console.Error.WriteLine("Got I/O error reading metadata file " + metaFile + "from disk at offset " + metaChannel.Position() + ": " + StringUtils.StringifyException (e)); return(1); } try { checksum.VerifyChunkedSums(dataBuf, metaBuf, blockFile, offset); } catch (IOException e) { System.Console.Out.WriteLine("verifyChunkedSums error: " + StringUtils.StringifyException (e)); return(1); } offset += dataRead; } System.Console.Out.WriteLine("Checksum verification succeeded on block file " + blockFile ); return(0); } finally { IOUtils.Cleanup(null, metaStream, dataStream, checksumStream); } }
/// <summary>Load fsimage into the memory.</summary>
/// <param name="inputFile">the filepath of the fsimage to load.</param>
/// <returns>FSImageLoader</returns>
/// <exception cref="System.IO.IOException">if failed to load fsimage.</exception>
internal static Org.Apache.Hadoop.Hdfs.Tools.OfflineImageViewer.FSImageLoader Load(string inputFile)
{
    Configuration conf = new Configuration();
    RandomAccessFile file = new RandomAccessFile(inputFile, "r");
    if (!FSImageUtil.CheckFileFormat(file))
    {
        throw new IOException("Unrecognized FSImage");
    }
    FsImageProto.FileSummary summary = FSImageUtil.LoadSummary(file);
    using (FileInputStream fin = new FileInputStream(file.GetFD()))
    {
        // Map to record INodeReference to the referred id
        ImmutableList<long> refIdList = null;
        string[] stringTable = null;
        byte[][] inodes = null;
        IDictionary<long, long[]> dirmap = null;
        AList<FsImageProto.FileSummary.Section> sections = Lists.NewArrayList(summary.GetSectionsList());
        sections.Sort(new _IComparer_126());
        foreach (FsImageProto.FileSummary.Section s in sections)
        {
            fin.GetChannel().Position(s.GetOffset());
            InputStream @is = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec(), new BufferedInputStream(new LimitInputStream(fin, s.GetLength())));
            Log.Debug("Loading section " + s.GetName() + " length: " + s.GetLength());
            switch (FSImageFormatProtobuf.SectionName.FromString(s.GetName()))
            {
                case FSImageFormatProtobuf.SectionName.StringTable:
                {
                    stringTable = LoadStringTable(@is);
                    break;
                }
                case FSImageFormatProtobuf.SectionName.Inode:
                {
                    inodes = LoadINodeSection(@is);
                    break;
                }
                case FSImageFormatProtobuf.SectionName.InodeReference:
                {
                    refIdList = LoadINodeReferenceSection(@is);
                    break;
                }
                case FSImageFormatProtobuf.SectionName.InodeDir:
                {
                    dirmap = LoadINodeDirectorySection(@is, refIdList);
                    break;
                }
                default:
                {
                    break;
                }
            }
        }
        return new Org.Apache.Hadoop.Hdfs.Tools.OfflineImageViewer.FSImageLoader(stringTable, inodes, dirmap);
    }
}
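// Hypothetical usage (the fsimage path is illustrative only):
FSImageLoader loader = FSImageLoader.Load("/tmp/fsimage_0000000000000000024");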
/// <exception cref="System.IO.IOException"/> public void Visit(RandomAccessFile file) { if (!FSImageUtil.CheckFileFormat(file)) { throw new IOException("Unrecognized FSImage"); } FsImageProto.FileSummary summary = FSImageUtil.LoadSummary(file); using (FileInputStream fin = new FileInputStream(file.GetFD())) { @out.Write("<?xml version=\"1.0\"?>\n<fsimage>"); AList <FsImageProto.FileSummary.Section> sections = Lists.NewArrayList(summary.GetSectionsList ()); sections.Sort(new _IComparer_83()); foreach (FsImageProto.FileSummary.Section s in sections) { fin.GetChannel().Position(s.GetOffset()); InputStream @is = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec (), new BufferedInputStream(new LimitInputStream(fin, s.GetLength()))); switch (FSImageFormatProtobuf.SectionName.FromString(s.GetName())) { case FSImageFormatProtobuf.SectionName.NsInfo: { DumpNameSection(@is); break; } case FSImageFormatProtobuf.SectionName.StringTable: { LoadStringTable(@is); break; } case FSImageFormatProtobuf.SectionName.Inode: { DumpINodeSection(@is); break; } case FSImageFormatProtobuf.SectionName.InodeReference: { DumpINodeReferenceSection(@is); break; } case FSImageFormatProtobuf.SectionName.InodeDir: { DumpINodeDirectorySection(@is); break; } case FSImageFormatProtobuf.SectionName.FilesUnderconstruction: { DumpFileUnderConstructionSection(@is); break; } case FSImageFormatProtobuf.SectionName.Snapshot: { DumpSnapshotSection(@is); break; } case FSImageFormatProtobuf.SectionName.SnapshotDiff: { DumpSnapshotDiffSection(@is); break; } case FSImageFormatProtobuf.SectionName.SecretManager: { DumpSecretManagerSection(@is); break; } case FSImageFormatProtobuf.SectionName.CacheManager: { DumpCacheManagerSection(@is); break; } default: { break; } } } @out.Write("</fsimage>\n"); } }
/// <exception cref="System.IO.IOException"/> private void LoadInternal(RandomAccessFile raFile, FileInputStream fin) { if (!FSImageUtil.CheckFileFormat(raFile)) { throw new IOException("Unrecognized file format"); } FsImageProto.FileSummary summary = FSImageUtil.LoadSummary(raFile); if (requireSameLayoutVersion && summary.GetLayoutVersion() != HdfsConstants.NamenodeLayoutVersion) { throw new IOException("Image version " + summary.GetLayoutVersion() + " is not equal to the software version " + HdfsConstants.NamenodeLayoutVersion); } FileChannel channel = fin.GetChannel(); FSImageFormatPBINode.Loader inodeLoader = new FSImageFormatPBINode.Loader(fsn, this ); FSImageFormatPBSnapshot.Loader snapshotLoader = new FSImageFormatPBSnapshot.Loader (fsn, this); AList <FsImageProto.FileSummary.Section> sections = Lists.NewArrayList(summary.GetSectionsList ()); sections.Sort(new _IComparer_210()); StartupProgress prog = NameNode.GetStartupProgress(); Step currentStep = null; foreach (FsImageProto.FileSummary.Section s in sections) { channel.Position(s.GetOffset()); InputStream @in = new BufferedInputStream(new LimitInputStream(fin, s.GetLength() )); @in = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec(), @in); string n = s.GetName(); switch (FSImageFormatProtobuf.SectionName.FromString(n)) { case FSImageFormatProtobuf.SectionName.NsInfo: { LoadNameSystemSection(@in); break; } case FSImageFormatProtobuf.SectionName.StringTable: { LoadStringTableSection(@in); break; } case FSImageFormatProtobuf.SectionName.Inode: { currentStep = new Step(StepType.Inodes); prog.BeginStep(Phase.LoadingFsimage, currentStep); inodeLoader.LoadINodeSection(@in); break; } case FSImageFormatProtobuf.SectionName.InodeReference: { snapshotLoader.LoadINodeReferenceSection(@in); break; } case FSImageFormatProtobuf.SectionName.InodeDir: { inodeLoader.LoadINodeDirectorySection(@in); break; } case FSImageFormatProtobuf.SectionName.FilesUnderconstruction: { inodeLoader.LoadFilesUnderConstructionSection(@in); break; } case FSImageFormatProtobuf.SectionName.Snapshot: { snapshotLoader.LoadSnapshotSection(@in); break; } case FSImageFormatProtobuf.SectionName.SnapshotDiff: { snapshotLoader.LoadSnapshotDiffSection(@in); break; } case FSImageFormatProtobuf.SectionName.SecretManager: { prog.EndStep(Phase.LoadingFsimage, currentStep); Step step = new Step(StepType.DelegationTokens); prog.BeginStep(Phase.LoadingFsimage, step); LoadSecretManagerSection(@in); prog.EndStep(Phase.LoadingFsimage, step); break; } case FSImageFormatProtobuf.SectionName.CacheManager: { Step step = new Step(StepType.CachePools); prog.BeginStep(Phase.LoadingFsimage, step); LoadCacheManagerSection(@in); prog.EndStep(Phase.LoadingFsimage, step); break; } default: { Log.Warn("Unrecognized section " + n); break; } } } }