/// <summary>
/// Create a FileInputStream that shares delete permission on the
/// file opened at a given offset.
/// </summary>
/// <remarks>
/// Other processes can delete the file while the returned FileInputStream is
/// reading it. Only the Windows branch uses the native interface; elsewhere a
/// plain open already permits concurrent delete.
/// </remarks>
/// <param name="f">the file to open</param>
/// <param name="seekOffset">initial read position; values &lt;= 0 start at the beginning</param>
/// <exception cref="System.IO.IOException"/>
public static FileInputStream GetShareDeleteFileInputStream(FilePath f, long seekOffset)
{
    if (!Shell.Windows)
    {
        RandomAccessFile rf = new RandomAccessFile(f, "r");
        bool success = false;
        try
        {
            if (seekOffset > 0)
            {
                rf.Seek(seekOffset);
            }
            FileInputStream fis = new FileInputStream(rf.GetFD());
            success = true;
            return fis;
        }
        finally
        {
            if (!success)
            {
                // FIX: do not leak the RandomAccessFile when Seek or the
                // stream construction throws.
                rf.Close();
            }
        }
    }
    else
    {
        // Use Windows native interface to create a FileInputStream that
        // shares delete permission on the file opened, and set it to the
        // given offset.
        FileDescriptor fd = NativeIO.Windows.CreateFile(f.GetAbsolutePath(),
            NativeIO.Windows.GenericRead,
            NativeIO.Windows.FileShareRead | NativeIO.Windows.FileShareWrite |
            NativeIO.Windows.FileShareDelete,
            NativeIO.Windows.OpenExisting);
        if (seekOffset > 0)
        {
            NativeIO.Windows.SetFilePointer(fd, seekOffset, NativeIO.Windows.FileBegin);
        }
        return new FileInputStream(fd);
    }
}
/// <summary>
/// Builds a chunked-file wrapper, capturing the descriptor and the
/// OS-cache / readahead settings for later use.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public FadvisedChunkedFile(RandomAccessFile file, long position, long count,
    int chunkSize, bool manageOsCache, int readaheadLength,
    ReadaheadPool readaheadPool, string identifier)
    : base(file, position, count, chunkSize)
{
    // Capture the underlying descriptor of the file being served.
    this.fd = file.GetFD();
    this.identifier = identifier;
    this.manageOsCache = manageOsCache;
    this.readaheadPool = readaheadPool;
    this.readaheadLength = readaheadLength;
}
/// <summary>Forces buffered writes of the underlying file to stable storage.</summary>
/// <exception cref="Db4objects.Db4o.Ext.Db4oIOException">wraps any IOException raised while syncing</exception>
public virtual void Sync()
{
    try
    {
        _file.GetFD().Sync();
    }
    catch (IOException ioe)
    {
        // Translate the low-level IO failure into the db4o exception type.
        throw new Db4oIOException(ioe);
    }
}
/// <summary>Forces buffered writes of the delegate file to stable storage.</summary>
/// <exception cref="Db4objects.Db4o.Ext.Db4oIOException">wraps any IOException raised while syncing</exception>
public override void Sync()
{
    try
    {
        _delegate.GetFD().Sync();
    }
    catch (IOException ioe)
    {
        // Translate the low-level IO failure into the db4o exception type.
        throw new Db4oIOException(ioe);
    }
}
/// <summary>Creates a file of the given size filled with random bytes.</summary>
/// <remarks>
/// Random data is written so that filesystems with compression enabled
/// (e.g., ZFS) cannot compress the file below the requested size.
/// </remarks>
/// <param name="newFile">the file to create</param>
/// <param name="size">number of random bytes to write</param>
/// <exception cref="System.IO.IOException"/>
private void CreateFile(FilePath newFile, int size)
{
    Random random = new Random();
    byte[] data = new byte[size];
    random.NextBytes(data);
    newFile.CreateNewFile();
    RandomAccessFile file = new RandomAccessFile(newFile, "rws");
    try
    {
        file.Write(data);
        // Force the data to disk before closing.
        file.GetFD().Sync();
    }
    finally
    {
        // FIX: close the RandomAccessFile even when Write or Sync throws;
        // the original leaked the handle on failure.
        file.Close();
    }
}
/// <summary>
/// Creates a file region over [position, position + count) of the given file,
/// remembering the descriptor, channel, and the cache/readahead/shuffle
/// settings supplied by the caller.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public FadvisedFileRegion(RandomAccessFile file, long position, long count,
    bool manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
    string identifier, int shuffleBufferSize, bool shuffleTransferToAllowed)
    : base(file.GetChannel(), position, count)
{
    // Keep both the raw descriptor and the channel of the underlying file.
    this.fd = file.GetFD();
    this.fileChannel = file.GetChannel();
    this.position = position;
    this.count = count;
    this.identifier = identifier;
    this.manageOsCache = manageOsCache;
    this.readaheadLength = readaheadLength;
    this.readaheadPool = readaheadPool;
    this.shuffleBufferSize = shuffleBufferSize;
    this.shuffleTransferToAllowed = shuffleTransferToAllowed;
}
/// <summary>
/// Opens a RandomAccessFile and verifies, via fstat on the already-open
/// descriptor, that the file is owned by the expected user and group.
/// </summary>
/// <remarks>
/// The check is performed on the open descriptor rather than the path —
/// presumably to avoid a check-then-open race; confirm against callers.
/// The file is closed again if the ownership check fails or throws.
/// </remarks>
protected internal static RandomAccessFile ForceSecureOpenForRandomRead(FilePath f,
    string mode, string expectedOwner, string expectedGroup)
{
    RandomAccessFile randomFile = new RandomAccessFile(f, mode);
    bool verified = false;
    try
    {
        NativeIO.POSIX.Stat fileStat = NativeIO.POSIX.GetFstat(randomFile.GetFD());
        CheckStat(f, fileStat.GetOwner(), fileStat.GetGroup(), expectedOwner, expectedGroup);
        verified = true;
        return randomFile;
    }
    finally
    {
        if (!verified)
        {
            // Ownership verification failed or threw: do not leak the handle.
            randomFile.Close();
        }
    }
}
/// <summary>Walks every section of an fsimage file and dispatches each one.</summary>
/// <remarks>
/// Verifies the file format, reads the FileSummary, then iterates the sections
/// in sorted order. Only the string table is loaded in this pass; directories,
/// inode-dir data, and the final output are delegated to helper methods.
/// </remarks>
/// <param name="file">opened fsimage; its descriptor is wrapped and closed by the using block</param>
/// <exception cref="System.IO.IOException">if the file is not a recognized FSImage</exception>
public virtual void Visit(RandomAccessFile file)
{
    Configuration conf = new Configuration();
    if (!FSImageUtil.CheckFileFormat(file))
    {
        throw new IOException("Unrecognized FSImage");
    }
    FsImageProto.FileSummary summary = FSImageUtil.LoadSummary(file);
    // Wrap the already-open descriptor; closing fin releases it when done.
    using (FileInputStream fin = new FileInputStream(file.GetFD()))
    {
        InputStream @is;
        AList<FsImageProto.FileSummary.Section> sections = Lists.NewArrayList(summary.GetSectionsList());
        // _IComparer_427 is a Sharpen-generated comparator; presumably it
        // orders sections by file offset -- TODO confirm its definition.
        sections.Sort(new _IComparer_427());
        foreach (FsImageProto.FileSummary.Section section in sections)
        {
            // Seek to the section and wrap it in a length-limited, possibly
            // decompressing stream.
            fin.GetChannel().Position(section.GetOffset());
            @is = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec(),
                new BufferedInputStream(new LimitInputStream(fin, section.GetLength())));
            switch (FSImageFormatProtobuf.SectionName.FromString(section.GetName()))
            {
                case FSImageFormatProtobuf.SectionName.StringTable:
                {
                    // Only the string table is consumed directly here.
                    stringTable = FSImageLoader.LoadStringTable(@is);
                    break;
                }

                default:
                {
                    // All other sections are handled by the helpers below.
                    break;
                }
            }
        }
        LoadDirectories(fin, sections, summary, conf);
        LoadINodeDirSection(fin, sections, summary, conf);
        metadataMap.Sync();
        Output(conf, summary, fin, sections);
    }
}
/// <summary>Reads a Properties object from the given file.</summary>
/// <param name="from">the file to load properties from</param>
/// <returns>the properties parsed from the file contents</returns>
/// <exception cref="System.IO.IOException"/>
public static Properties ReadPropertiesFile(FilePath from)
{
    RandomAccessFile file = new RandomAccessFile(from, "rws");
    FileInputStream @in = null;
    Properties props = new Properties();
    try
    {
        // The stream shares the RandomAccessFile's descriptor, so rewind the
        // file before loading.
        @in = new FileInputStream(file.GetFD());
        file.Seek(0);
        props.Load(@in);
    }
    finally
    {
        try
        {
            if (@in != null)
            {
                @in.Close();
            }
        }
        finally
        {
            // FIX: close the RandomAccessFile even if @in.Close() throws;
            // the original skipped this close when the stream close failed.
            file.Close();
        }
    }
    return props;
}
/// <summary>Scans an fsimage file and processes its INODE section.</summary>
/// <remarks>
/// Validates the image format, reads the FileSummary, then runs over the
/// summary's sections, handing only the INODE section (length-limited and
/// decompressed if needed) to Run/Output.
/// </remarks>
/// <exception cref="System.IO.IOException">if the file is not a recognized FSImage</exception>
internal void Visit(RandomAccessFile file)
{
    if (!FSImageUtil.CheckFileFormat(file))
    {
        throw new IOException("Unrecognized FSImage");
    }
    FsImageProto.FileSummary summary = FSImageUtil.LoadSummary(file);
    using (FileInputStream imageIn = new FileInputStream(file.GetFD()))
    {
        foreach (FsImageProto.FileSummary.Section section in summary.GetSectionsList())
        {
            // Everything except the INODE section is skipped.
            if (FSImageFormatProtobuf.SectionName.FromString(section.GetName()) ==
                FSImageFormatProtobuf.SectionName.Inode)
            {
                imageIn.GetChannel().Position(section.GetOffset());
                InputStream sectionStream = FSImageUtil.WrapInputStreamForCompression(
                    conf, summary.GetCodec(),
                    new BufferedInputStream(new LimitInputStream(imageIn, section.GetLength())));
                Run(sectionStream);
                Output();
            }
        }
    }
}
/// <summary>
/// Drives one BlockReaderLocalTest case against a single-datanode mini cluster.
/// </summary>
/// <remarks>
/// Writes a test file, records its contents, then shuts the cluster down and
/// re-opens the raw block/meta files directly to exercise the short-circuit
/// BlockReaderLocal path. Afterwards it asserts that the reader did not move
/// the file positions of the raw streams. The finally block releases every
/// resource that was not handed off to the reader.
/// </remarks>
/// <param name="test">the scenario to set up and run</param>
/// <param name="checksum">whether checksum verification is enabled</param>
/// <param name="readahead">client cache readahead length to configure</param>
/// <exception cref="System.IO.IOException"/>
public virtual void RunBlockReaderLocalTest(TestBlockReaderLocal.BlockReaderLocalTest test, bool checksum, long readahead)
{
    // Skip entirely when domain sockets cannot be loaded on this platform.
    Assume.AssumeThat(DomainSocket.GetLoadingFailureReason(), CoreMatchers.EqualTo(null));
    MiniDFSCluster cluster = null;
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, !checksum);
    conf.SetLong(DFSConfigKeys.DfsBytesPerChecksumKey, TestBlockReaderLocal.BlockReaderLocalTest.BytesPerChecksum);
    conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "CRC32C");
    conf.SetLong(DFSConfigKeys.DfsClientCacheReadahead, readahead);
    test.SetConfiguration(conf);
    FileInputStream dataIn = null;
    FileInputStream metaIn = null;
    Path TestPath = new Path("/a");
    long RandomSeed = 4567L;
    BlockReaderLocal blockReaderLocal = null;
    FSDataInputStream fsIn = null;
    byte[] original = new byte[TestBlockReaderLocal.BlockReaderLocalTest.TestLength];
    FileSystem fs = null;
    ShortCircuitShm shm = null;
    RandomAccessFile raf = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
        cluster.WaitActive();
        fs = cluster.GetFileSystem();
        // Create the file with a fixed seed so the contents are reproducible.
        DFSTestUtil.CreateFile(fs, TestPath, TestBlockReaderLocal.BlockReaderLocalTest.TestLength, (short)1, RandomSeed);
        try
        {
            DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
        }
        catch (Exception e)
        {
            // NOTE(review): this catch(Exception) precedes catch(TimeoutException),
            // which would make the second clause unreachable in C# -- confirm
            // how the Sharpen-generated exception hierarchy resolves this.
            NUnit.Framework.Assert.Fail("unexpected InterruptedException during " + "waitReplication: " + e);
        }
        catch (TimeoutException e)
        {
            NUnit.Framework.Assert.Fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        // Read the whole file through the normal path to capture the expected bytes.
        fsIn = fs.Open(TestPath);
        IOUtils.ReadFully(fsIn, original, 0, TestBlockReaderLocal.BlockReaderLocalTest.TestLength);
        fsIn.Close();
        fsIn = null;
        ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, TestPath);
        FilePath dataFile = cluster.GetBlockFile(0, block);
        FilePath metaFile = cluster.GetBlockMetadataFile(0, block);
        ShortCircuitCache shortCircuitCache = ClientContext.GetFromConf(conf).GetShortCircuitCache();
        // The cluster is no longer needed; the raw block files are read directly.
        cluster.Shutdown();
        cluster = null;
        test.Setup(dataFile, checksum);
        FileInputStream[] streams = new FileInputStream[] { new FileInputStream(dataFile), new FileInputStream(metaFile) };
        dataIn = streams[0];
        metaIn = streams[1];
        ExtendedBlockId key = new ExtendedBlockId(block.GetBlockId(), block.GetBlockPoolId());
        // Back the shared-memory segment with a throwaway file in the socket dir.
        raf = new RandomAccessFile(new FilePath(sockDir.GetDir().GetAbsolutePath(), UUID.RandomUUID().ToString()), "rw");
        raf.SetLength(8192);
        FileInputStream shmStream = new FileInputStream(raf.GetFD());
        shm = new ShortCircuitShm(ShortCircuitShm.ShmId.CreateRandom(), shmStream);
        ShortCircuitReplica replica = new ShortCircuitReplica(key, dataIn, metaIn, shortCircuitCache, Time.Now(), shm.AllocAndRegisterSlot(ExtendedBlockId.FromExtendedBlock(block)));
        blockReaderLocal = new BlockReaderLocal.Builder(new DFSClient.Conf(conf)).SetFilename(TestPath.GetName()).SetBlock(block).SetShortCircuitReplica(replica).SetCachingStrategy(new CachingStrategy(false, readahead)).SetVerifyChecksum(checksum).Build();
        // Ownership of the streams moved to the replica/reader; clear the
        // locals so the finally block does not close them a second time.
        dataIn = null;
        metaIn = null;
        test.DoTest(blockReaderLocal, original);
        // BlockReaderLocal should not alter the file position.
        NUnit.Framework.Assert.AreEqual(0, streams[0].GetChannel().Position());
        NUnit.Framework.Assert.AreEqual(0, streams[1].GetChannel().Position());
    }
    finally
    {
        // Best-effort teardown of everything that is still owned here.
        if (fsIn != null)
        {
            fsIn.Close();
        }
        if (fs != null)
        {
            fs.Close();
        }
        if (cluster != null)
        {
            cluster.Shutdown();
        }
        if (dataIn != null)
        {
            dataIn.Close();
        }
        if (metaIn != null)
        {
            metaIn.Close();
        }
        if (blockReaderLocal != null)
        {
            blockReaderLocal.Close();
        }
        if (shm != null)
        {
            shm.Free();
        }
        if (raf != null)
        {
            raf.Close();
        }
    }
}
/// <summary>Creates the block-data and checksum output streams for this replica.</summary>
/// <remarks>
/// For create, the requested checksum is used directly. For append/recovery,
/// the checksum already recorded in the meta file is enforced instead, the
/// on-disk block/meta lengths are validated, and both streams are positioned
/// at the end of the existing valid data.
/// </remarks>
/// <param name="isCreate">true for a brand-new replica; false for append/recovery</param>
/// <param name="requestedChecksum">checksum the client asked for; its chunk size must
/// match the existing one when appending</param>
/// <returns>streams positioned for writing block data and CRCs</returns>
/// <exception cref="System.IO.IOException">on corrupt block, chunk-size mismatch, or IO failure</exception>
public virtual ReplicaOutputStreams CreateStreams(bool isCreate, DataChecksum requestedChecksum)
{
    // ReplicaInPipelineInterface
    FilePath blockFile = GetBlockFile();
    FilePath metaFile = GetMetaFile();
    if (DataNode.Log.IsDebugEnabled())
    {
        DataNode.Log.Debug("writeTo blockfile is " + blockFile + " of size " + blockFile.Length());
        DataNode.Log.Debug("writeTo metafile is " + metaFile + " of size " + metaFile.Length());
    }
    long blockDiskSize = 0L;
    long crcDiskSize = 0L;
    // the checksum that should actually be used -- this
    // may differ from requestedChecksum for appends.
    DataChecksum checksum;
    RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
    if (!isCreate)
    {
        // For append or recovery, we must enforce the existing checksum.
        // Also, verify that the file has correct lengths, etc.
        bool checkedMeta = false;
        try
        {
            BlockMetadataHeader header = BlockMetadataHeader.ReadHeader(metaRAF);
            checksum = header.GetChecksum();
            if (checksum.GetBytesPerChecksum() != requestedChecksum.GetBytesPerChecksum())
            {
                throw new IOException("Client requested checksum " + requestedChecksum + " when appending to an existing block " + "with different chunk size: " + checksum);
            }
            int bytesPerChunk = checksum.GetBytesPerChecksum();
            int checksumSize = checksum.GetChecksumSize();
            blockDiskSize = bytesOnDisk;
            // Expected meta size: header plus one checksum for every
            // (possibly partial) chunk of the block data.
            crcDiskSize = BlockMetadataHeader.GetHeaderSize() + (blockDiskSize + bytesPerChunk - 1) / bytesPerChunk * checksumSize;
            if (blockDiskSize > 0 && (blockDiskSize > blockFile.Length() || crcDiskSize > metaFile.Length()))
            {
                throw new IOException("Corrupted block: " + this);
            }
            checkedMeta = true;
        }
        finally
        {
            if (!checkedMeta)
            {
                // clean up in case of exceptions.
                IOUtils.CloseStream(metaRAF);
            }
        }
    }
    else
    {
        // for create, we can use the requested checksum
        checksum = requestedChecksum;
    }
    FileOutputStream blockOut = null;
    FileOutputStream crcOut = null;
    try
    {
        blockOut = new FileOutputStream(new RandomAccessFile(blockFile, "rw").GetFD());
        crcOut = new FileOutputStream(metaRAF.GetFD());
        if (!isCreate)
        {
            // Continue the append from the end of the existing valid data.
            blockOut.GetChannel().Position(blockDiskSize);
            crcOut.GetChannel().Position(crcDiskSize);
        }
        return (new ReplicaOutputStreams(blockOut, crcOut, checksum, GetVolume().IsTransientStorage()));
    }
    catch (IOException e)
    {
        // NOTE(review): crcOut shares metaRAF's descriptor, so closing metaRAF
        // presumably covers it -- confirm; blockOut is closed explicitly.
        IOUtils.CloseStream(blockOut);
        IOUtils.CloseStream(metaRAF);
        throw;
    }
}
/// <summary>Dumps every known section of an fsimage file as XML.</summary>
/// <remarks>
/// Validates the image format, reads the FileSummary, then walks the sections
/// in sorted order, writing each recognized section between the
/// &lt;fsimage&gt; root tags. Unrecognized sections are silently skipped.
/// </remarks>
/// <param name="file">opened fsimage; its descriptor is wrapped and closed by the using block</param>
/// <exception cref="System.IO.IOException">if the file is not a recognized FSImage</exception>
public void Visit(RandomAccessFile file)
{
    if (!FSImageUtil.CheckFileFormat(file))
    {
        throw new IOException("Unrecognized FSImage");
    }
    FsImageProto.FileSummary summary = FSImageUtil.LoadSummary(file);
    using (FileInputStream fin = new FileInputStream(file.GetFD()))
    {
        @out.Write("<?xml version=\"1.0\"?>\n<fsimage>");
        AList<FsImageProto.FileSummary.Section> sections = Lists.NewArrayList(summary.GetSectionsList());
        // _IComparer_83 is a Sharpen-generated comparator; presumably it
        // orders sections by file offset -- TODO confirm its definition.
        sections.Sort(new _IComparer_83());
        foreach (FsImageProto.FileSummary.Section s in sections)
        {
            // Seek to the section and wrap it in a length-limited, possibly
            // decompressing stream.
            fin.GetChannel().Position(s.GetOffset());
            InputStream @is = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec(),
                new BufferedInputStream(new LimitInputStream(fin, s.GetLength())));
            // Dispatch each section to its dedicated dump routine.
            switch (FSImageFormatProtobuf.SectionName.FromString(s.GetName()))
            {
                case FSImageFormatProtobuf.SectionName.NsInfo:
                {
                    DumpNameSection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.StringTable:
                {
                    LoadStringTable(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.Inode:
                {
                    DumpINodeSection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.InodeReference:
                {
                    DumpINodeReferenceSection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.InodeDir:
                {
                    DumpINodeDirectorySection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.FilesUnderconstruction:
                {
                    DumpFileUnderConstructionSection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.Snapshot:
                {
                    DumpSnapshotSection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.SnapshotDiff:
                {
                    DumpSnapshotDiffSection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.SecretManager:
                {
                    DumpSecretManagerSection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.CacheManager:
                {
                    DumpCacheManagerSection(@is);
                    break;
                }

                default:
                {
                    // Unknown sections are ignored.
                    break;
                }
            }
        }
        @out.Write("</fsimage>\n");
    }
}
/// <summary>Load fsimage into the memory.</summary>
/// <param name="inputFile">the filepath of the fsimage to load.</param>
/// <returns>FSImageLoader</returns>
/// <exception cref="System.IO.IOException">if failed to load fsimage.</exception>
internal static Org.Apache.Hadoop.Hdfs.Tools.OfflineImageViewer.FSImageLoader Load(string inputFile)
{
    Configuration conf = new Configuration();
    // NOTE(review): this RandomAccessFile is never closed explicitly;
    // presumably closing fin (which wraps the same descriptor) is relied on
    // to release it -- confirm.
    RandomAccessFile file = new RandomAccessFile(inputFile, "r");
    if (!FSImageUtil.CheckFileFormat(file))
    {
        throw new IOException("Unrecognized FSImage");
    }
    FsImageProto.FileSummary summary = FSImageUtil.LoadSummary(file);
    using (FileInputStream fin = new FileInputStream(file.GetFD()))
    {
        // Map to record INodeReference to the referred id
        ImmutableList<long> refIdList = null;
        string[] stringTable = null;
        byte[][] inodes = null;
        IDictionary<long, long[]> dirmap = null;
        AList<FsImageProto.FileSummary.Section> sections = Lists.NewArrayList(summary.GetSectionsList());
        // _IComparer_126 is a Sharpen-generated comparator; presumably it
        // orders sections by file offset -- TODO confirm its definition.
        sections.Sort(new _IComparer_126());
        foreach (FsImageProto.FileSummary.Section s in sections)
        {
            // Seek to the section and wrap it in a length-limited, possibly
            // decompressing stream.
            fin.GetChannel().Position(s.GetOffset());
            InputStream @is = FSImageUtil.WrapInputStreamForCompression(conf, summary.GetCodec(),
                new BufferedInputStream(new LimitInputStream(fin, s.GetLength())));
            Log.Debug("Loading section " + s.GetName() + " length: " + s.GetLength());
            // Only the four sections needed to build the loader are consumed;
            // everything else is skipped. The sort above makes InodeReference
            // available before InodeDir, which consumes refIdList.
            switch (FSImageFormatProtobuf.SectionName.FromString(s.GetName()))
            {
                case FSImageFormatProtobuf.SectionName.StringTable:
                {
                    stringTable = LoadStringTable(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.Inode:
                {
                    inodes = LoadINodeSection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.InodeReference:
                {
                    refIdList = LoadINodeReferenceSection(@is);
                    break;
                }

                case FSImageFormatProtobuf.SectionName.InodeDir:
                {
                    dirmap = LoadINodeDirectorySection(@is, refIdList);
                    break;
                }

                default:
                {
                    break;
                }
            }
        }
        return (new Org.Apache.Hadoop.Hdfs.Tools.OfflineImageViewer.FSImageLoader(stringTable, inodes, dirmap));
    }
}