public void Dump(TextWriter writer, string indent)
{
    writer.WriteLine(indent + "SECURITY DESCRIPTORS");
    using (Stream s = _file.OpenStream(AttributeType.Data, "$SDS", FileAccess.Read))
    {
        byte[] buffer = StreamUtilities.ReadExact(s, (int)s.Length);

        foreach (KeyValuePair<IdIndexKey, IdIndexData> entry in _idIndex.Entries)
        {
            int pos = (int)entry.Value.SdsOffset;

            SecurityDescriptorRecord rec = new SecurityDescriptorRecord();
            if (!rec.Read(buffer, pos))
            {
                break;
            }

            string secDescStr = "--unknown--";
            if (rec.SecurityDescriptor[0] != 0)
            {
                RawSecurityDescriptor sd = new RawSecurityDescriptor(rec.SecurityDescriptor, 0);
                secDescStr = sd.GetSddlForm(AccessControlSections.All);
            }

            writer.WriteLine(indent + "  SECURITY DESCRIPTOR RECORD");
            writer.WriteLine(indent + "         Hash: " + rec.Hash);
            writer.WriteLine(indent + "           Id: " + rec.Id);
            writer.WriteLine(indent + "  File Offset: " + rec.OffsetInFile);
            writer.WriteLine(indent + "         Size: " + rec.EntrySize);
            writer.WriteLine(indent + "        Value: " + secDescStr);
        }
    }
}
private void ReadResourceTable()
{
    _resources = new Dictionary<uint, List<ResourceInfo>>();
    using (Stream s = OpenResourceStream(_fileHeader.OffsetTableHeader))
    {
        long numRead = 0;
        while (numRead < s.Length)
        {
            byte[] resBuffer = StreamUtilities.ReadExact(s, ResourceInfo.Size);
            numRead += ResourceInfo.Size;

            ResourceInfo info = new ResourceInfo();
            info.Read(resBuffer, 0);

            // Bucket resources by the first 32 bits of their hash, allowing
            // for (rare) collisions by keeping a list per bucket.
            uint hashHash = EndianUtilities.ToUInt32LittleEndian(info.Hash, 0);
            if (!_resources.ContainsKey(hashHash))
            {
                _resources[hashHash] = new List<ResourceInfo>(1);
            }

            _resources[hashHash].Add(info);
        }
    }
}
public override void LoadBtree(Context context)
{
    Children = new Dictionary<ulong, BTreeExtentHeader>(NumberOfRecords);
    for (int i = 0; i < NumberOfRecords; i++)
    {
        // Level 1 nodes point directly at leaves; higher levels point at further nodes.
        BTreeExtentHeader child;
        if (Level == 1)
        {
            child = new BTreeExtentLeaf();
        }
        else
        {
            child = new BTreeExtentNode();
        }

        var data = context.RawStream;
        data.Position = Extent.GetOffset(context, Pointer[i]);
        var buffer = StreamUtilities.ReadExact(data, (int)context.SuperBlock.Blocksize);
        child.ReadFrom(buffer, 0);
        if (child.Magic != BtreeMagic)
        {
            throw new IOException("invalid btree directory magic");
        }

        child.LoadBtree(context);
        Children.Add(Keys[i], child);
    }
}
public static byte[] Receive(Stream stream)
{
    MemoryStream ms = null;
    bool lastFragFound = false;
    while (!lastFragFound)
    {
        // Each fragment is prefixed by a 4-byte big-endian header: the top
        // bit flags the last fragment, the low 31 bits give its length.
        byte[] header = StreamUtilities.ReadExact(stream, 4);
        uint headerVal = EndianUtilities.ToUInt32BigEndian(header, 0);

        lastFragFound = (headerVal & 0x80000000) != 0;
        byte[] frag = StreamUtilities.ReadExact(stream, (int)(headerVal & 0x7FFFFFFF));

        if (ms != null)
        {
            ms.Write(frag, 0, frag.Length);
        }
        else if (!lastFragFound)
        {
            ms = new MemoryStream();
            ms.Write(frag, 0, frag.Length);
        }
        else
        {
            // Single-fragment message: return it directly, avoiding a copy.
            return frag;
        }
    }

    return ms.ToArray();
}
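// The framing read above follows the RPC record-marking convention: a 4-byte
// big-endian header whose top bit flags the final fragment and whose low 31
// bits give the fragment length. Below is a minimal sketch of the matching
// send side; Send is a hypothetical helper, not part of the class above, and
// it emits only the common single-fragment case.
public static void Send(Stream stream, byte[] message)
{
    // High bit set: this is the last (and only) fragment.
    uint headerVal = (uint)message.Length | 0x80000000;

    byte[] header = new byte[4];
    header[0] = (byte)(headerVal >> 24);
    header[1] = (byte)(headerVal >> 16);
    header[2] = (byte)(headerVal >> 8);
    header[3] = (byte)headerVal;

    stream.Write(header, 0, 4);
    stream.Write(message, 0, message.Length);
}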
/// <summary>
/// Patches a boot image (esp. for ISOLINUX) before it is written to the disk.
/// </summary>
/// <param name="bootImage">The original (master) boot image.</param>
/// <param name="pvdLba">The logical block address of the primary volume descriptor.</param>
/// <param name="bootImageLba">The logical block address of the boot image itself.</param>
/// <returns>A stream containing the patched boot image - does not need to be disposed.</returns>
private Stream PatchBootImage(Stream bootImage, uint pvdLba, uint bootImageLba)
{
    // Early-exit if no patching to do...
    if (!UpdateIsolinuxBootTable)
    {
        return bootImage;
    }

    byte[] bootData = StreamUtilities.ReadExact(bootImage, (int)bootImage.Length);

    // Clear the boot info table region (bytes 8-63) before filling it in.
    Array.Clear(bootData, 8, 56);

    uint checkSum = 0;
    for (int i = 64; i < bootData.Length; i += 4)
    {
        checkSum += EndianUtilities.ToUInt32LittleEndian(bootData, i);
    }

    EndianUtilities.WriteBytesLittleEndian(pvdLba, bootData, 8);
    EndianUtilities.WriteBytesLittleEndian(bootImageLba, bootData, 12);
    EndianUtilities.WriteBytesLittleEndian(bootData.Length, bootData, 16);
    EndianUtilities.WriteBytesLittleEndian(checkSum, bootData, 20);

    return new MemoryStream(bootData, false);
}
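// For reference, a minimal sketch of verifying the boot info table the patcher
// writes: the checksum at offset 20 should equal the little-endian 32-bit sum
// of all words from byte 64 to the end of the image. BootInfoTableChecksumValid
// is a hypothetical helper and assumes a little-endian host for BitConverter.
static bool BootInfoTableChecksumValid(byte[] bootData)
{
    uint sum = 0;
    for (int i = 64; i + 4 <= bootData.Length; i += 4)
    {
        sum += BitConverter.ToUInt32(bootData, i);
    }

    return sum == BitConverter.ToUInt32(bootData, 20);
}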
public static byte[] ReadExtent(UdfContext context, LongAllocationDescriptor extent)
{
    LogicalPartition partition = context.LogicalPartitions[extent.ExtentLocation.Partition];
    long pos = extent.ExtentLocation.LogicalBlock * partition.LogicalBlockSize;
    return StreamUtilities.ReadExact(partition.Content, pos, (int)extent.ExtentLength);
}
/// <summary>
/// Initializes a new instance of the BiosPartitionedDiskBuilder class by
/// cloning the partition structure of a source disk.
/// </summary>
/// <param name="sourceDisk">The disk to clone.</param>
public BiosPartitionedDiskBuilder(VirtualDisk sourceDisk)
{
    if (sourceDisk == null)
    {
        throw new ArgumentNullException(nameof(sourceDisk));
    }

    _capacity = sourceDisk.Capacity;
    _biosGeometry = sourceDisk.BiosGeometry;
    _bootSectors = new SparseMemoryStream();
    _bootSectors.SetLength(_capacity);

    // Copy the partition table's metadata extents into the sparse boot-sector stream.
    foreach (StreamExtent extent in new BiosPartitionTable(sourceDisk).GetMetadataDiskExtents())
    {
        sourceDisk.Content.Position = extent.Start;
        byte[] buffer = StreamUtilities.ReadExact(sourceDisk.Content, (int)extent.Length);

        _bootSectors.Position = extent.Start;
        _bootSectors.Write(buffer, 0, buffer.Length);
    }

    PartitionTable = new BiosPartitionTable(_bootSectors, _biosGeometry);

    _partitionContents = new Dictionary<int, BuilderExtent>();
}
public FileRecord GetRecord(long index, bool ignoreMagic, bool ignoreBitmap)
{
    if (ignoreBitmap || _bitmap == null || _bitmap.IsPresent(index))
    {
        FileRecord result = _recordCache[index];
        if (result != null)
        {
            return result;
        }

        if ((index + 1) * RecordSize <= _recordStream.Length)
        {
            _recordStream.Position = index * RecordSize;
            byte[] recordBuffer = StreamUtilities.ReadExact(_recordStream, RecordSize);

            result = new FileRecord(_bytesPerSector);
            result.FromBytes(recordBuffer, 0, ignoreMagic);
            result.LoadedIndex = (uint)index;
        }
        else
        {
            // Record lies beyond the end of the stream - synthesize a blank record.
            result = new FileRecord(_bytesPerSector, RecordSize, (uint)index);
        }

        _recordCache[index] = result;
        return result;
    }

    return null;
}
public VfsXfsFileSystem(Stream stream, FileSystemParameters parameters)
    : base(new XfsFileSystemOptions(parameters))
{
    stream.Position = 0;
    byte[] superblockData = StreamUtilities.ReadExact(stream, 264);

    SuperBlock superblock = new SuperBlock();
    superblock.ReadFrom(superblockData, 0);

    if (superblock.Magic != SuperBlock.XfsMagic)
    {
        throw new IOException("Invalid superblock magic - probably not an xfs file system");
    }

    Context = new Context
    {
        RawStream = stream,
        SuperBlock = superblock,
        Options = (XfsFileSystemOptions)Options
    };

    var allocationGroups = new AllocationGroup[superblock.AgCount];
    long offset = 0;
    for (int i = 0; i < allocationGroups.Length; i++)
    {
        var ag = new AllocationGroup(Context, offset);
        allocationGroups[ag.InodeBtreeInfo.SequenceNumber] = ag;
        offset = (XFS_AG_DADDR(Context.SuperBlock, i + 1, XFS_AGF_DADDR(Context.SuperBlock)) << BBSHIFT) -
                 superblock.SectorSize;
    }

    Context.AllocationGroups = allocationGroups;
    RootDirectory = new Directory(Context, Context.GetInode(superblock.RootInode));
}
public Bin(RegistryHive hive, Stream stream)
{
    _hive = hive;
    _fileStream = stream;
    _streamPos = stream.Position;

    stream.Position = _streamPos;
    byte[] buffer = StreamUtilities.ReadExact(stream, 0x20);
    _header = new BinHeader();
    _header.ReadFrom(buffer, 0);

    _fileStream.Position = _streamPos;
    _buffer = StreamUtilities.ReadExact(_fileStream, _header.BinSize);

    // Gather list of all free cells.
    _freeCells = new List<Range<int, int>>();
    int pos = 0x20;
    while (pos < _buffer.Length)
    {
        // A positive cell size marks a free cell; allocated cells have negative sizes.
        int size = EndianUtilities.ToInt32LittleEndian(_buffer, pos);
        if (size > 0)
        {
            _freeCells.Add(new Range<int, int>(pos, size));
        }

        pos += Math.Abs(size);
    }
}
/// <summary>
/// Initializes a new instance of the RegistryHive class.
/// </summary>
/// <param name="hive">The stream containing the registry hive.</param>
/// <param name="ownership">Whether the new object assumes ownership of the stream.</param>
public RegistryHive(Stream hive, Ownership ownership)
{
    _fileStream = hive;
    _fileStream.Position = 0;
    _ownsStream = ownership;

    byte[] buffer = StreamUtilities.ReadExact(_fileStream, HiveHeader.HeaderSize);

    _header = new HiveHeader();
    _header.ReadFrom(buffer, 0);

    // Walk the chain of bins, reading just the header of each.
    _bins = new List<BinHeader>();
    int pos = 0;
    while (pos < _header.Length)
    {
        _fileStream.Position = BinStart + pos;
        byte[] headerBuffer = StreamUtilities.ReadExact(_fileStream, BinHeader.HeaderSize);
        BinHeader header = new BinHeader();
        header.ReadFrom(headerBuffer, 0);

        _bins.Add(header);
        pos += header.BinSize;
    }
}
public override void ReadClusters(long startVcn, int count, byte[] buffer, int offset)
{
    StreamUtilities.AssertBufferParameters(buffer, offset, count * _bytesPerCluster);

    int runIdx = 0;
    int totalRead = 0;
    while (totalRead < count)
    {
        long focusVcn = startVcn + totalRead;

        runIdx = _cookedRuns.FindDataRun(focusVcn, runIdx);
        CookedDataRun run = _cookedRuns[runIdx];

        int toRead = (int)Math.Min(count - totalRead, run.Length - (focusVcn - run.StartVcn));

        if (run.IsSparse)
        {
            // Sparse runs have no backing clusters - they read as zeros.
            Array.Clear(buffer, offset + totalRead * _bytesPerCluster, toRead * _bytesPerCluster);
        }
        else
        {
            long lcn = _cookedRuns[runIdx].StartLcn + (focusVcn - run.StartVcn);
            _fsStream.Position = lcn * _bytesPerCluster;
            StreamUtilities.ReadExact(_fsStream, buffer, offset + totalRead * _bytesPerCluster, toRead * _bytesPerCluster);
        }

        totalRead += toRead;
    }
}
public override bool DetectIsPartitioned(Stream s)
{
    if (s.Length < 1024)
    {
        return false;
    }

    s.Position = 0;

    byte[] initialBytes = StreamUtilities.ReadExact(s, 1024);

    // Block zero signature 'ER' (0x4552) identifies an Apple-formatted disk.
    BlockZero b0 = new BlockZero();
    b0.ReadFrom(initialBytes, 0);
    if (b0.Signature != 0x4552)
    {
        return false;
    }

    // The first partition map entry follows at offset 512, signature 'PM' (0x504D).
    PartitionMapEntry initialPart = new PartitionMapEntry(s);
    initialPart.ReadFrom(initialBytes, 512);
    return initialPart.Signature == 0x504d;
}
/// <summary>
/// Initializes a new instance of the SdiFile class.
/// </summary>
/// <param name="stream">The stream formatted as an SDI file.</param>
/// <param name="ownership">Whether to pass ownership of <c>stream</c> to the new instance.</param>
public SdiFile(Stream stream, Ownership ownership)
{
    _stream = stream;
    _ownership = ownership;

    byte[] page = StreamUtilities.ReadExact(_stream, 512);

    _header = new FileHeader();
    _header.ReadFrom(page, 0);

    _stream.Position = _header.PageAlignment * 512;
    byte[] toc = StreamUtilities.ReadExact(_stream, (int)(_header.PageAlignment * 512));

    // The table of contents is a sequence of records terminated by a zero entry.
    _sections = new List<SectionRecord>();
    int pos = 0;
    while (EndianUtilities.ToUInt64LittleEndian(toc, pos) != 0)
    {
        SectionRecord record = new SectionRecord();
        record.ReadFrom(toc, pos);

        _sections.Add(record);

        pos += SectionRecord.RecordSize;
    }
}
/// <summary>
/// Creates a new partition table on a disk.
/// </summary>
/// <param name="disk">The stream containing the disk data.</param>
/// <param name="diskGeometry">The geometry of the disk.</param>
/// <returns>An object to access the newly created partition table.</returns>
public static BiosPartitionTable Initialize(Stream disk, Geometry diskGeometry)
{
    Stream data = disk;

    byte[] bootSector;
    if (data.Length >= Sizes.Sector)
    {
        data.Position = 0;
        bootSector = StreamUtilities.ReadExact(data, Sizes.Sector);
    }
    else
    {
        bootSector = new byte[Sizes.Sector];
    }

    // Wipe all four 16-byte partition table entries
    Array.Clear(bootSector, 0x01BE, 16 * 4);

    // Marker bytes
    bootSector[510] = 0x55;
    bootSector[511] = 0xAA;

    data.Position = 0;
    data.Write(bootSector, 0, bootSector.Length);

    return new BiosPartitionTable(disk, diskGeometry);
}
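// A minimal sketch of walking the table this method initializes, assuming the
// classic MBR layout: four 16-byte entries starting at offset 0x01BE, each with
// a partition type byte at offset 4 within the entry (0x00 meaning unused).
// PartitionTypes is a hypothetical helper, not part of BiosPartitionTable.
static IEnumerable<byte> PartitionTypes(byte[] bootSector)
{
    for (int i = 0; i < 4; i++)
    {
        yield return bootSector[0x01BE + 16 * i + 4];
    }
}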
protected bool LoadGrainTable(int index)
{
    // Current grain table, so early-out
    if (_grainTable != null && _currentGrainTable == index)
    {
        return true;
    }

    // This grain table not present in grain directory, so can't load it...
    if (_globalDirectory[index] == 0)
    {
        return false;
    }

    // Cached grain table?
    byte[] cachedGrainTable = _grainTableCache[index];
    if (cachedGrainTable != null)
    {
        _currentGrainTable = index;
        _grainTable = cachedGrainTable;
        return true;
    }

    // Not cached, so read
    _fileStream.Position = (long)_globalDirectory[index] * Sizes.Sector;
    byte[] newGrainTable = StreamUtilities.ReadExact(_fileStream, (int)_header.NumGTEsPerGT * 4);
    _currentGrainTable = index;
    _grainTable = newGrainTable;
    _grainTableCache[index] = newGrainTable;

    return true;
}
private BiosPartitionRecord[] GetPrimaryRecords()
{
    _diskData.Position = 0;
    byte[] bootSector = StreamUtilities.ReadExact(_diskData, Sizes.Sector);

    return ReadPrimaryRecords(bootSector);
}
private void SelfCheckIndex(File file, string name)
{
    ReportInfo("About to self-check index {0} in file {1} (MFT:{2})", name, file.BestName, file.IndexInMft);

    IndexRoot root = file.GetStream(AttributeType.IndexRoot, name).GetContent<IndexRoot>();

    byte[] rootBuffer;
    using (Stream s = file.OpenStream(AttributeType.IndexRoot, name, FileAccess.Read))
    {
        rootBuffer = StreamUtilities.ReadExact(s, (int)s.Length);
    }

    Bitmap indexBitmap = null;
    if (file.GetStream(AttributeType.Bitmap, name) != null)
    {
        indexBitmap = new Bitmap(file.OpenStream(AttributeType.Bitmap, name, FileAccess.Read), long.MaxValue);
    }

    if (!SelfCheckIndexNode(rootBuffer, IndexRoot.HeaderOffset, indexBitmap, root, file.BestName, name))
    {
        ReportError("Index {0} in file {1} (MFT:{2}) has corrupt IndexRoot attribute", name, file.BestName, file.IndexInMft);
    }
    else
    {
        ReportInfo("Self-check of index {0} in file {1} (MFT:{2}) complete", name, file.BestName, file.IndexInMft);
    }
}
/// <summary>
/// Closes the stream.
/// </summary>
protected override void Dispose(bool disposing)
{
    if (_mode == CompressionMode.Decompress)
    {
        // Can only check the Adler checksum on seekable streams. Since DeflateStream
        // aggressively caches input, it normally has already consumed the footer.
        if (_stream.CanSeek)
        {
            _stream.Seek(-4, SeekOrigin.End);
            byte[] footerBuffer = StreamUtilities.ReadExact(_stream, 4);
            if (EndianUtilities.ToInt32BigEndian(footerBuffer, 0) != _adler32.Value)
            {
                throw new InvalidDataException("Corrupt decompressed data detected");
            }
        }

        _deflateStream.Dispose();
    }
    else
    {
        _deflateStream.Dispose();

        byte[] footerBuffer = new byte[4];
        EndianUtilities.WriteBytesBigEndian(_adler32.Value, footerBuffer, 0);
        _stream.Write(footerBuffer, 0, 4);
    }

    base.Dispose(disposing);
}
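// For reference, a self-contained sketch of the zlib Adler-32 definition the
// _adler32 helper is assumed to implement: two running sums modulo 65521,
// with the second sum packed into the upper 16 bits of the result.
static uint Adler32(byte[] data)
{
    const uint Mod = 65521;
    uint a = 1, b = 0;
    foreach (byte d in data)
    {
        a = (a + d) % Mod;
        b = (b + a) % Mod;
    }

    return (b << 16) | a;
}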
public Index(File file, string name, BiosParameterBlock bpb, UpperCase upCase)
{
    _file = file;
    _name = name;
    _bpb = bpb;
    IsFileIndex = name == "$I30";

    _lock = new object();
    _blockCache = new ObjectCache<long, IndexBlock>();

    _root = _file.GetStream(AttributeType.IndexRoot, _name).GetContent<IndexRoot>();
    _comparer = _root.GetCollator(upCase);

    using (Stream s = _file.OpenStream(AttributeType.IndexRoot, _name, FileAccess.Read))
    {
        byte[] buffer = StreamUtilities.ReadExact(s, (int)s.Length);
        _rootNode = new IndexNode(WriteRootNodeToDisk, 0, this, true, buffer, IndexRoot.HeaderOffset);

        // Give the attribute some room to breathe, so long as it doesn't squeeze others out
        // BROKEN, BROKEN, BROKEN - how to figure this out? Query at the point of adding entries to the root node?
        _rootNode.TotalSpaceAvailable += _file.MftRecordFreeSpace(AttributeType.IndexRoot, _name) - 100;
    }

    if (_file.StreamExists(AttributeType.IndexAllocation, _name))
    {
        AllocationStream = _file.OpenStream(AttributeType.IndexAllocation, _name, FileAccess.ReadWrite);
    }

    if (_file.StreamExists(AttributeType.Bitmap, _name))
    {
        _indexBitmap = new Bitmap(_file.OpenStream(AttributeType.Bitmap, _name, FileAccess.ReadWrite), long.MaxValue);
    }
}
private void CheckHeader()
{
    // Dynamic and differencing VHDs keep a copy of the footer at the start of the file.
    _fileStream.Position = 0;
    byte[] headerSector = StreamUtilities.ReadExact(_fileStream, Sizes.Sector);

    Footer header = Footer.FromBytes(headerSector, 0);
    if (!header.IsValid())
    {
        ReportError("Invalid VHD footer at start of file");
    }

    _fileStream.Position = _fileStream.Length - Sizes.Sector;
    byte[] footerSector = StreamUtilities.ReadExact(_fileStream, Sizes.Sector);

    if (!Utilities.AreEqual(footerSector, headerSector))
    {
        ReportError("Header and footer are different");
    }

    if (_footer == null || !_footer.IsValid())
    {
        _footer = header;
    }
}
private IEnumerable<StreamExtent> BatControlledFileExtents()
{
    _batStream.Position = 0;
    byte[] batData = StreamUtilities.ReadExact(_batStream, (int)_batStream.Length);

    uint blockSize = _metadata.FileParameters.BlockSize;
    long chunkSize = (1L << 23) * _metadata.LogicalSectorSize;
    int chunkRatio = (int)(chunkSize / _metadata.FileParameters.BlockSize);

    List<StreamExtent> extents = new List<StreamExtent>();
    for (int i = 0; i < batData.Length; i += 8)
    {
        // Each 64-bit BAT entry carries the block's file offset (in MiB) in its upper bits.
        ulong entry = EndianUtilities.ToUInt64LittleEndian(batData, i);
        long filePos = (long)((entry >> 20) & 0xFFFFFFFFFFF) * Sizes.OneMiB;
        if (filePos != 0)
        {
            if (i % ((chunkRatio + 1) * 8) == chunkRatio * 8)
            {
                // This is a sector bitmap block (always 1MB in size)
                extents.Add(new StreamExtent(filePos, Sizes.OneMiB));
            }
            else
            {
                extents.Add(new StreamExtent(filePos, blockSize));
            }
        }
    }

    extents.Sort();

    return extents;
}
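// A minimal sketch of the BAT entry layout the loop above relies on, per the
// VHDX format: bits 0-2 hold the entry state, bits 3-19 are reserved, and
// bits 20-63 hold the file offset in 1 MiB units. DecodeBatEntry is a
// hypothetical helper, not part of this class.
static (int State, long FileOffset) DecodeBatEntry(ulong entry)
{
    int state = (int)(entry & 0x7);
    long fileOffset = (long)((entry >> 20) & 0xFFFFFFFFFFF) * 1024 * 1024;
    return (state, fileOffset);
}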
public MetadataPartition(UdfContext context, LogicalVolumeDescriptor volumeDescriptor,
                         MetadataPartitionMap partitionMap)
    : base(context, volumeDescriptor)
{
    _partitionMap = partitionMap;

    PhysicalPartition physical = context.PhysicalPartitions[partitionMap.PartitionNumber];
    long fileEntryPos = partitionMap.MetadataFileLocation * (long)volumeDescriptor.LogicalBlockSize;

    byte[] entryData = StreamUtilities.ReadExact(physical.Content, fileEntryPos, _context.PhysicalSectorSize);
    if (!DescriptorTag.IsValid(entryData, 0))
    {
        throw new IOException("Invalid descriptor tag looking for Metadata file entry");
    }

    DescriptorTag dt = EndianUtilities.ToStruct<DescriptorTag>(entryData, 0);
    if (dt.TagIdentifier == TagIdentifier.ExtendedFileEntry)
    {
        ExtendedFileEntry efe = EndianUtilities.ToStruct<ExtendedFileEntry>(entryData, 0);
        _metadataFile = new File(context, physical, efe, _volumeDescriptor.LogicalBlockSize);
    }
    else
    {
        throw new NotImplementedException("Only EFE implemented for Metadata file entry");
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="DiskImageFile"/> class.
/// </summary>
/// <param name="stream">The stream to read.</param>
/// <param name="ownsStream">Indicates if the new instance should control the lifetime of the stream.</param>
public DiskImageFile(Stream stream, Ownership ownsStream)
{
    this.udifHeader = new UdifResourceFile();
    this.stream = stream ?? throw new ArgumentNullException(nameof(stream));
    this.ownsStream = ownsStream;

    if (stream.Length < this.udifHeader.Size)
    {
        throw new InvalidDataException("The file is not a valid DMG file: could not read the UDIF header.");
    }

    // The UDIF header is a trailer: it lives at the very end of the file.
    stream.Position = stream.Length - this.udifHeader.Size;
    byte[] data = StreamUtilities.ReadExact(stream, this.udifHeader.Size);

    this.udifHeader.ReadFrom(data, 0);

    if (!this.udifHeader.SignatureValid)
    {
        throw new InvalidDataException("The file is not a valid DMG file: could not read the UDIF header.");
    }

    // The header points at an embedded XML property list describing the image's resources.
    stream.Position = (long)this.udifHeader.XmlOffset;
    byte[] xmlData = StreamUtilities.ReadExact(stream, (int)this.udifHeader.XmlLength);
    Dictionary<string, object> plist = (Dictionary<string, object>)XmlPropertyListParser.Parse(xmlData).ToObject();

    this.resources = ResourceFork.FromPlist(plist);
    this.Buffer = new UdifBuffer(stream, this.resources, this.udifHeader.SectorCount);
}
internal ShortResourceHeader LocateImage(int index)
{
    int i = 0;

    using (Stream s = OpenResourceStream(_fileHeader.OffsetTableHeader))
    {
        long numRead = 0;
        while (numRead < s.Length)
        {
            byte[] resBuffer = StreamUtilities.ReadExact(s, ResourceInfo.Size);
            numRead += ResourceInfo.Size;

            ResourceInfo info = new ResourceInfo();
            info.Read(resBuffer, 0);

            // Image metadata resources are flagged; count them until the requested index.
            if ((info.Header.Flags & ResourceFlags.MetaData) != 0)
            {
                if (i == index)
                {
                    return info.Header;
                }

                ++i;
            }
        }
    }

    return null;
}
internal DirectoryEntry(FatFileSystemOptions options, Stream stream, FatType fatVariant)
{
    _options = options;
    _fatVariant = fatVariant;

    // FAT directory entries are a fixed 32 bytes.
    byte[] buffer = StreamUtilities.ReadExact(stream, 32);
    Load(buffer, 0);
}
public FileResourceStream(Stream baseStream, ShortResourceHeader header, bool lzxCompression, int chunkSize)
{
    _baseStream = baseStream;
    _header = header;
    _lzxCompression = lzxCompression;
    _chunkSize = chunkSize;

    if (baseStream.Length > uint.MaxValue)
    {
        throw new NotImplementedException("Large files >4GB");
    }

    int numChunks = (int)MathUtilities.Ceil(header.OriginalSize, _chunkSize);

    _chunkOffsets = new long[numChunks];
    _chunkLength = new long[numChunks];

    // The resource starts with a table of chunk offsets; the first chunk's
    // offset (zero) is implicit, so only numChunks - 1 entries are stored.
    for (int i = 1; i < numChunks; ++i)
    {
        _chunkOffsets[i] = EndianUtilities.ToUInt32LittleEndian(StreamUtilities.ReadExact(_baseStream, 4), 0);
        _chunkLength[i - 1] = _chunkOffsets[i] - _chunkOffsets[i - 1];
    }

    _chunkLength[numChunks - 1] = _baseStream.Length - _baseStream.Position - _chunkOffsets[numChunks - 1];
    _offsetDelta = _baseStream.Position;

    _currentChunk = -1;
}
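// A minimal sketch of the offset-table arithmetic above, under the same
// convention: the first chunk's offset is implicitly zero, so chunk lengths
// fall out as successive differences, with the last chunk running to the end
// of the data. ChunkLengths is a hypothetical helper.
static long[] ChunkLengths(long[] offsets, long dataLength)
{
    long[] lengths = new long[offsets.Length];
    for (int i = 1; i < offsets.Length; i++)
    {
        lengths[i - 1] = offsets[i] - offsets[i - 1];
    }

    lengths[offsets.Length - 1] = dataLength - offsets[offsets.Length - 1];
    return lengths;
}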
/// <summary>
/// Makes a best guess at the geometry of a disk.
/// </summary>
/// <param name="disk">Stream containing the disk image to detect the geometry from.</param>
/// <returns>The detected geometry.</returns>
public static Geometry DetectGeometry(Stream disk)
{
    if (disk.Length >= Sizes.Sector)
    {
        disk.Position = 0;
        byte[] bootSector = StreamUtilities.ReadExact(disk, Sizes.Sector);
        if (bootSector[510] == 0x55 && bootSector[511] == 0xAA)
        {
            // Derive the geometry from the largest head / sector values used
            // by the primary partition records.
            byte maxHead = 0;
            byte maxSector = 0;
            foreach (BiosPartitionRecord record in ReadPrimaryRecords(bootSector))
            {
                maxHead = Math.Max(maxHead, record.EndHead);
                maxSector = Math.Max(maxSector, record.EndSector);
            }

            if (maxHead > 0 && maxSector > 0)
            {
                int cylSize = (maxHead + 1) * maxSector * 512;
                return new Geometry(disk.Length, (int)MathUtilities.Ceil(disk.Length, cylSize), maxHead + 1, maxSector);
            }
        }
    }

    return Geometry.FromCapacity(disk.Length);
}
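// A small worked example of the cylinder arithmetic above, with illustrative
// values: partition records ending at head 15, sector 63 imply a geometry of
// 16 heads x 63 sectors, i.e. 16 * 63 * 512 = 516096 bytes per cylinder, and
// an 8 GiB image then rounds up to 16645 cylinders.
static void GeometryExample()
{
    int maxHead = 15, maxSector = 63;
    int cylSize = (maxHead + 1) * maxSector * 512;         // 516096 bytes
    long diskLength = 8L * 1024 * 1024 * 1024;             // 8 GiB image
    long cylinders = (diskLength + cylSize - 1) / cylSize; // ceil -> 16645
    Console.WriteLine($"{cylinders} cylinders of {cylSize} bytes");
}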
private void AllocateGrains(int grainTable, int grain, int count)
{
    // Calculate start pos for new grain
    long grainStartPos = (long)_serverHeader.FreeSector * Sizes.Sector;

    // Copy-on-write semantics, read the bytes from parent and write them out to this extent.
    _parentDiskStream.Position = _diskOffset +
                                 (grain + _header.NumGTEsPerGT * grainTable) * _header.GrainSize * Sizes.Sector;
    byte[] content = StreamUtilities.ReadExact(_parentDiskStream, (int)(_header.GrainSize * Sizes.Sector * count));
    _fileStream.Position = grainStartPos;
    _fileStream.Write(content, 0, content.Length);

    // Update next-free-sector in disk header
    _serverHeader.FreeSector += (uint)MathUtilities.Ceil(content.Length, Sizes.Sector);
    byte[] headerBytes = _serverHeader.GetBytes();
    _fileStream.Position = 0;
    _fileStream.Write(headerBytes, 0, headerBytes.Length);

    LoadGrainTable(grainTable);
    for (int i = 0; i < count; ++i)
    {
        SetGrainTableEntry(grain + i, (uint)(grainStartPos / Sizes.Sector + _header.GrainSize * i));
    }

    WriteGrainTable();
}
public Directory(UdfContext context, LogicalPartition partition, FileEntry fileEntry)
    : base(context, partition, fileEntry, (uint)partition.LogicalBlockSize)
{
    if (FileContent.Capacity > int.MaxValue)
    {
        throw new NotImplementedException("Very large directory");
    }

    _entries = new List<FileIdentifier>();

    byte[] contentBytes = StreamUtilities.ReadExact(FileContent, 0, (int)FileContent.Capacity);

    int pos = 0;
    while (pos < contentBytes.Length)
    {
        FileIdentifier id = new FileIdentifier();
        int size = id.ReadFrom(contentBytes, pos);

        // Skip deleted entries and the parent-directory entry.
        if ((id.FileCharacteristics & (FileCharacteristic.Deleted | FileCharacteristic.Parent)) == 0)
        {
            _entries.Add(id);
        }

        pos += size;
    }
}