// The maximum NTFS file size is 2^64 bytes, so total number of file clusters can be represented using long
/// <summary>
/// Allocates additional clusters from the volume and appends them to this attribute's data run sequence.
/// </summary>
/// <param name="clustersToAllocate">Number of clusters to allocate</param>
public void AllocateAdditionalClusters(NTFSVolume volume, long clustersToAllocate)
{
    // Request the allocation to start at the cluster immediately following the last cluster
    // currently in use, so the first returned run can be merged with the existing last run.
    // (DataLastLCN itself is already allocated to this attribute and can never be returned by
    // the allocator - without the +1 the merge branch below would be unreachable.)
    ulong desiredStartLCN = (ulong)DataRunSequence.DataLastLCN + 1;
    KeyValuePairList<ulong, long> freeClusterRunList = volume.AllocateClusters(desiredStartLCN, clustersToAllocate);
    for (int index = 0; index < freeClusterRunList.Count; index++)
    {
        ulong runStartLCN = freeClusterRunList[index].Key;
        long runLength = freeClusterRunList[index].Value;
        // Only the first returned run can be contiguous with the existing last run
        bool mergeWithLastRun = (index == 0 && runStartLCN == desiredStartLCN);
        if (mergeWithLastRun)
        {
            // we append this run to the last run
            DataRun lastRun = DataRunSequence[DataRunSequence.Count - 1];
            lastRun.RunLength += runLength;
            HighestVCN += runLength;
        }
        else
        {
            // A data run's offset is stored relative to the start LCN of the previous run
            DataRun run = new DataRun();
            ulong previousLCN = (ulong)DataRunSequence.LastDataRunStartLCN;
            run.RunOffset = (long)(runStartLCN - previousLCN);
            run.RunLength = runLength;
            HighestVCN += runLength;
            DataRunSequence.Add(run);
        }
    }
}
/// <summary>
/// Provides access to the data of a non-resident attribute record belonging to the given file record.
/// </summary>
public NonResidentAttributeData(NTFSVolume volume, FileRecord fileRecord, NonResidentAttributeRecord attributeRecord)
{
    m_volume = volume;
    m_fileRecord = fileRecord;
    m_attributeRecord = attributeRecord;
    // The content type is derived from the attribute type and the owning file record
    m_contentType = GetContentType(fileRecord, attributeRecord.AttributeType);
}
/// <summary>
/// Loads the index ($INDEX_ROOT, and $INDEX_ALLOCATION / $BITMAP when present) for the given
/// attribute type from the file record.
/// </summary>
public IndexData(NTFSVolume volume, FileRecord fileRecord, AttributeType indexedAttributeType)
{
    m_volume = volume;
    m_fileRecord = fileRecord;
    m_indexedAttributeType = indexedAttributeType;
    m_indexName = IndexHelper.GetIndexName(indexedAttributeType);
    m_rootRecord = (IndexRootRecord)m_fileRecord.GetAttributeRecord(AttributeType.IndexRoot, m_indexName);
    // I have observed the NTFS v5.1 driver keeping the IndexAllocation and associated Bitmap attributes
    // after deleting files from the directory even though m_rootRecord.IsParentNode is set to false.
    m_indexAllocationRecord = (IndexAllocationRecord)m_fileRecord.GetAttributeRecord(AttributeType.IndexAllocation, m_indexName);
    m_bitmapRecord = m_fileRecord.GetAttributeRecord(AttributeType.Bitmap, m_indexName);
    bool hasAllocation = (m_indexAllocationRecord != null);
    bool hasBitmap = (m_bitmapRecord != null);
    if (hasAllocation && hasBitmap)
    {
        m_indexAllocationData = new NonResidentAttributeData(m_volume, m_fileRecord, m_indexAllocationRecord);
        // One usable bit per index record in the allocation
        long numberOfUsableBits = (long)(m_indexAllocationRecord.DataLength / m_rootRecord.BytesPerIndexRecord);
        m_bitmapData = new BitmapData(m_volume, m_fileRecord, m_bitmapRecord, numberOfUsableBits);
    }
    else if (m_rootRecord.IsParentNode)
    {
        // A parent node must have both attributes; report whichever is missing
        if (!hasAllocation)
        {
            throw new InvalidDataException("Missing Index Allocation Record");
        }
        throw new InvalidDataException("Missing Index Bitmap Record");
    }
}
/// <summary>
/// Reads clusters from the attribute data; dispatches to the non-resident record or slices the
/// resident data buffer directly.
/// </summary>
/// <param name="clusterVCN">Virtual cluster number to start reading from</param>
/// <param name="count">Number of clusters to read</param>
/// <exception cref="ArgumentOutOfRangeException">clusterVCN is outside the valid range</exception>
public byte[] ReadDataClusters(NTFSVolume volume, long clusterVCN, int count)
{
    if (m_record is NonResidentAttributeRecord)
    {
        return ((NonResidentAttributeRecord)m_record).ReadDataClusters(volume, clusterVCN, count);
    }
    else
    {
        byte[] recordData = ((ResidentAttributeRecord)m_record).Data;
        long numberOfClusters = (long)Math.Ceiling((double)recordData.Length / volume.BytesPerCluster);
        long highestVCN = Math.Max(numberOfClusters - 1, 0);
        if (clusterVCN < 0 || clusterVCN > highestVCN)
        {
            // First ctor argument is the parameter name; the single-string overload would have
            // put the message into ParamName instead of Message (CA2208).
            throw new ArgumentOutOfRangeException("clusterVCN", "Cluster VCN is not within the valid range");
        }
        long offset = clusterVCN * volume.BytesPerCluster;
        int bytesToRead;
        // last cluster could be partial
        if (clusterVCN + count < numberOfClusters)
        {
            bytesToRead = count * volume.BytesPerCluster;
        }
        else
        {
            bytesToRead = (int)(recordData.Length - offset);
        }
        byte[] data = new byte[bytesToRead];
        Array.Copy(recordData, offset, data, 0, bytesToRead);
        return data;
    }
}
/// <summary>
/// Opens the $LogFile system file. Refuses to operate on a dirty log: replaying the NTFS
/// journal is not implemented here.
/// </summary>
/// <exception cref="NotSupportedException">The log indicates the volume was not dismounted cleanly</exception>
public LogFile(NTFSVolume volume) : base(volume, MasterFileTable.LogSegmentReference)
{
    bool logIsClean = IsLogClean();
    if (!logIsClean)
    {
        throw new NotSupportedException("The volume was not dismounted cleanly, the Windows NTFS driver must be used to bring the volume back to a consistent state");
    }
}
/// <param name="useMftMirror">Strap the MFT using the MFT mirror</param>
public MasterFileTable(NTFSVolume volume, bool useMftMirror)
{
    m_volume = volume;
    m_useMftMirror = useMftMirror;
    // NOTE(review): ReadMftRecord() takes no arguments, so it presumably reads the fields
    // assigned above - keep this call after those assignments. Confirm against ReadMftRecord.
    m_mftRecord = ReadMftRecord();
}
/// <param name="useMftMirror">Strap the MFT using the MFT mirror</param>
/// <param name="manageMftMirror">NOTE(review): presumably true when this instance is used to manage $MFTMirr itself - confirm against ReadMftRecord</param>
public MasterFileTable(NTFSVolume volume, bool useMftMirror, bool manageMftMirror)
{
    m_volume = volume;
    m_mftRecord = ReadMftRecord(useMftMirror, manageMftMirror);
    m_mftFile = new NTFSFile(m_volume, m_mftRecord);
    // Attribute records longer than 5/16 of a file record segment are made non-resident
    AttributeRecordLengthToMakeNonResident = m_volume.BytesPerFileRecordSegment * 5 / 16; // We imitate the NTFS v5.1 driver
}
/// <param name="attributeName">The name of the data attribute we wish to access</param>
public NTFSFile(NTFSVolume volume, FileRecord fileRecord, string attributeName)
{
    m_volume = volume;
    m_fileRecord = fileRecord;
    // Locate the named $DATA attribute and wrap it for read/write access
    AttributeRecord dataRecord = fileRecord.GetAttributeRecord(AttributeType.Data, attributeName);
    m_data = new AttributeData(m_volume, m_fileRecord, dataRecord);
}
/// <summary>
/// Will read all of the data the attribute have, this should only be used when the data length is manageable
/// </summary>
/// <exception cref="InvalidOperationException">The attribute spans more clusters than can be read in one call</exception>
public override byte[] GetData(NTFSVolume volume)
{
    long clusterCount = HighestVCN - LowestVCN + 1;
    if (clusterCount > Int32.MaxValue)
    {
        // A specific exception type instead of the bare System.Exception thrown previously;
        // callers catching Exception are unaffected.
        throw new InvalidOperationException("Improper usage of GetData() method");
    }
    return ReadDataClusters(volume, LowestVCN, (int)clusterCount);
}
/// <summary>
/// Reads sectors from the attribute data by reading whole clusters and slicing out the
/// requested sector range.
/// </summary>
/// <param name="firstSectorIndex">Sector index relative to the start of the attribute data</param>
/// <param name="count">Number of sectors to read</param>
public byte[] ReadDataSectors(NTFSVolume volume, long firstSectorIndex, int count)
{
    long startClusterVcn = firstSectorIndex / volume.SectorsPerCluster;
    // The first requested sector may fall in the middle of a cluster
    int leadingSectors = (int)(firstSectorIndex % volume.SectorsPerCluster);
    int clusterCount = (int)Math.Ceiling((double)(leadingSectors + count) / volume.SectorsPerCluster);
    byte[] clusterBytes = ReadDataClusters(volume, startClusterVcn, clusterCount);
    byte[] sectors = new byte[count * volume.BytesPerSector];
    Array.Copy(clusterBytes, leadingSectors * volume.BytesPerSector, sectors, 0, sectors.Length);
    return sectors;
}
/// <summary>
/// Writes sectors into the attribute data using a read-modify-write of the containing clusters.
/// </summary>
/// <param name="firstSectorIndex">Sector index relative to the start of the attribute data</param>
public void WriteDataSectors(NTFSVolume volume, long firstSectorIndex, byte[] data)
{
    int sectorCount = data.Length / volume.BytesPerSector;
    long startClusterVcn = firstSectorIndex / volume.SectorsPerCluster;
    // The write may start in the middle of a cluster, so read the affected clusters first
    int leadingSectors = (int)(firstSectorIndex % volume.SectorsPerCluster);
    int clusterCount = (int)Math.Ceiling((double)(leadingSectors + sectorCount) / volume.SectorsPerCluster);
    byte[] clusterBytes = ReadDataClusters(volume, startClusterVcn, clusterCount);
    Array.Copy(data, 0, clusterBytes, leadingSectors * volume.BytesPerSector, data.Length);
    WriteDataClusters(volume, startClusterVcn, clusterBytes);
}
/// <summary>
/// Collects every file-name entry in the index - both from non-leaf keys and from leaf
/// records - and returns them sorted.
/// </summary>
public KeyValuePairList<MftSegmentReference, FileNameRecord> GetAllEntries(NTFSVolume volume, IndexRootRecord rootRecord)
{
    KeyValuePairList<MftSegmentReference, FileNameRecord> result = new KeyValuePairList<MftSegmentReference, FileNameRecord>();
    // Breadth-first traversal: 'parents' doubles as the work queue (entries are appended while
    // the indexed loop below is still running) and as the final list of non-leaf entries.
    List<IndexNodeEntry> parents = new List<IndexNodeEntry>(rootRecord.IndexEntries);
    List<IndexRecord> leaves = new List<IndexRecord>();
    int parentIndex = 0;
    while (parentIndex < parents.Count)
    {
        IndexNodeEntry parent = parents[parentIndex];
        // Each subnode occupies one index record worth of clusters
        byte[] clusters = this.ReadDataClusters(volume, parent.SubnodeVCN, rootRecord.ClustersPerIndexRecord);
        IndexRecord record = new IndexRecord(clusters, 0);
        if (record.HasChildren)
        {
            foreach (IndexNodeEntry node in record.IndexEntries)
            {
                parents.Add(node);
            }
        }
        else
        {
            leaves.Add(record);
        }
        parentIndex++;
    }
    foreach (IndexNodeEntry node in parents)
    {
        if (!node.IsLastEntry)
        {
            // Some of the tree data in NTFS is contained in non-leaf keys
            FileNameRecord parentRecord = new FileNameRecord(node.Key, 0);
            result.Add(node.SegmentReference, parentRecord);
        }
    }
    foreach (IndexRecord record in leaves)
    {
        foreach (FileNameIndexEntry entry in record.FileNameEntries)
        {
            result.Add(entry.FileReference, entry.Record);
        }
    }
    result.Sort(Compare);
    return(result);
}
/// <summary>
/// Grows the attribute by additionalLength bytes, allocating more clusters only when the
/// slack space in the last allocated cluster is insufficient.
/// </summary>
public void ExtendRecord(NTFSVolume volume, ulong additionalLength)
{
    long clusterCount = (long)Math.Ceiling((double)FileSize / volume.BytesPerCluster);
    // Unused bytes remaining in the last allocated cluster
    int slackBytes = (int)(clusterCount * volume.BytesPerCluster - (long)FileSize);
    if (additionalLength > (uint)slackBytes)
    {
        ulong bytesNeeded = additionalLength - (uint)slackBytes;
        long clustersNeeded = (long)Math.Ceiling((double)bytesNeeded / volume.BytesPerCluster);
        AllocateAdditionalClusters(volume, clustersNeeded);
    }
    FileSize += additionalLength;
}
/// <summary>
/// Writes clusters to the attribute data. Only the last cluster of the attribute may be
/// written partially (it is zero-padded to a full cluster before writing).
/// </summary>
/// <param name="firstClusterVCN">Virtual cluster number to start writing at</param>
/// <exception cref="ArgumentException">A partial cluster was supplied that is not the attribute's last cluster</exception>
/// <exception cref="ArgumentOutOfRangeException">The requested VCN range is outside LowestVCN..HighestVCN</exception>
public void WriteDataClusters(NTFSVolume volume, long firstClusterVCN, byte[] data)
{
    int count;
    long lastClusterVcnToWrite;
    if (data.Length % volume.BytesPerCluster > 0)
    {
        int paddedLength = (int)Math.Ceiling((double)data.Length / volume.BytesPerCluster) * volume.BytesPerCluster;
        // last cluster could be partial, we must zero-fill it before write
        count = paddedLength / volume.BytesPerCluster;
        lastClusterVcnToWrite = firstClusterVCN + count - 1;
        if (lastClusterVcnToWrite == HighestVCN)
        {
            byte[] temp = new byte[paddedLength];
            Array.Copy(data, temp, data.Length);
            data = temp;
        }
        else
        {
            // only the last cluster can be partial
            throw new ArgumentException("Cannot write partial cluster");
        }
    }
    else
    {
        count = data.Length / volume.BytesPerCluster;
        lastClusterVcnToWrite = firstClusterVCN + count - 1;
    }
    if (firstClusterVCN < LowestVCN || lastClusterVcnToWrite > HighestVCN)
    {
        // Report the last VCN of the range (was firstClusterVCN + count, which is one past the
        // end and inconsistent with the message in ReadDataClusters).
        string message = String.Format("Cluster VCN {0}-{1} is not within the valid range ({2}-{3})", firstClusterVCN, lastClusterVcnToWrite, LowestVCN, HighestVCN);
        // First ctor argument is the parameter name; the single-string overload would have put
        // the message into ParamName instead of Message (CA2208).
        throw new ArgumentOutOfRangeException("firstClusterVCN", message);
    }
    KeyValuePairList<long, int> sequence = m_dataRunSequence.TranslateToLCN(firstClusterVCN, count);
    long bytesWritten = 0;
    foreach (KeyValuePair<long, int> run in sequence)
    {
        byte[] clusters = new byte[run.Value * volume.BytesPerCluster];
        Array.Copy(data, bytesWritten, clusters, 0, clusters.Length);
        volume.WriteClusters(run.Key, clusters);
        bytesWritten += clusters.Length;
    }
}
/// <summary>
/// Grows the attribute by additionalLength bytes, dispatching to the non-resident record or
/// reallocating the resident data buffer.
/// </summary>
public void ExtendRecord(NTFSVolume volume, ulong additionalLength)
{
    if (m_record is NonResidentAttributeRecord)
    {
        ((NonResidentAttributeRecord)m_record).ExtendRecord(volume, additionalLength);
    }
    else
    {
        // (Removed an unused local that read this.DataRealSize and discarded it.)
        // If the resident data record becomes too long, it will be replaced with a non-resident
        // data record when the file record will be saved
        byte[] data = ((ResidentAttributeRecord)m_record).Data;
        byte[] temp = new byte[data.Length + (int)additionalLength];
        Array.Copy(data, temp, data.Length);
        ((ResidentAttributeRecord)m_record).Data = temp;
    }
}
/// <summary>
/// Writes clusters into the attribute data; dispatches to the non-resident record or patches
/// the resident data buffer in place.
/// </summary>
/// <param name="clusterVCN">Virtual cluster number to start writing at</param>
/// <exception cref="ArgumentOutOfRangeException">clusterVCN is outside the valid range</exception>
public void WriteDataClusters(NTFSVolume volume, long clusterVCN, byte[] data)
{
    if (m_record is NonResidentAttributeRecord)
    {
        ((NonResidentAttributeRecord)m_record).WriteDataClusters(volume, clusterVCN, data);
    }
    else
    {
        // (Removed an unused local that computed the cluster count of 'data'.)
        byte[] recordData = ((ResidentAttributeRecord)m_record).Data;
        int numberOfClusters = (int)Math.Ceiling((double)recordData.Length / volume.BytesPerCluster);
        long highestVCN = Math.Max(numberOfClusters - 1, 0);
        if (clusterVCN < 0 || clusterVCN > highestVCN)
        {
            // First ctor argument is the parameter name; the single-string overload would have
            // put the message into ParamName instead of Message (CA2208).
            throw new ArgumentOutOfRangeException("clusterVCN", "Cluster VCN is not within the valid range");
        }
        long offset = clusterVCN * volume.BytesPerCluster;
        // Array.Copy throws if 'data' would run past the end of the resident buffer
        Array.Copy(data, 0, recordData, offset, data.Length);
    }
}
/// <param name="count">Maximum number of clusters to read</param>
/// <returns>The cluster data; trimmed when the read includes the attribute's last cluster and
/// that cluster is only partially used (or the caller asked for clusters beyond it)</returns>
public byte[] ReadDataClusters(NTFSVolume volume, long firstClusterVCN, int count)
{
    long lastClusterVcnToRead = firstClusterVCN + count - 1;
    if (firstClusterVCN < LowestVCN || firstClusterVCN > HighestVCN)
    {
        string message = String.Format("Cluster VCN {0}-{1} is not within the valid range ({2}-{3})", firstClusterVCN, lastClusterVcnToRead, LowestVCN, HighestVCN);
        throw new ArgumentOutOfRangeException(message);
    }
    // Clamp so the trim check below recognizes reads that extend past the last cluster
    if (lastClusterVcnToRead > HighestVCN)
    {
        lastClusterVcnToRead = HighestVCN;
    }
    byte[] result = new byte[count * volume.BytesPerCluster];
    // Translate the VCN range to (LCN, run length) pairs; the VCN passed is relative to LowestVCN
    KeyValuePairList<long, int> sequence = m_dataRunSequence.TranslateToLCN(firstClusterVCN - LowestVCN, count);
    long bytesRead = 0;
    foreach (KeyValuePair<long, int> run in sequence)
    {
        byte[] clusters = volume.ReadClusters(run.Key, run.Value);
        Array.Copy(clusters, 0, result, bytesRead, clusters.Length);
        bytesRead += clusters.Length;
    }
    // If the last cluster is only partially used or we have been asked to read clusters beyond the last cluster, trim result.
    // (Either of those cases could only be true if we have just read the last cluster).
    if (lastClusterVcnToRead == (long)HighestVCN)
    {
        long bytesToUse = (long)(FileSize - (ulong)firstClusterVCN * (uint)volume.BytesPerCluster);
        if (bytesToUse < result.Length)
        {
            byte[] resultTrimmed = new byte[bytesToUse];
            Array.Copy(result, resultTrimmed, bytesToUse);
            return(resultTrimmed);
        }
    }
    return(result);
}
/// <summary>
/// Parses the attribute list entries from the given $ATTRIBUTE_LIST attribute record.
/// </summary>
/// <exception cref="InvalidDataException">An entry's length is shorter than the entry header,
/// which indicates corruption (and would otherwise loop forever)</exception>
public AttributeListRecord(NTFSVolume volume, AttributeRecord record)
{
    m_volume = volume;
    m_record = record;
    byte[] data = m_record.GetData(volume);
    int position = 0;
    while (position < data.Length)
    {
        AttributeListEntry entry = new AttributeListEntry(data, position);
        AttributeList.Add(entry);
        position += entry.Length;
        if (entry.Length < AttributeListEntry.HeaderLength)
        {
            string message = String.Format("Invalid attribute list entry, data length: {0}, position: {1}", data.Length, position);
            // InvalidDataException (instead of the bare System.Exception thrown previously),
            // consistent with how corrupt on-disk structures are reported elsewhere in the file.
            throw new InvalidDataException(message);
        }
    }
}
/// <param name="useMftMirror">Strap the MFT using the MFT mirror</param>
/// <param name="manageMftMirror">NOTE(review): presumably true when this instance manages $MFTMirr itself;
/// the MFT bitmap is skipped in that case - confirm against ReadMftRecord</param>
public MasterFileTable(NTFSVolume volume, bool useMftMirror, bool manageMftMirror)
{
    m_volume = volume;
    m_mftRecord = ReadMftRecord(useMftMirror, manageMftMirror);
    AttributeRecord dataRecord = m_mftRecord.DataRecord;
    if (dataRecord == null)
    {
        throw new InvalidDataException("Invalid MFT Record, missing Data attribute");
    }
    m_mftData = new AttributeData(m_volume, m_mftRecord, dataRecord);
    // One bitmap bit per file record segment in the MFT data
    long numberOfUsableBits = (long)(m_mftData.Length / (uint)m_volume.BytesPerFileRecordSegment);
    if (!manageMftMirror)
    {
        AttributeRecord bitmapRecord = m_mftRecord.BitmapRecord;
        if (bitmapRecord == null)
        {
            throw new InvalidDataException("Invalid MFT Record, missing Bitmap attribute");
        }
        m_mftBitmap = new BitmapData(volume, m_mftRecord, bitmapRecord, numberOfUsableBits);
    }
    // Attribute records longer than 5/16 of a file record segment are made non-resident
    AttributeRecordLengthToMakeNonResident = m_volume.BytesPerFileRecordSegment * 5 / 16; // We imitate the NTFS v5.1 driver
    // Larger volumes extend the MFT in larger increments
    if (m_volume.Size >= LargeVolumeThreshold)
    {
        ExtendGranularity = ExtendGranularityLargeVolume;
    }
    else if (m_volume.Size >= MediumVolumeThreshold)
    {
        ExtendGranularity = ExtendGranularityMediumVolume;
    }
    else
    {
        ExtendGranularity = ExtendGranularitySmallVolume;
    }
    NumberOfClustersRequiredToExtend = GetNumberOfClusteredRequiredToExtend();
}
/// <summary>
/// Opens the file whose base file record has the given MFT segment number.
/// </summary>
public NTFSFile(NTFSVolume volume, long baseSegmentNumber)
{
    m_volume = volume;
    m_fileRecord = m_volume.MasterFileTable.GetFileRecord(baseSegmentNumber);
}
// Opens the $AttrDef system file (located via its well-known MFT segment reference)
public AttributeDefinition(NTFSVolume volume) : base(volume, MasterFileTable.AttrDefSegmentReference)
{
}
private bool m_isBufferDirty; // if set to true, we need to write the buffer

// Opens the $Bitmap system file (located via its well-known MFT segment number)
public ClusterUsageBitmap(NTFSVolume volume) : base(volume, MasterFileTable.BitmapSegmentNumber)
{
    m_volume = volume;
}
/// <summary>
/// Provides access to the data of an attribute record (resident or non-resident) belonging
/// to the given file record.
/// </summary>
public AttributeData(NTFSVolume volume, FileRecord fileRecord, AttributeRecord attributeRecord)
{
    m_volume = volume;
    m_fileRecord = fileRecord;
    m_attributeRecord = attributeRecord;
}
/// <summary>
/// Wraps an already-opened NTFSVolume.
/// </summary>
public NTFSFileSystem(NTFSVolume volume)
{
    m_volume = volume;
}
/// <summary>
/// Opens the given raw volume as NTFS; isReadOnly is forwarded to the NTFSVolume wrapper.
/// </summary>
public NTFSFileSystem(Volume volume, bool isReadOnly)
{
    m_volume = new NTFSVolume(volume, isReadOnly);
}
// Convenience overload: strap the MFT from the primary $MFT (not the mirror), not managing the mirror
public MasterFileTable(NTFSVolume volume) : this(volume, false, false)
{
}
/// <param name="clusterVCN">Cluster index</param>
/// <returns>The data of the single requested cluster</returns>
public byte[] ReadDataCluster(NTFSVolume volume, long clusterVCN)
{
    return(ReadDataClusters(volume, clusterVCN, 1));
}
/// <summary>
/// Wraps an already-loaded file record.
/// </summary>
public NTFSFile(NTFSVolume volume, FileRecord fileRecord)
{
    m_volume = volume;
    m_fileRecord = fileRecord;
}
/// <summary>
/// Reads the attribute's entire data; overridden by resident and non-resident record types.
/// </summary>
public abstract byte[] GetData(NTFSVolume volume);
/// <param name="useMftMirror">Strap the MFT using the MFT mirror</param>
// Convenience overload: not managing the mirror
public MasterFileTable(NTFSVolume volume, bool useMftMirror) : this(volume, useMftMirror, false)
{
}