public override int WriteClusters(long startVcn, int count, byte[] buffer, int offset)
{
    StreamUtilities.AssertBufferParameters(buffer, offset, count * _bytesPerCluster);

    int runIdx = 0;
    int totalWritten = 0;
    while (totalWritten < count)
    {
        long focusVcn = startVcn + totalWritten;

        // Find the data run containing the focus VCN, resuming the search
        // from the run found on the previous iteration.
        runIdx = _cookedRuns.FindDataRun(focusVcn, runIdx);
        CookedDataRun run = _cookedRuns[runIdx];

        if (run.IsSparse)
        {
            throw new NotImplementedException("Writing to sparse datarun");
        }

        // Write as many clusters as remain in both the request and this run.
        int toWrite = (int)Math.Min(count - totalWritten, run.Length - (focusVcn - run.StartVcn));

        long lcn = run.StartLcn + (focusVcn - run.StartVcn);
        _fsStream.Position = lcn * _bytesPerCluster;
        _fsStream.Write(buffer, offset + totalWritten * _bytesPerCluster, toWrite * _bytesPerCluster);

        totalWritten += toWrite;
    }

    return 0;
}
public override void ReadClusters(long startVcn, int count, byte[] buffer, int offset)
{
    StreamUtilities.AssertBufferParameters(buffer, offset, count * _bytesPerCluster);

    int runIdx = 0;
    int totalRead = 0;
    while (totalRead < count)
    {
        long focusVcn = startVcn + totalRead;

        runIdx = _cookedRuns.FindDataRun(focusVcn, runIdx);
        CookedDataRun run = _cookedRuns[runIdx];

        int toRead = (int)Math.Min(count - totalRead, run.Length - (focusVcn - run.StartVcn));

        if (run.IsSparse)
        {
            // Sparse runs have no backing clusters - they read as zeros.
            Array.Clear(buffer, offset + totalRead * _bytesPerCluster, toRead * _bytesPerCluster);
        }
        else
        {
            long lcn = run.StartLcn + (focusVcn - run.StartVcn);
            _fsStream.Position = lcn * _bytesPerCluster;
            StreamUtilities.ReadExact(_fsStream, buffer, offset + totalRead * _bytesPerCluster, toRead * _bytesPerCluster);
        }

        totalRead += toRead;
    }
}
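// The two methods above walk the attribute's cooked data runs to translate
// virtual cluster numbers (VCNs) into logical cluster numbers (LCNs). The
// sketch below isolates just that translation under hypothetical stand-in
// types (CookedRun and the run list are not DiscUtils types); only the
// arithmetic mirrors the methods. Sparse runs have no backing LCN.
using System;
using System.Collections.Generic;

record CookedRun(long StartVcn, long StartLcn, long Length, bool IsSparse);

static class RunMapping
{
    // Returns the LCN backing a VCN, or -1 for a sparse (unallocated) cluster.
    public static long VcnToLcn(IList<CookedRun> runs, long vcn)
    {
        foreach (CookedRun run in runs)
        {
            if (vcn >= run.StartVcn && vcn < run.StartVcn + run.Length)
            {
                return run.IsSparse ? -1 : run.StartLcn + (vcn - run.StartVcn);
            }
        }

        throw new ArgumentOutOfRangeException(nameof(vcn), "VCN beyond the last data run");
    }
}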
public override int Read(byte[] buffer, int offset, int count)
{
    AssertOpen();
    StreamUtilities.AssertBufferParameters(buffer, offset, count);

    using (new NtfsTransaction())
    {
        return _baseStream.Read(buffer, offset, count);
    }
}
public override int Read(long pos, byte[] buffer, int offset, int count)
{
    AttributeRecord record = _attribute.PrimaryRecord;

    if (!CanRead)
    {
        throw new IOException("Attempt to read from file not opened for read");
    }

    StreamUtilities.AssertBufferParameters(buffer, offset, count);

    if (pos >= Capacity)
    {
        return 0;
    }

    // Limit read to length of attribute
    int totalToRead = (int)Math.Min(count, Capacity - pos);
    int toRead = totalToRead;

    // Handle uninitialized bytes at end of attribute
    if (pos + totalToRead > record.InitializedDataLength)
    {
        if (pos >= record.InitializedDataLength)
        {
            // We're just reading zero bytes from the uninitialized area
            Array.Clear(buffer, offset, totalToRead);
            pos += totalToRead;
            return totalToRead;
        }

        // Partial read of uninitialized area: zero the tail of the buffer now,
        // then read only the initialized prefix from the raw buffer below.
        Array.Clear(buffer, offset + (int)(record.InitializedDataLength - pos), (int)(pos + toRead - record.InitializedDataLength));
        toRead = (int)(record.InitializedDataLength - pos);
    }

    int numRead = 0;
    while (numRead < toRead)
    {
        IBuffer extentBuffer = _attribute.RawBuffer;
        int justRead = extentBuffer.Read(pos + numRead, buffer, offset + numRead, toRead - numRead);
        if (justRead == 0)
        {
            break;
        }

        numRead += justRead;
    }

    return totalToRead;
}
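// The initialized-data handling above mixes the clamping arithmetic with the
// buffer manipulation. As a minimal sketch, assuming only the quantities the
// method names (pos, count, InitializedDataLength), the split reduces to a
// hypothetical helper like this; it is not a DiscUtils API.
using System;

static class InitializedSplit
{
    // How many bytes of a read at `pos` come from initialized data on disk,
    // and how many must be zero-filled from the uninitialized tail.
    public static (int fromDisk, int zeroFill) SplitAtInitialized(long pos, int count, long initLength)
    {
        if (pos >= initLength)
        {
            return (0, count);  // entirely within the uninitialized tail
        }

        int fromDisk = (int)Math.Min(count, initLength - pos);
        return (fromDisk, count - fromDisk);
    }
}
// In the method above, the `zeroFill` bytes at the end of the destination are
// cleared first, and only `fromDisk` bytes are then read from the raw buffer.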
public override void Write(byte[] buffer, int offset, int count)
{
    AssertOpen();
    StreamUtilities.AssertBufferParameters(buffer, offset, count);

    using (new NtfsTransaction())
    {
        _isDirty = true;
        _baseStream.Write(buffer, offset, count);
    }
}
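// Both wrappers above scope their work in a using (new NtfsTransaction())
// block. A minimal sketch of that disposable-scope pattern, assuming (this is
// an assumption, not the DiscUtils implementation) that the scope's main job
// is to pin one timestamp for all metadata updates made inside it:
using System;

sealed class TimestampScope : IDisposable
{
    [ThreadStatic]
    private static DateTime _timestamp;

    public TimestampScope()
    {
        // Every file-time update inside the scope sees one consistent instant.
        _timestamp = DateTime.UtcNow;
    }

    public static DateTime Timestamp => _timestamp;

    public void Dispose()
    {
        // Nothing to release in this sketch; a real scope might commit here.
    }
}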
public override int Read(long pos, byte[] buffer, int offset, int count)
{
    if (!CanRead)
    {
        throw new IOException("Attempt to read from file not opened for read");
    }

    StreamUtilities.AssertBufferParameters(buffer, offset, count);

    // Limit read to length of attribute
    int totalToRead = (int)Math.Min(count, Capacity - pos);
    if (totalToRead <= 0)
    {
        return 0;
    }

    long focusPos = pos;
    while (focusPos < pos + totalToRead)
    {
        long vcn = focusPos / _bytesPerCluster;
        long remaining = pos + totalToRead - focusPos;
        long clusterOffset = focusPos - vcn * _bytesPerCluster;

        if (vcn * _bytesPerCluster != focusPos || remaining < _bytesPerCluster)
        {
            // Unaligned or short read: bounce through a whole-cluster buffer
            _activeStream.ReadClusters(vcn, 1, _ioBuffer, 0);

            int toRead = (int)Math.Min(remaining, _bytesPerCluster - clusterOffset);
            Array.Copy(_ioBuffer, (int)clusterOffset, buffer, (int)(offset + (focusPos - pos)), toRead);

            focusPos += toRead;
        }
        else
        {
            // Aligned, full cluster reads go straight into the caller's buffer
            int fullClusters = (int)(remaining / _bytesPerCluster);
            _activeStream.ReadClusters(vcn, fullClusters, buffer, (int)(offset + (focusPos - pos)));

            focusPos += fullClusters * _bytesPerCluster;
        }
    }

    return totalToRead;
}
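// The loop above interleaves two cases: unaligned or short reads that bounce
// through _ioBuffer, and aligned full-cluster reads that go straight into the
// caller's buffer. The sketch below separates out just the segmentation
// decision; Segment is a hypothetical type and only the arithmetic follows
// the method.
using System;
using System.Collections.Generic;

record Segment(long Vcn, int ClusterCount, bool NeedsBounceBuffer);

static class Alignment
{
    // Split the byte range [pos, pos + count) into cluster-sized segments,
    // flagging those that need a bounce buffer because they do not cover
    // whole clusters.
    public static IEnumerable<Segment> Split(long pos, long count, int bytesPerCluster)
    {
        long focus = pos;
        long end = pos + count;
        while (focus < end)
        {
            long vcn = focus / bytesPerCluster;
            long remaining = end - focus;

            if (focus % bytesPerCluster != 0 || remaining < bytesPerCluster)
            {
                // Unaligned head or short tail: one whole cluster, then copy.
                yield return new Segment(vcn, 1, true);
                focus += Math.Min(remaining, bytesPerCluster - focus % bytesPerCluster);
            }
            else
            {
                // Aligned middle: as many full clusters as remain.
                int full = (int)(remaining / bytesPerCluster);
                yield return new Segment(vcn, full, false);
                focus += (long)full * bytesPerCluster;
            }
        }
    }
}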
public override void Write(long pos, byte[] buffer, int offset, int count)
{
    AttributeRecord record = _attribute.PrimaryRecord;

    if (!CanWrite)
    {
        throw new IOException("Attempt to write to file not opened for write");
    }

    StreamUtilities.AssertBufferParameters(buffer, offset, count);

    if (count == 0)
    {
        return;
    }

    _attribute.RawBuffer.Write(pos, buffer, offset, count);

    if (!record.IsNonResident)
    {
        // Resident attribute data lives inside the MFT record itself
        _file.MarkMftRecordDirty();
    }
}
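// The !record.IsNonResident check above matters because a resident attribute's
// value is stored inline in the MFT file record, so writing it modifies the
// record itself. A toy illustration under hypothetical types (this is not the
// DiscUtils object model):
using System;

enum AttributeForm { Resident, NonResident }

sealed class AttributeSketch
{
    public AttributeForm Form;
    public byte[] ResidentValue = Array.Empty<byte>();  // lives inside the MFT record
    public bool MftRecordDirty;

    public void Write(long pos, byte[] buffer, int offset, int count)
    {
        if (Form == AttributeForm.Resident)
        {
            Array.Copy(buffer, offset, ResidentValue, (int)pos, count);
            MftRecordDirty = true;  // the MFT record's own bytes changed
        }
        // A non-resident write would instead be routed to the attribute's
        // data runs out on disk.
    }
}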
public override void ReadClusters(long startVcn, int count, byte[] buffer, int offset)
{
    StreamUtilities.AssertBufferParameters(buffer, offset, count * _bytesPerCluster);

    int runIdx = 0;
    int totalRead = 0;
    while (totalRead < count)
    {
        long focusVcn = startVcn + totalRead;

        runIdx = _cookedRuns.FindDataRun(focusVcn, runIdx);
        CookedDataRun run = _cookedRuns[runIdx];

        int toRead = (int)Math.Min(count - totalRead, run.Length - (focusVcn - run.StartVcn));

        if (run.IsSparse)
        {
            Array.Clear(buffer, offset + totalRead * _bytesPerCluster, toRead * _bytesPerCluster);
        }
        else
        {
            long lcn = run.StartLcn + (focusVcn - run.StartVcn);
            _fsStream.Position = lcn * _bytesPerCluster;

            int numRead = StreamUtilities.ReadFully(_fsStream, buffer, offset + totalRead * _bytesPerCluster, toRead * _bytesPerCluster);
            if (numRead != toRead * _bytesPerCluster)
            {
                throw new IOException(string.Format(CultureInfo.InvariantCulture, "Short read, reading {0} clusters starting at LCN {1}", toRead, lcn));
            }
        }

        totalRead += toRead;
    }
}
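// This variant validates the byte count returned by StreamUtilities.ReadFully
// itself, where the earlier ReadClusters relied on StreamUtilities.ReadExact
// to throw on a short read. A hedged sketch of what such an exact-read helper
// typically looks like over a plain Stream (illustrative only; DiscUtils ships
// its own implementation):
using System;
using System.IO;

static class ReadHelpers
{
    // Loop Stream.Read until `count` bytes arrive, throwing if the stream
    // ends early; Stream.Read is allowed to return fewer bytes than asked.
    public static void ReadExact(Stream stream, byte[] buffer, int offset, int count)
    {
        int total = 0;
        while (total < count)
        {
            int read = stream.Read(buffer, offset + total, count - total);
            if (read == 0)
            {
                throw new IOException($"Short read: got {total} of {count} bytes");
            }

            total += read;
        }
    }
}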