// Switches between isolated-storage-backed and memory-backed modes based on the
// low/high water marks, migrating the existing data to the new backing store.
private void SwitchModeIfNecessary()
{
    if (this._isolatedStorageMode)
    {
        // Currently disk-backed: fall back to memory once the data has shrunk
        // below the low water mark.
        if (this._isolatedStorageStream.Length < this._lowWaterMark)
        {
            if (this._isolatedStorageStream.Length > 0L)
            {
                // Copy the entire isolated-storage stream into a single memory block at offset 0.
                MemoryStreamBlock item = new MemoryStreamBlock(this._trackingMemoryStreamFactory.Create((int)this._isolatedStorageStream.Length), 0L);
                this._isolatedStorageStream.Seek(0L, SeekOrigin.Begin);
                item.Stream.Seek(0L, SeekOrigin.Begin);
                // 0x7fffffffffffffffL == long.MaxValue (copy everything); 0x80000 == 512K copy buffer.
                PackagingUtilities.CopyStream(this._isolatedStorageStream, item.Stream, 0x7fffffffffffffffL, 0x80000);
                this._memoryStreamList.Add(item);
            }
            this._isolatedStorageMode = false;
            // Truncate (but keep) the isolated-storage stream for possible later reuse.
            this._isolatedStorageStream.SetLength(0L);
            this._isolatedStorageStream.Flush();
        }
    }
    else if (this._trackingMemoryStreamFactory.CurrentMemoryConsumption > this._highWaterMark)
    {
        // Currently memory-backed: spill to isolated storage once tracked memory
        // consumption exceeds the high water mark.
        this.EnsureIsolatedStoreStream();
        this.CopyMemoryBlocksToStream(this._isolatedStorageStream);
        this._isolatedStorageMode = true;
        // Release the memory blocks now that their data lives on disk.
        foreach (MemoryStreamBlock block2 in this._memoryStreamList)
        {
            block2.Stream.Close();
        }
        this._memoryStreamList.Clear();
    }
}
/// <summary>
/// Moves a contiguous run of bytes within the archive stream, chunk by chunk,
/// choosing a copy direction that is safe when source and target ranges overlap.
/// </summary>
/// <param name="moveBlockSourceOffset">Offset the bytes are read from.</param>
/// <param name="moveBlockTargetOffset">Offset the bytes are written to.</param>
/// <param name="moveBlockSize">Number of bytes to move.</param>
internal void MoveData(long moveBlockSourceOffset, long moveBlockTargetOffset, long moveBlockSize)
{
    // Nothing to do for an empty move or a move onto itself.
    if (moveBlockSize == 0L || moveBlockSourceOffset == moveBlockTargetOffset)
    {
        return;
    }

    // Copy in chunks of at most 1MB (0x100000).
    byte[] tempBuffer = new byte[Math.Min(moveBlockSize, 0x100000L)];

    // When moving towards the start of the stream copy front-to-back; when
    // moving towards the end copy back-to-front, so an overlapping source
    // region is never clobbered before it has been read.
    bool movingTowardsStart = moveBlockSourceOffset > moveBlockTargetOffset;

    int chunkSize;
    for (long bytesMoved = 0L; bytesMoved < moveBlockSize; bytesMoved += chunkSize)
    {
        chunkSize = (int)Math.Min((long)tempBuffer.Length, moveBlockSize - bytesMoved);

        long readOffset;
        long writeOffset;
        if (movingTowardsStart)
        {
            readOffset = moveBlockSourceOffset + bytesMoved;
            writeOffset = moveBlockTargetOffset + bytesMoved;
        }
        else
        {
            readOffset = moveBlockSourceOffset + moveBlockSize - bytesMoved - chunkSize;
            writeOffset = moveBlockTargetOffset + moveBlockSize - bytesMoved - chunkSize;
        }

        this._archiveStream.Seek(readOffset, SeekOrigin.Begin);
        if (PackagingUtilities.ReliableRead(this._archiveStream, tempBuffer, 0, chunkSize) != chunkSize)
        {
            // A short read means the archive is truncated or corrupt.
            throw new FileFormatException(SR.Get("CorruptedData"));
        }

        this._archiveStream.Seek(writeOffset, SeekOrigin.Begin);
        this._archiveStream.Write(tempBuffer, 0, chunkSize);
    }
}
/// <summary>
/// See .NET Framework SDK under System.IO.Stream
/// </summary>
/// <param name="buffer">Data buffer</param>
/// <param name="offset">Buffer write start position</param>
/// <param name="count">Number of bytes to write</param>
public override void Write(byte[] buffer, int offset, int count)
{
    CheckDisposedStatus();
    PackagingUtilities.VerifyStreamWriteArgs(this, buffer, offset, count);

    int written = 0;
    if (offset == 0)
    {
        // Typical case: data already starts at the beginning of the buffer, so
        // it can be handed straight to the IStream.
        _safeIStream.Write(buffer, count, out written);
    }
    else
    {
        // IStream.Write has no offset parameter, so copy the requested slice
        // into a zero-based temporary buffer first.
        byte[] zeroBasedBuffer = new byte[count];
        Array.Copy(buffer, offset, zeroBasedBuffer, 0, count);
        _safeIStream.Write(zeroBasedBuffer, count, out written);
    }

    if (written != count)
    {
        throw new IOException(SR.Get(SRID.WriteFailure));
    }
}
/// <summary>
/// See .NET Framework SDK under System.IO.Stream
/// </summary>
/// <param name="buffer">Read data buffer</param>
/// <param name="offset">Buffer start position</param>
/// <param name="count">Number of bytes to read</param>
/// <returns>Number of bytes actually read</returns>
public override int Read(byte[] buffer, int offset, int count)
{
    CheckDisposedStatus();
    PackagingUtilities.VerifyStreamReadArgs(this, buffer, offset, count);

    int read = 0;
    if (offset == 0)
    {
        // Typical case: read straight into the caller's buffer.
        _safeIStream.Read(buffer, count, out read);
    }
    else
    {
        // IStream.Read has no offset parameter, so read into a zero-based
        // temporary buffer and copy the result to the requested offset.
        byte[] zeroBasedBuffer = new byte[count];
        _safeIStream.Read(zeroBasedBuffer, count, out read);
        if (read > 0)
        {
            Array.Copy(zeroBasedBuffer, 0, buffer, offset, read);
        }
    }
    return(read);
}
// Writes through to the underlying stream, first switching into a mode that can
// accept a write at the current position: pass-through only when starting fresh
// at position 0 over an empty deflate stream, otherwise full emulation.
public override void Write(byte[] buffer, int offset, int count)
{
    this.CheckDisposed();
    PackagingUtilities.VerifyStreamWriteArgs(this, buffer, offset, count);
    if (count != 0)
    {
        switch (this._mode)
        {
            case Mode.Start:
                // Pass-through writing is only valid from position 0 over an
                // empty deflate stream; anything else forces emulation.
                if ((this._position != 0L) || !IsDeflateStreamEmpty(this._baseStream))
                {
                    this.ChangeMode(Mode.Emulation);
                    break;
                }
                this.ChangeMode(Mode.WritePassThrough);
                break;
            case Mode.ReadPassThrough:
                // Was reading; a write forces full emulation.
                this.ChangeMode(Mode.Emulation);
                break;
        }
        this._current.Write(buffer, offset, count);
        this._position += count;
        if (this._mode == Mode.WritePassThrough)
        {
            // In pass-through mode the cached logical length tracks what has been written.
            this.CachedLength = this._position;
        }
        this._dirtyForFlushing = true;
        this._dirtyForClosing = true;
    }
}
/// <summary>
/// Lazily creates the backing isolated-storage stream on first use.
/// </summary>
private void EnsureIsolatedStoreStream()
{
    if (this._isolatedStorageStream != null)
    {
        return;
    }

    // NOTE(review): the meaning of the literal 3 is defined by the helper
    // (presumably a retry count) - confirm against PackagingUtilities.
    this._isolatedStorageStream = PackagingUtilities.CreateUserScopedIsolatedStorageFileStreamWithRandomName(
        3, out this._isolatedStorageStreamFileName);
}
/// <summary>
/// Reads this field's data (size bytes) from the reader into _data, preserving
/// any bytes that were already sniffed earlier, and updates _size.
/// </summary>
/// <exception cref="FileFormatException">Thrown when the stream ends before the
/// full field could be read.</exception>
internal virtual void ParseDataField(BinaryReader reader, UInt16 size)
{
    if (_data == null)
    {
        _data = reader.ReadBytes(size);
        // validate that we didn't reach the end of stream too early
        if (_data.Length != size)
        {
            throw new FileFormatException(SR.Get(SRID.CorruptedData));
        }
    }
    else // There were some data we sniffed already
    {
        // Grow _data to the new size, keeping the _size bytes already read,
        // then read only the remainder from the stream.
        Byte[] tempBuffer = _data;
        _data = new Byte[size];
        Array.Copy(tempBuffer, _data, _size); // _size contains the size of data in _data
        checked
        {
            Debug.Assert(size >= _size);
            if ((PackagingUtilities.ReliableRead(reader, _data, _size, size - _size) + _size) != size)
            {
                throw new FileFormatException(SR.Get(SRID.CorruptedData));
            }
        }
    }
    _size = size;
}
// Writes first over the on-disk persisted region (via the block manager), then
// spills any remainder beyond the persisted size into an in-memory suffix stream.
public override void Write(byte[] buffer, int offset, int count)
{
    this.CheckDisposed();
    PackagingUtilities.VerifyStreamWriteArgs(this, buffer, offset, count);
    if (count != 0)
    {
        int num2 = 0; // bytes written to the persisted (on-disk) region
        this._dirtyFlag = true;
        this._dataChanged = true;
        long num = this._currentStreamPosition; // running stream position
        if (num < this._persistedSize)
        {
            // Overlap with persisted data: write directly through the block manager.
            this._blockManager.Stream.Seek(this._persistedOffset + num, SeekOrigin.Begin);
            num2 = (int)Math.Min((long)count, this._persistedSize - num);
            this._blockManager.Stream.Write(buffer, offset, num2);
            num += num2;
            count -= num2;
            offset += num2;
        }
        if ((num + count) > this._persistedSize)
        {
            // Remainder goes to the sparse memory suffix, created on demand.
            // 0x19000/0xa00000 are the suffix stream's low/high water marks.
            if (this._sparseMemoryStreamSuffix == null)
            {
                this._sparseMemoryStreamSuffix = new SparseMemoryStream(0x19000L, 0xa00000L);
            }
            this._sparseMemoryStreamSuffix.Seek(num - this._persistedSize, SeekOrigin.Begin);
            this._sparseMemoryStreamSuffix.Write(buffer, offset, count);
            num += count;
        }
        this._currentStreamPosition = num;
        this._currentStreamLength = Math.Max(this._currentStreamLength, this._currentStreamPosition);
    }
}
// Copies the entire source stream to the destination in _readBlockSize chunks.
// NOTE(review): the modifiers/return type of this declaration fall outside this
// view of the file.
CopyImageStream(Stream sourceStream, Stream destinationStream)
{
    byte[] buffer = new byte[_readBlockSize];
    int bytesRead = PackagingUtilities.ReliableRead(sourceStream, buffer, 0, _readBlockSize);
    while (bytesRead > 0)
    {
        destinationStream.Write(buffer, 0, bytesRead);
        bytesRead = PackagingUtilities.ReliableRead(sourceStream, buffer, 0, _readBlockSize);
    }
}
/// <summary>
/// Copies this stream's entire content to the given target stream, regardless
/// of whether the data currently lives in isolated storage or in memory blocks.
/// </summary>
internal void WriteToStream(Stream stream)
{
    if (!this._isolatedStorageMode)
    {
        this.CopyMemoryBlocksToStream(stream);
        return;
    }

    // Disk-backed: rewind and bulk-copy. long.MaxValue => copy everything;
    // 0x80000 (512K) is the copy buffer size.
    this._isolatedStorageStream.Seek(0L, SeekOrigin.Begin);
    PackagingUtilities.CopyStream(this._isolatedStorageStream, stream, 0x7fffffffffffffffL, 0x80000);
}
// Copies the font data behind _fontUri into the destination resource stream,
// obfuscating the first block with a GUID parsed from the destination URI, then
// releases the resource stream from the packaging policy and returns destUri.
// NOTE(review): the modifiers/return type of this declaration fall outside this
// view of the file.
CopyFontStream()
{
    Uri sourceUri = _fontUri;
    Uri destUri = _fontResourceStream.Uri;
    Stream destStream = _fontResourceStream.Stream;
    Stream sourceStream = null;
    byte [] memoryFont;
    GlyphTypeface glyphTypeface = new GlyphTypeface(sourceUri);
    // Assert file-read permission only for the duration of GetFontStream, then revert.
    CodeAccessPermission fontReadPermission = glyphTypeface.CriticalFileReadPermission;
    if (fontReadPermission != null)
    {
        fontReadPermission.Assert();
    }
    try
    {
        sourceStream = glyphTypeface.GetFontStream();
    }
    finally
    {
        if (fontReadPermission != null)
        {
            CodeAccessPermission.RevertAssert();
        }
    }
    memoryFont = new byte[_readBlockSize];
    Guid guid = ParseGuidFromUri(destUri);
    int bytesRead = PackagingUtilities.ReliableRead(sourceStream, memoryFont, 0, _readBlockSize);
    if (bytesRead > 0)
    {
        // Obfuscate the first block only.
        ObfuscateData(memoryFont, guid);
    }
    // Copy the first (obfuscated) block and all remaining blocks unmodified.
    while (bytesRead > 0)
    {
        destStream.Write(memoryFont, 0, bytesRead);
        bytesRead = PackagingUtilities.ReliableRead(sourceStream, memoryFont, 0, _readBlockSize);
    }
    Uri fontUri = new Uri(_fontUri.GetComponents(UriComponents.SerializationInfoString, UriFormat.SafeUnescaped), UriKind.RelativeOrAbsolute);
    string fontUriAsString = fontUri.GetComponents(UriComponents.SerializationInfoString, UriFormat.UriEscaped);
    _packagingPolicy.ReleaseResourceStreamForXpsFont(fontUriAsString);
    _streamWritten = true;
    return(destUri);
}
/// <summary>
/// See .NET Framework SDK under System.IO.Stream
/// </summary>
public override int Read(byte[] buffer, int offset, int count)
{
    CheckDisposed();
    PackagingUtilities.VerifyStreamReadArgs(this, buffer, offset, count);

    int bytesRead = InternalRead(_streamPosition, buffer, offset, count);
    FlushCacheIfNecessary();

    // checked: position arithmetic must not silently overflow.
    checked
    {
        _streamPosition += bytesRead;
    }
    return(bytesRead);
}
/// <summary>
/// Attempts to satisfy a read from the given cache stream. Returns the number
/// of bytes actually read, which is zero when the cache has no block covering
/// the requested start offset.
/// </summary>
private int ReadFromCache(SparseMemoryStream cache, long start, int count, byte[] buffer, int bufferOffset)
{
#if DEBUG
    // debug only check for valid parameters, as we generally expect callers to verify them
    PackagingUtilities.VerifyStreamReadArgs(this, buffer, bufferOffset, count);
#endif
    Debug.Assert(cache != null);
    Debug.Assert(start >= 0);

    IList<MemoryStreamBlock> blocks = cache.MemoryBlockCollection;
    checked
    {
        // Binary search for a block containing 'start'.
        bool exactMatch;
        int blockIndex = FindIndexOfBlockAtOffset(cache, start, out exactMatch);
        if (!exactMatch)
        {
            return 0;
        }

        MemoryStreamBlock candidate = blocks[blockIndex];
        long overlapOffset;
        long overlapSize;
        PackagingUtilities.CalculateOverlap(
            candidate.Offset, candidate.Stream.Length,
            start, count,
            out overlapOffset, out overlapSize);
        if (overlapSize <= 0)
        {
            return 0;
        }

        // The overlap necessarily begins at 'start', because the matched block
        // is known to start at or before it.
        Debug.Assert(overlapOffset == start);
        candidate.Stream.Seek(overlapOffset - candidate.Offset, SeekOrigin.Begin);

        // A partially complete read is acceptable here; the memory stream will
        // normally return everything requested anyway.
        return candidate.Stream.Read(buffer, bufferOffset, (int)overlapSize);
    }
}
// Reads up to 'count' bytes at the current position, either straight from the
// isolated-storage backing stream or by assembling data from the overlapping
// in-memory blocks (gaps in the sparse block list read back as zeros).
public override int Read(byte[] buffer, int offset, int count)
{
    int num3; // bytes reported as read
    this.CheckDisposed();
    PackagingUtilities.VerifyStreamReadArgs(this, buffer, offset, count);
    if (count == 0)
    {
        return(0);
    }
    if (this._currentStreamLength <= this._currentStreamPosition)
    {
        // Position is at or past EOF.
        return(0);
    }
    // Clamp the request to the remaining logical length.
    int num2 = (int)Math.Min((long)count, this._currentStreamLength - this._currentStreamPosition);
    if (this._isolatedStorageMode)
    {
        this._isolatedStorageStream.Seek(this._currentStreamPosition, SeekOrigin.Begin);
        num3 = this._isolatedStorageStream.Read(buffer, offset, num2);
    }
    else
    {
        // Sparse read: zero-fill the target range first, then overlay every
        // memory block that overlaps the requested range.
        Array.Clear(buffer, offset, num2);
        int num = this._memoryStreamList.BinarySearch(this.GetSearchBlockForOffset(this._currentStreamPosition));
        if (num < 0)
        {
            // No exact match: start from the first block after the position.
            num = ~num;
        }
        while (num < this._memoryStreamList.Count)
        {
            long num4; // overlap size
            long num5; // overlap offset
            MemoryStreamBlock block = this._memoryStreamList[num];
            PackagingUtilities.CalculateOverlap(block.Offset, (long)((int)block.Stream.Length), this._currentStreamPosition, (long)num2, out num5, out num4);
            if (num4 <= 0L)
            {
                // Blocks are ordered by offset; once past the range, stop.
                break;
            }
            Array.Copy(block.Stream.GetBuffer(), (int)(num5 - block.Offset), buffer, (int)((offset + num5) - this._currentStreamPosition), (int)num4);
            num++;
        }
        // Sparse mode always satisfies the clamped request in full (zeros included).
        num3 = num2;
    }
    this._currentStreamPosition += num3;
    return(num3);
}
// Shared PreSaveNotification handler: when the [offset, offset+size) range being
// saved overlaps this item's on-disk data, the affected prefix of the on-disk
// data is copied into cachePrefixStream so it survives the save. Returns an
// instruction telling the caller whether to keep scanning further items.
internal static PreSaveNotificationScanControlInstruction CommonPreSaveNotificationHandler(Stream stream, long offset, long size, long onDiskOffset, long onDiskSize, ref SparseMemoryStream cachePrefixStream)
{
    if (size != 0L)
    {
        long num2; // overlap size
        long num4; // overlap offset
        if (cachePrefixStream != null)
        {
            // Part of the on-disk data was cached by an earlier notification - skip past it.
            onDiskOffset += cachePrefixStream.Length;
            onDiskSize -= cachePrefixStream.Length;
        }
        if (onDiskSize == 0L)
        {
            // Everything already cached; nothing left on disk to protect.
            return(PreSaveNotificationScanControlInstruction.Continue);
        }
        PackagingUtilities.CalculateOverlap(onDiskOffset, onDiskSize, offset, size, out num4, out num2);
        if (num2 <= 0L)
        {
            // No overlap: continue only if our data starts at or before the saved
            // range (later items might still overlap); otherwise stop the scan.
            if (onDiskOffset <= offset)
            {
                return(PreSaveNotificationScanControlInstruction.Continue);
            }
            return(PreSaveNotificationScanControlInstruction.Stop);
        }
        // Cache everything from the current on-disk start through the end of the overlap.
        long bytesToCopy = (num4 + num2) - onDiskOffset;
        if (cachePrefixStream == null)
        {
            // 0x19000/0xa00000 are the cache stream's low/high water marks.
            cachePrefixStream = new SparseMemoryStream(0x19000L, 0xa00000L);
        }
        else
        {
            // Append to the data cached by a previous notification.
            cachePrefixStream.Seek(0L, SeekOrigin.End);
        }
        stream.Seek(onDiskOffset, SeekOrigin.Begin);
        // 0x1000 == 4K copy buffer; a short copy means the source is corrupt.
        if (PackagingUtilities.CopyStream(stream, cachePrefixStream, bytesToCopy, 0x1000) != bytesToCopy)
        {
            throw new FileFormatException(SR.Get("CorruptedData"));
        }
        if ((onDiskOffset + onDiskSize) < (offset + size))
        {
            // The saved range extends past our data - later items may be affected too.
            return(PreSaveNotificationScanControlInstruction.Continue);
        }
    }
    return(PreSaveNotificationScanControlInstruction.Stop);
}
// Reads first from the persisted on-disk region (via the block manager), then
// from the in-memory suffix stream for any portion past the persisted size.
public override int Read(byte[] buffer, int offset, int count)
{
    this.CheckDisposed();
    PackagingUtilities.VerifyStreamReadArgs(this, buffer, offset, count);
    if (count == 0)
    {
        return(0);
    }
    if (this._currentStreamLength <= this._currentStreamPosition)
    {
        // Position is at or past EOF.
        return(0);
    }
    int num2 = 0;   // bytes actually read from disk
    int num4 = 0;   // bytes requested from disk
    long num5 = 0L; // size of the persisted tail available at the current position
    int num3 = 0;   // bytes read from the memory suffix
    long num = this._currentStreamPosition; // running position
    if (num < this._persistedSize)
    {
        // The stream may have been truncated below the persisted size, so take
        // the min of logical length and persisted size.
        num5 = Math.Min(this._currentStreamLength, this._persistedSize) - num;
        num4 = (int)Math.Min((long)count, num5);
        this._blockManager.Stream.Seek(this._persistedOffset + num, SeekOrigin.Begin);
        num2 = this._blockManager.Stream.Read(buffer, offset, num4);
        num += num2;
        count -= num2;
        offset += num2;
        if (num2 < num4)
        {
            // Short read from disk: report what we have rather than mixing in
            // suffix data at the wrong offset.
            this._currentStreamPosition = num;
            return(num2);
        }
    }
    if ((this._sparseMemoryStreamSuffix != null) && ((num + count) > this._persistedSize))
    {
        // Finish the request (or serve it entirely) from the memory suffix.
        this._sparseMemoryStreamSuffix.Seek(num - this._persistedSize, SeekOrigin.Begin);
        num3 = this._sparseMemoryStreamSuffix.Read(buffer, offset, count);
        num += num3;
    }
    int num6 = num2 + num3;
    this._currentStreamPosition = num;
    return(num6);
}
// Reads from the current underlying stream, first switching into a mode that
// can serve a read at this position (pass-through only from position 0,
// otherwise full emulation).
public override int Read(byte[] buffer, int offset, int count)
{
    this.CheckDisposed();
    PackagingUtilities.VerifyStreamReadArgs(this, buffer, offset, count);
    if (count == 0)
    {
        return(0);
    }
    switch (this._mode)
    {
        case Mode.Start:
            // Reading can stream straight through only from position 0.
            if (this._position != 0L)
            {
                this.ChangeMode(Mode.Emulation);
                break;
            }
            this.ChangeMode(Mode.ReadPassThrough);
            break;
        case Mode.WritePassThrough:
            // Switching from writing to reading: pass-through reading is only
            // possible from the very beginning.
            if (this._position != 0L)
            {
                this.ChangeMode(Mode.Emulation);
                break;
            }
            this.ChangeMode(Mode.ReadPassThrough);
            break;
    }
    if (this._current == null)
    {
        // No underlying stream available in the current mode - nothing to read.
        return(0);
    }
    int num = this._current.Read(buffer, offset, count);
    if ((this._mode == Mode.ReadPassThrough) && (num == 0))
    {
        // End of the pass-through stream: record the uncompressed length and
        // reset back to the Start state.
        this.UpdateUncompressedDataLength(this._position);
        this.ChangeMode(Mode.Start);
    }
    this._position += num;
    return(num);
}
/// <summary>
/// See .NET Framework SDK under System.IO.Stream. Writes either straight into
/// the isolated-storage backing stream or into the in-memory block list, then
/// re-evaluates which backing mode the stream should be in.
/// </summary>
public override void Write(byte[] buffer, int offset, int count)
{
    this.CheckDisposed();
    PackagingUtilities.VerifyStreamWriteArgs(this, buffer, offset, count);
    if (count == 0)
    {
        return;
    }

    if (this._isolatedStorageMode)
    {
        // Disk-backed: plain positioned write.
        this._isolatedStorageStream.Seek(this._currentStreamPosition, SeekOrigin.Begin);
        this._isolatedStorageStream.Write(buffer, offset, count);
        this._currentStreamPosition += count;
    }
    else
    {
        // Memory-backed: merge into the block list (updates the position internally).
        this.WriteAndCollapseBlocks(buffer, offset, count);
    }

    this._currentStreamLength = Math.Max(this._currentStreamLength, this._currentStreamPosition);
    // Writing may have pushed memory consumption over a water mark.
    this.SwitchModeIfNecessary();
}
/// <summary>
/// ReadBlockHeader - reads the block header and returns true if successful
/// </summary>
/// <param name="source">stream to read from</param>
/// <param name="compressedSize">compressedSize from header</param>
/// <param name="uncompressedSize">uncompressedSize from header</param>
/// <returns>true if header found</returns>
private bool ReadBlockHeader(Stream source, out int uncompressedSize, out int compressedSize)
{
    int bytesRead = PackagingUtilities.ReliableRead(source, _headerBuf, 0, _headerBuf.Length);
    if (bytesRead == 0)
    {
        // Clean end of stream - no header at all.
        uncompressedSize = compressedSize = 0;
        return(false);
    }

    // A partial header means the stream is truncated.
    if (bytesRead < _headerBuf.Length)
    {
        throw new FileFormatException(SR.Get(SRID.CorruptStream));
    }

    // Header layout: three ulongs - token, uncompressed size, compressed size.
    uint token = BitConverter.ToUInt32(_headerBuf, 0);
    if (token != _blockHeaderToken)
    {
        throw new FileFormatException(SR.Get(SRID.CorruptStream));
    }

    checked
    {
        // Convert to int - that's what the rest of the code uses.
        uncompressedSize = (int)BitConverter.ToUInt32(_headerBuf, _ulongSize);
        compressedSize = (int)BitConverter.ToUInt32(_headerBuf, _ulongSize * 2);

        // Screen out malicious size values.
        if (uncompressedSize < 0 || uncompressedSize > _maxAllowableBlockSize
            || compressedSize < 0 || compressedSize > _maxAllowableBlockSize)
        {
            throw new FileFormatException(SR.Get(SRID.CorruptStream));
        }
    }

    return(true);
}
/// <summary>
/// See .NET Framework SDK under System.IO.Stream
/// </summary>
public override void Write(byte[] buffer, int offset, int count)
{
    CheckDisposed();
    PackagingUtilities.VerifyStreamWriteArgs(this, buffer, offset, count);

    // Stage the data in the write cache at the current logical position.
    _writeCache.Seek(this.Position, SeekOrigin.Begin);
    _writeCache.Write(buffer, offset, count);

    // The write may have grown the stream beyond its previous length.
    if (_writeCache.Length > Length)
    {
        SetLength(_writeCache.Length);
    }

    // checked: position arithmetic must not silently overflow.
    checked
    {
        _streamPosition += count;
    }

    FlushCacheIfNecessary();
}
/// <summary>
/// Scans the archive backwards, one block at a time, for the record located by
/// IsPositionMatched and returns its absolute offset; throws if none is found.
/// </summary>
private static long FindPosition(Stream archiveStream)
{
    // Blocks overlap by 0x16 bytes (the minimal record size) so a match can
    // never straddle two buffers.
    byte[] scanBuffer = new byte[_scanBlockSize + 0x16];
    long streamLength = archiveStream.Length;

    for (long end = streamLength; end > 0L; end -= _scanBlockSize)
    {
        long begin = Math.Max(0L, end - _scanBlockSize);

        archiveStream.Seek(begin, SeekOrigin.Begin);
        int bytesRead = PackagingUtilities.ReliableRead(archiveStream, scanBuffer, 0, scanBuffer.Length);

        // The matcher needs to know where this buffer sits relative to the end
        // of the stream in order to validate candidate records.
        long distanceToStreamEnd = streamLength - begin;

        for (int pos = bytesRead - 0x16; pos >= 0; pos--)
        {
            if (IsPositionMatched(pos, scanBuffer, distanceToStreamEnd))
            {
                return(begin + pos);
            }
        }
    }

    // Scanned the whole file without finding a match.
    throw new FileFormatException(SR.Get("CorruptedData"));
}
/// <summary>
/// Reads this field's data (size bytes) from the reader into _data, preserving
/// any bytes that were already sniffed earlier, and updates _size. Throws on a
/// premature end of stream.
/// </summary>
internal virtual void ParseDataField(BinaryReader reader, ushort size)
{
    if (this._data == null)
    {
        // Nothing buffered yet - read the whole field in one go.
        this._data = reader.ReadBytes(size);
        if (this._data.Length != size)
        {
            throw new FileFormatException(SR.Get("CorruptedData"));
        }
    }
    else
    {
        // Some data was sniffed already: keep the first _size bytes and read
        // only the remainder from the stream.
        byte[] previouslySniffed = this._data;
        this._data = new byte[size];
        Array.Copy(previouslySniffed, this._data, (int)this._size);
        int remaining = size - this._size;
        if ((PackagingUtilities.ReliableRead(reader, this._data, this._size, remaining) + this._size) != size)
        {
            throw new FileFormatException(SR.Get("CorruptedData"));
        }
    }
    this._size = size;
}
/// <summary>
/// Scans the archive stream backwards, one block at a time, for the record
/// located by IsPositionMatched and returns its absolute offset.
/// </summary>
/// <remarks>
/// Successive reads overlap by _fixedMinimalRecordSize bytes so that a record
/// crossing a block boundary always falls entirely inside one buffer; no
/// special cross-buffer matching logic is needed.
/// </remarks>
private static long FindPosition(Stream archiveStream)
{
    Debug.Assert(archiveStream.CanSeek);

    byte[] scanBuffer = new byte[_scanBlockSize + _fixedMinimalRecordSize];
    long streamLength = archiveStream.Length;

    for (long endPos = streamLength; endPos > 0; endPos -= _scanBlockSize)
    {
        // Offset of the block to read, based on the current end position.
        long beginPos = Math.Max(0, endPos - _scanBlockSize);

        archiveStream.Seek(beginPos, SeekOrigin.Begin);
        int bytesRead = PackagingUtilities.ReliableRead(archiveStream, scanBuffer, 0, scanBuffer.Length);

        // IsPositionMatched needs to know where this buffer sits relative to
        // the end of the stream so it can check that a candidate record's
        // comment-length field is consistent with the record's position.
        long distanceFromStartOfBufferToTheEndOfStream = streamLength - beginPos;

        for (int candidate = bytesRead - _fixedMinimalRecordSize; candidate >= 0; candidate--)
        {
            if (IsPositionMatched(candidate, scanBuffer, distanceFromStartOfBufferToTheEndOfStream))
            {
                return(beginPos + candidate);
            }
        }
    }

    // Scanned the whole file without finding a match.
    throw new FileFormatException(SR.Get(SRID.CorruptedData));
}
//------------------------------------------------------
//
//  Private Methods
//
//------------------------------------------------------
/// <summary>
/// One-time initialization of _streamCachedLength and _streamOnDiskLength from
/// the length prefix stored at the start of the base stream.
/// </summary>
private void ParseStreamLength()
{
    // A negative cached length means "not parsed yet".
    if (_streamCachedLength >= 0)
    {
        return;
    }

    // seek to the beginning of the stream
    _baseStream.Seek(0, SeekOrigin.Begin);

    // read the size prefix
    byte[] prefixData = new byte[_prefixLengthSize];
    int bytesRead = PackagingUtilities.ReliableRead(_baseStream, prefixData, 0, prefixData.Length);

    if (bytesRead == 0)
    {
        // probably a new stream - just assume length is zero
        _streamOnDiskLength = 0;
    }
    else if (bytesRead < _prefixLengthSize)
    {
        // not zero and shorter than the legal length == corrupt file
        throw new FileFormatException(SR.Get(SRID.EncryptedDataStreamCorrupt));
    }
    else
    {
        checked
        {
            // The checked cast throws on values that don't fit in a
            // non-negative long, so no explicit negativity check is needed.
            _streamOnDiskLength = (long)BitConverter.ToUInt64(prefixData, 0);
        }
    }

    _streamCachedLength = _streamOnDiskLength;
}
/// <summary>
/// Write
/// </summary>
/// <param name="buffer">Source data buffer</param>
/// <param name="offset">Offset in buffer at which the data starts</param>
/// <param name="count">Number of bytes to write</param>
/// <remarks>In streaming mode, write should accumulate data into the SparseMemoryStream.</remarks>
override public void Write(byte[] buffer, int offset, int count)
{
    CheckDisposed();
    PackagingUtilities.VerifyStreamWriteArgs(this, buffer, offset, count);

    // we only expect this to be non-null during Archive Save execution,
    // between the PreSaveNotification call and SaveStreaming
    Debug.Assert(_cachePrefixStream == null);
    Debug.Assert(_currentStreamPosition >= 0);

    if (count == 0)
    {
        return;
    }

    int diskBytesToWrite = 0;
    _dirtyFlag = true;
    _dataChanged = true;
    long newStreamPosition = _currentStreamPosition;

    checked
    {
        // Try to satisfy the request with a Write to the disk
        if (newStreamPosition < _persistedSize)
        {
            Debug.Assert(!_blockManager.Streaming);
            // we have at least partial overlap between the request and the data on disk
            _blockManager.Stream.Seek(_persistedOffset + newStreamPosition, SeekOrigin.Begin);

            // Note on casting: it is safe to cast the result of
            // Math.Min(count, _persistedSize - newStreamPosition) from long to int
            // since it cannot be bigger than count, and count is of int type
            diskBytesToWrite = (int)(Math.Min(count, _persistedSize - newStreamPosition));

            _blockManager.Stream.Write(buffer, offset, diskBytesToWrite);
            newStreamPosition += diskBytesToWrite;
            count -= diskBytesToWrite;
            offset += diskBytesToWrite;
        }

        // check whether we need to save data to the memory stream
        if (newStreamPosition + count > _persistedSize)
        {
            if (_sparseMemoryStreamSuffix == null)
            {
                _sparseMemoryStreamSuffix = new SparseMemoryStream(_lowWaterMark, _highWaterMark);
            }
            _sparseMemoryStreamSuffix.Seek(newStreamPosition - _persistedSize, SeekOrigin.Begin);
            _sparseMemoryStreamSuffix.Write(buffer, offset, count);
            newStreamPosition += count;
        }

        _currentStreamPosition = newStreamPosition;
        _currentStreamLength = Math.Max(_currentStreamLength, _currentStreamPosition);
    }
    return;
}
/// <summary>
/// See .NET Framework SDK under System.IO.Stream. Reads first from the
/// persisted on-disk region, then from the in-memory suffix for any remainder.
/// </summary>
override public int Read(byte[] buffer, int offset, int count)
{
    CheckDisposed();
    PackagingUtilities.VerifyStreamReadArgs(this, buffer, offset, count);

    // we only expect this to be non-null during Archive Save execution,
    // between the PreSaveNotification call and SaveStreaming
    Debug.Assert(_cachePrefixStream == null);
    Debug.Assert(_currentStreamPosition >= 0);

    if (count == 0)
    {
        return(0);
    }

    if (_currentStreamLength <= _currentStreamPosition)
    {
        // we are past the end of the stream so let's just return 0
        return(0);
    }

    int totalBytesRead;
    int diskBytesRead = 0;
    int diskBytesToRead = 0;
    long persistedTailSize = 0;
    int memoryBytesRead = 0;
    long newStreamPosition = _currentStreamPosition;

    checked
    {
        // Try to satisfy the request with a Read from the disk
        if (newStreamPosition < _persistedSize)
        {
            // we have at least partial overlap between the request and the data on disk.
            // First get the min of the stream's tail and the persisted chunk's tail;
            // in some cases the stream might be smaller,
            // e.g. _currentStreamLength < _persistedSize if the stream was truncated
            persistedTailSize = Math.Min(_currentStreamLength, _persistedSize) - newStreamPosition;
            Debug.Assert(persistedTailSize > 0);

            // we also do not want to read more data than was requested by the user
            diskBytesToRead = (int)Math.Min((long)count, persistedTailSize); // safe cast: count has int type
            Debug.Assert(diskBytesToRead > 0);

            // and now we can actually read it
            _blockManager.Stream.Seek(_persistedOffset + newStreamPosition, SeekOrigin.Begin);

            // we are prepared to get fewer bytes than requested
            diskBytesRead = _blockManager.Stream.Read(buffer, offset, diskBytesToRead);
            newStreamPosition += diskBytesRead;
            count -= diskBytesRead;
            offset += diskBytesRead;

            if (diskBytesRead < diskBytesToRead)
            {
                // we didn't get everything we asked for; in such a case we
                // shouldn't try to get data from the _sparseMemoryStreamSuffix
                _currentStreamPosition = newStreamPosition;
                return(diskBytesRead);
            }
        }

        // check whether we need to get data from the memory stream
        if ((_sparseMemoryStreamSuffix != null) && (newStreamPosition + count > _persistedSize))
        {
            // we are either trying to finish the request partially satisfied by the
            // on disk data or the read is entirely within the suffix
            _sparseMemoryStreamSuffix.Seek(newStreamPosition - _persistedSize, SeekOrigin.Begin);
            memoryBytesRead = _sparseMemoryStreamSuffix.Read(buffer, offset, count);
            newStreamPosition += memoryBytesRead;
        }

        totalBytesRead = diskBytesRead + memoryBytesRead;
    }

    _currentStreamPosition = newStreamPosition;
    return(totalBytesRead);
}
// Reads the entire font stream into a byte array. Seekable streams are read in
// one shot using Length; non-seekable streams are accumulated into a
// geometrically growing buffer.
private static byte [] StreamToByteArray(Stream fontStream)
{
    byte[] memoryFont;
    if (fontStream.CanSeek)
    {
        checked
        {
            memoryFont = new byte[(int)fontStream.Length];
            // NOTE(review): ReliableRead's return value is ignored here, so a
            // short read would leave a zero-padded tail - presumably Length is
            // always fully readable for these streams; confirm.
            PackagingUtilities.ReliableRead(fontStream, memoryFont, 0, (int)fontStream.Length);
        }
    }
    else
    {
        // this is inefficient, but works for now
        // we need to spend more time to implement a more performant
        // version of this code
        // ideally this should be a part of loader functionality

        // Initial file read buffer size is set to 1MB.
        int fileReadBufferSize = 1024 * 1024;
        byte[] fileReadBuffer = new byte[fileReadBufferSize];
        // Actual number of bytes read from the file.
        int memoryFontSize = 0;
        for (; ;)
        {
            int availableBytes = fileReadBufferSize - memoryFontSize;
            if (availableBytes < fileReadBufferSize / 3)
            {
                // grow the fileReadBuffer (double it) when less than a third remains free
                fileReadBufferSize *= 2;
                byte[] newBuffer = new byte[fileReadBufferSize];
                Array.Copy(fileReadBuffer, newBuffer, memoryFontSize);
                fileReadBuffer = newBuffer;
                availableBytes = fileReadBufferSize - memoryFontSize;
            }
            int numberOfBytesRead = fontStream.Read(fileReadBuffer, memoryFontSize, availableBytes);
            if (numberOfBytesRead == 0)
            {
                // End of stream.
                break;
            }
            memoryFontSize += numberOfBytesRead;
        }
        // Actual number of bytes read from the file is less than or equal to the buffer size.
        Debug.Assert(memoryFontSize <= fileReadBufferSize);
        if (memoryFontSize == fileReadBufferSize)
        {
            memoryFont = fileReadBuffer;
        }
        else
        {
            // Trim the array so that it contains exactly the bytes read.
            memoryFont = new byte[memoryFontSize];
            Array.Copy(fileReadBuffer, memoryFont, memoryFontSize);
        }
    }
    return(memoryFont);
}
// Reads the encrypted block(s) covering [start, start+count) from the base
// stream, decrypts them, and stores the clear text in the read cache at the
// corresponding offsets.
private void FetchBlockIntoReadCache(long start, int count)
{
    ///////////////////////////////
    // Calculate the block information that needs to be read
    ///////////////////////////////
    long firstBlockOffset;
    long blockCount;
    int blockSize = _cryptoProvider.BlockSize;

    // this call might potentially change blockSize; when the CryptoProvider
    // supports merging blocks it becomes a multiple of the original block
    // size, big enough to cover the requested area
    CalcBlockData(start, count, _cryptoProvider.CanMergeBlocks,
        ref blockSize, // can be modified to be a multiple of the original value
        out firstBlockOffset, out blockCount);
    Debug.Assert(blockCount > 0, "RightsManagementEncryptedStream.Read Unable to process the request, calculated block count <= 0");

    checked
    {
        ///////////////////////////////
        // READ CRYPTO DATA
        ///////////////////////////////
        // seek to the first block, accounting for the length prefix
        long newPosition = _baseStream.Seek(_prefixLengthSize + firstBlockOffset, SeekOrigin.Begin);
        Debug.Assert(newPosition == _prefixLengthSize + firstBlockOffset, "RightsManagementEncryptedStream.Read Unable to seek to required position");

        // try to read all the required blocks into memory
        int totalByteCount = (int)(blockCount * blockSize);
        byte[] cryptoBuffer = new byte [totalByteCount];
        int bytesRead = PackagingUtilities.ReliableRead(
            _baseStream,
            cryptoBuffer,
            0,
            totalByteCount, // we ask for everything, but accept as little as one crypto block
            _cryptoProvider.BlockSize);

        // we are guaranteed at least one block unless the end of stream was hit
        if (bytesRead < _cryptoProvider.BlockSize)
        {
            // unexpected end of stream
            throw new FileFormatException(SR.Get(SRID.EncryptedDataStreamCorrupt));
        }

        /////////////////////////////////////////////
        // DECRYPT DATA AND STORE IT IN THE READ CACHE
        /////////////////////////////////////////////
        // adjust the block count to the data actually read - it could be as few
        // as one crypto block or as many as totalByteCount bytes
        int readCryptoBlockSize = _cryptoProvider.BlockSize;
        int readCryptoBlockCount = (int)(bytesRead / readCryptoBlockSize); // number of whole blocks read
        Debug.Assert(readCryptoBlockCount >= 1); // we must have at least 1
        if (_cryptoProvider.CanMergeBlocks)
        {
            // decrypt everything as one large merged block
            readCryptoBlockSize *= readCryptoBlockCount;
            readCryptoBlockCount = 1;
        }
        byte[] cryptoTextBlock = new byte [readCryptoBlockSize];

        // position the read cache to accept data at the right offset
        _readCache.Seek(firstBlockOffset, SeekOrigin.Begin);
        for (long i = 0; i < readCryptoBlockCount; i++)
        {
            // copy the appropriate slice of the cryptoBuffer (read from disk)
            // into cryptoTextBlock for decryption
            Array.Copy(cryptoBuffer, i * readCryptoBlockSize, cryptoTextBlock, 0, readCryptoBlockSize);
            byte[] clearTextBlock = _cryptoProvider.Decrypt(cryptoTextBlock);

            // put the results into the read cache
            _readCache.Write(clearTextBlock, 0, readCryptoBlockSize);
        }
    }
}
/// <summary>
/// Compress delegate - invoke ZLib in a manner consistent with RMA/Office
/// </summary>
/// <param name="source">stream to read uncompressed data from</param>
/// <param name="sink">stream to write block-compressed data to</param>
/// <remarks>We are careful to avoid use of Position, Length or SetLength on non-seekable streams. If
/// source or sink are non-seekable, it is assumed that positions are correctly set upon entry and that
/// they need not be restored. We also assume that destination stream length need not be truncated.</remarks>
public void Compress(Stream source, Stream sink)
{
    if (source == null)
    {
        throw new ArgumentNullException("source");
    }

    if (sink == null)
    {
        throw new ArgumentNullException("sink");
    }

    Invariant.Assert(source.CanRead);
    Invariant.Assert(sink.CanWrite, "Logic Error - Cannot compress into a read-only stream");

    // remember this for later if possible
    long storedPosition = -1;      // default to illegal value to catch any logic errors
    try
    {
        int sourceBufferSize;

        // don't allocate 4k for really tiny source streams
        if (source.CanSeek)
        {
            storedPosition = source.Position;
            source.Position = 0;

            // Casting result to int is safe because _defaultBlockSize is very small and the result
            // of Math.Min(x, _defaultBlockSize) must be no larger than _defaultBlockSize.
            sourceBufferSize = (int)(Math.Min(source.Length, (long)_defaultBlockSize));
        }
        else
        {
            sourceBufferSize = _defaultBlockSize;   // can't call Length so fallback to default
        }

        if (sink.CanSeek)
        {
            sink.Position = 0;
        }

        // zlib state
        ZLibNative.ZLibStreamHandle zStream;

        // initialize the zlib library
        ZLibNative.ErrorCode retVal = ZLibNative.CreateZLibStreamForDeflate(
            out zStream,
            ZLibNative.CompressionLevel.DefaultCompression,
            DEFAULT_WINDOW_BITS,
            DEFAULT_MEM_LEVEL,
            ZLibNative.CompressionStrategy.DefaultStrategy);
        ThrowIfZLibError(retVal);

        // where to write data - can actually grow if data is uncompressible
        long destStreamLength = 0;

        byte[] sourceBuf = null;        // source buffer
        byte[] sinkBuf = null;          // destination buffer
        GCHandle gcSourceBuf = new GCHandle();
        GCHandle gcSinkBuf = new GCHandle();
        try
        {
            // allocate managed buffers; the sink buffer is 1.5x the block size
            // so even uncompressible data fits in a single pass
            AllocOrRealloc(sourceBufferSize, ref sourceBuf, ref gcSourceBuf);
            AllocOrRealloc(_defaultBlockSize + (_defaultBlockSize >> 1), ref sinkBuf, ref gcSinkBuf);

            // while (more data is available)
            // - read into the sourceBuf
            // - compress into the sinkBuf
            // - emit the header
            // - write out to the _baseStream

            // Suppress 6518 Local IDisposable object not disposed:
            // Reason: The stream is not owned by us, therefore we cannot
            // close the BinaryWriter as it will Close the stream underneath.
#pragma warning disable 6518
            BinaryWriter writer = new BinaryWriter(sink);
            int bytesRead;
            while ((bytesRead = PackagingUtilities.ReliableRead(source, sourceBuf, 0, sourceBuf.Length)) > 0)
            {
                Invariant.Assert(bytesRead <= sourceBufferSize);

                // prepare structure
                // these pointers must be re-assigned for each loop because
                // ums_deflate modifies them
                zStream.NextIn = gcSourceBuf.AddrOfPinnedObject();
                zStream.NextOut = gcSinkBuf.AddrOfPinnedObject();
                zStream.AvailIn = (uint)bytesRead;        // bytes available for compression at pInBuf; updated by the deflate call
                zStream.AvailOut = (uint)sinkBuf.Length;  // bytes free in pOutBuf; updated by the deflate call

                // cast is safe because SyncFlush is a constant
                retVal = zStream.Deflate(ZLibNative.FlushCode.SyncFlush);
                ThrowIfZLibError(retVal);

                checked
                {
                    int compressedSize = sinkBuf.Length - (int)zStream.AvailOut;
                    Invariant.Assert(compressedSize > 0, "compressing non-zero bytes creates a non-empty block");

                    // This should never happen because our destination buffer
                    // is larger than our source buffer
                    Invariant.Assert(zStream.AvailIn == 0, "Expecting all data to be compressed!");

                    // write the header: token, uncompressed size, compressed size
                    writer.Write(_blockHeaderToken);
                    writer.Write((UInt32)bytesRead);
                    writer.Write((UInt32)compressedSize);
                    destStreamLength += _headerBuf.Length;

                    // write the compressed block to the base stream
                    sink.Write(sinkBuf, 0, compressedSize);
                    destStreamLength += compressedSize;
                }
            }

            // post-compression - truncate any stale tail if necessary
            if (sink.CanSeek)
            {
                sink.SetLength(destStreamLength);
            }
        }
        finally
        {
            // release the pinned buffers
            if (gcSourceBuf.IsAllocated)
            {
                gcSourceBuf.Free();
            }

            if (gcSinkBuf.IsAllocated)
            {
                gcSinkBuf.Free();
            }
        }
#pragma warning restore 6518
    }
    finally
    {
        // Restore the source stream's logical position before returning.
        // BUG FIX: this was previously guarded by sink.CanSeek, which would
        // (a) skip restoring a seekable source when the sink is non-seekable and
        // (b) assign storedPosition (-1) to a NON-seekable source when the sink
        // is seekable, throwing from the finally block. storedPosition is only
        // ever captured when source.CanSeek, so that is the correct guard.
        if (source.CanSeek)
        {
            source.Position = storedPosition;
        }
    }
}
//------------------------------------------------------
//
//  IDeflateTransform Interface
//
//------------------------------------------------------
/// <summary>
/// Decompress delegate - invoke ZLib in a manner consistent with RMA/Office.
/// Reads [header][deflated-block] records from source and writes the inflated
/// bytes to sink, validating each block's sizes against its header.
/// </summary>
/// <param name="source">stream to read from (must be readable)</param>
/// <param name="sink">stream to write to (must be writable)</param>
public void Decompress(Stream source, Stream sink)
{
    if (source == null)
    {
        throw new ArgumentNullException("source");
    }

    if (sink == null)
    {
        throw new ArgumentNullException("sink");
    }

    Invariant.Assert(source.CanRead);
    Invariant.Assert(sink.CanWrite, "Logic Error - Cannot decompress into a read-only stream");

    // remember the caller's position for later restoration (seekable streams only)
    long storedPosition = -1;   // illegal sentinel; only read back when source.CanSeek
    try
    {
        if (source.CanSeek)
        {
            storedPosition = source.Position;
            source.Position = 0;
        }

        if (sink.CanSeek)
        {
            sink.Position = 0;
        }

        // zlib state
        ZLibNative.ZLibStreamHandle zStream;

        // initialize the zlib library
        ZLibNative.ErrorCode retVal = ZLibNative.CreateZLibStreamForInflate(out zStream, DEFAULT_WINDOW_BITS);
        ThrowIfZLibError(retVal);

        byte[] sourceBuf = null;    // source buffer
        byte[] sinkBuf = null;      // destination buffer - where to write data
        GCHandle gcSourceBuf = new GCHandle();  // Preallocate these so we can safely access them
        GCHandle gcSinkBuf = new GCHandle();    // in the next finally block.
        try
        {
            // read all available data
            // each block is preceded by a header that is 3 ulongs
            int uncompressedSize, compressedSize;
            long destStreamLength = 0;  // keep track of decompressed size
            while (ReadBlockHeader(source, out uncompressedSize, out compressedSize))
            {
                // ensure we have space (grows the pinned buffers if needed)
                AllocOrRealloc(compressedSize, ref sourceBuf, ref gcSourceBuf);
                AllocOrRealloc(uncompressedSize, ref sinkBuf, ref gcSinkBuf);

                // read the block's payload into the sourceBuf
                int bytesRead = PackagingUtilities.ReliableRead(source, sourceBuf, 0, compressedSize);
                if (bytesRead > 0)
                {
                    // a short read means the stream ended mid-block - reject it
                    if (compressedSize != bytesRead)
                    {
                        throw new FileFormatException(SR.Get(SRID.CorruptStream));
                    }

                    // prepare structure
                    // The buffer pointers must be reset for every call
                    // because ZLibNative.Inflate modifies them
                    zStream.NextIn = gcSourceBuf.AddrOfPinnedObject();
                    zStream.NextOut = gcSinkBuf.AddrOfPinnedObject();

                    zStream.AvailIn = (uint)bytesRead;        // this is number of bytes available for decompression at pInBuf and is updated by ums_deflate call
                    zStream.AvailOut = (uint)sinkBuf.Length;  // this is the number of bytes free in pOutBuf and is updated by ums_deflate call

                    retVal = zStream.Inflate(ZLibNative.FlushCode.SyncFlush);
                    ThrowIfZLibError(retVal);

                    checked
                    {
                        // bytes actually produced = buffer size minus space zlib left unused
                        int decompressedSize = sinkBuf.Length - (int)zStream.AvailOut;

                        // verify that data matches header
                        if (decompressedSize != uncompressedSize)
                        {
                            throw new FileFormatException(SR.Get(SRID.CorruptStream));
                        }

                        destStreamLength += decompressedSize;

                        // write to the base stream
                        sink.Write(sinkBuf, 0, decompressedSize);
                    }
                }
                else
                {
                    // block header but no block data - only legal if the header
                    // declared an empty block
                    if (compressedSize != 0)
                    {
                        throw new FileFormatException(SR.Get(SRID.CorruptStream));
                    }
                }
            }

            // make sure we truncate if the destination stream was longer than this current decompress
            if (sink.CanSeek)
            {
                sink.SetLength(destStreamLength);
            }
        }
        finally
        {
            if (gcSourceBuf.IsAllocated)
            {
                gcSourceBuf.Free();
            }

            if (gcSinkBuf.IsAllocated)
            {
                gcSinkBuf.Free();
            }
        }
    }
    finally
    {
        // seek to the current logical position before returning
        if (source.CanSeek)
        {
            source.Position = storedPosition;
        }
    }
}