internal void MoveData(long moveBlockSourceOffset, long moveBlockTargetOffset, long moveBlockSize)
{
    if ((moveBlockSize != 0L) && (moveBlockSourceOffset != moveBlockTargetOffset))
    {
        int bytesToMove;
        byte[] buffer = new byte[Math.Min(moveBlockSize, 0x100000L)];   // copy in chunks of at most 1 MB

        for (long i = 0L; i < moveBlockSize; i += bytesToMove)
        {
            long targetPosition;
            long sourcePosition;
            bytesToMove = (int)Math.Min((long)buffer.Length, moveBlockSize - i);

            if (moveBlockSourceOffset > moveBlockTargetOffset)
            {
                // moving data towards the start of the stream - walk the block front to back
                sourcePosition = moveBlockSourceOffset + i;
                targetPosition = moveBlockTargetOffset + i;
            }
            else
            {
                // moving data towards the end of the stream - walk the block back to front
                // so that data is not overwritten before it has been copied
                sourcePosition = ((moveBlockSourceOffset + moveBlockSize) - i) - bytesToMove;
                targetPosition = ((moveBlockTargetOffset + moveBlockSize) - i) - bytesToMove;
            }

            this._archiveStream.Seek(sourcePosition, SeekOrigin.Begin);
            if (PackagingUtilities.ReliableRead(this._archiveStream, buffer, 0, bytesToMove) != bytesToMove)
            {
                throw new FileFormatException(SR.Get("CorruptedData"));
            }

            this._archiveStream.Seek(targetPosition, SeekOrigin.Begin);
            this._archiveStream.Write(buffer, 0, bytesToMove);
        }
    }
}
internal virtual void ParseDataField(BinaryReader reader, UInt16 size)
{
    if (_data == null)
    {
        _data = reader.ReadBytes(size);

        // validate that we didn't reach the end of stream too early
        if (_data.Length != size)
        {
            throw new FileFormatException(SR.Get(SRID.CorruptedData));
        }
    }
    else // there was some data that we sniffed already
    {
        Byte[] tempBuffer = _data;
        _data = new Byte[size];

        Array.Copy(tempBuffer, _data, _size);   // _size contains the size of the data in _data

        checked
        {
            Debug.Assert(size >= _size);
            if ((PackagingUtilities.ReliableRead(reader, _data, _size, size - _size) + _size) != size)
            {
                throw new FileFormatException(SR.Get(SRID.CorruptedData));
            }
        }
    }
    _size = size;
}
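Every method in this listing leans on PackagingUtilities.ReliableRead, which keeps reading until either the requested number of bytes has arrived or the end of the stream is reached, so a short return value only ever means end-of-stream. Below is a minimal sketch of that contract, not the actual WindowsBase implementation; the parameter names and the five-parameter overload with a separate requiredCount (used by FetchBlockIntoReadCache further down) are assumptions.

// Minimal sketch of the ReliableRead contract assumed throughout this listing.
// Not the actual PackagingUtilities source - names and overloads are assumptions.
internal static class ReliableReadSketch
{
    // Read until 'count' bytes are obtained or the stream ends; returns the bytes actually read.
    internal static int ReliableRead(System.IO.Stream stream, byte[] buffer, int offset, int count)
    {
        return ReliableRead(stream, buffer, offset, count, count);
    }

    // Overload with a separate requiredCount: stop once at least requiredCount bytes are
    // available, while opportunistically asking the stream for up to requestedCount.
    internal static int ReliableRead(System.IO.Stream stream, byte[] buffer, int offset,
                                     int requestedCount, int requiredCount)
    {
        System.Diagnostics.Debug.Assert(requiredCount <= requestedCount);

        int totalBytesRead = 0;
        while (totalBytesRead < requiredCount)
        {
            int bytesRead = stream.Read(buffer, offset + totalBytesRead, requestedCount - totalBytesRead);
            if (bytesRead == 0)
            {
                break;  // end of stream - the caller decides whether a short read is an error
            }
            totalBytesRead += bytesRead;
        }
        return totalBytesRead;
    }

    // BinaryReader flavor, as used by the ParseDataField snippets.
    internal static int ReliableRead(System.IO.BinaryReader reader, byte[] buffer, int offset, int count)
    {
        int totalBytesRead = 0;
        while (totalBytesRead < count)
        {
            int bytesRead = reader.Read(buffer, offset + totalBytesRead, count - totalBytesRead);
            if (bytesRead == 0)
            {
                break;
            }
            totalBytesRead += bytesRead;
        }
        return totalBytesRead;
    }
}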
private void CopyImageStream(Stream sourceStream, Stream destinationStream)
{
    byte[] buffer = new byte[_readBlockSize];

    int bytesRead = PackagingUtilities.ReliableRead(sourceStream, buffer, 0, _readBlockSize);
    while (bytesRead > 0)
    {
        destinationStream.Write(buffer, 0, bytesRead);
        bytesRead = PackagingUtilities.ReliableRead(sourceStream, buffer, 0, _readBlockSize);
    }
}
private Uri CopyFontStream()
{
    Uri sourceUri = _fontUri;
    Uri destUri = _fontResourceStream.Uri;
    Stream destStream = _fontResourceStream.Stream;
    Stream sourceStream = null;
    byte[] memoryFont;

    GlyphTypeface glyphTypeface = new GlyphTypeface(sourceUri);

    CodeAccessPermission fontReadPermission = glyphTypeface.CriticalFileReadPermission;
    if (fontReadPermission != null)
    {
        fontReadPermission.Assert();
    }

    try
    {
        sourceStream = glyphTypeface.GetFontStream();
    }
    finally
    {
        if (fontReadPermission != null)
        {
            CodeAccessPermission.RevertAssert();
        }
    }

    memoryFont = new byte[_readBlockSize];
    Guid guid = ParseGuidFromUri(destUri);

    int bytesRead = PackagingUtilities.ReliableRead(sourceStream, memoryFont, 0, _readBlockSize);
    if (bytesRead > 0)
    {
        // Obfuscate the first block
        ObfuscateData(memoryFont, guid);
    }

    while (bytesRead > 0)
    {
        destStream.Write(memoryFont, 0, bytesRead);
        bytesRead = PackagingUtilities.ReliableRead(sourceStream, memoryFont, 0, _readBlockSize);
    }

    Uri fontUri = new Uri(_fontUri.GetComponents(UriComponents.SerializationInfoString, UriFormat.SafeUnescaped),
                          UriKind.RelativeOrAbsolute);
    string fontUriAsString = fontUri.GetComponents(UriComponents.SerializationInfoString, UriFormat.UriEscaped);
    _packagingPolicy.ReleaseResourceStreamForXpsFont(fontUriAsString);

    _streamWritten = true;

    return destUri;
}
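CopyFontStream obfuscates only the first block it writes, because the ODTTF-style obfuscation used for XPS fonts touches only the first bytes of the font data. The sketch below is a schematic of that kind of GUID-based XOR obfuscation; it is not the actual ObfuscateData implementation, and the exact byte ordering is an assumption (the real ordering is defined by the ODTTF part of the XPS specification).

// Schematic sketch of GUID-based XOR obfuscation in the spirit of ObfuscateData above.
// The byte ordering used here is an assumption - consult the ODTTF spec for the exact rule.
internal static class FontObfuscationSketch
{
    internal static void ObfuscateData(byte[] fontData, System.Guid guid)
    {
        // 32 hex digits, no dashes
        string guidString = guid.ToString("N");

        byte[] guidBytes = new byte[16];
        for (int i = 0; i < guidBytes.Length; i++)
        {
            guidBytes[i] = System.Convert.ToByte(guidString.Substring(i * 2, 2), 16);
        }

        // XOR the first 32 bytes of the font data against the GUID bytes
        // (reversed order here - an assumption for illustration only).
        int limit = System.Math.Min(32, fontData.Length);
        for (int i = 0; i < limit; i++)
        {
            fontData[i] ^= guidBytes[guidBytes.Length - 1 - (i % guidBytes.Length)];
        }
    }
}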
/// <summary>
/// ReadBlockHeader - reads the block header and returns true if successful
/// </summary>
/// <param name="source">stream to read from</param>
/// <param name="uncompressedSize">uncompressedSize from header</param>
/// <param name="compressedSize">compressedSize from header</param>
/// <returns>true if header found</returns>
private bool ReadBlockHeader(Stream source, out int uncompressedSize, out int compressedSize)
{
    int bytesRead = PackagingUtilities.ReliableRead(source, _headerBuf, 0, _headerBuf.Length);
    if (bytesRead > 0)
    {
        if (bytesRead < _headerBuf.Length)
        {
            throw new FileFormatException(SR.Get(SRID.CorruptStream));
        }

        // header format = 3 ulong's
        // read and inspect token
        uint token = BitConverter.ToUInt32(_headerBuf, _ulongSize * 0);
        if (token != _blockHeaderToken)
        {
            throw new FileFormatException(SR.Get(SRID.CorruptStream));
        }

        // convert to int's as that's what we use everywhere
        checked
        {
            uncompressedSize = (int)BitConverter.ToUInt32(_headerBuf, _ulongSize * 1);
            compressedSize = (int)BitConverter.ToUInt32(_headerBuf, _ulongSize * 2);

            // screen out malicious data
            if (uncompressedSize < 0 || uncompressedSize > _maxAllowableBlockSize
                || compressedSize < 0 || compressedSize > _maxAllowableBlockSize)
            {
                throw new FileFormatException(SR.Get(SRID.CorruptStream));
            }
        }
    }
    else
    {
        uncompressedSize = compressedSize = 0;
    }

    return bytesRead > 0;
}
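ReadBlockHeader expects each compressed block to be preceded by a header of three little-endian 32-bit values: a fixed token, the uncompressed size, and the compressed size - the same header that Compress writes further down. Assuming _ulongSize is 4 (which the UInt32 reads suggest), the header is 12 bytes. The round-trip sketch below uses placeholder values for the token and size limit, since the real _blockHeaderToken and _maxAllowableBlockSize constants are not part of this listing.

// Round-trip sketch of the 12-byte block header read by ReadBlockHeader and written by Compress.
// TOKEN and MAX_BLOCK are placeholders for constants not shown in this listing.
internal static class BlockHeaderSketch
{
    private const uint TOKEN = 0xDEADBEEF;      // placeholder for _blockHeaderToken
    private const int MAX_BLOCK = 0x100000;     // placeholder for _maxAllowableBlockSize

    internal static void WriteHeader(System.IO.BinaryWriter writer, uint uncompressedSize, uint compressedSize)
    {
        writer.Write(TOKEN);
        writer.Write(uncompressedSize);
        writer.Write(compressedSize);
    }

    internal static bool TryReadHeader(System.IO.Stream source, out int uncompressedSize, out int compressedSize)
    {
        uncompressedSize = compressedSize = 0;

        byte[] header = new byte[12];
        int bytesRead = source.Read(header, 0, header.Length);
        if (bytesRead == 0)
        {
            return false;                       // clean end of stream - no more blocks
        }

        if (bytesRead < header.Length || System.BitConverter.ToUInt32(header, 0) != TOKEN)
        {
            throw new System.IO.InvalidDataException("corrupt block header");
        }

        uncompressedSize = checked((int)System.BitConverter.ToUInt32(header, 4));
        compressedSize = checked((int)System.BitConverter.ToUInt32(header, 8));
        if (uncompressedSize > MAX_BLOCK || compressedSize > MAX_BLOCK)
        {
            throw new System.IO.InvalidDataException("corrupt block header");
        }
        return true;
    }
}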
private static long FindPosition(Stream archiveStream)
{
    byte[] buffer = new byte[_scanBlockSize + 0x16];    // 0x16 == 22 bytes, the fixed minimal record size
    long length = archiveStream.Length;

    for (long i = length; i > 0L; i -= _scanBlockSize)
    {
        long offset = Math.Max((long)0L, (long)(i - _scanBlockSize));
        archiveStream.Seek(offset, SeekOrigin.Begin);

        int bytesRead = PackagingUtilities.ReliableRead(archiveStream, buffer, 0, buffer.Length);
        long bufferOffsetFromEndOfStream = length - offset;

        // scan the buffer backwards for a record match
        for (int j = bytesRead - 0x16; j >= 0; j--)
        {
            if (IsPositionMatched(j, buffer, bufferOffsetFromEndOfStream))
            {
                return offset + j;
            }
        }
    }

    throw new FileFormatException(SR.Get("CorruptedData"));
}
internal virtual void ParseDataField(BinaryReader reader, ushort size)
{
    if (this._data == null)
    {
        this._data = reader.ReadBytes(size);

        // validate that we didn't reach the end of stream too early
        if (this._data.Length != size)
        {
            throw new FileFormatException(SR.Get("CorruptedData"));
        }
    }
    else
    {
        // some of the data was already sniffed - copy it, then read the remainder
        byte[] sourceArray = this._data;
        this._data = new byte[size];
        Array.Copy(sourceArray, this._data, (int)this._size);

        if ((PackagingUtilities.ReliableRead(reader, this._data, this._size, size - this._size) + this._size) != size)
        {
            throw new FileFormatException(SR.Get("CorruptedData"));
        }
    }
    this._size = size;
}
private static long FindPosition(Stream archiveStream)
{
    Debug.Assert(archiveStream.CanSeek);

    byte[] buffer = new byte[_scanBlockSize + _fixedMinimalRecordSize];
    long streamLength = archiveStream.Length;

    for (long endPos = streamLength; endPos > 0; endPos -= _scanBlockSize)
    {
        // calculate the offset of the block to be read, based on the endPos loop variable
        long beginPos = Math.Max(0, endPos - _scanBlockSize);

        // read the block
        archiveStream.Seek(beginPos, SeekOrigin.Begin);

        // The reads that we do actually overlap each other by _fixedMinimalRecordSize bytes.
        // This is done in order to simplify our searching logic: this way we do not need to specially
        // process matches that cross buffer boundaries, as we are guaranteed that if a match is present
        // it falls completely inside one of the buffers, as a result of the overlapping read requests.
        int bytesRead = PackagingUtilities.ReliableRead(archiveStream, buffer, 0, buffer.Length);

        // We need to pass this parameter into the function so it knows
        // the relative position of the buffer with regard to the end of the stream;
        // it needs this info in order to check whether the candidate record
        // has a Comment field length consistent with the position of the record.
        long distanceFromStartOfBufferToTheEndOfStream = streamLength - beginPos;

        for (int i = bytesRead - _fixedMinimalRecordSize; i >= 0; i--)
        {
            if (IsPositionMatched(i, buffer, distanceFromStartOfBufferToTheEndOfStream))
            {
                return beginPos + i;
            }
        }
    }

    // At this point we have finished scanning the file and haven't found anything
    throw new FileFormatException(SR.Get(SRID.CorruptedData));
}
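The overlap reasoning in the comments can be made concrete with a worked example. The sizes below are assumptions chosen for illustration (_scanBlockSize = 0x10000, _fixedMinimalRecordSize = 22, stream length = 200000); the buffer is therefore 65558 bytes.

// Worked example of the overlapping scan windows (sizes are assumptions, see above):
//
//   pass 1: endPos = 200000, beginPos = 134464 -> reads bytes [134464, 200000)  (capped by end of stream)
//   pass 2: endPos = 134464, beginPos =  68928 -> reads bytes [ 68928, 134486)  (65536 + 22)
//   pass 3: endPos =  68928, beginPos =   3392 -> reads bytes [  3392,  68950)  (65536 + 22)
//   pass 4: endPos =   3392, beginPos =      0 -> reads bytes [     0,  65558)
//
// Consecutive windows share 22 bytes, so any 22-byte candidate record lies wholly inside
// at least one window and the backwards scan cannot miss a match at a window boundary.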
//------------------------------------------------------
//
//  Private Methods
//
//------------------------------------------------------

/// <summary>
/// Initial update of _streamCachedLength and _streamOnDiskLength
/// </summary>
private void ParseStreamLength()
{
    if (_streamCachedLength < 0)
    {
        // seek to the beginning of the stream
        _baseStream.Seek(0, SeekOrigin.Begin);

        // read the size prefix
        byte[] prefixData = new byte[_prefixLengthSize];
        int bytesRead = PackagingUtilities.ReliableRead(
            _baseStream, prefixData, 0, prefixData.Length);

        // decode length data (from the prefix)
        if (bytesRead == 0)
        {
            // probably a new stream - just assume length is zero
            _streamOnDiskLength = 0;
        }
        else if (bytesRead < _prefixLengthSize)
        {
            // not zero and shorter than the legal length == corrupt file
            throw new FileFormatException(SR.Get(SRID.EncryptedDataStreamCorrupt));
        }
        else
        {
            checked
            {
                // This will throw on a negative value so we need not
                // explicitly check for that
                _streamOnDiskLength = (long)BitConverter.ToUInt64(prefixData, 0);
            }
        }

        _streamCachedLength = _streamOnDiskLength;
    }
}
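ParseStreamLength decodes an 8-byte unsigned length prefix stored at the start of the base stream. Below is a minimal sketch of the corresponding write side and read side, assuming the prefix is simply the clear-text length encoded with BitConverter on a little-endian platform; the real persistence code is not part of this listing, and it uses ReliableRead rather than a single Read call.

// Sketch of the 8-byte length prefix that ParseStreamLength decodes (assumed layout).
internal static class LengthPrefixSketch
{
    internal static void WriteLengthPrefix(System.IO.Stream baseStream, long clearTextLength)
    {
        baseStream.Seek(0, System.IO.SeekOrigin.Begin);
        byte[] prefix = System.BitConverter.GetBytes((ulong)clearTextLength);   // 8 bytes, little-endian
        baseStream.Write(prefix, 0, prefix.Length);
    }

    internal static long ReadLengthPrefix(System.IO.Stream baseStream)
    {
        baseStream.Seek(0, System.IO.SeekOrigin.Begin);

        byte[] prefix = new byte[sizeof(ulong)];
        // a single Read is enough for a sketch; the real code tolerates short reads via ReliableRead
        int bytesRead = baseStream.Read(prefix, 0, prefix.Length);

        if (bytesRead == 0)
        {
            return 0;                                   // brand new stream - treat as empty
        }
        if (bytesRead < prefix.Length)
        {
            throw new System.IO.InvalidDataException("truncated length prefix");
        }
        return checked((long)System.BitConverter.ToUInt64(prefix, 0));
    }
}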
//------------------------------------------------------
//
//  IDeflateTransform Interface
//
//------------------------------------------------------

/// <summary>
/// Decompress delegate - invoke ZLib in a manner consistent with RMA/Office
/// </summary>
/// <param name="source">stream to read from</param>
/// <param name="sink">stream to write to</param>
public void Decompress(Stream source, Stream sink)
{
    if (source == null)
    {
        throw new ArgumentNullException("source");
    }

    if (sink == null)
    {
        throw new ArgumentNullException("sink");
    }

    Invariant.Assert(source.CanRead);
    Invariant.Assert(sink.CanWrite, "Logic Error - Cannot decompress into a read-only stream");

    // remember this for later
    long storedPosition = -1;

    try
    {
        if (source.CanSeek)
        {
            storedPosition = source.Position;
            source.Position = 0;
        }

        if (sink.CanSeek)
        {
            sink.Position = 0;
        }

        // zlib state
        ZLibNative.ZLibStreamHandle zStream;

        // initialize the zlib library
        ZLibNative.ErrorCode retVal = ZLibNative.CreateZLibStreamForInflate(out zStream, DEFAULT_WINDOW_BITS);
        ThrowIfZLibError(retVal);

        byte[] sourceBuf = null;                    // source buffer
        byte[] sinkBuf = null;                      // destination buffer - where to write data
        GCHandle gcSourceBuf = new GCHandle();      // Preallocate these so we can safely access them
        GCHandle gcSinkBuf = new GCHandle();        // in the next finally block.
        try
        {
            // read all available data
            // each block is preceded by a header that is 3 ulongs
            int uncompressedSize, compressedSize;
            long destStreamLength = 0;              // keep track of the decompressed size
            while (ReadBlockHeader(source, out uncompressedSize, out compressedSize))
            {
                // ensure we have space
                AllocOrRealloc(compressedSize, ref sourceBuf, ref gcSourceBuf);
                AllocOrRealloc(uncompressedSize, ref sinkBuf, ref gcSinkBuf);

                // read the data into the sourceBuf
                int bytesRead = PackagingUtilities.ReliableRead(source, sourceBuf, 0, compressedSize);
                if (bytesRead > 0)
                {
                    if (compressedSize != bytesRead)
                    {
                        throw new FileFormatException(SR.Get(SRID.CorruptStream));
                    }

                    // prepare structure
                    // The buffer pointers must be reset for every call
                    // because ZLibNative.Inflate modifies them
                    zStream.NextIn = gcSourceBuf.AddrOfPinnedObject();
                    zStream.NextOut = gcSinkBuf.AddrOfPinnedObject();

                    zStream.AvailIn = (uint)bytesRead;       // bytes available for decompression - updated by the Inflate call
                    zStream.AvailOut = (uint)sinkBuf.Length; // bytes free in sinkBuf - updated by the Inflate call

                    // decompress this block, leaving the result in sinkBuf
                    retVal = zStream.Inflate(ZLibNative.FlushCode.SyncFlush);
                    ThrowIfZLibError(retVal);

                    checked
                    {
                        int decompressedSize = sinkBuf.Length - (int)zStream.AvailOut;

                        // verify that the data matches the header
                        if (decompressedSize != uncompressedSize)
                        {
                            throw new FileFormatException(SR.Get(SRID.CorruptStream));
                        }

                        destStreamLength += decompressedSize;

                        // write to the base stream
                        sink.Write(sinkBuf, 0, decompressedSize);
                    }
                }
                else
                {
                    // block header but no block data
                    if (compressedSize != 0)
                    {
                        throw new FileFormatException(SR.Get(SRID.CorruptStream));
                    }
                }
            }

            // make sure we truncate if the destination stream was longer than this current decompress
            if (sink.CanSeek)
            {
                sink.SetLength(destStreamLength);
            }
        }
        finally
        {
            if (gcSourceBuf.IsAllocated)
            {
                gcSourceBuf.Free();
            }

            if (gcSinkBuf.IsAllocated)
            {
                gcSinkBuf.Free();
            }
        }
    }
    finally
    {
        // seek back to the original logical position before returning
        if (source.CanSeek)
        {
            source.Position = storedPosition;
        }
    }
}
/// <summary>
/// Compress delegate - invoke ZLib in a manner consistent with RMA/Office
/// </summary>
/// <param name="source">stream to read from</param>
/// <param name="sink">stream to write to</param>
/// <remarks>We are careful to avoid use of Position, Length or SetLength on non-seekable streams. If
/// source or sink are non-seekable, it is assumed that positions are correctly set upon entry and that
/// they need not be restored. We also assume that the destination stream length need not be truncated.</remarks>
public void Compress(Stream source, Stream sink)
{
    if (source == null)
    {
        throw new ArgumentNullException("source");
    }

    if (sink == null)
    {
        throw new ArgumentNullException("sink");
    }

    Invariant.Assert(source.CanRead);
    Invariant.Assert(sink.CanWrite, "Logic Error - Cannot compress into a read-only stream");

    // remember this for later if possible
    long storedPosition = -1;       // default to an illegal value to catch any logic errors

    try
    {
        int sourceBufferSize;

        // don't allocate 4k for really tiny source streams
        if (source.CanSeek)
        {
            storedPosition = source.Position;
            source.Position = 0;

            // Casting the result to int is safe because _defaultBlockSize is very small and the result
            // of Math.Min(x, _defaultBlockSize) can be no larger than _defaultBlockSize.
            sourceBufferSize = (int)(Math.Min(source.Length, (long)_defaultBlockSize));
        }
        else
        {
            sourceBufferSize = _defaultBlockSize;   // can't call Length, so fall back to the default
        }

        if (sink.CanSeek)
        {
            sink.Position = 0;
        }

        // zlib state
        ZLibNative.ZLibStreamHandle zStream;

        // initialize the zlib library
        ZLibNative.ErrorCode retVal = ZLibNative.CreateZLibStreamForDeflate(
            out zStream,
            ZLibNative.CompressionLevel.DefaultCompression,
            DEFAULT_WINDOW_BITS,
            DEFAULT_MEM_LEVEL,
            ZLibNative.CompressionStrategy.DefaultStrategy);
        ThrowIfZLibError(retVal);

        // where to write data - can actually grow if data is incompressible
        long destStreamLength = 0;
        byte[] sourceBuf = null;                    // source buffer
        byte[] sinkBuf = null;                      // destination buffer
        GCHandle gcSourceBuf = new GCHandle();
        GCHandle gcSinkBuf = new GCHandle();
        try
        {
            // allocate managed buffers
            AllocOrRealloc(sourceBufferSize, ref sourceBuf, ref gcSourceBuf);
            AllocOrRealloc(_defaultBlockSize + (_defaultBlockSize >> 1), ref sinkBuf, ref gcSinkBuf);

            // while (more data is available)
            // - read into the sourceBuf
            // - compress into the sinkBuf
            // - emit the header
            // - write out to the _baseStream

            // Suppress 6518 Local IDisposable object not disposed:
            // Reason: The stream is not owned by us, therefore we cannot
            // close the BinaryWriter as it will Close the stream underneath.
#pragma warning disable 6518
            BinaryWriter writer = new BinaryWriter(sink);

            int bytesRead;
            while ((bytesRead = PackagingUtilities.ReliableRead(source, sourceBuf, 0, sourceBuf.Length)) > 0)
            {
                Invariant.Assert(bytesRead <= sourceBufferSize);

                // prepare structure
                // these pointers must be re-assigned on each iteration because
                // the Deflate call modifies them
                zStream.NextIn = gcSourceBuf.AddrOfPinnedObject();
                zStream.NextOut = gcSinkBuf.AddrOfPinnedObject();

                zStream.AvailIn = (uint)bytesRead;       // bytes available for compression - updated by the Deflate call
                zStream.AvailOut = (uint)sinkBuf.Length; // bytes free in sinkBuf - updated by the Deflate call

                retVal = zStream.Deflate(ZLibNative.FlushCode.SyncFlush);
                ThrowIfZLibError(retVal);

                checked
                {
                    int compressedSize = sinkBuf.Length - (int)zStream.AvailOut;
                    Invariant.Assert(compressedSize > 0, "compressing non-zero bytes creates a non-empty block");

                    // This should never happen because our destination buffer
                    // is larger than our source buffer
                    Invariant.Assert(zStream.AvailIn == 0, "Expecting all data to be compressed!");

                    // write the header
                    writer.Write(_blockHeaderToken);            // token
                    writer.Write((UInt32)bytesRead);            // uncompressed size
                    writer.Write((UInt32)compressedSize);       // compressed size
                    destStreamLength += _headerBuf.Length;

                    // write to the base stream
                    sink.Write(sinkBuf, 0, compressedSize);
                    destStreamLength += compressedSize;
                }
            }

            // post-compression: truncate if necessary
            if (sink.CanSeek)
            {
                sink.SetLength(destStreamLength);
            }
        }
        finally
        {
            if (gcSourceBuf.IsAllocated)
            {
                gcSourceBuf.Free();
            }

            if (gcSinkBuf.IsAllocated)
            {
                gcSinkBuf.Free();
            }
        }
#pragma warning restore 6518
    }
    finally
    {
        // restore the original source position before returning
        // (the guard must test the stream whose Position is being restored)
        if (source.CanSeek)
        {
            source.Position = storedPosition;
        }
    }
}
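The Compress and Decompress pair above emulate the Office/RMA block-wise deflate format, so a buffer pushed through Compress and then Decompress should round-trip byte for byte. A hypothetical usage sketch follows; the type name DeflateEmulationTransform is an assumption standing in for whatever concrete type exposes these two methods.

// Hypothetical round-trip through the Compress/Decompress pair above.
// DeflateEmulationTransform is an assumed type name - substitute the real one.
byte[] original = System.Text.Encoding.UTF8.GetBytes("payload worth compressing, repeated repeated repeated");

using (System.IO.MemoryStream clearText = new System.IO.MemoryStream(original))
using (System.IO.MemoryStream compressed = new System.IO.MemoryStream())
using (System.IO.MemoryStream roundTripped = new System.IO.MemoryStream())
{
    DeflateEmulationTransform transform = new DeflateEmulationTransform();   // assumed type name

    transform.Compress(clearText, compressed);       // emits a 12-byte header plus deflate data per block

    transform.Decompress(compressed, roundTripped);  // rewinds seekable streams itself before reading

    System.Diagnostics.Debug.Assert(
        original.Length == roundTripped.Length,      // round trip is expected to be lossless
        "Compress/Decompress round trip changed the payload length");
}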
private void FetchBlockIntoReadCache(long start, int count)
{
    ///////////////////////////////
    // Let's calculate the block information that needs to be read
    ///////////////////////////////
    long firstBlockOffset;
    long blockCount;
    int blockSize = _cryptoProvider.BlockSize;

    // this call might change blockSize; if the CryptoProvider supports merging blocks,
    // it will become a multiple of the original block size, big enough to cover the requested area
    CalcBlockData(start, count,
                  _cryptoProvider.CanMergeBlocks,
                  ref blockSize,        // can be modified to be a multiple of the original value
                  out firstBlockOffset,
                  out blockCount);

    Debug.Assert(blockCount > 0,
        "RightsManagementEncryptedStream.Read Unable to process the request, calculated block count <= 0");

    checked
    {
        ///////////////////////////////
        // READ CRYPTO DATA
        ///////////////////////////////

        // try to seek to the first block
        // this will take the prefix size into account
        long newPosition = _baseStream.Seek(_prefixLengthSize + firstBlockOffset, SeekOrigin.Begin);
        Debug.Assert(newPosition == _prefixLengthSize + firstBlockOffset,
            "RightsManagementEncryptedStream.Read Unable to seek to required position");

        // try to read all the required blocks into memory
        int totalByteCount = (int)(blockCount * blockSize);
        byte[] cryptoBuffer = new byte[totalByteCount];

        int bytesRead = PackagingUtilities.ReliableRead(
            _baseStream,
            cryptoBuffer,
            0,
            totalByteCount,                 // we are asking for all the bytes
            _cryptoProvider.BlockSize);     // we are guaranteed to get at least that much, unless the end of stream is encountered

        if (bytesRead < _cryptoProvider.BlockSize)
        {
            // we have found an unexpected end of stream
            throw new FileFormatException(SR.Get(SRID.EncryptedDataStreamCorrupt));
        }

        /////////////////////////////////////////////
        // DECRYPT DATA AND STORE IT IN THE READ CACHE
        /////////////////////////////////////////////

        // adjust the block count according to the data that we were able to read;
        // it could be as few as _cryptoProvider.BlockSize bytes or as many as totalByteCount
        int readCryptoBlockSize = _cryptoProvider.BlockSize;
        int readCryptoBlockCount = (int)(bytesRead / readCryptoBlockSize);  // figure out how many blocks we read

        Debug.Assert(readCryptoBlockCount >= 1);    // we must have at least 1

        if (_cryptoProvider.CanMergeBlocks)
        {
            readCryptoBlockSize *= readCryptoBlockCount;
            readCryptoBlockCount = 1;
        }

        byte[] cryptoTextBlock = new byte[readCryptoBlockSize];

        // prepare the read cache stream to accept data at the right position
        _readCache.Seek(firstBlockOffset, SeekOrigin.Begin);

        for (long i = 0; i < readCryptoBlockCount; i++)
        {
            // copy the appropriate data from the cryptoBuffer (read from disk)
            // into the cryptoTextBlock for decryption
            Array.Copy(cryptoBuffer, i * readCryptoBlockSize, cryptoTextBlock, 0, readCryptoBlockSize);

            byte[] clearTextBlock = _cryptoProvider.Decrypt(cryptoTextBlock);

            // put the results into the read cache
            _readCache.Write(clearTextBlock, 0, readCryptoBlockSize);
        }
    }
}
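FetchBlockIntoReadCache delegates the block arithmetic to CalcBlockData, which is not part of this listing. Presumably it rounds start down to a block boundary and counts how many whole blocks cover [start, start + count); the sketch below illustrates that arithmetic under that assumption and is not the actual implementation.

// Illustration of the block arithmetic CalcBlockData presumably performs (an assumption -
// the real method is not shown in this listing). Assumes count > 0 and blockSize > 0.
internal static void CalcBlockDataSketch(long start, int count, int blockSize,
                                         out long firstBlockOffset, out long blockCount)
{
    // round the start of the request down to a block boundary
    long firstBlock = start / blockSize;
    firstBlockOffset = firstBlock * blockSize;

    // last byte that must be covered, then the block containing it
    long lastByte = start + count - 1;
    long lastBlock = lastByte / blockSize;

    blockCount = lastBlock - firstBlock + 1;
}

// e.g. start = 5000, count = 300, blockSize = 4096
//   -> firstBlockOffset = 4096, blockCount = 1   (bytes 5000..5299 fit inside block #1)
// e.g. start = 4000, count = 300, blockSize = 4096
//   -> firstBlockOffset = 0,    blockCount = 2   (the request spans blocks #0 and #1)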
private static byte[] StreamToByteArray(Stream fontStream)
{
    byte[] memoryFont;

    if (fontStream.CanSeek)
    {
        checked
        {
            memoryFont = new byte[(int)fontStream.Length];
            PackagingUtilities.ReliableRead(fontStream, memoryFont, 0, (int)fontStream.Length);
        }
    }
    else
    {
        // This is inefficient, but works for now.
        // We need to spend more time to implement a more performant version of this code;
        // ideally this should be a part of loader functionality.

        // Initial file read buffer size is set to 1MB.
        int fileReadBufferSize = 1024 * 1024;
        byte[] fileReadBuffer = new byte[fileReadBufferSize];

        // Actual number of bytes read from the file.
        int memoryFontSize = 0;

        for (;;)
        {
            int availableBytes = fileReadBufferSize - memoryFontSize;
            if (availableBytes < fileReadBufferSize / 3)
            {
                // grow the fileReadBuffer
                fileReadBufferSize *= 2;
                byte[] newBuffer = new byte[fileReadBufferSize];
                Array.Copy(fileReadBuffer, newBuffer, memoryFontSize);
                fileReadBuffer = newBuffer;
                availableBytes = fileReadBufferSize - memoryFontSize;
            }

            int numberOfBytesRead = fontStream.Read(fileReadBuffer, memoryFontSize, availableBytes);
            if (numberOfBytesRead == 0)
            {
                break;
            }

            memoryFontSize += numberOfBytesRead;
        }

        // Actual number of bytes read from the file is less than or equal to the file read buffer size.
        Debug.Assert(memoryFontSize <= fileReadBufferSize);

        if (memoryFontSize == fileReadBufferSize)
        {
            memoryFont = fileReadBuffer;
        }
        else
        {
            // Trim the array so that it contains exactly the bytes that were read.
            memoryFont = new byte[memoryFontSize];
            Array.Copy(fileReadBuffer, memoryFont, memoryFontSize);
        }
    }

    return memoryFont;
}
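The non-seekable branch above hand-rolls a growing buffer, and its own comment flags it as a stopgap. On current frameworks the same result can be obtained with MemoryStream and Stream.CopyTo; the sketch below is offered as a possible simplification, not as a description of the shipped code.

// Possible simplification of the non-seekable path using MemoryStream/CopyTo.
// Behaviour differs slightly from the original (extra copy into the MemoryStream buffer),
// so this is an alternative sketch rather than a drop-in replacement.
private static byte[] StreamToByteArraySimple(System.IO.Stream fontStream)
{
    using (System.IO.MemoryStream memory = new System.IO.MemoryStream())
    {
        fontStream.CopyTo(memory);      // reads until the end of the stream
        return memory.ToArray();        // trimmed copy containing exactly the bytes read
    }
}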