// Initializes the calculator: sets up the constant and function caches, wires
// an IronPython engine whose stdout/stderr are redirected into an in-memory
// history stream, and pre-populates the scope with the built-in function types.
public CalculatorEngine()
{
    TrigonometryMode = TrigonometryMode.DEG; // default angle unit is degrees
    ConstantDB = new ConstantDB();
    _linebuffer = new StringBuilder();
    _functioncache = new Dictionary <string, FunctionInformation>();
    _preprocessor = new Preprocessor(_functioncache, ConstantDB);

    // Configure the Python engine's division behavior up front (the options
    // dictionary is consumed by Python.CreateEngine below).
    var options = new Dictionary <string, object>();
    options["DivisionOptions"] = PythonDivisionOptions.New;

    // Redirect all engine output into _history; the wrapping writer raises
    // StreamWasWritten so the host can observe every write.
    _history = new ZeroStream();
    _output = new EventRedirectedStreamWriter(_history);
    _output.StreamWasWritten += _output_StreamWasWritten;

    _engine = Python.CreateEngine(options);
    _engine.Runtime.IO.SetOutput(_history, _output);
    _engine.Runtime.IO.SetErrorOutput(_history, _output);
    _scope = _engine.CreateScope();

    // Types whose functions are registered into the cache and Python scope.
    _functionTypes = new Type[] {
        typeof(Trigonometry), typeof(Engineering), typeof(GeneralFunctions),
        typeof(Variations), typeof(TypeFunctions), typeof(Statistics)
    };
    FunctionCache.Fill(ref _functioncache, ref _scope, _functionTypes);
}
/// <summary>
/// Constructing an HFS+ file system over a stream of zeros (no valid volume
/// header) must raise <see cref="InvalidDataException"/>.
/// </summary>
public void Constructor_InvalidData_Throws()
{
    using (Stream zeros = new ZeroStream(0x1000))
    {
        Assert.Throws <InvalidDataException>(() => new HfsPlusFileSystem(zeros));
    }
}
// Opens the disk content as a mapped stream.
// parent: content of the parent disk (used only by differencing disks).
// ownsParent: whether this call takes ownership of (and must dispose) parent.
internal MappedStream DoOpenContent(SparseStream parent, Ownership ownsParent)
{
    if (_footer.DiskType == FileType.Fixed)
    {
        // Fixed disks have no parent; honour the ownership contract by
        // disposing a parent we were handed before discarding it.
        if (parent != null && ownsParent == Ownership.Dispose)
        {
            parent.Dispose();
        }

        // Content is the file minus the 512-byte footer at the end.
        return(new SubStream(_fileStream, 0, _fileStream.Length - 512));
    }

    if (_footer.DiskType == FileType.Dynamic)
    {
        // Dynamic (non-differencing) disks also ignore any supplied parent;
        // unallocated blocks read as zeros via the ZeroStream backing.
        if (parent != null && ownsParent == Ownership.Dispose)
        {
            parent.Dispose();
        }

        return(new DynamicStream(_fileStream, _dynamicHeader, _footer.CurrentSize,
            new ZeroStream(_footer.CurrentSize), Ownership.Dispose));
    }

    // Differencing disk: fall back to an all-zero parent if none was given.
    if (parent == null)
    {
        parent = new ZeroStream(_footer.CurrentSize);
        ownsParent = Ownership.Dispose;
    }

    return(new DynamicStream(_fileStream, _dynamicHeader, _footer.CurrentSize, parent, ownsParent));
}
/// <summary>
/// Maps two fill-byte streams at offsets 3 and 8 of a MapStream and checks
/// that reads spanning the first stream, the boundary between the two, and
/// the full mapped range return the expected bytes.
/// </summary>
public void TestRead()
{
    var first = new ZeroStream(5, 0x11);
    var second = new ZeroStream(3, 0x22);
    var map = new MapStream();
    map.Map(3, first);
    map.Map(3 + 5, second);

    map.Position = 4;
    byte[] chunk = map.ReadBytesUpTo(3);
    Assert.Equal(new byte[] { 0x11, 0x11, 0x11 }, chunk);

    // This read crosses from the first mapped stream into the second.
    chunk = map.ReadBytesUpTo(3);
    Assert.Equal(new byte[] { 0x11, 0x22, 0x22 }, chunk);

    chunk = map.ReadBytesUpTo(1);
    Assert.Equal(new byte[] { 0x22 }, chunk);

    // Re-read the whole mapped range in one go.
    map.Position = 3;
    chunk = map.ReadBytesUpTo(8);
    Assert.Equal(new byte[] { 0x11, 0x11, 0x11, 0x11, 0x11, 0x22, 0x22, 0x22 }, chunk);
}
/// <summary>
/// Sends a configurable number of megabytes of zero bytes to a connected
/// client over TLS and reports the measured throughput.
/// </summary>
/// <param name="options">Payload size, buffer length and sync/async copy mode.</param>
/// <param name="cert">Certificate used to authenticate the TLS server side.</param>
/// <param name="client">Accepted client connection; disposed when done.</param>
private static async Task SendDataAsync(ServerOptions options, X509Certificate2 cert, TcpClient client)
{
    using (client)
    using (var stream = new SslStream(client.GetStream()))
    {
        await stream.AuthenticateAsServerAsync(cert);

        // Cast to long BEFORE multiplying: Megabytes * 1024 * 1024 in Int32
        // overflows for payloads of 2 GiB (2048 MB) or more.
        var data = new ZeroStream((long)options.Megabytes * 1024 * 1024);

        Console.WriteLine();
        Console.WriteLine($"Sending {string.Format("{0:n0}", data.Length)} bytes...");

        var sw = Stopwatch.StartNew();
        if (options.Sync)
        {
            data.CopyTo(stream, options.BufferLength);
        }
        else
        {
            await data.CopyToAsync(stream, options.BufferLength);
        }
        sw.Stop();

        // Use floating-point arithmetic so the megabit count is not truncated
        // by integer division before the time division.
        var mbps = (data.Length * 8d / (1024 * 1024)) / sw.Elapsed.TotalSeconds;
        Console.WriteLine($"Sent {string.Format("{0:n0}", data.Length)} bytes in {Math.Round(sw.Elapsed.TotalSeconds, 3)} seconds ({mbps} Mbps)");
    }
}
/// <summary>
/// Returns the number of bytes used by the JSON of an object.
/// </summary>
/// <param name="value">The value.</param>
/// <param name="settings">The settings.</param>
/// <returns>The number of bytes used by the JSON of an object.</returns>
public static int ToJsonByteCount(object value, JsonSettings settings)
{
    // Serialize into a counting sink; only the resulting length is kept.
    using (var counter = new ZeroStream())
    {
        ToJsonStream(value, settings, counter);
        return (int)counter.Length;
    }
}
// Cancels the token before starting a ReadExactAsync; the await is expected
// to throw (presumably OperationCanceledException/TaskCanceledException).
// NOTE(review): there is no Assert.ThrowsAsync here — this test appears to
// rely on an expected-exception attribute outside this view; confirm.
public async Task ReadExactAsync_ShouldThrowIfCancellationIsRequested()
{
    using (var cts = new CancellationTokenSource())
    {
        cts.Cancel(); // request cancellation up front

        var stream = new ZeroStream();
        await stream.ReadExactAsync(Buffer, 0, 1, cts.Token);
    }
}
/// <summary>
/// Seeking past the end and then writing must grow the stream so that its
/// length covers the written byte.
/// </summary>
public void WriteAtPositionPastEnd()
{
    var zeros = new ZeroStream();
    zeros.Position = 100;

    // Seeking alone does not change the length...
    Assert.AreEqual(0, zeros.Length);

    zeros.WriteByte(1);

    // ...but writing at position 100 extends the stream to 101 bytes.
    Assert.AreEqual(101, zeros.Length);
}
/// <summary>
/// A ZeroStream created with an explicit length reports that length, starts
/// at position zero, and supports read, seek and write.
/// </summary>
public void ConstructorWithLength()
{
    var zeros = new ZeroStream(1234);

    Assert.IsTrue(zeros.CanRead);
    Assert.IsTrue(zeros.CanSeek);
    Assert.IsTrue(zeros.CanWrite);
    Assert.AreEqual(1234, zeros.Length);
    Assert.AreEqual(0, zeros.Position);
}
/// <summary>
/// ReadBytesUpTo returns at most what remains in the stream, each byte being
/// the configured fill value.
/// </summary>
public void TestRead()
{
    var filled = new ZeroStream(7, 0x11);

    byte[] head = filled.ReadBytesUpTo(3);
    Assert.Equal(new byte[] { 0x11, 0x11, 0x11 }, head);

    // Only 4 bytes remain, so a request for 7 yields just 4.
    byte[] tail = filled.ReadBytesUpTo(7);
    Assert.Equal(new byte[] { 0x11, 0x11, 0x11, 0x11 }, tail);
}
/// <summary>
/// A default-constructed ZeroStream is empty, positioned at zero, and
/// readable, seekable and writable.
/// </summary>
public void Constructor()
{
    var zeros = new ZeroStream();

    Assert.IsTrue(zeros.CanRead);
    Assert.IsTrue(zeros.CanSeek);
    Assert.IsTrue(zeros.CanWrite);
    Assert.AreEqual(0, zeros.Length);
    Assert.AreEqual(0, zeros.Position);
}
/// <summary>
/// Releases the redirected output writer and the history stream, clearing
/// both fields so a repeated call is a no-op.
/// </summary>
public void Dispose()
{
    _output?.Dispose();
    _output = null;

    _history?.Dispose();
    _history = null;
}
// Opens the disk content, substituting an all-zero parent (which the
// returned stream then owns and must dispose) when no parent was supplied.
internal MappedStream DoOpenContent(SparseStream parent, Ownership ownsParent)
{
    SparseStream effectiveParent;
    Ownership effectiveOwnership;

    if (parent == null)
    {
        effectiveParent = new ZeroStream(Capacity);
        effectiveOwnership = Ownership.Dispose;
    }
    else
    {
        effectiveParent = parent;
        effectiveOwnership = ownsParent;
    }

    return new ContentStream(SparseStream.FromStream(_fileStream, Ownership.None),
        _bat, Capacity, effectiveParent, effectiveOwnership);
}
/// <summary>
/// Writes are accepted but the data is discarded: reading back a written
/// position yields a zero byte, and reads at end-of-stream yield -1.
/// </summary>
public void WriteAndRead()
{
    var zeros = new ZeroStream();

    // Empty stream: nothing to read.
    Assert.AreEqual(-1, zeros.ReadByte());

    zeros.WriteByte(1);

    // Position is now at the end, so this read fails...
    Assert.AreEqual(-1, zeros.ReadByte());

    // ...but stepping back one byte reads a zero, not the written 1.
    zeros.Position--;
    Assert.AreEqual(0, zeros.ReadByte());
    Assert.AreEqual(1, zeros.Length);

    Assert.AreEqual(-1, zeros.ReadByte());
    Assert.AreEqual(1, zeros.Length);
}
/// <summary>
/// Reading from an empty stream returns 0; after SetLength the stream yields
/// that many zero bytes and overwrites only that prefix of the buffer.
/// </summary>
public void ReadEmpty()
{
    byte[] destination = new byte[16];
    var zeros = new ZeroStream();

    Assert.AreEqual(0, zeros.Read(destination, 0, destination.Length));

    zeros.SetLength(7);

    // Sentinels: index 0 must be zeroed by the read, index 7 must survive.
    destination[0] = 1;
    destination[7] = 1;

    Assert.AreEqual(7, zeros.Read(destination, 0, destination.Length));
    Assert.AreEqual(0, destination[0]);
    Assert.AreEqual(1, destination[7]);

    // The stream is now exhausted.
    Assert.AreEqual(0, zeros.Read(destination, 0, destination.Length));
}
// Opens the disk content, substituting an all-zero parent (owned by the
// returned stream) when none is supplied; access is aligned to the
// metadata's logical sector size.
internal MappedStream DoOpenContent(SparseStream parent, Ownership ownsParent)
{
    SparseStream effectiveParent = parent;
    Ownership effectiveOwnership = ownsParent;

    if (effectiveParent == null)
    {
        effectiveParent = new ZeroStream(Capacity);
        effectiveOwnership = Ownership.Dispose;
    }

    var content = new ContentStream(SparseStream.FromStream(_logicalStream, Ownership.None),
        _fileStream.CanWrite, _batStream, _freeSpace, _metadata, Capacity,
        effectiveParent, effectiveOwnership);

    return new AligningStream(content, Ownership.Dispose, (int)_metadata.LogicalSectorSize);
}
/// <summary>
/// Gets the contents of this disk as a stream.
/// </summary>
/// <param name="parent">The content of the parent disk (needed if this is a differencing disk).</param>
/// <param name="ownsParent">A value indicating whether ownership of the parent stream is transfered.</param>
/// <returns>The stream containing the disk contents.</returns>
public override SparseStream OpenContent(SparseStream parent, Ownership ownsParent)
{
    // A ParentContentId of uint.MaxValue marks a disk with no parent; any
    // supplied parent stream is irrelevant, so dispose it if we own it.
    if (_descriptor.ParentContentId == uint.MaxValue)
    {
        if (parent != null && ownsParent == Ownership.Dispose)
        {
            parent.Dispose();
        }

        parent = null;
    }

    // No parent available: back unallocated regions with zeros.
    if (parent == null)
    {
        parent = new ZeroStream(Capacity);
        ownsParent = Ownership.Dispose;
    }

    if (_descriptor.Extents.Count == 1)
    {
        if (_monolithicStream != null)
        {
            // Single-extent disk held in one already-open stream.
            return(new HostedSparseExtentStream(
                _monolithicStream,
                Ownership.None,
                0,
                parent,
                ownsParent));
        }
        else
        {
            return(OpenExtent(_descriptor.Extents[0], 0, parent, ownsParent));
        }
    }
    else
    {
        // Multi-extent disk: open each extent at its running byte offset and
        // concatenate them; only the LAST extent takes ownership of parent so
        // it is not disposed multiple times.
        long extentStart = 0;
        SparseStream[] streams = new SparseStream[_descriptor.Extents.Count];
        for (int i = 0; i < streams.Length; ++i)
        {
            streams[i] = OpenExtent(_descriptor.Extents[i], extentStart, parent, (i == streams.Length - 1) ? ownsParent : Ownership.None);
            extentStart += _descriptor.Extents[i].SizeInSectors * Sizes.Sector;
        }

        return(new ConcatStream(Ownership.Dispose, streams));
    }
}
/// <summary>
/// SetLength shrinks the stream and pulls the position back inside the new
/// bounds when it would otherwise point past the end.
/// </summary>
public void Truncate()
{
    var zeros = new ZeroStream();

    zeros.WriteByte(1);
    Assert.AreEqual(1, zeros.Length);
    Assert.AreEqual(1, zeros.Position);

    zeros.WriteByte(2);
    Assert.AreEqual(2, zeros.Length);
    Assert.AreEqual(2, zeros.Position);

    // Shrinking to 1 byte also moves the position back to 1.
    zeros.SetLength(1);
    Assert.AreEqual(1, zeros.Length);
    Assert.AreEqual(1, zeros.Position);

    zeros.SetLength(0);
    Assert.AreEqual(0, zeros.Length);
    Assert.AreEqual(0, zeros.Position);
}
/// <summary>
/// Sends zero-filled payload data across several TLS client connections via
/// a MultiStream and reports the aggregate throughput. All clients are
/// disposed when the transfer finishes or fails.
/// </summary>
/// <param name="options">Payload size, connection count, buffer lengths and copy mode.</param>
/// <param name="cert">Certificate used to authenticate the TLS server side.</param>
/// <param name="clients">Accepted client connections; all disposed in the finally block.</param>
private static async Task SendDataAsync(ServerOptions options, X509Certificate2 cert, TcpClient[] clients)
{
    // Cast to long BEFORE multiplying: Megabytes * 1024 * 1024 * Connections
    // is Int32 arithmetic in the original and overflows for totals >= 2 GiB.
    var data = new ZeroStream((long)options.Megabytes * 1024 * 1024 * options.Connections);
    try
    {
        var streams = new Stream[clients.Length];
        for (var i = 0; i < streams.Length; i++)
        {
            var stream = new SslStream(clients[i].GetStream());
            await stream.AuthenticateAsServerAsync(cert);
            streams[i] = stream;
        }

        using (var multiStream = new MultiStream(streams, options.MultiStreamBlockLength))
        {
            Console.WriteLine();
            Console.WriteLine($"Sending {string.Format("{0:n0}", data.Length)} bytes...");

            var sw = Stopwatch.StartNew();
            if (options.Sync)
            {
                data.CopyTo(multiStream, options.BufferLength);
            }
            else
            {
                await data.CopyToAsync(multiStream, options.BufferLength);
            }
            sw.Stop();

            // Use floating-point arithmetic so the megabit count is not
            // truncated by integer division before the time division.
            var mbps = (data.Length * 8d / (1024 * 1024)) / sw.Elapsed.TotalSeconds;
            Console.WriteLine($"Sent {string.Format("{0:n0}", data.Length)} bytes in {Math.Round(sw.Elapsed.TotalSeconds, 3)} seconds ({mbps} Mbps)");
        }
    }
    finally
    {
        foreach (var client in clients)
        {
            ((IDisposable)client).Dispose();
        }
    }
}
/// <summary>
/// Performs reads at 100..108, 104..112 and 200..216 through the analyzer
/// and checks that overlapping reads are coalesced into two usage ranges.
/// </summary>
public void TestReadAnalyzing()
{
    var backing = new ZeroStream(0x1000000);
    var analyzer = new ProxyStreamReadWriteAnalyzer(backing);

    // Two overlapping reads followed by a disjoint one.
    analyzer.Position = 100;
    analyzer.ReadBytes(8);
    analyzer.Position = 104;
    analyzer.ReadBytes(8);
    analyzer.Position = 200;
    analyzer.ReadBytes(16);

    var usage = analyzer.ReadUsage;
    Assert.AreEqual(
        "Space(Min=100, Max=112),Space(Min=200, Max=216)",
        usage.ToStringArray()
    );
}
/// <summary>
/// Gets the contents of this disk as a stream.
/// </summary>
internal SparseStream OpenContent(SparseStream parent, Ownership ownsParent)
{
    // A ParentContentId of uint.MaxValue marks a disk with no parent; any
    // supplied parent stream is irrelevant, so dispose it if we own it.
    if (_descriptor.ParentContentId == uint.MaxValue)
    {
        if (parent != null && ownsParent == Ownership.Dispose)
        {
            parent.Dispose();
        }

        parent = null;
    }

    // No parent available: back unallocated regions with zeros.
    if (parent == null)
    {
        parent = new ZeroStream(Capacity);
        ownsParent = Ownership.Dispose;
    }

    if (_descriptor.Extents.Count == 1)
    {
        if (_monolithicStream != null)
        {
            // Single-extent disk held in one already-open stream.
            return new HostedSparseExtentStream(
                _monolithicStream,
                Ownership.None,
                0,
                parent,
                ownsParent);
        }
        else
        {
            return OpenExtent(_descriptor.Extents[0], 0, parent, ownsParent);
        }
    }
    else
    {
        // Multi-extent disk: open each extent at its running byte offset and
        // concatenate them; only the LAST extent takes ownership of parent so
        // it is not disposed multiple times.
        long extentStart = 0;
        SparseStream[] streams = new SparseStream[_descriptor.Extents.Count];
        for (int i = 0; i < streams.Length; ++i)
        {
            streams[i] = OpenExtent(_descriptor.Extents[i], extentStart, parent, (i == streams.Length - 1) ? ownsParent : Ownership.None);
            extentStart += _descriptor.Extents[i].SizeInSectors * Sizes.Sector;
        }

        return new ConcatStream(Ownership.Dispose, streams);
    }
}
/// <summary>
/// Creates a new stream that contains the XVA image.
/// </summary>
/// <returns>The new stream.</returns>
public override SparseStream Build()
{
    TarFileBuilder tarBuilder = new TarFileBuilder();

    int[] diskIds;

    // The ova.xml manifest describes the disks and assigns the ids used to
    // name the per-chunk tar entries below.
    string ovaFileContent = GenerateOvaXml(out diskIds);
    tarBuilder.AddFile("ova.xml", Encoding.ASCII.GetBytes(ovaFileContent));

    int diskIdx = 0;
    foreach (DiskRecord diskRec in _disks)
    {
        SparseStream diskStream = diskRec.Item2;
        List <StreamExtent> extents = new List <StreamExtent>(diskStream.Extents);

        int lastChunkAdded = -1;
        foreach (StreamExtent extent in extents)
        {
            // Disks are stored as 1 MiB chunks; only chunks overlapping an
            // allocated extent are emitted.
            int firstChunk = (int)(extent.Start / Sizes.OneMiB);
            int lastChunk = (int)((extent.Start + extent.Length - 1) / Sizes.OneMiB);

            for (int i = firstChunk; i <= lastChunk; ++i)
            {
                if (i != lastChunkAdded)
                {
                    Stream chunkStream;

                    // Pad the final partial chunk with zeros up to 1 MiB.
                    long diskBytesLeft = diskStream.Length - i * Sizes.OneMiB;
                    if (diskBytesLeft < Sizes.OneMiB)
                    {
                        chunkStream = new ConcatStream(
                            Ownership.Dispose,
                            new SubStream(diskStream, i * Sizes.OneMiB, diskBytesLeft),
                            new ZeroStream(Sizes.OneMiB - diskBytesLeft));
                    }
                    else
                    {
                        chunkStream = new SubStream(diskStream, i * Sizes.OneMiB, Sizes.OneMiB);
                    }

                    // Wrap the chunk so a SHA1 digest is accumulated as it is
                    // consumed; the digest becomes the ".checksum" entry.
                    Stream chunkHashStream;
#if NETCORE
                    IncrementalHash hashAlgCore = IncrementalHash.CreateHash(HashAlgorithmName.SHA1);
                    chunkHashStream = new HashStreamCore(chunkStream, Ownership.Dispose, hashAlgCore);
#else
                    HashAlgorithm hashAlgDotnet = new SHA1Managed();
                    chunkHashStream = new HashStreamDotnet(chunkStream, Ownership.Dispose, hashAlgDotnet);
#endif

                    tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}", diskIds[diskIdx], i), chunkHashStream);

                    // NOTE(review): the digest is read immediately after
                    // AddFile, which implies AddFile consumes the stream
                    // eagerly — confirm against TarFileBuilder.
                    byte[] hash;
#if NETCORE
                    hash = hashAlgCore.GetHashAndReset();
#else
                    hashAlgDotnet.TransformFinalBlock(new byte[0], 0, 0);
                    hash = hashAlgDotnet.Hash;
#endif

                    string hashString = BitConverter.ToString(hash).Replace("-", "").ToLower();
                    byte[] hashStringAscii = Encoding.ASCII.GetBytes(hashString);
                    tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}.checksum", diskIds[diskIdx], i), hashStringAscii);

                    lastChunkAdded = i;
                }
            }
        }

        // Make sure the last chunk is present, filled with zero's if necessary
        int lastActualChunk = (int)((diskStream.Length - 1) / Sizes.OneMiB);
        if (lastChunkAdded < lastActualChunk)
        {
            Stream chunkStream = new ZeroStream(Sizes.OneMiB);

            Stream chunkHashStream;
#if NETCORE
            IncrementalHash hashAlgCore = IncrementalHash.CreateHash(HashAlgorithmName.SHA1);
            chunkHashStream = new HashStreamCore(chunkStream, Ownership.Dispose, hashAlgCore);
#else
            HashAlgorithm hashAlgDotnet = new SHA1Managed();
            chunkHashStream = new HashStreamDotnet(chunkStream, Ownership.Dispose, hashAlgDotnet);
#endif

            tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}", diskIds[diskIdx], lastActualChunk), chunkHashStream);

            byte[] hash;
#if NETCORE
            hash = hashAlgCore.GetHashAndReset();
#else
            hashAlgDotnet.TransformFinalBlock(new byte[0], 0, 0);
            hash = hashAlgDotnet.Hash;
#endif

            string hashString = BitConverter.ToString(hash).Replace("-", "").ToLower();
            byte[] hashStringAscii = Encoding.ASCII.GetBytes(hashString);
            tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}.checksum", diskIds[diskIdx], lastActualChunk), hashStringAscii);
        }

        ++diskIdx;
    }

    return(tarBuilder.Build());
}
// Opens this extent's content as a mapped stream.
// parent: parent disk content for delta extents; may be null.
// ownsParent: whether this call takes ownership of parent.
public override MappedStream OpenContent(SparseStream parent, Ownership ownsParent)
{
    // Open read-only unless both the descriptor and the disk allow writes.
    FileAccess access = FileAccess.Read;
    FileShare share = FileShare.Read;
    if (_descriptor.Access == ExtentAccess.ReadWrite && _access != FileAccess.Read)
    {
        access = FileAccess.ReadWrite;
        share = FileShare.None;
    }

    if (_descriptor.Type != ExtentType.Sparse && _descriptor.Type != ExtentType.VmfsSparse && _descriptor.Type != ExtentType.Zero)
    {
        // Non-sparse extents never consult a parent; dispose one we own.
        if (ownsParent == Ownership.Dispose && parent != null)
        {
            parent.Dispose();
        }
    }
    else if (parent == null)
    {
        // Sparse extent with no parent: unallocated regions read as zeros.
        // NOTE(review): ownsParent is NOT switched to Dispose here, unlike
        // sibling implementations that do so — presumably disposing a
        // ZeroStream is a no-op, but confirm intent.
        parent = new ZeroStream(_descriptor.SizeInSectors * Sizes.Sector);
    }

    if (_monolithicStream != null)
    {
        // Early-out for monolithic VMDKs
        return new HostedSparseExtentStream(
            _monolithicStream,
            Ownership.None,
            _diskOffset,
            parent,
            ownsParent);
    }
    else
    {
        switch (_descriptor.Type)
        {
            case ExtentType.Flat:
            case ExtentType.Vmfs:
                return MappedStream.FromStream(
                    _fileLocator.Open(_descriptor.FileName, FileMode.Open, access, share),
                    Ownership.Dispose);

            case ExtentType.Zero:
                // NOTE(review): uses Utilities.SectorSize where the rest of
                // this method uses Sizes.Sector — confirm the two agree.
                return new ZeroStream(_descriptor.SizeInSectors * Utilities.SectorSize);

            case ExtentType.Sparse:
                return new HostedSparseExtentStream(
                    _fileLocator.Open(_descriptor.FileName, FileMode.Open, access, share),
                    Ownership.Dispose,
                    _diskOffset,
                    parent,
                    ownsParent);

            case ExtentType.VmfsSparse:
                return new ServerSparseExtentStream(
                    _fileLocator.Open(_descriptor.FileName, FileMode.Open, access, share),
                    Ownership.Dispose,
                    _diskOffset,
                    parent,
                    ownsParent);

            default:
                throw new NotSupportedException();
        }
    }
}
/// <summary>
/// A null buffer passed to ReadExact should throw (the expected exception is
/// presumably declared via a test attribute outside this view).
/// </summary>
public void ReadExact_ShouldThrowIfBufferIsNull()
{
    var zeros = new ZeroStream();

    zeros.ReadExact(null, 0, 1);
}
// Opens the disk content as a mapped stream.
// parent: content of the parent disk (used only by differencing disks).
// ownsParent: whether this call takes ownership of (and must dispose) parent.
internal MappedStream DoOpenContent(SparseStream parent, Ownership ownsParent)
{
    if (_footer.DiskType == FileType.Fixed)
    {
        // Fixed disks have no parent; honour the ownership contract by
        // disposing a parent we were handed before discarding it.
        if (parent != null && ownsParent == Ownership.Dispose)
        {
            parent.Dispose();
        }

        // Content is the file minus the 512-byte footer at the end.
        return new SubStream(_fileStream, 0, _fileStream.Length - 512);
    }
    else if (_footer.DiskType == FileType.Dynamic)
    {
        // Dynamic (non-differencing) disks also ignore any supplied parent;
        // unallocated blocks read as zeros via the ZeroStream backing.
        if (parent != null && ownsParent == Ownership.Dispose)
        {
            parent.Dispose();
        }

        return new DynamicStream(_fileStream, _dynamicHeader, _footer.CurrentSize, new ZeroStream(_footer.CurrentSize), Ownership.Dispose);
    }
    else
    {
        // Differencing disk: fall back to an all-zero parent if none given.
        if (parent == null)
        {
            parent = new ZeroStream(_footer.CurrentSize);
            ownsParent = Ownership.Dispose;
        }

        return new DynamicStream(_fileStream, _dynamicHeader, _footer.CurrentSize, parent, ownsParent);
    }
}
/// <summary>
/// A negative offset passed to ReadExact should throw (the expected exception
/// is presumably declared via a test attribute outside this view).
/// </summary>
public void ReadExact_ShouldThrowIfOffsetIsNegative()
{
    var zeros = new ZeroStream();

    zeros.ReadExact(Buffer, -1, 1);
}
/// <summary>
/// A negative count passed to ReadExact should throw (the expected exception
/// is presumably declared via a test attribute outside this view).
/// </summary>
public void ReadExact_ShouldThrowIfCountIsNegative()
{
    var zeros = new ZeroStream();

    zeros.ReadExact(Buffer, 0, -1);
}
/// <summary>
/// An offset/count pair exceeding the buffer's bounds should make ReadExact
/// throw (the expected exception is presumably declared via a test attribute
/// outside this view).
/// </summary>
public void ReadExact_ShouldThrowIfBufferIsTooSmall()
{
    var zeros = new ZeroStream();

    zeros.ReadExact(Buffer, 1, Buffer.Length);
}
// Opens this extent's content as a mapped stream.
// parent: parent disk content for delta extents; may be null.
// ownsParent: whether this call takes ownership of parent.
public override MappedStream OpenContent(SparseStream parent, Ownership ownsParent)
{
    // Open read-only unless both the descriptor and the disk allow writes.
    FileAccess access = FileAccess.Read;
    FileShare share = FileShare.Read;
    if (_descriptor.Access == ExtentAccess.ReadWrite && _access != FileAccess.Read)
    {
        access = FileAccess.ReadWrite;
        share = FileShare.None;
    }

    if (_descriptor.Type != ExtentType.Sparse && _descriptor.Type != ExtentType.VmfsSparse && _descriptor.Type != ExtentType.Zero)
    {
        // Non-sparse extents never consult a parent; dispose one we own.
        if (ownsParent == Ownership.Dispose && parent != null)
        {
            parent.Dispose();
        }
    }
    else if (parent == null)
    {
        // Sparse extent with no parent: unallocated regions read as zeros.
        // NOTE(review): ownsParent is NOT switched to Dispose here, unlike
        // sibling implementations that do so — presumably disposing a
        // ZeroStream is a no-op, but confirm intent.
        parent = new ZeroStream(_descriptor.SizeInSectors * Sizes.Sector);
    }

    if (_monolithicStream != null)
    {
        // Early-out for monolithic VMDKs
        return(new HostedSparseExtentStream(
            _monolithicStream,
            Ownership.None,
            _diskOffset,
            parent,
            ownsParent));
    }

    switch (_descriptor.Type)
    {
        case ExtentType.Flat:
        case ExtentType.Vmfs:
            return(MappedStream.FromStream(
                _fileLocator.Open(_descriptor.FileName, FileMode.Open, access, share),
                Ownership.Dispose));

        case ExtentType.Zero:
            return(new ZeroStream(_descriptor.SizeInSectors * Sizes.Sector));

        case ExtentType.Sparse:
            return(new HostedSparseExtentStream(
                _fileLocator.Open(_descriptor.FileName, FileMode.Open, access, share),
                Ownership.Dispose,
                _diskOffset,
                parent,
                ownsParent));

        case ExtentType.VmfsSparse:
            return(new ServerSparseExtentStream(
                _fileLocator.Open(_descriptor.FileName, FileMode.Open, access, share),
                Ownership.Dispose,
                _diskOffset,
                parent,
                ownsParent));

        default:
            throw new NotSupportedException();
    }
}
// Opens the disk content, substituting an all-zero parent (which the
// returned stream then owns) when no parent stream was supplied.
internal MappedStream DoOpenContent(SparseStream parent, Ownership ownsParent)
{
    bool noParent = parent == null;
    SparseStream backing = noParent ? new ZeroStream(Capacity) : parent;
    Ownership backingOwnership = noParent ? Ownership.Dispose : ownsParent;

    return new ContentStream(SparseStream.FromStream(_fileStream, Ownership.None), _bat, Capacity, backing, backingOwnership);
}
/// <summary>
/// Creates a new stream that contains the XVA image.
/// </summary>
/// <returns>The new stream</returns>
public override SparseStream Build()
{
    TarFileBuilder tarBuilder = new TarFileBuilder();

    int[] diskIds;

    // The ova.xml manifest describes the disks and assigns the ids used to
    // name the per-chunk tar entries below.
    string ovaFileContent = GenerateOvaXml(out diskIds);
    tarBuilder.AddFile("ova.xml", Encoding.ASCII.GetBytes(ovaFileContent));

    int diskIdx = 0;
    foreach (var diskRec in _disks)
    {
        SparseStream diskStream = diskRec.Second;
        List <StreamExtent> extents = new List <StreamExtent>(diskStream.Extents);

        int lastChunkAdded = -1;
        foreach (StreamExtent extent in extents)
        {
            // Disks are stored as 1 MiB chunks; only chunks overlapping an
            // allocated extent are emitted.
            int firstChunk = (int)(extent.Start / Sizes.OneMiB);
            int lastChunk = (int)((extent.Start + extent.Length - 1) / Sizes.OneMiB);

            for (int i = firstChunk; i <= lastChunk; ++i)
            {
                if (i != lastChunkAdded)
                {
                    HashAlgorithm hashAlg = new SHA1Managed();

                    Stream chunkStream;

                    // Pad the final partial chunk with zeros up to 1 MiB.
                    long diskBytesLeft = diskStream.Length - (i * Sizes.OneMiB);
                    if (diskBytesLeft < Sizes.OneMiB)
                    {
                        chunkStream = new ConcatStream(
                            Ownership.Dispose,
                            new SubStream(diskStream, i * Sizes.OneMiB, diskBytesLeft),
                            new ZeroStream(Sizes.OneMiB - diskBytesLeft));
                    }
                    else
                    {
                        chunkStream = new SubStream(diskStream, i * Sizes.OneMiB, Sizes.OneMiB);
                    }

                    // The HashStream accumulates SHA1 as the chunk is read;
                    // the ChecksumStream entry later serves the digest.
                    HashStream chunkHashStream = new HashStream(chunkStream, Ownership.Dispose, hashAlg);
                    tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}", diskIds[diskIdx], i), chunkHashStream);
                    tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}.checksum", diskIds[diskIdx], i), new ChecksumStream(hashAlg));

                    lastChunkAdded = i;
                }
            }
        }

        // Make sure the last chunk is present, filled with zero's if necessary
        int lastActualChunk = (int)((diskStream.Length - 1) / Sizes.OneMiB);
        if (lastChunkAdded < lastActualChunk)
        {
            HashAlgorithm hashAlg = new SHA1Managed();

            Stream chunkStream = new ZeroStream(Sizes.OneMiB);
            HashStream chunkHashStream = new HashStream(chunkStream, Ownership.Dispose, hashAlg);
            tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}", diskIds[diskIdx], lastActualChunk), chunkHashStream);
            tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}.checksum", diskIds[diskIdx], lastActualChunk), new ChecksumStream(hashAlg));
        }

        ++diskIdx;
    }

    return(tarBuilder.Build());
}
// Opens this extent item's data as a stream: first resolves the storage type
// (inline / regular / prealloc), then unwraps any compression layer.
public Stream GetStream(Context context)
{
    if (Encryption)
    {
        throw new IOException("Extent encryption is not supported");
    }

    Stream stream;
    switch (Type)
    {
        case ExtentDataType.Inline:
            // Data is embedded directly in the item.
            byte[] data = InlineData;
            stream = new MemoryStream(data);
            break;

        case ExtentDataType.Regular:
            var address = ExtentAddress;
            if (address == 0)
            {
                // Address 0 marks a hole: reads yield zeros.
                stream = new ZeroStream((long)LogicalSize);
            }
            else
            {
                // Translate the logical address to a physical offset in the
                // raw device and expose just this extent's byte range.
                var physicalAddress = context.MapToPhysical(address);
                stream = new SubStream(context.RawStream, Ownership.None, (long)(physicalAddress + ExtentOffset), (long)ExtentSize);
            }

            break;

        case ExtentDataType.PreAlloc:
            throw new NotImplementedException();

        default:
            throw new IOException("invalid extent type");
    }

    switch (Compression)
    {
        case ExtentDataCompression.None:
            break;

        case ExtentDataCompression.Zlib:
        {
            // Decompress, then clamp to the logical length and reset the
            // position so the result looks like plain uncompressed data.
            var zlib = new ZlibStream(stream, CompressionMode.Decompress, false);
            var sparse = SparseStream.FromStream(zlib, Ownership.Dispose);
            var length = new LengthWrappingStream(sparse, (long)LogicalSize, Ownership.Dispose);
            stream = new PositionWrappingStream(length, 0, Ownership.Dispose);
            break;
        }

        case ExtentDataCompression.Lzo:
        {
            // LZO layout: a 32-bit little-endian total length, then a
            // sequence of (32-bit part length, compressed payload) pairs.
            // Each part expands to at most 4 KiB of the logical data.
            var buffer = StreamUtilities.ReadExact(stream, sizeof(uint));
            var totalLength = EndianUtilities.ToUInt32LittleEndian(buffer, 0);
            long processed = sizeof(uint);
            var parts = new List <SparseStream>();
            var remaining = (long)LogicalSize;
            while (processed < totalLength)
            {
                stream.Position = processed;
                StreamUtilities.ReadExact(stream, buffer, 0, sizeof(uint));
                var partLength = EndianUtilities.ToUInt32LittleEndian(buffer, 0);
                processed += sizeof(uint);
                var part = new SubStream(stream, Ownership.Dispose, processed, partLength);
                var uncompressed = new SeekableLzoStream(part, CompressionMode.Decompress, false);
                // Cap each decompressed part at 4 KiB or whatever logical
                // size remains, and track what is left.
                uncompressed.SetLength(Math.Min(Sizes.OneKiB * 4, remaining));
                remaining -= uncompressed.Length;
                parts.Add(SparseStream.FromStream(uncompressed, Ownership.Dispose));
                processed += partLength;
            }

            // Stitch the decompressed parts back into one contiguous stream.
            stream = new ConcatStream(Ownership.Dispose, parts.ToArray());
            break;
        }

        default:
            throw new IOException($"Unsupported extent compression ({Compression})");
    }

    return(stream);
}
/// <summary>
/// Creates a new stream that contains the XVA image.
/// </summary>
/// <returns>The new stream.</returns>
public override SparseStream Build()
{
    TarFileBuilder tarBuilder = new TarFileBuilder();

    int[] diskIds;

    // The ova.xml manifest describes the disks and assigns the ids used to
    // name the per-chunk tar entries below.
    string ovaFileContent = GenerateOvaXml(out diskIds);
    tarBuilder.AddFile("ova.xml", Encoding.ASCII.GetBytes(ovaFileContent));

    int diskIdx = 0;
    foreach (var diskRec in _disks)
    {
        SparseStream diskStream = diskRec.Second;
        List<StreamExtent> extents = new List<StreamExtent>(diskStream.Extents);

        int lastChunkAdded = -1;
        foreach (StreamExtent extent in extents)
        {
            // Disks are stored as 1 MiB chunks; only chunks overlapping an
            // allocated extent are emitted.
            int firstChunk = (int)(extent.Start / Sizes.OneMiB);
            int lastChunk = (int)((extent.Start + extent.Length - 1) / Sizes.OneMiB);

            for (int i = firstChunk; i <= lastChunk; ++i)
            {
                if (i != lastChunkAdded)
                {
                    HashAlgorithm hashAlg = new SHA1Managed();

                    Stream chunkStream;

                    // Pad the final partial chunk with zeros up to 1 MiB.
                    long diskBytesLeft = diskStream.Length - (i * Sizes.OneMiB);
                    if (diskBytesLeft < Sizes.OneMiB)
                    {
                        chunkStream = new ConcatStream(
                            Ownership.Dispose,
                            new SubStream(diskStream, i * Sizes.OneMiB, diskBytesLeft),
                            new ZeroStream(Sizes.OneMiB - diskBytesLeft));
                    }
                    else
                    {
                        chunkStream = new SubStream(diskStream, i * Sizes.OneMiB, Sizes.OneMiB);
                    }

                    // The HashStream accumulates SHA1 as the chunk is read;
                    // the ChecksumStream entry later serves the digest.
                    HashStream chunkHashStream = new HashStream(chunkStream, Ownership.Dispose, hashAlg);
                    tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}", diskIds[diskIdx], i), chunkHashStream);
                    tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}.checksum", diskIds[diskIdx], i), new ChecksumStream(hashAlg));

                    lastChunkAdded = i;
                }
            }
        }

        // Make sure the last chunk is present, filled with zero's if necessary
        int lastActualChunk = (int)((diskStream.Length - 1) / Sizes.OneMiB);
        if (lastChunkAdded < lastActualChunk)
        {
            HashAlgorithm hashAlg = new SHA1Managed();

            Stream chunkStream = new ZeroStream(Sizes.OneMiB);
            HashStream chunkHashStream = new HashStream(chunkStream, Ownership.Dispose, hashAlg);
            tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}", diskIds[diskIdx], lastActualChunk), chunkHashStream);
            tarBuilder.AddFile(string.Format(CultureInfo.InvariantCulture, "Ref:{0}/{1:D8}.checksum", diskIds[diskIdx], lastActualChunk), new ChecksumStream(hashAlg));
        }

        ++diskIdx;
    }

    return tarBuilder.Build();
}
// Opens the content stream, substituting an all-zero parent (owned by the
// returned stream) when none is supplied; access is aligned to the
// metadata's logical sector size.
internal MappedStream DoOpenContent(SparseStream parent, Ownership ownsParent)
{
    bool noParent = parent == null;
    SparseStream backing = noParent ? new ZeroStream(Capacity) : parent;
    Ownership backingOwnership = noParent ? Ownership.Dispose : ownsParent;

    ContentStream content = new ContentStream(SparseStream.FromStream(_fileStream, Ownership.None),
        _batStream, _freeSpace, _metadata, Capacity, backing, backingOwnership);

    return new AligningStream(content, Ownership.Dispose, (int)_metadata.LogicalSectorSize);
}