/// <summary>
/// Verifies percent, status message, and sub-message through a tree of nested
/// tasks and sub-tasks with varying weights. Uses the shared Verify helper for
/// consistency with the other tests in this class.
/// </summary>
public void Test_MultipleWithSubTasks()
{
    var ct = new CancellationTokenSource().Token;
    var p = new NefsProgress(ct);

    p.BeginTask(1.0f, "A");
    this.Verify(p, 0.0f, "A", "");
    {
        p.BeginTask(0.2f);
        this.Verify(p, 0.0f, "A", "");
        {
            p.BeginSubTask(0.5f, "sub1");
            this.Verify(p, 0.0f, "A", "sub1");
            p.EndTask();
            this.Verify(p, 0.1f, "A", "");

            p.BeginSubTask(0.5f, "sub2");
            this.Verify(p, 0.1f, "A", "sub2");
            p.EndTask();
            this.Verify(p, 0.2f, "A", "");
        }
        p.EndTask();
        this.Verify(p, 0.2f, "A", "");

        p.BeginTask(0.8f, "B");
        this.Verify(p, 0.2f, "B", "");
        p.EndTask();

        // After task B ends, the status message reverts to the parent's message.
        this.Verify(p, 1.0f, "A", "");
    }
    p.EndTask();
    this.Verify(p, 1.0f, "", "");
}
/// <inheritdoc/>
public async Task DetransformAsync(
    Stream input,
    Int64 inputOffset,
    Stream output,
    Int64 outputOffset,
    uint extractedSize,
    IReadOnlyList<NefsDataChunk> chunks,
    NefsProgress p)
{
    var numChunks = chunks.Count;
    var bytesRemaining = extractedSize;

    input.Seek(inputOffset, SeekOrigin.Begin);
    output.Seek(outputOffset, SeekOrigin.Begin);

    // No placeholders in the message, so plain string instead of interpolation.
    using (var t = p.BeginTask(1.0f, "Detransforming stream"))
    {
        for (int i = 0; i < numChunks; i++)
        {
            using (var st = p.BeginSubTask(1.0f / numChunks, $"Detransforming chunk {i + 1}/{numChunks}..."))
            {
                // The last chunk may be smaller than the transform's chunk size,
                // so cap the output size by the bytes still expected.
                var maxChunkSize = Math.Min(bytesRemaining, chunks[i].Transform.ChunkSize);

                // Revert the transform
                var chunkSize = await this.DetransformChunkAsync(input, output, chunks[i], maxChunkSize, p);
                bytesRemaining -= chunkSize;
            }
        }
    }
}
/// <summary>
/// Starting a sub-task when no task has been started is invalid.
/// </summary>
public void BeginSubTask_SubTaskIsFirstTask_InvalidOperationExceptionThrown()
{
    var token = new CancellationTokenSource().Token;
    var progress = new NefsProgress(token);

    // A sub-task requires an enclosing task, so this must throw.
    Assert.Throws<InvalidOperationException>(() => progress.BeginSubTask(1.0f, "msg"));
}
/// <summary>
/// Exercises nested tasks and sub-tasks whose weights do not sum to 1 and
/// verifies the reported percent at each step.
/// </summary>
public void Test_MoreTests()
{
    var token = new CancellationTokenSource().Token;
    var progress = new NefsProgress(token);

    progress.BeginTask(1.0f);
    {
        progress.BeginTask(0.1f);
        this.Verify(progress, 0.0f, "", "");
        {
            progress.BeginSubTask(0.4f, "sub");
            this.Verify(progress, 0.0f, "", "sub");
            progress.EndTask();
            this.Verify(progress, 0.04f, "", "");

            progress.BeginSubTask(0.6f, "sub");
            this.Verify(progress, 0.04f, "", "sub");
            progress.EndTask();
            this.Verify(progress, 0.1f, "", "");
        }
        progress.EndTask();
        this.Verify(progress, 0.1f, "", "");

        progress.BeginTask(0.8f);
        this.Verify(progress, 0.1f, "", "");
        progress.EndTask();
        this.Verify(progress, 0.9f, "", "");

        progress.BeginTask(0.05f);
        this.Verify(progress, 0.9f, "", "");
        progress.EndTask();

        // 0.1 + 0.8 + 0.05 == 0.95 (does not add up to 1)
        this.Verify(progress, 0.95f, "", "");
    }
    progress.EndTask();

    // Ending the root task completes progress even though children under-summed.
    this.Verify(progress, 1.0f, "", "");
}
/// <inheritdoc/>
public async Task<NefsItemSize> TransformAsync(
    Stream input,
    Int64 inputOffset,
    UInt32 inputLength,
    Stream output,
    Int64 outputOffset,
    NefsDataTransform transform,
    NefsProgress p)
{
    var chunks = new List<NefsDataChunk>();
    var rawChunkSize = transform.ChunkSize;

    input.Seek(inputOffset, SeekOrigin.Begin);
    output.Seek(outputOffset, SeekOrigin.Begin);

    // Split file into chunks and transform them.
    // No placeholders in the message, so plain string instead of interpolation.
    using (var t = p.BeginTask(1.0f, "Transforming stream"))
    {
        var cumulativeChunkSize = 0U;
        var bytesRemaining = (int)inputLength;

        // Determine how many chunks to split the file into
        var numChunks = (int)Math.Ceiling(inputLength / (double)rawChunkSize);

        for (var i = 0; i < numChunks; ++i)
        {
            using (var st = p.BeginSubTask(1.0f / numChunks, $"Transforming chunk {i + 1}/{numChunks}"))
            {
                // The last chunk may not be exactly equal to the raw chunk size
                var nextChunkSize = (int)Math.Min(rawChunkSize, bytesRemaining);
                bytesRemaining -= nextChunkSize;

                // Transform chunk and write to output stream
                var chunkSize = await this.TransformChunkAsync(input, (uint)nextChunkSize, output, transform, p);
                cumulativeChunkSize += chunkSize;

                // Record chunk info — sizes are stored cumulatively
                var chunk = new NefsDataChunk(chunkSize, cumulativeChunkSize, transform);
                chunks.Add(chunk);
            }
        }
    }

    // Return item size
    return new NefsItemSize(inputLength, chunks);
}
/// <inheritdoc/>
public async Task<NefsItemSize> CompressAsync(
    Stream input,
    Int64 inputOffset,
    UInt32 inputLength,
    Stream output,
    Int64 outputOffset,
    UInt32 chunkSize,
    NefsProgress p)
{
    var chunkSizes = new List<UInt32>();

    input.Seek(inputOffset, SeekOrigin.Begin);
    output.Seek(outputOffset, SeekOrigin.Begin);

    // Split file into chunks and compress them.
    // No placeholders in the message, so plain string instead of interpolation.
    using (var t = p.BeginTask(1.0f, "Compressing stream"))
    {
        var lastChunkSize = 0;
        var totalChunkSize = 0;
        var lastBytesRead = 0;
        var bytesRemaining = (int)inputLength;

        // Determine how many chunks to split the file into
        var numChunks = (int)Math.Ceiling(inputLength / (double)chunkSize);

        for (var i = 0; i < numChunks; ++i)
        {
            using (var st = p.BeginSubTask(1.0f / numChunks, $"Compressing chunk {i + 1}/{numChunks}"))
            {
                // The last chunk may be smaller than the requested chunk size
                var nextBytes = Math.Min(chunkSize, bytesRemaining);

                // Compress this chunk and write it to the output file
                (lastBytesRead, lastChunkSize) = await DeflateHelper.DeflateAsync(input, (int)nextBytes, output, p.CancellationToken);
                totalChunkSize += lastChunkSize;
                bytesRemaining -= lastBytesRead;

                // Record the total compressed size after this chunk (sizes are cumulative)
                chunkSizes.Add((UInt32)totalChunkSize);
            }
        }
    }

    // Return item size
    return new NefsItemSize(inputLength, chunkSizes);
}
/// <inheritdoc/>
public async Task DecompressAsync(
    Stream input,
    Int64 inputOffset,
    IReadOnlyList<UInt32> chunkSizes,
    Stream output,
    Int64 outputOffset,
    NefsProgress p,
    byte[] aes256key = null)
{
    var numChunks = chunkSizes.Count;

    input.Seek(inputOffset, SeekOrigin.Begin);

    // Fix: outputOffset was previously accepted but ignored; seek the output
    // stream like the other transform methods do.
    output.Seek(outputOffset, SeekOrigin.Begin);

    // For each compressed chunk, decompress it and write it to the output file
    for (int i = 0; i < numChunks; i++)
    {
        using (var st = p.BeginSubTask(1.0f / numChunks, $"Extracting chunk {i + 1}/{numChunks}..."))
        {
            // Values in the chunkSizes list are cumulative, so to get the actual
            // chunk size we subtract the previous entry.
            var chunkSize = chunkSizes[i];
            if (i > 0)
            {
                chunkSize -= chunkSizes[i - 1];
            }

            // Read in the compressed chunk. Stream.ReadAsync may return fewer
            // bytes than requested, so loop until the whole chunk is buffered.
            var chunk = new byte[chunkSize];
            var bytesRead = 0;
            while (bytesRead < (int)chunkSize)
            {
                var read = await input.ReadAsync(chunk, bytesRead, (int)chunkSize - bytesRead, p.CancellationToken);
                if (read == 0)
                {
                    throw new EndOfStreamException($"Unexpected end of stream while reading chunk {i + 1}/{numChunks}.");
                }

                bytesRead += read;
            }

            // Decompress the chunk (and decrypt if needed)
            if (aes256key == null)
            {
                // Not encrypted
                await this.DecompressChunkAsync(chunk, output, p);
            }
            else
            {
                // Encrypted
                await this.DecompressChunkAsync(chunk, aes256key, output, p);
            }
        }
    }
}
/// <summary>
/// Writes items' data to the output stream.
/// </summary>
/// <param name="stream">The stream to write to.</param>
/// <param name="items">List of items to write.</param>
/// <param name="firstDataOffset">
/// The offset from the beginning of the stream to write the first data.
/// </param>
/// <param name="p">Progress info.</param>
/// <returns>The offset to the end of the last data written.</returns>
private async Task<UInt64> WriteItemsAsync(
    Stream stream,
    NefsItemList items,
    UInt64 firstDataOffset,
    NefsProgress p)
{
    var nextDataOffset = firstDataOffset;

    // Prepare stream
    stream.Seek((long)firstDataOffset, SeekOrigin.Begin);

    // Update item info and write out item data
    var i = 1;
    foreach (var item in items.EnumerateById())
    {
        using (var t = p.BeginSubTask(1.0f / items.Count, $"Writing data for item {i}/{items.Count}"))
        {
            // Fix: advance the display counter here — the old post-block i++
            // was skipped by the directory 'continue', stalling the counter.
            i++;

            // Nothing to write if item is directory
            if (item.Type == NefsItemType.Directory)
            {
                continue;
            }

            // Get item
            var itemOffset = nextDataOffset;
            var itemSize = item.DataSource.Size;

            // Write out item data
            nextDataOffset = await this.WriteItemAsync(stream, itemOffset, item, p);

            // Update item data source to point to the newly written data
            var dataSource = new NefsItemListDataSource(items, itemOffset, itemSize);
            item.UpdateDataSource(dataSource, NefsItemState.None);
        }
    }

    // Return the next data offset, which is the end of the written data
    return nextDataOffset;
}
/// <summary>
/// The ProgressChanged event is raised when a sub-task begins, carrying the
/// current message, sub-message, and progress.
/// </summary>
public void BeginSubTask_Valid_ProgressChangedRaised()
{
    var token = new CancellationTokenSource().Token;
    var progress = new NefsProgress(token);

    progress.BeginTask(1.0f, "A");
    {
        NefsProgressEventArgs raisedArgs = null;
        progress.ProgressChanged += (sender, e) => raisedArgs = e;

        progress.BeginSubTask(1.0f, "sub");
        this.Verify(progress, 0.0f, "A", "sub");

        // The event args mirror the current status.
        Assert.Equal("A", raisedArgs.Message);
        Assert.Equal("sub", raisedArgs.SubMessage);
        Assert.Equal(0.0f, raisedArgs.Progress);

        progress.EndTask();
        this.Verify(progress, 1.0f, "A", "");
    }
    progress.EndTask();
    this.Verify(progress, 1.0f, "", "");
}