Code example #1
0
 async Task ReadAllDataAsync(long startOffset, long totalBytesToRead, Synchronizer synchronizer, List <TorrentFile> files, IPieceWriter writer, AsyncProducerConsumerQueue <byte[]> emptyBuffers, AsyncProducerConsumerQueue <(byte[], int, TorrentFile)> filledBuffers, CancellationToken token)
Code example #2
0
        /// <summary>
        /// Reads <paramref name="totalBytesToRead"/> bytes of torrent data starting at piece
        /// <paramref name="startPiece"/> and hashes it using a single-producer/single-consumer
        /// buffer pipeline. The reader task fills buffers from disk while the hasher task
        /// consumes them; the returned bytes are whatever <c>HashAllDataAsync</c> produces
        /// (presumably the concatenated piece hashes — confirm against that method).
        /// </summary>
        /// <param name="startPiece">Index of the first piece to read.</param>
        /// <param name="totalBytesToRead">Total number of bytes to read and hash.</param>
        /// <param name="synchronizer">Synchronisation helper passed through to the reader.</param>
        /// <param name="files">The files whose data makes up the pieces being hashed.</param>
        /// <param name="token">Cancels the read/hash pipeline; completes both queues on cancellation.</param>
        /// <returns>The hash bytes computed by <c>HashAllDataAsync</c>.</returns>
        async Task <byte[]> CalcPiecesHash(int startPiece, long totalBytesToRead, Synchronizer synchronizer, List <TorrentFile> files, CancellationToken token)
        {
            // One buffer will be filled and will be passed to the hashing method.
            // One buffer will be filled and will be waiting to be hashed.
            // One buffer will be empty and will be filled from the disk.
            // Aaaannd one extra buffer for good luck!
            var emptyBuffers = new AsyncProducerConsumerQueue <byte[]> (4);

            // Make this buffer one element larger so it can fit the placeholder which indicates a file has been completely read.
            var filledBuffers = new AsyncProducerConsumerQueue <(byte[], int, TorrentFile)> (emptyBuffers.Capacity + 1);

            // This is the IPieceWriter which we'll use to get our filestream. Each thread gets its own writer.
            using IPieceWriter writer = CreateReader();

            // Read from the disk in 256kB chunks, instead of 16kB, as a performance optimisation.
            // As the capacity is set to 4, this means we'll have 1 megabyte of buffers to handle.
            for (int i = 0; i < emptyBuffers.Capacity; i++)
            {
                await emptyBuffers.EnqueueAsync(new byte[256 * 1024], token);
            }
            token.ThrowIfCancellationRequested();

            // On cancellation, complete both queues so the reader/hasher tasks unblock
            // from their Enqueue/Dequeue waits instead of hanging forever.
            using CancellationTokenRegistration cancellation = token.Register(() => {
                emptyBuffers.CompleteAdding();
                filledBuffers.CompleteAdding();
            });

            // We're going to do single-threaded reading from disk, which (unfortunately) means we're (more or less) restricted
            // to single threaded hashing too as it's unlikely we'll have sufficient data in our buffers to do any better.
            //
            // NOTE: cast to long BEFORE multiplying. If PieceLength is a 32-bit int, the
            // original expression 'startPiece * PieceLength' overflows Int32 for offsets
            // at or beyond 2 GiB and only then widens to the 'long' parameter. The cast is
            // a no-op when PieceLength is already a long.
            Task readAllTask = ReadAllDataAsync((long) startPiece * PieceLength, totalBytesToRead, synchronizer, files, writer, emptyBuffers, filledBuffers, token);

            Task <byte[]> hashAllTask = HashAllDataAsync(totalBytesToRead, emptyBuffers, filledBuffers, token);

            Task firstCompleted = null;

            try {
                // We first call 'WhenAny' so that if an exception is thrown in one of the tasks, execution will continue
                // and we can kill the producer/consumer queues.
                firstCompleted = await Task.WhenAny(readAllTask, hashAllTask);

                // If the first completed task has faulted, force the exception to be thrown.
                await firstCompleted;
            } catch {
                // We got an exception from the first or second task, so bail out now!
                // The exception is deliberately swallowed here; completing the queues makes
                // the sibling task finish, and the WhenAll below re-surfaces the failure.
                emptyBuffers.CompleteAdding();
                filledBuffers.CompleteAdding();
            }

            try {
                // If there is no exception from the first completed task, just wait for the second one.
                await Task.WhenAll(readAllTask, hashAllTask);
            } catch {
                // Prefer reporting cancellation over a secondary fault caused by the
                // queues being completed early.
                token.ThrowIfCancellationRequested();
                // Re-await the first completed task so its (root-cause) exception wins over
                // any follow-on failure from the other task.
                if (firstCompleted != null)
                {
                    await firstCompleted;
                }
                throw;
            }
            return(await hashAllTask);
        }