/// <summary>
/// Performs the upload: maps the local file into memory, uploads it in chunks,
/// then finalizes the upload with the server. The memory-mapped file is always
/// disposed, even on failure.
/// </summary>
/// <param name="cancellationToken">Token observed between phases; a requested
/// cancellation after chunk upload surfaces as <c>CancellationException()</c>.</param>
/// <returns>The server's response from <c>FinishUpload</c>.</returns>
protected override async Task<UploadResponse> InternalUploadAsync(CancellationToken cancellationToken)
{
    // A zero-length file has no chunks to send; go straight to finalization.
    if (FileStream.Length <= 0)
    {
        return await FinishUpload(cancellationToken).ConfigureAwait(false);
    }

    MemoryMappedFile mappedFile = null;
    try
    {
        mappedFile = CreateMemoryMappedFile();
        AdjustNumberOfThreadsByFileSize();

        var calculator = new PartSizeCalculator(Config.NumberOfThreads, Config.PartConfig);
        await UploadChunks(mappedFile, calculator, Config.NumberOfThreads, cancellationToken).ConfigureAwait(false);

        if (cancellationToken.IsCancellationRequested)
        {
            throw CancellationException();
        }

        return await FinishUpload(cancellationToken).ConfigureAwait(false);
    }
    finally
    {
        // Keep the mapping alive through FinishUpload (as the original did)
        // and release it on every exit path.
        mappedFile?.Dispose();
    }
}
/// <summary>
/// Streams parts from <paramref name="partSource"/> and dispatches each one to a
/// bounded pool of <c>concurrentWorkers</c> upload tasks, adapting the part size
/// to observed upload throughput.
/// </summary>
/// <param name="partSource">Sequential source of file parts to upload.</param>
/// <param name="chunkUploadUrl">Destination URL for part uploads.</param>
/// <param name="fileName">Name reported with each uploaded part.</param>
/// <param name="cancellationToken">Checked at the top of each dispatch iteration;
/// cancellation surfaces as an <see cref="UploadException"/> with status Cancelled.</param>
/// <returns>The worker tasks started so far; the caller awaits/observes them.</returns>
private async Task<List<Task>> Dispatch(FilePartSource partSource, string chunkUploadUrl, string fileName, CancellationToken cancellationToken = default(CancellationToken))
{
    var incrementLock = new object();
    long currentPartSize = partConfig.InitialPartSize;
    var partSizeCalc = new PartSizeCalculator(concurrentWorkers, partConfig);

    // Written by worker tasks (thread pool) and read by this dispatch loop.
    // BUGFIX: the original used plain reads/writes on this captured bool, so the
    // loop was not guaranteed to ever observe a worker's failure signal; all
    // accesses now go through Volatile.Read/Volatile.Write.
    bool giveUp = false;

    // Uploads one part, timing it so the calculator can grow/shrink the next
    // part based on observed throughput. currentPartSize is 64-bit, hence the
    // lock on write (paired with Interlocked.Read on the reader side).
    Func<FilePart, Task> attemptPartUpload = async part =>
    {
        IStopwatch timer = Stopwatch.StartNew();
        try
        {
            await UploadPart(chunkUploadUrl, part, fileName, cancellationToken).ConfigureAwait(false);
        }
        finally
        {
            timer.Stop();
        }
        lock (incrementLock)
        {
            currentPartSize = partSizeCalc.NextPartSize(currentPartSize, part.Bytes.Length, timer.Elapsed);
        }
    };

    var workerTasks = new List<Task>();
    try
    {
        var activeWorkers = new AsyncSemaphore(concurrentWorkers);
        while (!Volatile.Read(ref giveUp) && partSource.HasMore)
        {
            if (cancellationToken.IsCancellationRequested)
            {
                throw new UploadException(
                    "Upload was cancelled",
                    UploadStatusCode.Cancelled,
                    new ActiveUploadState(UploadSpecification, LastConsecutiveByteUploaded));
            }

            await activeWorkers.WaitAsync().ConfigureAwait(false);

            // A worker may have failed while we waited for a slot; stop
            // dispatching and hand back the tasks started so far.
            if (Volatile.Read(ref giveUp))
            {
                return workerTasks;
            }

            var part = await partSource.GetNextPart(Interlocked.Read(ref currentPartSize)).ConfigureAwait(false);
            var task = Task.Run(async () =>
            {
                try
                {
                    await AttemptPartUploadWithRetry(attemptPartUpload, part, partConfig.PartRetryCount).ConfigureAwait(false);
                }
                catch
                {
                    // Signal the dispatch loop to stop launching new workers;
                    // the exception itself stays on this task to be observed.
                    Volatile.Write(ref giveUp, true);
                    throw;
                }
                finally
                {
                    activeWorkers.Release();
                    part.Bytes.Dispose();
                }
            });
            workerTasks.Add(task);
        }
        return workerTasks;
    }
    catch
    {
        // The dispatch loop itself threw: make sure already-started worker
        // tasks cannot raise unobserved-exception faults.
        ObserveExceptions(workerTasks);
        throw;
    }
}
/// <summary>
/// Drives the two-stage upload pipeline: hashes chunks of the memory-mapped file
/// (up to <paramref name="workerCount"/> concurrent hash workers), queues the
/// results, and uploads them (up to <paramref name="workerCount"/> concurrent
/// upload workers), adapting the next chunk size to observed upload timing.
/// </summary>
/// <param name="memoryMappedFile">Mapping of the file being uploaded.</param>
/// <param name="partSizeCalc">Calculator that adapts chunk size to throughput.</param>
/// <param name="workerCount">Concurrency bound for each pipeline stage.</param>
/// <param name="cancellationToken">Checked each iteration; cancellation surfaces
/// as <c>CancellationException()</c>.</param>
private async Task UploadChunks(MemoryMappedFile memoryMappedFile, PartSizeCalculator partSizeCalc, int workerCount, CancellationToken cancellationToken)
{
    var hashWorkers = new List<Task<Chunk>>(workerCount);
    var chunkQueue = new Queue<Chunk>(workerCount);
    var uploadWorkers = new List<Task<ChunkUploadTime>>(workerCount);

    int chunkIndex = 0;
    long nextChunkSize = Config.PartConfig.InitialPartSize;
    long positionInFile = initialPosition;
    long bytesRemaining = UploadSpecificationRequest.FileSize - initialPosition;
    try
    {
        // Run until every byte is claimed and both stages have drained.
        while ((bytesRemaining > 0 || hashWorkers.Count > 0 || chunkQueue.Count > 0 || uploadWorkers.Count > 0)
            && !cancellationToken.IsCancellationRequested)
        {
            await TryPauseAsync(cancellationToken).ConfigureAwait(false);
            if (bytesRemaining > 0 && hashWorkers.Count < workerCount)
            {
                // Free hash slot: start hashing the next chunk of the file.
                long chunkSize = Math.Min(nextChunkSize, bytesRemaining);
                hashWorkers.Add(HashWorker(memoryMappedFile, chunkIndex, positionInFile, chunkSize, cancellationToken));
                positionInFile += chunkSize;
                bytesRemaining -= chunkSize;
                chunkIndex += 1;
            }
            else if (chunkQueue.Count > 0 && uploadWorkers.Count < workerCount)
            {
                // Free upload slot and a hashed chunk waiting: start uploading it.
                uploadWorkers.Add(UploadWorker(chunkQueue.Dequeue(), progressReporter.ChunkProgressReporter(), cancellationToken));
            }
            else if (uploadWorkers.Count > 0 || hashWorkers.Count > 0)
            {
                // Both stages saturated (or draining): wait for any worker.
                // Only await hash workers while the queue has room for results.
                IEnumerable<Task> workersToAwait = uploadWorkers;
                if (chunkQueue.Count < workerCount)
                {
                    workersToAwait = workersToAwait.Concat(hashWorkers);
                }
                Task finished = await Task.WhenAny(workersToAwait).ConfigureAwait(false);
                // Idiom fix: declaration pattern instead of 'is' check + cast.
                if (finished is Task<Chunk> finishedHashWorker)
                {
                    hashWorkers.Remove(finishedHashWorker);
                    chunkQueue.Enqueue(await finishedHashWorker.ConfigureAwait(false));
                }
                else
                {
                    var finishedUploadWorker = (Task<ChunkUploadTime>)finished;
                    uploadWorkers.Remove(finishedUploadWorker);
                    var chunkTime = await finishedUploadWorker.ConfigureAwait(false);
                    nextChunkSize = partSizeCalc.NextPartSize(nextChunkSize, chunkTime.Size, chunkTime.Elapsed);
                }
            }
            else
            {
                // The loop condition guarantees one of the branches above applies.
                // BUGFIX: was 'throw new Exception(...)' — never throw the base
                // Exception type. Still wrapped by FailureException below, so the
                // method's externally visible exception type is unchanged.
                throw new InvalidOperationException("Impossible");
            }
        }
    }
    catch (TaskCanceledException) when (cancellationToken.IsCancellationRequested)
    {
        // NOTE(review): only TaskCanceledException is translated here; a plain
        // OperationCanceledException would fall through to FailureException.
        // Confirm whether the workers can throw the broader type.
        throw CancellationException();
    }
    catch (Exception ex)
    {
        throw FailureException(ex);
    }
}