/// <summary>
/// Uploads the file described by the given metadata, choosing the upload
/// strategy based on the number of segments (empty file, single segment,
/// or multi-segment with concatenation).
/// </summary>
/// <param name="metadata">Metadata describing the file, its segments and the target stream.</param>
private void UploadFile(UploadMetadata metadata)
{
    try
    {
        // Raise the per-host connection cap to at least the configured thread count;
        // without this, .NET throttles requests below the parallelism we want.
        _previousDefaultConnectionLimit = ServicePointManager.DefaultConnectionLimit;
        ServicePointManager.DefaultConnectionLimit = Math.Max(
            this.Parameters.ThreadCount,
            ServicePointManager.DefaultConnectionLimit);

        // Reconcile the local metadata with the state on the server.
        if (this.Parameters.IsResume)
        {
            ValidateMetadataForResume(metadata);
        }
        else
        {
            ValidateMetadataForFreshUpload(metadata);
        }

        var progressTracker = CreateSegmentProgressTracker(metadata);

        if (metadata.SegmentCount == 0)
        {
            // Empty file: just create the target stream, overwriting any existing one.
            _frontEnd.CreateStream(metadata.TargetStreamPath, true, null, 0);
        }
        else if (metadata.SegmentCount > 1)
        {
            // General case: upload the segments in parallel, then stitch them together.
            var multiUploader = new MultipleSegmentUploader(
                metadata, this.Parameters.ThreadCount, _frontEnd, _token, progressTracker);
            multiUploader.UseSegmentBlockBackOffRetryStrategy =
                this.Parameters.UseSegmentBlockBackOffRetryStrategy;
            multiUploader.Upload();
            ConcatenateSegments(metadata);
        }
        else
        {
            // Single-segment optimization: skip concatenation by uploading straight
            // into the target stream.
            metadata.Segments[0].Path = metadata.TargetStreamPath;
            var singleUploader = new SingleSegmentUploader(0, metadata, _frontEnd, _token, progressTracker);
            singleUploader.UseBackOffRetryStrategy = this.Parameters.UseSegmentBlockBackOffRetryStrategy;
            singleUploader.Upload();
        }
    }
    catch (OperationCanceledException)
    {
        // Cancellation is expected here; everything was already marked as failed upstream.
    }
    finally
    {
        // Restore the process-wide connection limit to its prior value.
        ServicePointManager.DefaultConnectionLimit = _previousDefaultConnectionLimit;
    }
}
/// <summary>
/// Verifies that a file consisting of a single segment uploads without error
/// and produces a complete target stream.
/// </summary>
public void MultipleSegmentUploader_OneSegment()
{
    var frontEnd = new InMemoryFrontEnd();
    var metadata = CreateMetadata(1);
    try
    {
        var uploader = new MultipleSegmentUploader(metadata, 1, frontEnd);
        uploader.UseSegmentBlockBackOffRetryStrategy = false;

        Assert.DoesNotThrow(() => uploader.Upload());
        VerifyTargetStreamsAreComplete(metadata, frontEnd);
    }
    finally
    {
        // Always remove the temporary local file backing the metadata.
        metadata.DeleteFile();
    }
}
/// <summary>
/// Uploads the file using the given metadata. Temporarily widens the
/// ServicePoint connection limit, validates the metadata (fresh vs. resume),
/// then dispatches on the segment count.
/// </summary>
/// <param name="metadata">Metadata for the upload: segments, target path, and state.</param>
private void UploadFile(UploadMetadata metadata)
{
    try
    {
        // The default .NET per-host connection limit can be lower than our thread
        // count; bump it so the requested parallelism is actually achievable.
        _previousDefaultConnectionLimit = ServicePointManager.DefaultConnectionLimit;
        ServicePointManager.DefaultConnectionLimit =
            Math.Max(this.Parameters.ThreadCount, ServicePointManager.DefaultConnectionLimit);

        // Match up the local metadata with the information on the server.
        if (this.Parameters.IsResume)
        {
            ValidateMetadataForResume(metadata);
        }
        else
        {
            ValidateMetadataForFreshUpload(metadata);
        }

        var tracker = CreateSegmentProgressTracker(metadata);

        if (metadata.SegmentCount == 0)
        {
            // Nothing to transfer: create the (overwritable) target stream and stop.
            _frontEnd.CreateStream(metadata.TargetStreamPath, true, null, 0);
        }
        else if (metadata.SegmentCount > 1)
        {
            // Multi-segment path: parallel segment upload followed by concatenation.
            var segmentedUploader = new MultipleSegmentUploader(
                metadata, this.Parameters.ThreadCount, _frontEnd, _token, tracker);
            segmentedUploader.UseSegmentBlockBackOffRetryStrategy =
                this.Parameters.UseSegmentBlockBackOffRetryStrategy;
            segmentedUploader.Upload();
            ConcatenateSegments(metadata);
        }
        else
        {
            // Exactly one segment: write it directly to the target stream so no
            // concatenation step is needed.
            metadata.Segments[0].Path = metadata.TargetStreamPath;
            var directUploader = new SingleSegmentUploader(0, metadata, _frontEnd, _token, tracker);
            directUploader.UseBackOffRetryStrategy = this.Parameters.UseSegmentBlockBackOffRetryStrategy;
            directUploader.Upload();
        }
    }
    catch (OperationCanceledException)
    {
        // Deliberately swallowed: cancellation already marked everything as failed.
    }
    finally
    {
        // Revert the global connection limit to whatever it was before we started.
        ServicePointManager.DefaultConnectionLimit = _previousDefaultConnectionLimit;
    }
}
/// <summary>
/// Exercises the uploader's retry behavior by simulating a given number of
/// segment-level failures and verifying the overall outcome (success when the
/// failures fit within the retry budget, an AggregateException otherwise).
/// </summary>
/// <param name="segmentFailCount">How many whole-segment upload attempts should fail.</param>
private void TestRetry(int segmentFailCount)
{
    // We only control the underlying FrontEnd, so to make one segment-level attempt fail
    // we must throw on every buffer attempt within it (hence the multiplication by
    // SingleSegmentUploader.MaxBufferUploadAttemptCount). This works because the test
    // file is small enough to fit in a single buffer; larger files would need a more
    // elaborate setup.
    int simulatedFailures = segmentFailCount * SingleSegmentUploader.MaxBufferUploadAttemptCount;
    bool expectSuccess = segmentFailCount < MultipleSegmentUploader.MaxUploadAttemptCount;

    int invocationCount = 0;

    // Wrap a working in-memory front end with a mock that throws for the first
    // 'simulatedFailures' calls, then delegates to the real implementation.
    var workingFrontEnd = new InMemoryFrontEnd();
    var fe = new MockableFrontEnd(workingFrontEnd);
    fe.CreateStreamImplementation = (streamPath, overwrite, data, byteCount) =>
    {
        invocationCount++;
        if (invocationCount <= simulatedFailures)
        {
            throw new IntentionalException();
        }
        workingFrontEnd.CreateStream(streamPath, overwrite, data, byteCount);
    };
    fe.AppendToStreamImplementation = (streamPath, data, offset, byteCount) =>
    {
        invocationCount++;
        if (invocationCount <= simulatedFailures)
        {
            throw new IntentionalException();
        }
        workingFrontEnd.AppendToStream(streamPath, data, offset, byteCount);
    };

    var metadata = CreateMetadata(1);
    try
    {
        var uploader = new MultipleSegmentUploader(metadata, 1, fe);
        uploader.UseSegmentBlockBackOffRetryStrategy = false;

        if (expectSuccess)
        {
            // The failures fit within the retry budget, so Upload must not throw,
            // and both the metadata and the target streams must end up complete.
            Assert.DoesNotThrow(() => uploader.Upload());
            VerifyTargetStreamsAreComplete(metadata, workingFrontEnd);
        }
        else
        {
            // Too many failures: Upload must surface them as an AggregateException.
            Assert.Throws<AggregateException>(() => uploader.Upload());

            // At least one segment must have been marked Failed...
            Assert.True(
                metadata.Segments.Any(s => s.Status == SegmentUploadStatus.Failed),
                "Could not find any failed segments");

            // ...and every remaining segment must have completed successfully.
            foreach (var segment in metadata.Segments.Where(s => s.Status != SegmentUploadStatus.Failed))
            {
                VerifyTargetStreamIsComplete(segment, metadata, workingFrontEnd);
            }
        }
    }
    finally
    {
        // Always remove the temporary local file backing the metadata.
        metadata.DeleteFile();
    }
}
/// <summary>
/// Verifies that resuming an upload after roughly half the segments were lost
/// re-uploads only the missing segments and completes the target streams.
/// </summary>
public void MultipleSegmentUploader_ResumedUploadWithMultipleSegments()
{
    // Strategy: upload everything, wipe out a subset of the segments, then check
    // that a second (resumed) upload picks up the slack.
    var frontEnd = new InMemoryFrontEnd();
    var metadata = CreateMetadata(10);
    try
    {
        var uploader = new MultipleSegmentUploader(metadata, 1, frontEnd);
        uploader.UseSegmentBlockBackOffRetryStrategy = false;
        Assert.DoesNotThrow(() => uploader.Upload());
        VerifyTargetStreamsAreComplete(metadata, frontEnd);

        // Reset every even-indexed segment (about 50% of them) back to Pending
        // and delete its stream on the server.
        for (int index = 0; index < metadata.SegmentCount; index++)
        {
            if (index % 2 != 0)
            {
                continue;
            }

            var segment = metadata.Segments[index];
            segment.Status = SegmentUploadStatus.Pending;
            frontEnd.DeleteStream(segment.Path);
        }

        // A fresh uploader over the same metadata should re-upload only the gaps.
        uploader = new MultipleSegmentUploader(metadata, 1, frontEnd);
        Assert.DoesNotThrow(() => uploader.Upload());
        VerifyTargetStreamsAreComplete(metadata, frontEnd);
    }
    finally
    {
        // Always remove the temporary local file backing the metadata.
        metadata.DeleteFile();
    }
}
/// <summary>
/// Verifies that the uploader copes with a thread count far larger than the
/// number of segments.
/// </summary>
public void MultipleSegmentUploader_MultipleSegmentsAndMultipleThreads()
{
    var frontEnd = new InMemoryFrontEnd();
    var metadata = CreateMetadata(10);

    // Intentionally request many more threads than there are segments.
    int threadCount = metadata.SegmentCount * 10;
    try
    {
        var uploader = new MultipleSegmentUploader(metadata, threadCount, frontEnd);
        uploader.UseSegmentBlockBackOffRetryStrategy = false;

        Assert.DoesNotThrow(() => uploader.Upload());
        VerifyTargetStreamsAreComplete(metadata, frontEnd);
    }
    finally
    {
        // Always remove the temporary local file backing the metadata.
        metadata.DeleteFile();
    }
}