public void MultipleSegmentUploader_ResumedUploadWithMultipleSegments()
{
    // Strategy: upload the whole file first, then wipe out roughly half of the
    // uploaded segments and verify that a second (resumed) upload pass fills the gaps.
    var frontEnd = new InMemoryFrontEnd();
    var metadata = CreateMetadata(10);
    try
    {
        var uploader = new MultipleSegmentUploader(metadata, 1, frontEnd);
        uploader.UseSegmentBlockBackOffRetryStrategy = false;
        uploader.Upload();
        VerifyTargetStreamsAreComplete(metadata, frontEnd);

        // Knock out every even-indexed segment (~50%) and flag it for re-upload.
        for (int index = 0; index < metadata.SegmentCount; index++)
        {
            if (index % 2 != 0)
            {
                continue;
            }

            var segment = metadata.Segments[index];
            segment.Status = SegmentTransferStatus.Pending;
            frontEnd.DeleteStream(segment.Path);
        }

        // A fresh uploader over the same metadata should resume and complete everything.
        uploader = new MultipleSegmentUploader(metadata, 1, frontEnd);
        uploader.Upload();
        VerifyTargetStreamsAreComplete(metadata, frontEnd);
    }
    finally
    {
        metadata.DeleteFile();
    }
}
public void MultipleSegmentUploader_MultipleSegments()
{
    // A 10-segment file uploaded on a single thread should yield complete target streams.
    var frontEnd = new InMemoryFrontEnd();
    var metadata = CreateMetadata(10);
    try
    {
        var uploader = new MultipleSegmentUploader(metadata, 1, frontEnd)
        {
            UseSegmentBlockBackOffRetryStrategy = false
        };
        uploader.Upload();
        VerifyTargetStreamsAreComplete(metadata, frontEnd);
    }
    finally
    {
        metadata.DeleteFile();
    }
}
public void MultipleSegmentUploader_MultipleSegmentsAndMultipleThreads()
{
    // Upload with far more threads than segments; the uploader must handle the
    // oversubscription gracefully and still produce complete target streams.
    var frontEnd = new InMemoryFrontEnd();
    var metadata = CreateMetadata(10);
    int threadCount = metadata.SegmentCount * 10; // deliberately larger than the segment count
    try
    {
        var uploader = new MultipleSegmentUploader(metadata, threadCount, frontEnd)
        {
            UseSegmentBlockBackOffRetryStrategy = false
        };
        uploader.Upload();
        VerifyTargetStreamsAreComplete(metadata, frontEnd);
    }
    finally
    {
        metadata.DeleteFile();
    }
}
private void TestRetry(int segmentFailCount)
{
    // We can only inject failures at the FrontEnd level, so making a single segment
    // fail requires throwing once per buffer attempt (SingleSegmentUploader retries
    // each buffer MaxBufferUploadAttemptCount times). This works because the test
    // file is small enough to fit in one buffer; a larger file would need a more
    // elaborate failure-injection scheme.
    int simulatedFailures = segmentFailCount * SingleSegmentUploader.MaxBufferUploadAttemptCount;
    bool shouldSucceed = segmentFailCount < MultipleSegmentUploader.MaxUploadAttemptCount;
    int invocationCount = 0;

    // Wrap a working in-memory front end with a mock that throws for the first
    // 'simulatedFailures' calls and then passes through to the real implementation.
    var workingFrontEnd = new InMemoryFrontEnd();
    var mockFrontEnd = new MockableFrontEnd(workingFrontEnd);
    mockFrontEnd.CreateStreamImplementation = (streamPath, overwrite, data, byteCount) =>
    {
        invocationCount++;
        if (invocationCount <= simulatedFailures)
        {
            throw new IntentionalException();
        }

        workingFrontEnd.CreateStream(streamPath, overwrite, data, byteCount);
    };
    mockFrontEnd.AppendToStreamImplementation = (streamPath, data, offset, byteCount) =>
    {
        invocationCount++;
        if (invocationCount <= simulatedFailures)
        {
            throw new IntentionalException();
        }

        workingFrontEnd.AppendToStream(streamPath, data, offset, byteCount);
    };

    var metadata = CreateMetadata(1);
    try
    {
        var uploader = new MultipleSegmentUploader(metadata, 1, mockFrontEnd);
        uploader.UseSegmentBlockBackOffRetryStrategy = false;

        if (shouldSucceed)
        {
            // Upload must not throw, and both the metadata and target streams
            // must end up complete.
            uploader.Upload();
            VerifyTargetStreamsAreComplete(metadata, workingFrontEnd);
        }
        else
        {
            // Upload must surface the injected failures as an AggregateException.
            Assert.Throws<AggregateException>(() => { uploader.Upload(); });

            // At least one segment must have been marked as Failed...
            Assert.True(
                metadata.Segments.Any(s => s.Status == SegmentTransferStatus.Failed),
                "Could not find any failed segments");

            // ...and every non-failed segment must have been uploaded intact.
            foreach (var segment in metadata.Segments.Where(s => s.Status != SegmentTransferStatus.Failed))
            {
                VerifyTargetStreamIsComplete(segment, metadata, workingFrontEnd);
            }
        }
    }
    finally
    {
        metadata.DeleteFile();
    }
}