/// <summary>
/// Uploads the file using the given metadata.
/// </summary>
/// <param name="metadata">The metadata describing the file and its segments.</param>
private void UploadFile(UploadMetadata metadata)
{
    try
    {
        // we need to override the default .NET value for max connections to a host to our number of threads,
        // if necessary (otherwise we won't achieve the parallelism we want)
        _previousDefaultConnectionLimit = ServicePointManager.DefaultConnectionLimit;
        ServicePointManager.DefaultConnectionLimit = Math.Max(
            this.Parameters.ThreadCount,
            ServicePointManager.DefaultConnectionLimit);

        // match up the metadata with the information on the server
        if (this.Parameters.IsResume)
        {
            ValidateMetadataForResume(metadata);
        }
        else
        {
            ValidateMetadataForFreshUpload(metadata);
        }

        var segmentProgressTracker = CreateSegmentProgressTracker(metadata);

        if (metadata.SegmentCount == 0)
        {
            // simply create the target stream, overwriting existing streams if they exist
            _frontEnd.CreateStream(metadata.TargetStreamPath, true, null, 0);
        }
        else if (metadata.SegmentCount > 1)
        {
            // perform the multi-segment upload
            var msu = new MultipleSegmentUploader(metadata, this.Parameters.ThreadCount, _frontEnd, _token, segmentProgressTracker);
            msu.UseSegmentBlockBackOffRetryStrategy = this.Parameters.UseSegmentBlockBackOffRetryStrategy;
            msu.Upload();

            // concatenate the segments into the final target stream
            ConcatenateSegments(metadata);
        }
        else
        {
            // optimization if we only have one segment: upload it directly to the target stream
            metadata.Segments[0].Path = metadata.TargetStreamPath;
            var ssu = new SingleSegmentUploader(0, metadata, _frontEnd, _token, segmentProgressTracker);
            ssu.UseBackOffRetryStrategy = this.Parameters.UseSegmentBlockBackOffRetryStrategy;
            ssu.Upload();
        }
    }
    catch (OperationCanceledException)
    {
        // do nothing since we have already marked everything as failed
    }
    finally
    {
        // restore the default .NET value for max connections to a host to whatever it was before
        ServicePointManager.DefaultConnectionLimit = _previousDefaultConnectionLimit;
    }
}
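// Illustrative sketch (not part of the original uploader): the ServicePointManager pattern used by
// UploadFile above, isolated into a self-contained helper. By default, .NET allows only two concurrent
// HTTP connections per host, so any parallelism above that would be throttled; the pattern raises the
// limit to the desired thread count for the duration of the work and restores the previous value in a
// 'finally' block. The helper name and the Action-based shape are assumptions made for this example.
private static void WithRaisedConnectionLimit(int threadCount, Action work)
{
    // save the current process-wide limit so it can be restored afterwards
    int previousLimit = ServicePointManager.DefaultConnectionLimit;
    try
    {
        // never lower the limit; only raise it if the requested parallelism exceeds it
        ServicePointManager.DefaultConnectionLimit = Math.Max(threadCount, previousLimit);
        work();
    }
    finally
    {
        // restore the previous value even if 'work' throws or is canceled
        ServicePointManager.DefaultConnectionLimit = previousLimit;
    }
}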
/// <summary>
/// Uploads the segment.
/// </summary>
/// <param name="segmentNumber">The segment number.</param>
/// <param name="metadata">The metadata.</param>
private void UploadSegment(int segmentNumber, UploadMetadata metadata)
{
    // mark the segment as 'InProgress' in the metadata
    UpdateSegmentMetadataStatus(metadata, segmentNumber, SegmentUploadStatus.InProgress);

    var segmentUploader = new SingleSegmentUploader(segmentNumber, metadata, _frontEnd, _token, _progressTracker);
    segmentUploader.UseBackOffRetryStrategy = this.UseSegmentBlockBackOffRetryStrategy;

    try
    {
        segmentUploader.Upload();

        // if we reach this point, the upload was successful; mark it as such
        UpdateSegmentMetadataStatus(metadata, segmentNumber, SegmentUploadStatus.Complete);
    }
    catch
    {
        // something horrible happened; mark the segment as failed and throw the original exception (the caller will handle it)
        UpdateSegmentMetadataStatus(metadata, segmentNumber, SegmentUploadStatus.Failed);
        throw;
    }
}
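// Illustrative sketch (not part of the original uploader): the status-bracketing pattern used by
// UploadSegment above, written as a generic helper. The work is bracketed by an 'InProgress' marker,
// followed by either a 'Complete' or a 'Failed' marker; rethrowing with a bare 'throw' preserves the
// original stack trace for the caller. The delegate-based shape is an assumption made for this example.
private static void RunWithStatusTracking(Action work, Action<SegmentUploadStatus> setStatus)
{
    setStatus(SegmentUploadStatus.InProgress);
    try
    {
        work();
        // only reached if 'work' completed without throwing
        setStatus(SegmentUploadStatus.Complete);
    }
    catch
    {
        // record the failure, then let the original exception propagate unchanged
        setStatus(SegmentUploadStatus.Failed);
        throw;
    }
}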
/// <summary>
/// Concatenates all the segments defined in the metadata into a single stream.
/// </summary>
/// <param name="metadata">The metadata describing the file and its segments.</param>
private void ConcatenateSegments(UploadMetadata metadata)
{
    string[] inputPaths = new string[metadata.SegmentCount];

    // verify whether the target stream exists
    if (_frontEnd.StreamExists(metadata.TargetStreamPath))
    {
        if (this.Parameters.IsOverwrite)
        {
            _frontEnd.DeleteStream(metadata.TargetStreamPath);
        }
        else
        {
            throw new InvalidOperationException("Target Stream already exists");
        }
    }

    // ensure all input streams exist and are of the expected length
    // ensure all segments in the metadata are marked as 'Complete'
    // (a thread-safe collection is required here because exceptions are collected from
    // parallel iterations; List<T>.Add is not safe for concurrent calls)
    var exceptions = new ConcurrentQueue<Exception>();
    Parallel.For(
        0,
        metadata.SegmentCount,
        new ParallelOptions() { MaxDegreeOfParallelism = this.Parameters.ThreadCount },
        (i) =>
        {
            try
            {
                if (metadata.Segments[i].Status != SegmentUploadStatus.Complete)
                {
                    throw new UploadFailedException("Cannot perform 'Concatenate' operation because not all streams are fully uploaded.");
                }

                var remoteStreamPath = metadata.Segments[i].Path;
                var retryCount = 0;
                long remoteLength = -1;

                while (retryCount < SingleSegmentUploader.MaxBufferUploadAttemptCount)
                {
                    _token.ThrowIfCancellationRequested();
                    retryCount++;

                    try
                    {
                        remoteLength = _frontEnd.GetStreamLength(remoteStreamPath);
                        break;
                    }
                    catch (Exception e)
                    {
                        _token.ThrowIfCancellationRequested();
                        if (retryCount >= SingleSegmentUploader.MaxBufferUploadAttemptCount)
                        {
                            throw new UploadFailedException(
                                string.Format(
                                    "Cannot perform 'Concatenate' operation due to the following exception retrieving file information: {0}",
                                    e));
                        }

                        SingleSegmentUploader.WaitForRetry(retryCount, Parameters.UseSegmentBlockBackOffRetryStrategy, _token);
                    }
                }

                if (remoteLength != metadata.Segments[i].Length)
                {
                    throw new UploadFailedException(
                        string.Format(
                            "Cannot perform 'Concatenate' operation because segment {0} has an incorrect length (expected {1}, actual {2}).",
                            i,
                            metadata.Segments[i].Length,
                            remoteLength));
                }

                inputPaths[i] = remoteStreamPath;
            }
            catch (Exception ex)
            {
                // collect any exceptions, whether we just generated them above or whether they come from the Front End
                exceptions.Enqueue(ex);
            }
        });

    if (exceptions.Count > 0)
    {
        throw new AggregateException("At least one concatenate validation check failed", exceptions.ToArray());
    }

    // issue the command
    _frontEnd.Concatenate(metadata.TargetStreamPath, inputPaths);
}
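// Illustrative sketch (not part of the original uploader): the fail-slow validation pattern used by
// ConcatenateSegments above. Each parallel iteration catches its own exception and enqueues it into a
// thread-safe ConcurrentQueue, so every segment is checked even if some fail; the failures are then
// surfaced together as a single AggregateException. Requires System.Collections.Concurrent and
// System.Threading.Tasks; the 'validate' delegate is an assumption made for this example.
private static void ValidateAllInParallel(int count, int maxParallelism, Action<int> validate)
{
    var exceptions = new ConcurrentQueue<Exception>();

    Parallel.For(
        0,
        count,
        new ParallelOptions() { MaxDegreeOfParallelism = maxParallelism },
        (i) =>
        {
            try
            {
                validate(i);
            }
            catch (Exception ex)
            {
                // collect instead of rethrowing so the remaining iterations still run
                exceptions.Enqueue(ex);
            }
        });

    if (exceptions.Count > 0)
    {
        // report all failures at once rather than just the first one encountered
        throw new AggregateException("At least one validation failed", exceptions.ToArray());
    }
}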
/// <summary>
/// Validates that the metadata is valid for a resume operation, and also updates the internal segment states to match what the server looks like.
/// If any changes are made, the metadata will be saved to its canonical location.
/// </summary>
/// <param name="metadata">The metadata to validate and update.</param>
private void ValidateMetadataForResume(UploadMetadata metadata)
{
    ValidateMetadataMatchesLocalFile(metadata);

    // verify that the target stream does not already exist (in case we don't want to overwrite)
    if (!this.Parameters.IsOverwrite && _frontEnd.StreamExists(metadata.TargetStreamPath))
    {
        throw new InvalidOperationException("Target Stream already exists");
    }

    // make sure we don't upload part of the file as binary while the rest is non-binary (that's just asking for trouble)
    if (this.Parameters.IsBinary != metadata.IsBinary)
    {
        throw new InvalidOperationException(
            string.Format(
                "Existing metadata was created for a {0}binary file while the current parameters requested a {1}binary upload.",
                metadata.IsBinary ? string.Empty : "non-",
                this.Parameters.IsBinary ? string.Empty : "non-"));
    }

    // see which files (segments) already exist on the server and update the metadata accordingly;
    // a segment is only trusted if it exists on the server with the expected length, otherwise it is reuploaded
    foreach (var segment in metadata.Segments)
    {
        if (segment.Status == SegmentUploadStatus.Complete)
        {
            var retryCount = 0;
            while (retryCount < SingleSegmentUploader.MaxBufferUploadAttemptCount)
            {
                _token.ThrowIfCancellationRequested();
                retryCount++;

                try
                {
                    // verify that the stream exists and that the length is as expected
                    if (!_frontEnd.StreamExists(segment.Path))
                    {
                        // this segment was marked as completed, but no target stream exists; it needs to be reuploaded
                        segment.Status = SegmentUploadStatus.Pending;
                    }
                    else
                    {
                        var remoteLength = _frontEnd.GetStreamLength(segment.Path);
                        if (remoteLength != segment.Length)
                        {
                            // the target stream has a different length than the input segment, which implies they are inconsistent; it needs to be reuploaded
                            segment.Status = SegmentUploadStatus.Pending;
                        }
                    }

                    break;
                }
                catch (Exception e)
                {
                    _token.ThrowIfCancellationRequested();
                    if (retryCount >= SingleSegmentUploader.MaxBufferUploadAttemptCount)
                    {
                        throw new UploadFailedException(
                            string.Format(
                                "Cannot validate metadata in order to resume due to the following exception retrieving file information: {0}",
                                e));
                    }

                    SingleSegmentUploader.WaitForRetry(retryCount, Parameters.UseSegmentBlockBackOffRetryStrategy, _token);
                }
            }
        }
        else
        {
            // anything which is not in 'Complete' status needs to be reuploaded
            segment.Status = SegmentUploadStatus.Pending;
        }
    }

    metadata.Save();
}
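// Illustrative sketch (not part of the original uploader): the bounded retry loop that both
// ConcatenateSegments and ValidateMetadataForResume above wrap around front-end calls, isolated into a
// generic helper. The operation is attempted up to 'maxAttempts' times; cancellation is checked before
// each attempt and again after a failure, and the last exception is wrapped once the attempts are
// exhausted. The helper shape and the exponential delay (standing in for
// SingleSegmentUploader.WaitForRetry) are assumptions made for this example.
private static T RetryWithBackOff<T>(Func<T> operation, int maxAttempts, CancellationToken token)
{
    var attempt = 0;
    while (true)
    {
        token.ThrowIfCancellationRequested();
        attempt++;

        try
        {
            return operation();
        }
        catch (Exception e)
        {
            token.ThrowIfCancellationRequested();
            if (attempt >= maxAttempts)
            {
                // out of attempts: surface the last failure to the caller
                throw new UploadFailedException(
                    string.Format("Operation failed after {0} attempts: {1}", attempt, e));
            }

            // exponential back-off (1s, 2s, 4s, ...) that also wakes up early if cancellation is requested;
            // the next loop iteration then throws via ThrowIfCancellationRequested
            token.WaitHandle.WaitOne(TimeSpan.FromSeconds(Math.Pow(2, attempt - 1)));
        }
    }
}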