/// <summary>
/// Uploads the image file to S3 as a series of distinct 10MB objects, as required by EC2,
/// using the constructed import manifest as a guide. If any part fails to upload, we clean
/// up successfully uploaded parts before returning the error to the caller.
/// </summary>
/// <param name="progressCallback">
/// Optional callback to track upload progress. Invoked with a status message and a
/// percent-complete value (null is passed for the terminal success/failure messages).
/// </param>
/// <exception cref="DiskImageImporterException">
/// Thrown (stage <c>UploadingImageFile</c>) when one or more parts fail to upload; the
/// message describes whether rollback removed the already-uploaded content.
/// </exception>
void UploadImageParts(ImportProgressCallback progressCallback = null)
{
    var imageFileinfo = new FileInfo(ImageFilePath);
    var partsList = ImportManifest.ImportData.PartsList;

    // Same message text is reused for every progress tick; only the percentage changes.
    var activityMessage = string.Format(CultureInfo.InvariantCulture,
                                        "Uploading image file ({0:N0} bytes across {1:N0} parts).",
                                        imageFileinfo.Length,
                                        partsList.Count);
    if (progressCallback != null)
        progressCallback(activityMessage, 0);

    // spin up the threads to handle the parts
    using (var fs = new FileStream(ImageFilePath, FileMode.Open, FileAccess.Read))
    {
        // CountdownEvent and CancellationToken would be ideal here but they
        // are not in .net 3.5
        // NOTE(review): this AutoResetEvent is never disposed; presumably each worker
        // signals it via the PartProcessed member after finishing a part - confirm
        // against UploadImageFilePart.
        var partUploadedEvent = new AutoResetEvent(false);
        for (var i = 0; i < UploadThreads; i++)
        {
            // All workers share the same stream, parts list and completion event;
            // per-part coordination is assumed to happen inside UploadImageFilePart.
            var workerState = new ImagePartUploadState
            {
                S3Client = S3Client,
                BucketName = BucketName,
                PartsList = partsList,
                ImageFileStream = fs,
                PartProcessed = partUploadedEvent
            };
            ThreadPool.QueueUserWorkItem(UploadImageFilePart, workerState);
        }

        // Rather than rely on keeping a count of completed uploads to track progress,
        // which could get out of sync if two threads fire the event at the same time,
        // we scan and count progress on each event signal - that's been more reliable.
        // Also, to allow for a loss of network connection we also have a timeout on
        // the wait so we avoid any possibility of the event not being signalled due to
        // the workers aborting without signalling.
        while (true)
        {
            // 5s timeout: wake up periodically even if no worker signalled.
            partUploadedEvent.WaitOne(5000);

            // Stop monitoring as soon as any part has failed; worker drain and
            // clean-up are handled in the failure branch below.
            if (partsList.HasFailedUploads)
                break;

            if (progressCallback != null)
            {
                // Full scan of the parts list on every wake-up (see comment above on
                // why this is preferred over an incremental counter).
                var processedParts = partsList.PartInstances.Count(part => part.UploadCompleted);
                progressCallback(activityMessage, (int)((double)processedParts / partsList.Count * 100));
            }

            // _activeUploadWorkers is presumably decremented by each worker as it
            // exits; zero means all work (success path) is finished.
            if (_activeUploadWorkers == 0)
                break;
        }
    }

    if (!partsList.HasFailedUploads)
    {
        if (progressCallback != null)
            progressCallback("Image file upload completed.", null);
    }
    else
    {
        if (progressCallback != null)
        {
            progressCallback("One or more image file parts failed to upload"
                             + (RollbackOnUploadError ? ", rolling back bucket content..." : string.Empty), null);
        }

        // wait until all of the workers are done before we start any possible clean up
        while (_activeUploadWorkers > 0)
        {
            Thread.Sleep(500);
        }

        // Build a multi-line failure report; whether uploaded content was removed
        // or retained depends on the RollbackOnUploadError setting.
        var msg = new StringBuilder("Upload of the image file artifacts to Amazon S3 failed.\r\n");
        if (RollbackOnUploadError)
        {
            var allRemoved = RemoveUploadedArtifacts(ManifestFileKey, partsList.PartInstances);
            if (allRemoved)
                msg.Append("All content that had been uploaded has been successfully removed."
                            + "\r\n"
                            + "No further clean-up is required.\r\n");
            else
                // Partial rollback: tell the caller which key prefix to inspect manually.
                msg.AppendFormat("Some content that had been uploaded could not be successfully removed."
                                    + "\r\n"
                                    + "Inspect the bucket content for objects with keyprefix"
                                    + "\r\n"
                                    + "'{0}'\r\nand delete them.\r\n",
                                    ArtifactsKeyPrefix);
        }
        else
        {
            msg.Append("All content that had been uploaded successfully has been retained; the import can be resumed.\r\n");
        }

        throw new DiskImageImporterException(DiskImportErrorStage.UploadingImageFile, msg.ToString());
    }
}