/// <summary>
/// Uploads the image file to S3 as a series of distinct 10MB objects, as required by EC2,
/// using the constructed import manifest as a guide. If any part fails to upload, we clean
/// up successfully uploaded parts before returning the error to the caller.
/// </summary>
/// <param name="progressCallback">Optional callback to track upload progress.</param>
/// <exception cref="DiskImageImporterException">
/// Thrown when one or more parts fail to upload; the message states whether the already
/// uploaded content was rolled back (RollbackOnUploadError) or retained for a later resume.
/// </exception>
void UploadImageParts(ImporterProgressCallback progressCallback = null)
{
    var imageFileinfo = new FileInfo(ImageFilePath);
    var partsList = ImportManifest.ImportData.PartsList;

    var activityMessage = string.Format(CultureInfo.CurrentCulture,
                                        "Uploading image file ({0:N0} bytes across {1:N0} parts).",
                                        imageFileinfo.Length,
                                        partsList.Count);
    if (progressCallback != null)
        progressCallback(activityMessage, 0);

    // spin up the threads to handle the parts; all workers share one read-only stream
    // over the image file and pull work items from the shared parts list
    using (var fs = new FileStream(ImageFilePath, FileMode.Open, FileAccess.Read))
    {
        // CountdownEvent and CancellationToken would be ideal here but they
        // are not in .net 3.5
        var partUploadedEvent = new AutoResetEvent(false);
        for (var i = 0; i < UploadThreads; i++)
        {
            var workerState = new ImagePartUploadState
            {
                S3Client = S3Client,
                BucketName = BucketName,
                PartsList = partsList,
                ImageFileStream = fs,
                PartProcessed = partUploadedEvent
            };
            ThreadPool.QueueUserWorkItem(UploadImageFilePart, workerState);
        }

        // Rather than rely on keeping a count of completed uploads to track progress,
        // which could get out of sync if two threads fire the event at the same time,
        // we scan and count progress on each event signal - that's been more reliable.
        // Also, to allow for a loss of network connection we also have a timeout on
        // the wait so we avoid any possibility of the event not being signalled due to
        // the workers aborting without signalling.
        while (true)
        {
            // 5s timeout so we re-check state even if a worker died without signalling
            partUploadedEvent.WaitOne(5000);

            if (partsList.HasFailedUploads)
                break;

            if (progressCallback != null)
            {
                var processedParts = partsList.PartInstances.Count(part => part.UploadCompleted);
                progressCallback(activityMessage, (int)((double)processedParts / partsList.Count * 100));
            }

            // _activeUploadWorkers is maintained by the worker threads (declared elsewhere
            // in this class); presumably decremented as each worker exits - zero means all
            // parts have been processed. NOTE(review): confirm it is updated with Interlocked.
            if (_activeUploadWorkers == 0)
                break;
        }
    }

    if (!partsList.HasFailedUploads)
    {
        if (progressCallback != null)
            progressCallback("Image file upload completed.", null);
    }
    else
    {
        if (progressCallback != null)
        {
            progressCallback("One or more image file parts failed to upload"
                                + (RollbackOnUploadError ? ", rolling back bucket content..." : string.Empty),
                             null);
        }

        // wait until all of the workers are done before we start any possible clean up
        while (_activeUploadWorkers > 0)
        {
            Thread.Sleep(500);
        }

        var msg = new StringBuilder("Upload of the image file artifacts to Amazon S3 failed.\r\n");
        if (RollbackOnUploadError)
        {
            // best-effort delete of the manifest plus any parts that made it to S3
            var allRemoved = RemoveUploadedArtifacts(ManifestFileKey, partsList.PartInstances);
            if (allRemoved)
                msg.Append("All content that had been uploaded has been successfully removed."
                            + "\r\n"
                            + "No further clean-up is required.\r\n");
            else
                msg.AppendFormat("Some content that had been uploaded could not be successfully removed."
                                    + "\r\n"
                                    + "Inspect the bucket content for objects with keyprefix"
                                    + "\r\n"
                                    + "'{0}'\r\nand delete them.\r\n",
                                 ArtifactsKeyPrefix);
        }
        else
        {
            msg.Append("All content that had been uploaded successfully has been retained; the import can be resumed.\r\n");
        }

        throw new DiskImageImporterException(DiskImportErrorStage.UploadingImageFile, msg.ToString());
    }
}
/// <summary>
/// Serialize and upload the constructed import manifest using the supplied S3 client
/// with optional progress callback.
/// </summary>
/// <param name="progressCallback">Optional callback to track upload progress.</param>
/// <exception cref="InvalidOperationException">
/// Thrown if ManifestFileKey has not been constructed prior to this call.
/// </exception>
/// <exception cref="DiskImageImporterException">
/// Thrown (wrapping the underlying error) if the upload to Amazon S3 fails.
/// </exception>
void UploadManifest(ImporterProgressCallback progressCallback = null)
{
    if (string.IsNullOrEmpty(ManifestFileKey))
        throw new InvalidOperationException("Expected ManifestFileKey to have been constructed");

    using (var manifestStream = new MemoryStream())
    {
        if (progressCallback != null)
            progressCallback("Creating import manifest...", null);

        // Get as close to the xml sent by the existing ec2 cli as possible, so no namespaces,
        // Unix linefeeds and a standalone instruction, plus indentation of 4 spaces. This makes
        // comparison of the two manifests easier if needed for debugging.
        var xmlContext = new XmlSerializer(typeof(ImportManifestRoot));
        var namespaces = new XmlSerializerNamespaces();
        namespaces.Add("", "");

        var writerCfg = new XmlWriterSettings
        {
            Encoding = new UTF8Encoding(false), // UTF-8 without a BOM, matching the ec2 cli
            NewLineChars = "\n",
            NewLineHandling = NewLineHandling.Replace,
            Indent = true,
            IndentChars = "    " // 4 spaces, per the comment above
        };

        using (var writer = XmlWriter.Create(manifestStream, writerCfg))
        {
            writer.WriteProcessingInstruction("xml", "version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"");
            xmlContext.Serialize(writer, ImportManifest, namespaces);
        }

        if (progressCallback != null)
            progressCallback("Creating and uploading import manifest...", null);

        // Disposing the XmlWriter leaves the stream positioned at the end of the written
        // data; rewind so the PutObject request uploads the full serialized manifest
        // rather than starting from the end of the stream.
        manifestStream.Position = 0;

        var request = new PutObjectRequest
        {
            BucketName = BucketName,
            Key = ManifestFileKey,
            InputStream = manifestStream,
        };

        // even though this is the first artifact to be uploaded and as such, if it fails,
        // there is nothing to clean up catch any exception so we can tell the user there
        // is no clean up required (for users concerned about storage costs).
        try
        {
            S3Client.PutObject(request);
        }
        catch (Exception e)
        {
            throw new DiskImageImporterException(DiskImportErrorStage.UploadingManifest,
                "Upload of the image file manifest to Amazon S3 failed.\nThere are no orphaned objects requiring manual deletion.",
                e);
        }
    }
}
/// <summary> /// Uploads and requests import conversion of a virtual disk file to an Amazon EBS volume. /// </summary> /// <param name="imageFilepath">The full path to the image file to be processed</param> /// <param name="fileFormat"> /// The format of the image file (VMDK | RAW | VHD). If not specified, it will be inferred /// from the extension of the image file. /// </param> /// <param name="volumeSize"> /// The requested size (in GiB) for the resulting image volume. If not specified a suitable /// value based on the size of the image file is used. /// </param> /// <param name="keyPrefix"> /// Optional root-level key prefix that will be applied to the uploaded artifacts in S3. /// The artifacts will be placed beneath this (or the root if not set) in a key composed /// of a GUID. /// </param> /// <param name="availabilityZone">The Availability Zone for the resulting Amazon EBS volume.</param> /// <param name="description">An optional description for the volume being imported.</param> /// <param name="progressCallback">Optional callback delegate for upload progress reporting</param> /// <returns> /// The service response containing a ConversionTask object that can be used to monitor the progress of the /// requested conversion. /// </returns> public ImportVolumeResponse ImportVolume(string imageFilepath, string fileFormat, long? volumeSize, string keyPrefix, string availabilityZone, string description, ImporterProgressCallback progressCallback) { Upload(imageFilepath, fileFormat, volumeSize, keyPrefix, progressCallback, false); return StartVolumeConversion(availabilityZone, description); }
/// <summary>
/// <para>
/// Constructs the import manifest for the image and then uploads it and the
/// virtual machine image or disk image to Amazon S3. The S3 key to the uploaded
/// manifest file is returned.
/// </para>
/// <para>
/// If an error occurs while the image file is being uploaded, the RetainArtifactsOnUploadError
/// property determines whether the partially uploaded content is deleted or kept. When kept,
/// the import can be resumed later. RetainArtifactsOnUploadError defaults to false, so the
/// content is deleted by default to avoid storage charges for orphaned objects should the
/// command not be re-run.
/// </para>
/// </summary>
/// <param name="imageFilepath">The full path to the image file to be processed</param>
/// <param name="fileFormat">
/// The format of the image file (VMDK | RAW | VHD). If not specified, it will be inferred
/// from the extension of the image file.
/// </param>
/// <param name="volumeSize">
/// The requested size (in GiB) for the resulting image volume. If not specified a suitable
/// value based on the size of the image file is used. Note that if importing a disk image that
/// will be used as an EC2 instance, the minimum required boot volume size is 8GB.
/// </param>
/// <param name="keyPrefix">
/// Optional root-level key prefix that will be applied to the uploaded artifacts in S3.
/// The artifacts will be placed beneath this (or the root if not set) in a key composed
/// of a GUID.
/// </param>
/// <param name="progressCallback">Optional callback delegate for upload progress reporting</param>
/// <param name="resumeUpload">
/// Set this to true if a previous upload failed part-way through processing and RetainArtifactsOnUploadError
/// was set to true so the partially uploaded content was retained. The existing manifest will
/// be inspected and uploads will resume of the retaining content.
/// </param>
/// <returns>The S3 object key of the uploaded manifest file</returns>
public string Upload(string imageFilepath,
                     string fileFormat,
                     long? volumeSize,
                     string keyPrefix,
                     ImporterProgressCallback progressCallback,
                     bool resumeUpload)
{
    ImageFilePath = imageFilepath;

    if (!resumeUpload)
    {
        // Fresh import: artifacts live under a new GUID, optionally nested
        // beneath the caller-supplied prefix.
        var uploadId = Guid.NewGuid().ToString("D");
        if (string.IsNullOrEmpty(keyPrefix))
            ArtifactsKeyPrefix = uploadId;
        else
            ArtifactsKeyPrefix = string.Format(CultureInfo.InvariantCulture, "{0}/{1}", keyPrefix, uploadId);

        ImportManifest = CreateImportManifest(fileFormat, volumeSize);

        // Make sure the target bucket exists before pushing the manifest.
        if (!AmazonS3Util.DoesS3BucketExist(S3Client, BucketName))
        {
            var putBucketRequest = new PutBucketRequest
            {
                BucketName = BucketName,
                UseClientRegion = true
            };
            S3Client.PutBucket(putBucketRequest);
        }

        UploadManifest(progressCallback);
    }

    // Resumed uploads skip straight to the image parts, driven by the retained manifest.
    UploadImageParts(progressCallback);

    return ManifestFileKey;
}
/// <summary> /// Uploads and requests import conversion of a virtual machine image file /// to an Amazon EC2 instance. /// </summary> /// <param name="imageFilepath">The full path to the image file to be processed</param> /// <param name="fileFormat"> /// The format of the image file (VMDK | RAW | VHD). If not specified, it will be inferred /// from the extension of the image file. /// </param> /// <param name="volumeSize"> /// The requested size (in GiB) for the resulting image volume. If not specified a suitable /// value based on the size of the image file is used. Note that the minimum required boot /// volume size for EC2 is 8GB. /// </param> /// <param name="keyPrefix"> /// Optional root-level key prefix that will be applied to the uploaded artifacts in S3. /// The artifacts will be placed beneath this (or the root if not set) in a key composed /// of a GUID. /// </param> /// <param name="launchConfiguration">Launch configuration settings for the imported instance</param> /// <param name="progressCallback">Optional callback delegate for upload progress reporting</param> /// <returns> /// The service response containing a ConversionTask object that can be used to monitor the progress of the /// requested conversion. /// </returns> public ImportInstanceResponse ImportInstance(string imageFilepath, string fileFormat, long? volumeSize, string keyPrefix, ImportLaunchConfiguration launchConfiguration, ImporterProgressCallback progressCallback) { Upload(imageFilepath, fileFormat, volumeSize, keyPrefix, progressCallback, false); return StartInstanceConversion(launchConfiguration); }