/// <summary>
        /// Serialize and upload the constructed import manifest using the supplied S3 client
        /// with optional progress callback.
        /// </summary>
        /// <param name="progressCallback">Optional callback to track upload progress.</param>
        /// <exception cref="InvalidOperationException">If ManifestFileKey has not been constructed.</exception>
        /// <exception cref="DiskImageImporterException">If the manifest upload to Amazon S3 fails.</exception>
        void UploadManifest(ImportProgressCallback progressCallback = null)
        {
            if (string.IsNullOrEmpty(ManifestFileKey))
                throw new InvalidOperationException("Expected ManifestFileKey to have been constructed");

            using (var manifestStream = new MemoryStream())
            {
                if (progressCallback != null)
                    progressCallback("Creating import manifest...", null);

                // Get as close to the xml sent by the existing ec2 cli as possible, so no namespaces, 
                // Unix linefeeds and a standalone instruction, plus indentation of 4 spaces. This makes 
                // comparison of the two manifests easier if needed for debugging.
                var xmlContext = new XmlSerializer(typeof(ImportManifestRoot));
                var namespaces = new XmlSerializerNamespaces();
                namespaces.Add("", "");
                var writerCfg = new XmlWriterSettings
                {
                    Encoding = new UTF8Encoding(false), // false = no BOM, matching the cli output
                    NewLineChars = "\n",
                    NewLineHandling = NewLineHandling.Replace,
                    Indent = true,
                    IndentChars = "    "
                };
                using (var writer = XmlWriter.Create(manifestStream, writerCfg))
                {
                    // emit the declaration by hand so we can force standalone="yes"
                    writer.WriteProcessingInstruction("xml", "version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"");
                    xmlContext.Serialize(writer, ImportManifest, namespaces);
                }

                if (progressCallback != null)
                    progressCallback("Creating and uploading import manifest...", null);

                // FIX: serialization leaves the stream positioned at end-of-data; rewind it so
                // the S3 client reads (and computes the length of) the full manifest content
                // rather than seeing an empty remainder.
                manifestStream.Seek(0, SeekOrigin.Begin);

                var request = new PutObjectRequest
                {
                    BucketName = BucketName,
                    Key = ManifestFileKey,
                    InputStream = manifestStream,
                };

                // even though this is the first artifact to be uploaded and as such, if it fails,
                // there is nothing to clean up catch any exception so we can tell the user there
                // is no clean up required (for users concerned about storage costs).
                try
                {
                    S3Client.PutObject(request);
                }
                catch (Exception e)
                {
                    throw new DiskImageImporterException(DiskImportErrorStage.UploadingManifest,
                                                         "Upload of the image file manifest to Amazon S3 failed.\nThere are no orphaned objects requiring manual deletion.", 
                                                         e);
                }
            }
        }
        /// <summary>
        /// Uploads the image file to S3 as a series of distinct 10MB objects, as required by EC2,
        /// using the constructed import manifest as a guide. If any part fails to upload, we clean
        /// up successfully uploaded parts before returning the error to the caller.
        /// </summary>
        /// <param name="progressCallback">Optional callback to track upload progress.</param>
        /// <exception cref="DiskImageImporterException">
        /// Thrown when one or more parts fail to upload; the message reports whether the
        /// partially uploaded content was rolled back or retained for a later resume.
        /// </exception>
        void UploadImageParts(ImportProgressCallback progressCallback = null)
        {
            var imageFileinfo = new FileInfo(ImageFilePath);
            var partsList = ImportManifest.ImportData.PartsList;

            var activityMessage = string.Format(CultureInfo.InvariantCulture, 
                                                "Uploading image file ({0:N0} bytes across {1:N0} parts).", 
                                                imageFileinfo.Length, 
                                                partsList.Count);

            if (progressCallback != null)
                progressCallback(activityMessage, 0);

            // spin up the threads to handle the parts
            using (var fs = new FileStream(ImageFilePath, FileMode.Open, FileAccess.Read))
            {
                // CountdownEvent and CancellationToken would be ideal here but they
                // are not in .net 3.5
                var partUploadedEvent = new AutoResetEvent(false);

                // NOTE(review): all workers share this single FileStream; UploadImageFilePart is
                // presumably responsible for synchronizing seeks/reads against it — confirm
                // before changing this.
                for (var i = 0; i < UploadThreads; i++)
                {
                    var workerState = new ImagePartUploadState
                    {
                        S3Client = S3Client,
                        BucketName = BucketName,
                        PartsList = partsList,
                        ImageFileStream = fs,
                        PartProcessed = partUploadedEvent
                    };

                    ThreadPool.QueueUserWorkItem(UploadImageFilePart, workerState);
                }

                // Rather than rely on keeping a count of completed uploads to track progress, 
                // which could get out of sync if two threads fire the event at the same time, 
                // we scan and count progress on each event signal - that's been more reliable. 
                // Also, to allow for a loss of network connection we also have a timeout on 
                // the wait so we avoid any possibility of the event not being signalled due to 
                // the workers aborting without signalling.
                while (true)
                {
                    partUploadedEvent.WaitOne(5000);

                    // abandon the progress loop as soon as any part reports failure
                    if (partsList.HasFailedUploads)
                        break;

                    if (progressCallback != null)
                    {
                        var processedParts = partsList.PartInstances.Count(part => part.UploadCompleted);
                        progressCallback(activityMessage, (int)((double)processedParts / partsList.Count * 100));
                    }

                    // no workers left running and nothing failed - the upload is complete
                    if (_activeUploadWorkers == 0)
                        break;
                }
            }

            if (!partsList.HasFailedUploads)
            {
                if (progressCallback != null)
                    progressCallback("Image file upload completed.", null);
            }
            else
            {
                if (progressCallback != null)
                {
                    progressCallback("One or more image file parts failed to upload"
                        + (RollbackOnUploadError ? ", rolling back bucket content..." : string.Empty), null);
                }

                // wait until all of the workers are done before we start any possible clean up
                while (_activeUploadWorkers > 0)
                {
                    Thread.Sleep(500);
                }

                var msg = new StringBuilder("Upload of the image file artifacts to Amazon S3 failed.\r\n");
                if (RollbackOnUploadError)
                {
                    // best-effort delete of the manifest plus every part that made it into S3;
                    // if anything could not be removed, tell the user what key prefix to inspect
                    var allRemoved = RemoveUploadedArtifacts(ManifestFileKey, partsList.PartInstances);
                    if (allRemoved)
                        msg.Append("All content that had been uploaded has been successfully removed."
                                    + "\r\n"
                                    + "No further clean-up is required.\r\n");
                    else
                        msg.AppendFormat("Some content that had been uploaded could not be successfully removed."
                                        + "\r\n"
                                        + "Inspect the bucket content for objects with keyprefix"
                                        + "\r\n"
                                        + "'{0}'\r\nand delete them.\r\n",
                                         ArtifactsKeyPrefix);
                }
                else
                {
                    msg.Append("All content that had been uploaded successfully has been retained; the import can be resumed.\r\n");
                }

                throw new DiskImageImporterException(DiskImportErrorStage.UploadingImageFile, msg.ToString());
            }
        }
        /// <summary>
        /// <para>
        /// Builds the import manifest for the image, then uploads the manifest and the
        /// virtual machine or disk image to Amazon S3, returning the S3 key of the
        /// uploaded manifest file.
        /// </para>
        /// <para>
        /// If an error occurs while uploading the image file, the RetainArtifactsOnUploadError
        /// property controls whether the partially uploaded content is deleted or kept. Kept
        /// content allows the import to be resumed later. The property defaults to false
        /// (content is deleted) so that a run that is never retried does not accrue storage
        /// charges for orphaned objects.
        /// </para>
        /// </summary>
        /// <param name="imageFilepath">The full path to the image file to be processed</param>
        /// <param name="fileFormat">
        /// The format of the image file (VMDK | RAW | VHD). If not specified, it will be inferred
        /// from the extension of the image file.
        /// </param>
        /// <param name="volumeSize">
        /// The requested size (in GiB) for the resulting image volume. If not specified a suitable
        /// value based on the size of the image file is used. Note that if importing a disk image that
        /// will be used as an EC2 instance, the minimum required boot volume size is 8GB.
        /// </param>
        /// <param name="keyPrefix">
        /// Optional root-level key prefix applied to the uploaded artifacts in S3. The artifacts
        /// are placed beneath this (or the root if not set) in a key composed of a GUID.
        /// </param>
        /// <param name="progressCallback">Optional callback delegate for upload progress reporting</param>
        /// <param name="resumeUpload">
        /// Set true when a previous upload failed part-way through and RetainArtifactsOnUploadError
        /// kept the partially uploaded content; the existing manifest is reused and only the
        /// remaining parts are uploaded.
        /// </param>
        /// <returns>The S3 object key of the uploaded manifest file</returns>
        public string Upload(string imageFilepath,
                             string fileFormat,
                             long? volumeSize,
                             string keyPrefix,
                             ImportProgressCallback progressCallback,
                             bool resumeUpload)
        {
            ImageFilePath = imageFilepath;

            if (!resumeUpload)
            {
                // Fresh import: derive a unique artifact prefix so this run's objects
                // can never collide with those of an earlier import.
                var importId = Guid.NewGuid().ToString("D");
                if (string.IsNullOrEmpty(keyPrefix))
                {
                    ArtifactsKeyPrefix = importId;
                }
                else
                {
                    ArtifactsKeyPrefix = string.Format(CultureInfo.InvariantCulture, "{0}/{1}", keyPrefix, importId);
                }

                ImportManifest = CreateImportManifest(fileFormat, volumeSize);

                // Make sure the destination bucket exists before any upload is attempted.
                if (!AmazonS3Util.DoesS3BucketExist(S3Client, BucketName))
                {
                    S3Client.PutBucket(new PutBucketRequest { BucketName = this.BucketName, UseClientRegion = true });
                }

                UploadManifest(progressCallback);
            }

            UploadImageParts(progressCallback);

            return ManifestFileKey;
        }
 /// <summary>
 /// Uploads a virtual disk file to Amazon S3 and then requests its import conversion
 /// into an Amazon EBS volume.
 /// </summary>
 /// <param name="imageFilepath">The full path to the image file to be processed</param>
 /// <param name="fileFormat">
 /// The format of the image file (VMDK | RAW | VHD). If not specified, it will be inferred
 /// from the extension of the image file.
 /// </param>
 /// <param name="volumeSize">
 /// The requested size (in GiB) for the resulting image volume. If not specified a suitable
 /// value based on the size of the image file is used.
 /// </param>
 /// <param name="keyPrefix">
 /// Optional root-level key prefix applied to the uploaded artifacts in S3. The artifacts
 /// are placed beneath this (or the root if not set) in a key composed of a GUID.
 /// </param>
 /// <param name="availabilityZone">The Availability Zone for the resulting Amazon EBS volume.</param>
 /// <param name="description">An optional description for the volume being imported.</param>
 /// <param name="progressCallback">Optional callback delegate for upload progress reporting</param>
 /// <returns>
 /// The service response containing a ConversionTask object that can be used to monitor
 /// the progress of the requested conversion.
 /// </returns>
 public ImportVolumeResponse ImportVolume(string imageFilepath,
                                          string fileFormat,
                                          long? volumeSize,
                                          string keyPrefix,
                                          string availabilityZone, 
                                          string description,
                                          ImportProgressCallback progressCallback)
 {
     // Push the manifest and image parts to S3 first; this entry point always
     // performs a fresh (non-resumed) upload.
     Upload(imageFilepath, fileFormat, volumeSize, keyPrefix, progressCallback, resumeUpload: false);

     // Hand off to EC2 to start the conversion and surface the task to the caller.
     return StartVolumeConversion(availabilityZone, description);
 }
 /// <summary>
 /// Uploads a virtual machine image file to Amazon S3 and then requests its import
 /// conversion into an Amazon EC2 instance.
 /// </summary>
 /// <param name="imageFilepath">The full path to the image file to be processed</param>
 /// <param name="fileFormat">
 /// The format of the image file (VMDK | RAW | VHD). If not specified, it will be inferred
 /// from the extension of the image file.
 /// </param>
 /// <param name="volumeSize">
 /// The requested size (in GiB) for the resulting image volume. If not specified a suitable
 /// value based on the size of the image file is used. Note that the minimum required boot
 /// volume size for EC2 is 8GB.
 /// </param>
 /// <param name="keyPrefix">
 /// Optional root-level key prefix applied to the uploaded artifacts in S3. The artifacts
 /// are placed beneath this (or the root if not set) in a key composed of a GUID.
 /// </param>
 /// <param name="launchConfiguration">Launch configuration settings for the imported instance</param>
 /// <param name="progressCallback">Optional callback delegate for upload progress reporting</param>
 /// <returns>
 /// The service response containing a ConversionTask object that can be used to monitor
 /// the progress of the requested conversion.
 /// </returns>
 public ImportInstanceResponse ImportInstance(string imageFilepath,
                                              string fileFormat,
                                              long? volumeSize,
                                              string keyPrefix,
                                              ImportLaunchConfiguration launchConfiguration,
                                              ImportProgressCallback progressCallback)
 {
     // Push the manifest and image parts to S3 first; this entry point always
     // performs a fresh (non-resumed) upload.
     Upload(imageFilepath, fileFormat, volumeSize, keyPrefix, progressCallback, resumeUpload: false);

     // Hand off to EC2 to start the conversion and surface the task to the caller.
     return StartInstanceConversion(launchConfiguration);
 }
Example No. 6
0
        /// <summary>
        /// This method imports the rows of a CSV text blob and returns the number of items imported.
        /// The first row is treated as a header and skipped; rows whose parsed word count does not
        /// match expectedCount are ignored.
        /// </summary>
        /// <param name="text">The contents to be imported.</param>
        /// <param name="tableName">Only used in cases where more than one import takes place.</param>
        /// <param name="expectedCount">Number of expected columns in the csv file</param>
        /// <param name="callback">The method to call to update progress; may be null.</param>
        /// <returns>The number of rows successfully saved.</returns>
        public static int ImportData(string text, string tableName, int expectedCount, ImportProgressCallback callback)
        {
            // initial value
            int savedCount = 0;

            // locals
            char[]      delimiters  = { ',' };
            int         count       = 0;
            int         refreshRate = 5;
            List <Word> words       = null;
            RawImport   rawImport   = null;

            // only proceed when some text was supplied
            if (TextHelper.Exists(text))
            {
                // Create a new instance of a 'Gateway' object.
                Gateway gateway = new Gateway();

                // split the text into lines
                List <TextLine> textLines = WordParser.GetTextLines(text.Trim());

                // If the textLines collection exists and has one or more items
                if (ListHelper.HasOneOrMoreItems(textLines))
                {
                    // If the callback object exists
                    if (NullHelper.Exists(callback))
                    {
                        // notify the delegate to setup the Graph
                        callback(textLines.Count, tableName);
                    }

                    // report progress less frequently for large imports
                    if (textLines.Count > 1000)
                    {
                        // change this to whatever makes sense for your app
                        refreshRate = 25;
                    }

                    // Iterate the collection of TextLine objects
                    foreach (TextLine textLine in textLines)
                    {
                        // Increment the value for count
                        count++;

                        // skip the first (header) row
                        if (count > 1)
                        {
                            // get the list of words
                            words = WordParser.GetWords(textLine.Text, delimiters, true);

                            // if the words collection has exactly the right amount
                            if ((ListHelper.HasOneOrMoreItems(words)) && (words.Count == expectedCount))
                            {
                                // Create a new instance of a 'RawImport' object.
                                rawImport = new RawImport();

                                // Load the RawImport with the words
                                SetRawImportProperties(ref rawImport, words);

                                // save the rawImport object
                                bool saved = gateway.SaveRawImport(ref rawImport);

                                // if the value for saved is true
                                if (saved)
                                {
                                    // Increment the value for savedCount
                                    savedCount++;

                                    // refresh every x number of records
                                    // FIX: guard the callback here too - the original invoked it
                                    // unconditionally, throwing a NullReferenceException whenever
                                    // callback was null and a refresh interval was reached.
                                    if ((savedCount % refreshRate == 0) && (NullHelper.Exists(callback)))
                                    {
                                        // update the graph (for a large project, you might want to update every 25, 50 or 100 records or so
                                        callback(savedCount, tableName);
                                    }
                                }
                            }
                        }
                    }
                }
            }

            // return value
            return(savedCount);
        }
Example No. 7
0
        /// <summary>
        /// This method reads the named file and imports its contents, returning the
        /// number of items imported.
        /// </summary>
        /// <param name="fileName">The path of the csv file to import.</param>
        /// <param name="tableName">Only used in cases where more than one import takes place.</param>
        /// <param name="expectedCount">Number of expected columns in the csv file.</param>
        /// <param name="callback">The method to call to update progress</param>
        /// <returns>The number of rows successfully imported, or 0 when the file is missing.</returns>
        public static int ImportDataFromFile(string fileName, string tableName, int expectedCount, ImportProgressCallback callback)
        {
            // guard clause: nothing to do unless a name was supplied and the file is on disk
            if ((!TextHelper.Exists(fileName)) || (!File.Exists(fileName)))
            {
                return 0;
            }

            // read all text
            string fileText = File.ReadAllText(fileName);

            // delegate the parsing and saving to the text-based overload
            return CSVImportJob.ImportData(fileText, tableName, expectedCount, callback);
        }