public void NormalUploadReplacesRemoteStreamIfRemoteStreamExists()
        {
            // An existing remote content stream must be deleted and replaced on upload.
            this.mockedDocument.Setup(doc => doc.ContentStreamId).Returns("StreamId");
            this.mockedDocument
                .Setup(doc => doc.DeleteContentStream(It.IsAny<bool>()))
                .Callback(() => {
                    // Deleting the stream swaps the remote backing store for a fresh one.
                    this.remoteStream?.Dispose();
                    this.remoteStream = new MemoryStream();
                })
                .Returns(this.mockedDocument.Object);

            // Seed the remote stream so there is existing content to be replaced.
            this.remoteStream.WriteByte(1);
            this.transmissionEvent.TransmissionStatus += (sender, e) => this.AssertThatProgressFitsMinimumLimits(e, 0, 0, 0);

            using (IFileUploader uploader = new ChunkedUploader(this.chunkSize)) {
                uploader.UploadFile(this.mockedDocument.Object, this.localFileStream, this.transmissionEvent, this.hashAlg);
            }

            this.mockedDocument.Verify(doc => doc.DeleteContentStream(It.IsAny<bool>()), Times.Once());
            this.AssertThatLocalAndRemoteContentAreEqualToHash();
            Assert.AreEqual(1, this.lastChunk);
        }
        // Example #2 (0 votes)
        public void NormalUpload()
        {
            // Upload the whole local file in chunks in one pass.
            using (var uploader = new ChunkedUploader(this.chunkSize)) {
                uploader.UploadFile(this.mockedDocument.Object, this.localFileStream, this.transmission, this.hashAlg);
            }

            // The remote content must hash-match the local content and the
            // transfer must have ended on the final chunk.
            this.AssertThatLocalAndRemoteContentAreEqualToHash();
            Assert.AreEqual(1, this.lastChunk);
        }
        public void ContructorWorksWithValidInput()
        {
            // Without arguments the uploader must choose a positive default chunk size.
            using (var defaultUploader = new ChunkedUploader()) {
                Assert.That(defaultUploader.ChunkSize, Is.GreaterThan(0));
            }

            // An explicitly requested chunk size must be taken over verbatim.
            using (var sizedUploader = new ChunkedUploader(this.chunkSize)) {
                Assert.That(sizedUploader.ChunkSize, Is.EqualTo(this.chunkSize));
            }
        }
        // Example #4 (0 votes)
 public void IOExceptionOnUploadTest()
 {
     // Any IOException raised while appending a chunk must surface as an
     // UploadFailedException that wraps the original error and reports the
     // last successfully used document.
     this.mockedDocument.Setup(doc => doc.AppendContentStream(It.IsAny<IContentStream>(), It.IsAny<bool>(), It.Is<bool>(b => b == true)))
     .Throws(new IOException());
     using (IFileUploader uploader = new ChunkedUploader(this.chunkSize)) {
         var e = Assert.Throws<UploadFailedException>(() => uploader.UploadFile(this.mockedDocument.Object, this.localFileStream, this.transmission, this.hashAlg));

         // Assert.Throws<UploadFailedException> already guarantees both that the
         // exception was thrown and its exact type, so re-asserting the type and
         // casting e were redundant — only the wrapped cause and the reported
         // document need checking.
         Assert.IsInstanceOf(typeof(IOException), e.InnerException);
         Assert.AreEqual(this.mockedDocument.Object, e.LastSuccessfulDocument);
     }
 }
        public void NormalUpload()
        {
            // Progress events may never report values below zero during a plain upload.
            this.transmissionEvent.TransmissionStatus += (sender, e) => this.AssertThatProgressFitsMinimumLimits(e, 0, 0, 0);

            using (var uploader = new ChunkedUploader(this.chunkSize)) {
                uploader.UploadFile(this.mockedDocument.Object, this.localFileStream, this.transmissionEvent, this.hashAlg);
            }

            // Remote content hash-matches local content; upload ended on the last chunk.
            this.AssertThatLocalAndRemoteContentAreEqualToHash();
            Assert.AreEqual(1, this.lastChunk);
        }
        // Example #6 (0 votes)
        public void ResumeUploadWithUtils()
        {
            // Pretend the first 20 % of the file already reached the server.
            const double alreadyUploadedFraction = 0.2;
            int uploadedBytes = (int)(this.fileLength * alreadyUploadedFraction);
            double expectedMinimumPercent = alreadyUploadedFraction * 100;

            this.InitRemoteChunkWithSize(uploadedBytes);

            // Reported progress must never fall below what was already transferred.
            this.transmission.AddLengthConstraint(Is.GreaterThanOrEqualTo(uploadedBytes));
            this.transmission.AddPercentConstraint(Is.GreaterThanOrEqualTo(expectedMinimumPercent));
            this.transmission.AddPositionConstraint(Is.GreaterThanOrEqualTo(uploadedBytes));

            using (var uploader = new ChunkedUploader(this.chunkSize)) {
                // Fast-forward the local stream and hash state past the uploaded part.
                ContentTaskUtils.PrepareResume(uploadedBytes, this.localFileStream, this.hashAlg);
                uploader.UploadFile(this.mockedDocument.Object, this.localFileStream, this.transmission, this.hashAlg);
            }

            this.AssertThatLocalAndRemoteContentAreEqualToHash();
            Assert.AreEqual(1, this.lastChunk);
        }
        public void ResumeUploadWithUtils()
        {
            // Simulate that a fifth of the file was uploaded in a previous run.
            const double alreadyUploadedFraction = 0.2;
            int uploadedBytes = (int)(this.fileLength * alreadyUploadedFraction);
            double expectedMinimumPercent = alreadyUploadedFraction * 100;

            this.InitRemoteChunkWithSize(uploadedBytes);

            // Every progress event must be at or beyond the resume position.
            this.transmissionEvent.TransmissionStatus += (sender, e) =>
                this.AssertThatProgressFitsMinimumLimits(e, uploadedBytes, expectedMinimumPercent, uploadedBytes);

            using (var uploader = new ChunkedUploader(this.chunkSize)) {
                // Fast-forward the local stream and hash state past the uploaded part.
                ContentTaskUtils.PrepareResume(uploadedBytes, this.localFileStream, this.hashAlg);
                uploader.UploadFile(this.mockedDocument.Object, this.localFileStream, this.transmissionEvent, this.hashAlg);
            }

            this.AssertThatLocalAndRemoteContentAreEqualToHash();
            Assert.AreEqual(1, this.lastChunk);
        }
        // Example #8 (0 votes)
        public void ResumeUpload()
        {
            // Half of the file counts as already transferred.
            const double alreadyUploadedFraction = 0.5;
            int uploadedBytes = (int)(this.fileLength * alreadyUploadedFraction);
            double expectedMinimumPercent = alreadyUploadedFraction * 100;

            // Reported progress may be absent, but never below the resumed offset.
            this.transmission.AddLengthConstraint(Is.Null.Or.GreaterThanOrEqualTo(uploadedBytes));
            this.transmission.AddPercentConstraint(Is.Null.Or.GreaterThanOrEqualTo(expectedMinimumPercent));
            this.transmission.AddPositionConstraint(Is.Null.Or.GreaterThanOrEqualTo(uploadedBytes));

            // Manually replay the first half: seed the remote chunk, feed the
            // hash, and seek the local stream to the resume position.
            this.InitRemoteChunkWithSize(uploadedBytes);
            this.hashAlg.TransformBlock(this.localContent, 0, uploadedBytes, this.localContent, 0);
            this.localFileStream.Seek(uploadedBytes, SeekOrigin.Begin);

            using (var uploader = new ChunkedUploader(this.chunkSize)) {
                uploader.UploadFile(this.mockedDocument.Object, this.localFileStream, this.transmission, this.hashAlg);
            }

            this.AssertThatLocalAndRemoteContentAreEqualToHash();
            Assert.AreEqual(1, this.lastChunk);
        }
        public void ResumeUpload()
        {
            // Half of the file counts as already transferred.
            const double alreadyUploadedFraction = 0.5;
            int uploadedBytes = (int)(this.fileLength * alreadyUploadedFraction);
            double expectedMinimumPercent = alreadyUploadedFraction * 100;

            // Every progress event must be at or beyond the resume position.
            this.transmissionEvent.TransmissionStatus += (sender, e) =>
                this.AssertThatProgressFitsMinimumLimits(e, uploadedBytes, expectedMinimumPercent, uploadedBytes);

            // Manually replay the first half: seed the remote chunk, feed the
            // hash, and seek the local stream to the resume position.
            this.InitRemoteChunkWithSize(uploadedBytes);
            this.hashAlg.TransformBlock(this.localContent, 0, uploadedBytes, this.localContent, 0);
            this.localFileStream.Seek(uploadedBytes, SeekOrigin.Begin);

            using (var uploader = new ChunkedUploader(this.chunkSize)) {
                uploader.UploadFile(this.mockedDocument.Object, this.localFileStream, this.transmissionEvent, this.hashAlg);
            }

            this.AssertThatLocalAndRemoteContentAreEqualToHash();
            Assert.AreEqual(1, this.lastChunk);
        }
        /// <summary>
        /// The <see cref="StagedUploadAsync"/> operation will create a new
        /// block blob of arbitrary size by uploading it as indiviually staged
        /// blocks if it's larger than the
        /// <paramref name="singleBlockThreshold"/>.
        /// </summary>
        /// <param name="content">
        /// A <see cref="Stream"/> containing the content to upload.
        /// </param>
        /// <param name="blobHttpHeaders">
        /// Optional standard HTTP header properties that can be set for the
        /// block blob.
        /// </param>
        /// <param name="metadata">
        /// Optional custom metadata to set for this block blob.
        /// </param>
        /// <param name="blobAccessConditions">
        /// Optional <see cref="BlobAccessConditions"/> to add conditions on
        /// the creation of this new block blob.
        /// </param>
        /// <param name="progressHandler">
        /// Optional <see cref="IProgress{StorageProgress}"/> to provide
        /// progress updates about data transfers.
        /// </param>
        /// <param name="singleBlockThreshold">
        /// The maximum size stream that we'll upload as a single block.  The
        /// default value is 256MB.
        /// </param>
        /// <param name="blockSize">
        /// The size of individually staged blocks.  The default value is 4MB.
        /// </param>
        /// <param name="async">
        /// When true the upload is awaited asynchronously; when false the
        /// task is driven to completion synchronously via EnsureCompleted.
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{BlobContentInfo}"/> describing the
        /// state of the updated block blob.
        /// </returns>
        /// <remarks>
        /// A <see cref="StorageRequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        internal async Task <Response <BlobContentInfo> > StagedUploadAsync(
            Stream content,
            BlobHttpHeaders?blobHttpHeaders,
            Metadata metadata,
            BlobAccessConditions?blobAccessConditions,
            IProgress <StorageProgress> progressHandler,
            long singleBlockThreshold = BlockBlobClient.BlockBlobMaxUploadBlobBytes,
            int blockSize             = Constants.DefaultBufferSize,
            bool async = true,
            CancellationToken cancellationToken = default)
        {
            // The threshold must not exceed the service's single-upload limit.
            Debug.Assert(singleBlockThreshold <= BlockBlobClient.BlockBlobMaxUploadBlobBytes);

            // blockList is captured by the local functions below: StageBlockAsync
            // appends each staged block ID and CommitBlockListAsync commits them.
            var client     = new BlockBlobClient(this.Uri, this.Pipeline);
            var blockList  = new List <string>();
            var uploadTask = ChunkedUploader.UploadAsync(
                UploadStreamAsync,
                StageBlockAsync,
                CommitBlockListAsync,
                content,
                singleBlockThreshold,
                blockSize,
                async,
                cancellationToken);

            // Dual-mode execution: await when async, otherwise block until done.
            return(async ?
                   await uploadTask.ConfigureAwait(false) :
                   uploadTask.EnsureCompleted());

            // Upload the entire stream
            // NOTE(review): these local functions receive a 'cancellation'
            // parameter but pass the outer 'cancellationToken' instead —
            // presumably the same token here, but confirm this is intentional.
            Task <Response <BlobContentInfo> > UploadStreamAsync(
                Stream content,
                bool async,
                CancellationToken cancellation) =>
            client.UploadInternal(
                content,
                blobHttpHeaders,
                metadata,
                blobAccessConditions,
                progressHandler,
                async,
                cancellationToken);

            // Upload a single chunk of the stream
            Task <Response <BlockInfo> > StageBlockAsync(
                Stream chunk,
                int blockNumber,
                bool async,
                CancellationToken cancellation)
            {
                // Create a new block ID by formatting the block number into the
                // name template and Base64-encoding it, then remember it for commit.
                var blockId = Constants.BlockNameFormat;

                blockId = String.Format(CultureInfo.InvariantCulture, blockId, blockNumber);
                blockId = Convert.ToBase64String(Encoding.UTF8.GetBytes(blockId));
                blockList.Add(blockId);

                // Upload the block
                return(client.StageBlockInternal(
                           blockId,
                           chunk,
                           null,
                           blobAccessConditions?.LeaseAccessConditions,
                           progressHandler,
                           async,
                           cancellationToken));
            }

            // Commit a series of chunks
            Task <Response <BlobContentInfo> > CommitBlockListAsync(
                bool async,
                CancellationToken cancellation) =>
            client.CommitBlockListInternal(
                blockList,
                blobHttpHeaders,
                metadata,
                blobAccessConditions,
                async,
                cancellationToken);
        }
        // Example #11 (0 votes)
 public void ContructorWorksWithValidInput()
 {
     // The chunk size handed to the constructor must be exposed unchanged.
     using (var uploader = new ChunkedUploader(this.chunkSize)) {
         Assert.AreEqual(this.chunkSize, uploader.ChunkSize);
     }
 }
        // Example #12 (0 votes)
 public void ContructorWorksWithoutInput()
 {
     // Without an explicit chunk size the uploader must pick a positive default.
     using (var uploader = new ChunkedUploader()) {
         Assert.Greater(uploader.ChunkSize, 0);
     }
 }