Example #1
        /// <summary>
        /// Asynchronously dispatches a write operation.
        /// </summary>
        /// <returns>A task that represents the asynchronous write operation.</returns>
        private async Task DispatchWriteAsync()
        {
            if (this.internalBuffer.Length == 0)
            {
                return;
            }

            MultiBufferMemoryStream bufferToUpload = this.internalBuffer;

            this.internalBuffer = new MultiBufferMemoryStream(this.file.ServiceClient.BufferManager);
            bufferToUpload.Seek(0, SeekOrigin.Begin);

            string bufferMD5 = null;

            if (this.rangeMD5 != null)
            {
                bufferMD5 = this.rangeMD5.ComputeHash();
                this.rangeMD5.Dispose();
                this.rangeMD5 = new MD5Wrapper();
            }

            long offset = this.currentFileOffset;

            this.currentFileOffset += bufferToUpload.Length;
            await this.WriteRangeAsync(bufferToUpload, offset, bufferMD5).ConfigureAwait(false);
        }
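
The pattern here — detach the filled internal buffer, start a fresh one, rewind the detached stream, and upload it at the tracked offset — does not depend on MultiBufferMemoryStream itself. A minimal compilable sketch of the same idea, with MemoryStream standing in for MultiBufferMemoryStream and a stubbed UploadAsync standing in for WriteRangeAsync:
        // Sketch only; requires: using System.IO; using System.Threading.Tasks;
        internal sealed class BufferedWriter
        {
            private MemoryStream internalBuffer = new MemoryStream();
            private long currentOffset;

            public void Write(byte[] data) => this.internalBuffer.Write(data, 0, data.Length);

            public async Task FlushBufferAsync()
            {
                if (this.internalBuffer.Length == 0)
                {
                    return;
                }

                // Detach the filled buffer and start a fresh one so callers can keep writing.
                MemoryStream bufferToUpload = this.internalBuffer;
                this.internalBuffer = new MemoryStream();

                // Rewind so the upload reads from the start of the buffered data.
                bufferToUpload.Seek(0, SeekOrigin.Begin);

                long offset = this.currentOffset;
                this.currentOffset += bufferToUpload.Length;

                // Dispose the detached buffer once the upload is done (the SDK defers
                // this to the Write*Async methods, as noted in a later example).
                using (bufferToUpload)
                {
                    await this.UploadAsync(bufferToUpload, offset).ConfigureAwait(false);
                }
            }

            // Hypothetical stand-in for WriteRangeAsync / WriteBlockAsync / WritePagesAsync.
            private Task UploadAsync(Stream data, long offset) => Task.CompletedTask;
        }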
Example #2
        private RESTCommand<NullType> SetServicePropertiesImpl(ServiceProperties properties, TableRequestOptions requestOptions)
        {
            MultiBufferMemoryStream str = new MultiBufferMemoryStream(null /* bufferManager */, (int)(1 * Constants.KB));

            try
            {
                properties.WriteServiceProperties(str);
            }
            catch (InvalidOperationException invalidOpException)
            {
                throw new ArgumentException(invalidOpException.Message, "properties");
            }

            str.Seek(0, SeekOrigin.Begin);

            RESTCommand<NullType> retCmd = new RESTCommand<NullType>(this.Credentials, this.StorageUri);

            retCmd.SendStream           = str;
            retCmd.BuildRequestDelegate = TableHttpWebRequestFactory.SetServiceProperties;
            retCmd.RecoveryAction       = RecoveryActions.RewindStream;
            retCmd.SignRequest          = this.AuthenticationHandler.SignRequest;
            retCmd.ParseError           = StorageExtendedErrorInformation.ReadFromStreamUsingODataLib;
            retCmd.PreProcessResponse   =
                (cmd, resp, ex, ctx) => HttpResponseParsers.ProcessExpectedStatusCodeNoException(HttpStatusCode.Accepted, resp, NullType.Value, cmd, ex);

            requestOptions.ApplyToStorageCommand(retCmd);
            return retCmd;
        }
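
SetServicePropertiesImpl follows a serialize-rewind-send shape: write the request body into an in-memory stream, seek back to the start, and attach the stream to the outgoing request. A rough equivalent using only standard types, with HttpClient and StreamContent standing in for the RESTCommand pipeline (the endpoint and payload are placeholders):
        // Sketch only; requires: using System.IO; using System.Net.Http; using System.Text; using System.Threading.Tasks;
        internal static async Task SendBodyFromMemoryStreamAsync(HttpClient client)
        {
            var body = new MemoryStream();

            // leaveOpen: true keeps `body` usable after the writer is disposed; the SDK's
            // NonCloseableStream wrapper in the later table examples serves the same purpose.
            using (var writer = new StreamWriter(body, Encoding.UTF8, 1024, leaveOpen: true))
            {
                writer.Write("<StorageServiceProperties />"); // placeholder payload
            }

            body.Seek(0, SeekOrigin.Begin); // rewind so the request reads from the start

            using (var content = new StreamContent(body))
            {
                content.Headers.ContentLength = body.Length;

                // Placeholder endpoint; in the SDK the real URI and signing come from the RESTCommand setup.
                using (HttpResponseMessage response = await client.PutAsync(
                           "https://example.table.core.windows.net/?restype=service&comp=properties", content))
                {
                    response.EnsureSuccessStatusCode();
                }
            }
        }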
Example #3
        /// <summary>
        /// Asynchronously dispatches a write operation.
        /// </summary>
        /// <returns>A task that represents the asynchronous write operation.</returns>
        private async Task DispatchWriteAsync(TaskCompletionSource<bool> continuetcs, CancellationToken token)
        {
            if (this.internalBuffer.Length == 0)
            {
                if (continuetcs != null)
                {
                    Task.Run(() => continuetcs.TrySetResult(true));
                }
                return;
            }

            MultiBufferMemoryStream bufferToUpload = this.internalBuffer;

            this.internalBuffer = new MultiBufferMemoryStream(this.file.ServiceClient.BufferManager);
            bufferToUpload.Seek(0, SeekOrigin.Begin);

            string bufferMD5 = null;

            if (this.rangeMD5 != null)
            {
                bufferMD5 = this.rangeMD5.ComputeHash();
                this.rangeMD5.Dispose();
                this.rangeMD5 = new MD5Wrapper();
            }

            long offset = this.currentFileOffset;

            this.currentFileOffset += bufferToUpload.Length;
            await this.WriteRangeAsync(continuetcs, bufferToUpload, offset, bufferMD5, token).ConfigureAwait(false);
        }
Example #4
        /// <summary>
        /// Dispatches a write operation.
        /// </summary>
        /// <param name="asyncResult">The reference to the pending asynchronous request to finish.</param>
        private void DispatchWrite(StorageAsyncResult<NullType> asyncResult)
        {
            if (this.internalBuffer.Length == 0)
            {
                if (asyncResult != null)
                {
                    asyncResult.OnComplete(this.lastException);
                }

                return;
            }

            MultiBufferMemoryStream bufferToUpload = this.internalBuffer;

            this.internalBuffer = new MultiBufferMemoryStream(this.file.ServiceClient.BufferManager);
            bufferToUpload.Seek(0, SeekOrigin.Begin);

            string bufferMD5 = null;

            if (this.rangeMD5 != null)
            {
                bufferMD5 = this.rangeMD5.ComputeHash();
                this.rangeMD5.Dispose();
                this.rangeMD5 = new MD5Wrapper();
            }

            long offset = this.currentFileOffset;

            this.currentFileOffset += bufferToUpload.Length;
            this.WriteRange(bufferToUpload, offset, bufferMD5, asyncResult);
        }
Example #5
        /// <summary>
        /// Dispatches a write operation.
        /// </summary>
        /// <param name="asyncResult">The reference to the pending asynchronous request to finish.</param>
        private void DispatchWrite(StorageAsyncResult<NullType> asyncResult)
        {
            if (this.internalBuffer.Length == 0)
            {
                if (asyncResult != null)
                {
                    asyncResult.OnComplete(this.lastException);
                }

                return;
            }

            MultiBufferMemoryStream bufferToUpload = this.internalBuffer;

            this.internalBuffer = new MultiBufferMemoryStream(this.Blob.ServiceClient.BufferManager);
            bufferToUpload.Seek(0, SeekOrigin.Begin);

            string bufferMD5 = null;

            if (this.blockMD5 != null)
            {
                bufferMD5 = this.blockMD5.ComputeHash();
                this.blockMD5.Dispose();
                this.blockMD5 = new MD5Wrapper();
            }

            if (this.blockBlob != null)
            {
                string blockId = this.GetCurrentBlockId();
                this.blockList.Add(blockId);
                this.WriteBlock(bufferToUpload, blockId, bufferMD5, asyncResult);
            }
            else if (this.pageBlob != null)
            {
                if ((bufferToUpload.Length % Constants.PageSize) != 0)
                {
                    this.lastException = new IOException(SR.InvalidPageSize);
                    throw this.lastException;
                }

                long offset = this.currentBlobOffset;
                this.currentBlobOffset += bufferToUpload.Length;
                this.WritePages(bufferToUpload, offset, bufferMD5, asyncResult);
            }
            else
            {
                long offset = this.currentBlobOffset;
                this.currentBlobOffset += bufferToUpload.Length;

                // We cannot differentiate between max size condition failing only in the retry versus failing in the first attempt and retry.
                // So we will eliminate the latter and handle the former in the append operation callback.
                if (this.accessCondition.IfMaxSizeLessThanOrEqual.HasValue && this.currentBlobOffset > this.accessCondition.IfMaxSizeLessThanOrEqual.Value)
                {
                    this.lastException = new IOException(SR.InvalidBlockSize);
                    throw this.lastException;
                }

                this.WriteAppendBlock(bufferToUpload, offset, bufferMD5, asyncResult);
            }
        }
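
In the block blob branch above, each flushed buffer becomes a block identified by GetCurrentBlockId() and tracked in blockList; the blocks only become part of the blob once the list is committed. A hedged sketch of that flow with the classic blob client (PutBlockAsync / PutBlockListAsync are real Microsoft.Azure.Storage.Blob calls; the zero-padded counter used for the block IDs is an assumption, not the SDK's exact scheme):
        // Sketch only; requires: using System; using System.Collections.Generic; using System.IO;
        //                        using System.Text; using System.Threading.Tasks; using Microsoft.Azure.Storage.Blob;
        internal static async Task UploadInBlocksAsync(CloudBlockBlob blockBlob, Stream source, int blockSize)
        {
            var blockIds = new List<string>();
            var buffer = new byte[blockSize];
            int blockNumber = 0;
            int read;

            while ((read = await source.ReadAsync(buffer, 0, buffer.Length)) > 0)
            {
                // Block IDs must be base64 strings of equal length within a single blob;
                // a zero-padded counter is one common convention (assumed here).
                string blockId = Convert.ToBase64String(Encoding.UTF8.GetBytes(blockNumber.ToString("d6")));
                blockNumber++;

                using (var blockData = new MemoryStream(buffer, 0, read, writable: false))
                {
                    await blockBlob.PutBlockAsync(blockId, blockData, null);
                }

                blockIds.Add(blockId);
            }

            // Committing the block list is what makes the uploaded blocks visible as the blob's content.
            await blockBlob.PutBlockListAsync(blockIds);
        }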
Example #6
        private async Task DispatchWriteAsync(TaskCompletionSource<bool> continuetcs, CancellationToken token)
        {
            if (this.internalBuffer.Length == 0)
            {
                if (continuetcs != null)
                {
                    Task.Run(() => continuetcs.TrySetResult(true));
                }
                return;
            }

            MultiBufferMemoryStream bufferToUpload = this.internalBuffer;

            this.internalBuffer = new MultiBufferMemoryStream(this.Blob.ServiceClient.BufferManager);
            bufferToUpload.Seek(0, SeekOrigin.Begin);

            string bufferMD5 = null;

            if (this.blockMD5 != null)
            {
                bufferMD5 = this.blockMD5.ComputeHash();
                this.blockMD5.Dispose();
                this.blockMD5 = new MD5Wrapper();
            }

            if (this.blockBlob != null)
            {
                string blockId = this.GetCurrentBlockId();
                this.blockList.Add(blockId);
                await this.WriteBlockAsync(continuetcs, bufferToUpload, blockId, bufferMD5, token).ConfigureAwait(false);
            }
            else if (this.pageBlob != null)
            {
                if ((bufferToUpload.Length % Constants.PageSize) != 0)
                {
                    this.lastException = new IOException(SR.InvalidPageSize);
                    throw this.lastException;
                }

                long offset = this.currentBlobOffset;
                this.currentBlobOffset += bufferToUpload.Length;
                await this.WritePagesAsync(continuetcs, bufferToUpload, offset, bufferMD5, token).ConfigureAwait(false);
            }
            else
            {
                long offset = this.currentBlobOffset;
                this.currentBlobOffset += bufferToUpload.Length;

                // We cannot differentiate between max size condition failing only in the retry versus failing in the first attempt and retry.
                // So we will eliminate the latter and handle the former in the append operation callback.
                if (this.accessCondition.IfMaxSizeLessThanOrEqual.HasValue && this.currentBlobOffset > this.accessCondition.IfMaxSizeLessThanOrEqual.Value)
                {
                    this.lastException = new IOException(SR.InvalidBlockSize);
                    throw this.lastException;
                }

                await this.WriteAppendBlockAsync(continuetcs, bufferToUpload, offset, bufferMD5, token).ConfigureAwait(false);
            }
        }
Example #7
        /// <summary>
        /// Stores a new entity in the configured blob container.
        /// </summary>
        /// <param name="changes">The changes to write to storage.</param>
        /// <param name="cancellationToken">A cancellation token that can be used by other objects
        /// or threads to receive notice of cancellation.</param>
        /// <returns>A task that represents the work queued to execute.</returns>
        public async Task WriteAsync(IDictionary<string, object> changes, CancellationToken cancellationToken = default(CancellationToken))
        {
            if (changes == null)
            {
                throw new ArgumentNullException(nameof(changes));
            }

            var blobClient    = _storageAccount.CreateCloudBlobClient();
            var blobContainer = blobClient.GetContainerReference(_containerName);

            // this should only happen once - assuming this is a singleton
            if (Interlocked.CompareExchange(ref _checkforContainerExistance, 0, 1) == 1)
            {
                await blobContainer.CreateIfNotExistsAsync(cancellationToken).ConfigureAwait(false);
            }

            var blobRequestOptions = new BlobRequestOptions();
            var operationContext   = new OperationContext();

            foreach (var keyValuePair in changes)
            {
                var newValue  = keyValuePair.Value;
                var storeItem = newValue as IStoreItem;

                // "*" eTag in IStoreItem converts to null condition for AccessCondition
                var accessCondition = storeItem?.ETag != "*"
                    ? AccessCondition.GenerateIfMatchCondition(storeItem?.ETag)
                    : AccessCondition.GenerateEmptyCondition();

                var blobName      = GetBlobName(keyValuePair.Key);
                var blobReference = blobContainer.GetBlockBlobReference(blobName);

                try
                {
                    using (var memoryStream = new MultiBufferMemoryStream(blobReference.ServiceClient.BufferManager))
                        using (var streamWriter = new StreamWriter(memoryStream))
                        {
                            _jsonSerializer.Serialize(streamWriter, newValue);

                            await streamWriter.FlushAsync().ConfigureAwait(false);

                            memoryStream.Seek(0, SeekOrigin.Begin);
                            await blobReference.UploadFromStreamAsync(memoryStream, accessCondition, blobRequestOptions, operationContext, cancellationToken).ConfigureAwait(false);
                        }
                }
                catch (StorageException ex)
                    when (ex.RequestInformation.HttpStatusCode == (int)HttpStatusCode.BadRequest &&
                          ex.RequestInformation.ErrorCode == BlobErrorCodeStrings.InvalidBlockList)
                {
                    throw new InvalidOperationException(
                        $"An error occurred while trying to write an object. The underlying '{BlobErrorCodeStrings.InvalidBlockList}' error is commonly caused due to concurrently uploading an object larger than 128MB in size.",
                        ex);
                }
            }
        }
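
From the caller's side this WriteAsync is just a dictionary of keys and values; values that implement IStoreItem can carry an ETag, where "*" writes unconditionally and any other value becomes the If-Match access condition shown above. A minimal usage sketch against a hypothetical interface that mirrors the signature (the interface and key names are illustrative, not part of the original code):
        // Sketch only; requires: using System; using System.Collections.Generic;
        //                        using System.Threading; using System.Threading.Tasks;
        public interface IBlobKeyValueStore
        {
            Task WriteAsync(IDictionary<string, object> changes, CancellationToken cancellationToken = default(CancellationToken));
        }

        internal static async Task SaveStateAsync(IBlobKeyValueStore store)
        {
            var changes = new Dictionary<string, object>
            {
                ["conversation/1234"] = new { Turn = 7, LastUpdated = DateTimeOffset.UtcNow },
            };

            // Each entry is serialized to JSON and uploaded as one block blob named after its key.
            await store.WriteAsync(changes, CancellationToken.None);
        }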
Example #8
        internal static StorageRequestMessage BuildRequestForTableOperation<T>(RESTCommand<T> cmd, Uri uri, UriQueryBuilder builder, int? timeout, TableOperation operation, CloudTableClient client, HttpContent content, OperationContext ctx, TablePayloadFormat payloadFormat, ICanonicalizer canonicalizer, StorageCredentials credentials)
        {
            StorageRequestMessage msg = BuildRequestCore(uri, builder, operation.HttpMethod, timeout, content, ctx, canonicalizer, credentials);

            // Set Accept and Content-Type based on the payload format.
            SetAcceptHeaderForHttpWebRequest(msg, payloadFormat);
            Logger.LogInformational(ctx, SR.PayloadFormat, payloadFormat);

            msg.Headers.Add(Constants.HeaderConstants.DataServiceVersion, Constants.HeaderConstants.DataServiceVersionValue);

            if (operation.OperationType == TableOperationType.InsertOrMerge || operation.OperationType == TableOperationType.Merge)
            {
                // post tunnelling
                msg.Headers.Add(Constants.HeaderConstants.PostTunnelling, "MERGE");
            }

            // etag
            if (operation.OperationType == TableOperationType.Delete ||
                operation.OperationType == TableOperationType.Replace ||
                operation.OperationType == TableOperationType.Merge)
            {
                msg.Headers.Add(Constants.HeaderConstants.IfMatch, operation.ETag);
            }

            // Prefer header
            if (operation.OperationType == TableOperationType.Insert)
            {
                msg.Headers.Add(Constants.HeaderConstants.Prefer, operation.EchoContent ? Constants.HeaderConstants.PreferReturnContent : Constants.HeaderConstants.PreferReturnNoContent);
            }

            if (operation.OperationType == TableOperationType.Insert ||
                operation.OperationType == TableOperationType.Merge ||
                operation.OperationType == TableOperationType.InsertOrMerge ||
                operation.OperationType == TableOperationType.InsertOrReplace ||
                operation.OperationType == TableOperationType.Replace)
            {
                MultiBufferMemoryStream ms = new MultiBufferMemoryStream(client.BufferManager);
                using (JsonTextWriter jsonWriter = new JsonTextWriter(new StreamWriter(new NonCloseableStream(ms))))
                {
                    WriteEntityContent(operation, ctx, jsonWriter);
                }

                ms.Seek(0, SeekOrigin.Begin);
                msg.Content = new StreamContent(ms);
                msg.Content.Headers.ContentLength = ms.Length;
                if (!operation.HttpMethod.Equals("HEAD") && !operation.HttpMethod.Equals("GET"))
                {
                    SetContentTypeForHttpWebRequest(msg);
                }
                return msg;
            }

            return msg;
        }
Example #9
        /// <summary>
        /// Stores a new entity in the configured blob container.
        /// </summary>
        /// <param name="changes">The Dictionary of changes that are to be made.</param>
        /// <param name="cancellationToken">A cancellation token that can be used by other objects
        /// or threads to receive notice of cancellation.</param>
        /// <returns>A <see cref="Task"/> that represents the work queued to execute.</returns>
        public async Task WriteAsync(IDictionary<string, object> changes, CancellationToken cancellationToken = default(CancellationToken))
        {
            if (changes == null)
            {
                throw new ArgumentNullException(nameof(changes));
            }

            var blobContainer = await GetBlobContainer().ConfigureAwait(false);

            var blobRequestOptions = new BlobRequestOptions();
            var operationContext   = new OperationContext();

            await Task.WhenAll(
                changes.Select(async (keyValuePair) =>
            {
                var newValue  = keyValuePair.Value;
                var storeItem = newValue as IStoreItem;

                // "*" eTag in IStoreItem converts to null condition for AccessCondition
                var accessCondition = storeItem?.ETag == "*"
                        ? AccessCondition.GenerateEmptyCondition()
                        : AccessCondition.GenerateIfMatchCondition(storeItem?.ETag);

                var blobName      = GetBlobName(keyValuePair.Key);
                var blobReference = blobContainer.GetBlockBlobReference(blobName);

                try
                {
                    using (var memoryStream = new MultiBufferMemoryStream(blobReference.ServiceClient.BufferManager))
                        using (var streamWriter = new StreamWriter(memoryStream))
                        {
                            JsonSerializer.Serialize(streamWriter, newValue);
                            streamWriter.Flush();
                            memoryStream.Seek(0, SeekOrigin.Begin);
                            await blobReference.UploadFromStreamAsync(memoryStream, accessCondition, blobRequestOptions, operationContext).ConfigureAwait(false);
                        }
                }
                catch (StorageException ex)
                    when (ex.RequestInformation.HttpStatusCode == (int)HttpStatusCode.BadRequest &&
                          ex.RequestInformation.ErrorCode == BlobErrorCodeStrings.InvalidBlockList)
                {
                    throw new Exception(
                        $"An error occurred while trying to write an object. The underlying '{BlobErrorCodeStrings.InvalidBlockList}' error is commonly caused due to concurrently uploading an object larger than 128MB in size.",
                        ex);
                }
            })).ConfigureAwait(false);
        }
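
Unlike the sequential foreach in the earlier WriteAsync, this variant fans the per-key uploads out concurrently with Task.WhenAll over a Select of async lambdas. The shape of that pattern in isolation (DoWorkAsync is a hypothetical stand-in for the serialize-and-upload body):
        // Sketch only; requires: using System.Collections.Generic; using System.Linq; using System.Threading.Tasks;
        internal static Task ProcessAllAsync(IDictionary<string, object> changes)
        {
            // Every lambda starts immediately; the returned task completes when all of them
            // have finished, and awaiting it rethrows the first failure if any occur.
            return Task.WhenAll(changes.Select(async keyValuePair =>
            {
                await DoWorkAsync(keyValuePair.Key, keyValuePair.Value).ConfigureAwait(false);
            }));
        }

        // Hypothetical per-item operation.
        private static Task DoWorkAsync(string key, object value) => Task.CompletedTask;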
Example #10
        /// <summary>
        /// Asynchronously dispatches a write operation.
        /// </summary>
        /// <returns>A task that represents the asynchronous write operation.</returns>
        private async Task DispatchWriteAsync(TaskCompletionSource<bool> continuetcs, CancellationToken token)
        {
            if (this.internalBuffer.Length == 0)
            {
                if (continuetcs != null)
                {
                    Task.Run(() => continuetcs.TrySetResult(true));
                }
                return;
            }

            // bufferToUpload needs to be disposed, or we will leak memory
            //
            // Unfortunately, because of the async nature of the work, we cannot safely
            // put this in a using block, so the Write*Async methods must handle disposal.
            MultiBufferMemoryStream bufferToUpload = this.internalBuffer;

            this.internalBuffer = new MultiBufferMemoryStream(this.file.ServiceClient.BufferManager);
            bufferToUpload.Seek(0, SeekOrigin.Begin);

            Checksum bufferChecksum = Checksum.None;

            if (this.rangeChecksum != null)
            {
                bool computeCRC64 = false;
                bool computeMD5   = false;
                if (this.rangeChecksum.MD5 != null)
                {
                    bufferChecksum.MD5 = this.rangeChecksum.MD5.ComputeHash();
                    computeMD5         = true;
                }
                if (this.rangeChecksum.CRC64 != null)
                {
                    bufferChecksum.CRC64 = this.rangeChecksum.CRC64.ComputeHash();
                    computeCRC64         = true;
                }
                this.rangeChecksum.Dispose();
                this.rangeChecksum = new ChecksumWrapper(computeMD5, computeCRC64);
            }

            long offset = this.currentFileOffset;

            this.currentFileOffset += bufferToUpload.Length;
            await this.WriteRangeAsync(continuetcs, bufferToUpload, offset, bufferChecksum, token).ConfigureAwait(false);
        }
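
The ChecksumWrapper in this example accumulates hashes incrementally as data is buffered and is replaced with a fresh instance after every flush. With only the base class library, the MD5 half of that pattern can be sketched with IncrementalHash (CRC64 has no direct BCL equivalent on older frameworks, so it is left out here):
        // Sketch only; requires: using System; using System.Security.Cryptography;
        internal sealed class RollingMd5 : IDisposable
        {
            private readonly IncrementalHash md5 = IncrementalHash.CreateHash(HashAlgorithmName.MD5);

            // Call for every chunk appended to the internal buffer.
            public void Append(byte[] data, int offset, int count)
            {
                this.md5.AppendData(data, offset, count);
            }

            // Returns the base64 hash of everything appended so far and starts a new round,
            // mirroring the ComputeHash / new wrapper sequence in the examples.
            public string ComputeAndReset()
            {
                return Convert.ToBase64String(this.md5.GetHashAndReset());
            }

            public void Dispose()
            {
                this.md5.Dispose();
            }
        }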
Example #11
        /// <summary>
        /// Dispatches a write operation.
        /// </summary>
        /// <param name="asyncResult">The reference to the pending asynchronous request to finish.</param>
        private void DispatchWrite(StorageAsyncResult<NullType> asyncResult)
        {
            if (this.internalBuffer.Length == 0)
            {
                if (asyncResult != null)
                {
                    asyncResult.OnComplete(this.lastException);
                }

                return;
            }

            MultiBufferMemoryStream bufferToUpload = this.internalBuffer;

            this.internalBuffer = new MultiBufferMemoryStream(this.Blob.ServiceClient.BufferManager);
            bufferToUpload.Seek(0, SeekOrigin.Begin);

            string bufferMD5 = null;

            if (this.blockMD5 != null)
            {
                bufferMD5 = this.blockMD5.ComputeHash();
                this.blockMD5.Dispose();
                this.blockMD5 = new MD5Wrapper();
            }

            if (this.blockBlob != null)
            {
                string blockId = this.GetCurrentBlockId();
                this.blockList.Add(blockId);
                this.WriteBlock(bufferToUpload, blockId, bufferMD5, asyncResult);
            }
            else
            {
                if ((bufferToUpload.Length % Constants.PageSize) != 0)
                {
                    this.lastException = new IOException(SR.InvalidPageSize);
                    throw this.lastException;
                }

                long offset = this.currentPageOffset;
                this.currentPageOffset += bufferToUpload.Length;
                this.WritePages(bufferToUpload, offset, bufferMD5, asyncResult);
            }
        }
Example #12
        private async Task DispatchWriteAsync(TaskCompletionSource<bool> continuetcs, CancellationToken token)
        {
            if (this.internalBuffer.Length == 0)
            {
                if (continuetcs != null)
                {
                    Task.Run(() => continuetcs.TrySetResult(true));
                }
                return;
            }

            // bufferToUpload needs to be disposed, or we will leak memory
            //
            // Unfortunately, because of the async nature of the work, we cannot safely
            // put this in a using block, so the Write*Async methods must handle disposal.
            MultiBufferMemoryStream bufferToUpload = this.internalBuffer;

            this.internalBuffer = new MultiBufferMemoryStream(this.Blob.ServiceClient.BufferManager);
            bufferToUpload.Seek(0, SeekOrigin.Begin);

            Checksum bufferChecksum = Checksum.None;

            if (this.blockChecksum != null)
            {
                bool computeCRC64 = false;
                bool computeMD5   = false;
                if (this.blockChecksum.MD5 != null)
                {
                    bufferChecksum.MD5 = this.blockChecksum.MD5.ComputeHash();
                    computeMD5         = true;
                }
                if (this.blockChecksum.CRC64 != null)
                {
                    bufferChecksum.CRC64 = this.blockChecksum.CRC64.ComputeHash();
                    computeCRC64         = true;
                }
                this.blockChecksum.Dispose();
                this.blockChecksum = new ChecksumWrapper(computeMD5, computeCRC64);
            }

            if (this.blockBlob != null)
            {
                string blockId = this.GetCurrentBlockId();
                this.blockList.Add(blockId);
                await this.WriteBlockAsync(continuetcs, bufferToUpload, blockId, bufferChecksum, token).ConfigureAwait(false);
            }
            else if (this.pageBlob != null)
            {
                if ((bufferToUpload.Length % Constants.PageSize) != 0)
                {
                    this.lastException = new IOException(SR.InvalidPageSize);
                    throw this.lastException;
                }

                long offset = this.currentBlobOffset;
                this.currentBlobOffset += bufferToUpload.Length;
                await this.WritePagesAsync(continuetcs, bufferToUpload, offset, bufferChecksum, token).ConfigureAwait(false);
            }
            else
            {
                long offset = this.currentBlobOffset;
                this.currentBlobOffset += bufferToUpload.Length;

                // We cannot differentiate between max size condition failing only in the retry versus failing in the first attempt and retry.
                // So we will eliminate the latter and handle the former in the append operation callback.
                if (this.accessCondition.IfMaxSizeLessThanOrEqual.HasValue && this.currentBlobOffset > this.accessCondition.IfMaxSizeLessThanOrEqual.Value)
                {
                    this.lastException = new IOException(SR.InvalidBlockSize);
                    throw this.lastException;
                }

                await this.WriteAppendBlockAsync(continuetcs, bufferToUpload, offset, bufferChecksum, token).ConfigureAwait(false);
            }
        }
Example #13
        internal static StorageRequestMessage BuildRequestForTableBatchOperation<T>(RESTCommand<T> cmd, Uri uri, UriQueryBuilder builder, int? timeout, string tableName, TableBatchOperation batch, CloudTableClient client, HttpContent content, OperationContext ctx, TablePayloadFormat payloadFormat, ICanonicalizer canonicalizer, StorageCredentials credentials)
        {
            StorageRequestMessage msg = BuildRequestCore(NavigationHelper.AppendPathToSingleUri(uri, "$batch"), builder, HttpMethod.Post, timeout, content, ctx, canonicalizer, credentials);

            Logger.LogInformational(ctx, SR.PayloadFormat, payloadFormat);

            MultiBufferMemoryStream batchContentStream = new MultiBufferMemoryStream(client.BufferManager);

            string batchID     = Guid.NewGuid().ToString();
            string changesetID = Guid.NewGuid().ToString();

            using (StreamWriter contentWriter = new StreamWriter(new NonCloseableStream(batchContentStream)))
            {
                msg.Headers.Add(Constants.HeaderConstants.DataServiceVersion, Constants.HeaderConstants.DataServiceVersionValue);

                string batchSeparator     = Constants.BatchSeparator + batchID;
                string changesetSeparator = Constants.ChangesetSeparator + changesetID;
                string acceptHeader       = "Accept: ";

                switch (payloadFormat)
                {
                case TablePayloadFormat.Json:
                    acceptHeader = acceptHeader + Constants.JsonLightAcceptHeaderValue;
                    break;

                case TablePayloadFormat.JsonFullMetadata:
                    acceptHeader = acceptHeader + Constants.JsonFullMetadataAcceptHeaderValue;
                    break;

                case TablePayloadFormat.JsonNoMetadata:
                    acceptHeader = acceptHeader + Constants.JsonNoMetadataAcceptHeaderValue;
                    break;
                }

                contentWriter.WriteLine(batchSeparator);

                bool isQuery = batch.Count == 1 && batch[0].OperationType == TableOperationType.Retrieve;

                // Query operations should not be inside changeset in payload
                if (!isQuery)
                {
                    // Start Operation
                    contentWriter.WriteLine(Constants.ChangesetBoundaryMarker + changesetID);
                    contentWriter.WriteLine();
                }

                foreach (TableOperation operation in batch)
                {
                    string httpMethod = operation.OperationType == TableOperationType.Merge || operation.OperationType == TableOperationType.InsertOrMerge ? "MERGE" : operation.HttpMethod.Method;

                    if (!isQuery)
                    {
                        contentWriter.WriteLine(changesetSeparator);
                    }

                    contentWriter.WriteLine(Constants.ContentTypeApplicationHttp);
                    contentWriter.WriteLine(Constants.ContentTransferEncodingBinary);
                    contentWriter.WriteLine();

                    string tableURI = Uri.EscapeUriString(operation.GenerateRequestURI(uri, tableName).ToString());

                    // "EscapeUriString" is almost exactly what we need, except that it contains special logic for
                    // the percent sign, which results in an off-by-one error in the number of times "%" is encoded.
                    // This corrects for that.
                    tableURI = tableURI.Replace(@"%25", @"%");

                    contentWriter.WriteLine(httpMethod + " " + tableURI + " " + Constants.HTTP1_1);
                    contentWriter.WriteLine(acceptHeader);
                    contentWriter.WriteLine(Constants.ContentTypeApplicationJson);

                    if (operation.OperationType == TableOperationType.Insert)
                    {
                        contentWriter.WriteLine(Constants.HeaderConstants.Prefer + @": " + (operation.EchoContent ? Constants.HeaderConstants.PreferReturnContent : Constants.HeaderConstants.PreferReturnNoContent));
                    }

                    contentWriter.WriteLine(Constants.HeaderConstants.DataServiceVersion + ": " + Constants.HeaderConstants.DataServiceVersionValue);

                    // etag
                    if (operation.OperationType == TableOperationType.Delete ||
                        operation.OperationType == TableOperationType.Replace ||
                        operation.OperationType == TableOperationType.Merge)
                    {
                        contentWriter.WriteLine(Constants.HeaderConstants.IfMatch + @": " + operation.ETag);
                    }

                    contentWriter.WriteLine();

                    if (operation.OperationType != TableOperationType.Delete && operation.OperationType != TableOperationType.Retrieve)
                    {
                        using (JsonTextWriter jsonWriter = new JsonTextWriter(contentWriter))
                        {
                            jsonWriter.CloseOutput = false;
                            WriteEntityContent(operation, ctx, jsonWriter);
                        }
                        contentWriter.WriteLine();
                    }
                }

                if (!isQuery)
                {
                    contentWriter.WriteLine(changesetSeparator + "--");
                }

                contentWriter.WriteLine(batchSeparator + "--");
            }

            batchContentStream.Seek(0, SeekOrigin.Begin);
            msg.Content = new StreamContent(batchContentStream);
            msg.Content.Headers.ContentLength = batchContentStream.Length;
            msg.Content.Headers.ContentType   = System.Net.Http.Headers.MediaTypeHeaderValue.Parse(Constants.BatchBoundaryMarker + batchID);//new System.Net.Http.Headers.MediaTypeHeaderValue(Constants.BatchBoundaryMarker + batchID);

            return msg;
        }
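
For orientation, the payload this method assembles is a standard OData multipart/mixed batch: one batch boundary wrapping one changeset, with each operation serialized as an embedded HTTP request. Roughly, for a single insert (boundaries, account, table, and entity are illustrative, and the exact header set follows the WriteLine calls above):
        --batch_a1b2c3d4-...
        Content-Type: multipart/mixed; boundary=changeset_e5f6a7b8-...

        --changeset_e5f6a7b8-...
        Content-Type: application/http
        Content-Transfer-Encoding: binary

        POST https://myaccount.table.core.windows.net/MyTable HTTP/1.1
        Accept: application/json;odata=minimalmetadata
        Content-Type: application/json
        Prefer: return-no-content
        DataServiceVersion: 3.0;

        {"PartitionKey":"pk1","RowKey":"rk1","Value":"v1"}

        --changeset_e5f6a7b8-...--
        --batch_a1b2c3d4-...--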
Example #14
        internal static Tuple<HttpWebRequest, Stream> BuildRequestForTableOperation(Uri uri, UriQueryBuilder builder, IBufferManager bufferManager, int? timeout, TableOperation operation, bool useVersionHeader, OperationContext ctx, TableRequestOptions options)
        {
            HttpWebRequest msg = BuildRequestCore(uri, builder, operation.HttpMethod, timeout, useVersionHeader, ctx);

            TablePayloadFormat payloadFormat = options.PayloadFormat.Value;

            // Set Accept and Content-Type based on the payload format.
            SetAcceptHeaderForHttpWebRequest(msg, payloadFormat);
            Logger.LogInformational(ctx, SR.PayloadFormat, payloadFormat);

            msg.Headers.Add(Constants.HeaderConstants.DataServiceVersion, Constants.HeaderConstants.DataServiceVersionValue);
            if (operation.HttpMethod != "HEAD" && operation.HttpMethod != "GET")
            {
                msg.ContentType = Constants.JsonContentTypeHeaderValue;
            }

            if (operation.OperationType == TableOperationType.InsertOrMerge || operation.OperationType == TableOperationType.Merge)
            {
                // Client-side encryption is not supported on merge requests.
                // This is because we maintain the list of encrypted properties as a property on the entity, and we can't update this
                // properly for merge operations.
                options.AssertNoEncryptionPolicyOrStrictMode();

                // post tunnelling
                msg.Headers.Add(Constants.HeaderConstants.PostTunnelling, "MERGE");
            }

            if (operation.OperationType == TableOperationType.RotateEncryptionKey)
            {
                // post tunnelling
                msg.Headers.Add(Constants.HeaderConstants.PostTunnelling, "MERGE");
            }

            // etag
            if (operation.OperationType == TableOperationType.Delete ||
                operation.OperationType == TableOperationType.Replace ||
                operation.OperationType == TableOperationType.Merge ||
                operation.OperationType == TableOperationType.RotateEncryptionKey)
            {
                if (operation.ETag != null)
                {
                    msg.Headers.Add(Constants.HeaderConstants.IfMatch, operation.ETag);
                }
            }

            // Prefer header
            if (operation.OperationType == TableOperationType.Insert)
            {
                msg.Headers.Add(Constants.HeaderConstants.Prefer, operation.EchoContent ? Constants.HeaderConstants.PreferReturnContent : Constants.HeaderConstants.PreferReturnNoContent);
            }

            if (operation.OperationType == TableOperationType.Insert ||
                operation.OperationType == TableOperationType.Merge ||
                operation.OperationType == TableOperationType.InsertOrMerge ||
                operation.OperationType == TableOperationType.InsertOrReplace ||
                operation.OperationType == TableOperationType.Replace ||
                operation.OperationType == TableOperationType.RotateEncryptionKey)
            {
                MultiBufferMemoryStream ms = new MultiBufferMemoryStream(bufferManager);
                using (JsonTextWriter jsonWriter = new JsonTextWriter(new StreamWriter(new NonCloseableStream(ms))))
                {
                    WriteEntityContent(operation, ctx, options, jsonWriter);
                }

                ms.Seek(0, SeekOrigin.Begin);
                msg.ContentLength = ms.Length;
                return new Tuple<HttpWebRequest, Stream>(msg, ms);
            }

            return new Tuple<HttpWebRequest, Stream>(msg, null);
        }
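
These HttpWebRequest overloads return the body as a separate Stream in the Tuple rather than writing it into the request, leaving that step to the caller. A hedged sketch of what that caller-side step presumably looks like:
        // Sketch only; requires: using System; using System.IO; using System.Net; using System.Threading.Tasks;
        internal static async Task SendAsync(Tuple<HttpWebRequest, Stream> request)
        {
            HttpWebRequest msg = request.Item1;
            Stream body = request.Item2;

            if (body != null)
            {
                using (body)
                using (Stream requestStream = await msg.GetRequestStreamAsync())
                {
                    // ContentLength was already set from the buffered stream's length.
                    await body.CopyToAsync(requestStream);
                }
            }

            using (var response = (HttpWebResponse)await msg.GetResponseAsync())
            {
                // Response handling omitted; the point is copying the prepared stream into the request.
            }
        }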
Example #15
        internal static Tuple<HttpWebRequest, Stream> BuildRequestForTableBatchOperation(Uri uri, UriQueryBuilder builder, IBufferManager bufferManager, int? timeout, string tableName, TableBatchOperation batch, bool useVersionHeader, OperationContext ctx, TableRequestOptions options)
        {
            HttpWebRequest     msg           = BuildRequestCore(NavigationHelper.AppendPathToSingleUri(uri, "$batch"), builder, "POST", timeout, useVersionHeader, ctx);
            TablePayloadFormat payloadFormat = options.PayloadFormat.Value;

            Logger.LogInformational(ctx, SR.PayloadFormat, payloadFormat);

            MultiBufferMemoryStream batchContentStream = new MultiBufferMemoryStream(bufferManager);

            using (StreamWriter contentWriter = new StreamWriter(new NonCloseableStream(batchContentStream)))
            {
                string batchID     = Guid.NewGuid().ToString();
                string changesetID = Guid.NewGuid().ToString();

                msg.Headers.Add(Constants.HeaderConstants.DataServiceVersion, Constants.HeaderConstants.DataServiceVersionValue);
                msg.ContentType = Constants.BatchBoundaryMarker + batchID;

                string batchSeparator     = Constants.BatchSeparator + batchID;
                string changesetSeparator = Constants.ChangesetSeparator + changesetID;
                string acceptHeader       = "Accept: ";

                switch (payloadFormat)
                {
                case TablePayloadFormat.Json:
                    acceptHeader = acceptHeader + Constants.JsonLightAcceptHeaderValue;
                    break;

                case TablePayloadFormat.JsonFullMetadata:
                    acceptHeader = acceptHeader + Constants.JsonFullMetadataAcceptHeaderValue;
                    break;

                case TablePayloadFormat.JsonNoMetadata:
                    acceptHeader = acceptHeader + Constants.JsonNoMetadataAcceptHeaderValue;
                    break;
                }

                contentWriter.WriteLine(batchSeparator);

                bool isQuery = batch.Count == 1 && batch[0].OperationType == TableOperationType.Retrieve;

                // Query operations should not be inside changeset in payload
                if (!isQuery)
                {
                    // Start Operation
                    contentWriter.WriteLine(Constants.ChangesetBoundaryMarker + changesetID);
                    contentWriter.WriteLine();
                }

                foreach (TableOperation operation in batch)
                {
                    string httpMethod = operation.HttpMethod;
                    if (operation.OperationType == TableOperationType.Merge || operation.OperationType == TableOperationType.InsertOrMerge)
                    {
                        options.AssertNoEncryptionPolicyOrStrictMode();
                        httpMethod = "MERGE";
                    }

                    if (operation.OperationType == TableOperationType.RotateEncryptionKey)
                    {
                        httpMethod = "MERGE";
                    }

                    if (!isQuery)
                    {
                        contentWriter.WriteLine(changesetSeparator);
                    }

                    contentWriter.WriteLine(Constants.ContentTypeApplicationHttp);
                    contentWriter.WriteLine(Constants.ContentTransferEncodingBinary);
                    contentWriter.WriteLine();

                    string tableURI = Uri.EscapeUriString(operation.GenerateRequestURI(uri, tableName).ToString());

                    // "EscapeUriString" is almost exactly what we need, except that it contains special logic for
                    // the percent sign, which results in an off-by-one error in the number of times "%" is encoded.
                    // This corrects for that.
                    tableURI = tableURI.Replace(@"%25", @"%");

                    contentWriter.WriteLine(httpMethod + " " + tableURI + " " + Constants.HTTP1_1);
                    contentWriter.WriteLine(acceptHeader);
                    contentWriter.WriteLine(Constants.ContentTypeApplicationJson);

                    if (operation.OperationType == TableOperationType.Insert)
                    {
                        contentWriter.WriteLine(Constants.HeaderConstants.Prefer + @": " + (operation.EchoContent ? Constants.HeaderConstants.PreferReturnContent : Constants.HeaderConstants.PreferReturnNoContent));
                    }

                    contentWriter.WriteLine(Constants.HeaderConstants.DataServiceVersion + ": " + Constants.HeaderConstants.DataServiceVersionValue);

                    // etag
                    if (operation.OperationType == TableOperationType.Delete ||
                        operation.OperationType == TableOperationType.Replace ||
                        operation.OperationType == TableOperationType.Merge ||
                        operation.OperationType == TableOperationType.RotateEncryptionKey)
                    {
                        contentWriter.WriteLine(Constants.HeaderConstants.IfMatch + @": " + operation.ETag);
                    }

                    contentWriter.WriteLine();

                    if (operation.OperationType != TableOperationType.Delete && operation.OperationType != TableOperationType.Retrieve)
                    {
                        using (JsonTextWriter jsonWriter = new JsonTextWriter(contentWriter))
                        {
                            jsonWriter.CloseOutput = false;
                            WriteEntityContent(operation, ctx, options, jsonWriter);
                        }
                        contentWriter.WriteLine();
                    }
                }

                if (!isQuery)
                {
                    contentWriter.WriteLine(changesetSeparator + "--");
                }

                contentWriter.WriteLine(batchSeparator + "--");
            }

            batchContentStream.Seek(0, SeekOrigin.Begin);
            msg.ContentLength = batchContentStream.Length;

            return new Tuple<HttpWebRequest, Stream>(msg, batchContentStream);
        }