/// <summary>
        /// Trains a model from a collection of custom forms in a blob storage container.
        /// </summary>
        /// <param name="trainingFilesUri">An externally accessible Azure storage blob container Uri.
        /// For more information see <a href="https://docs.microsoft.com/azure/cognitive-services/form-recognizer/build-training-data-set#upload-your-training-data"/>.</param>
        /// <param name="useTrainingLabels">If <c>true</c>, use a label file created in the &lt;link-to-label-tool-doc&gt; to provide training-time labels for training a model. If <c>false</c>, the model will be trained from forms only.</param>
        /// <param name="trainingOptions">A set of options available for configuring the training request.</param>
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> controlling the request lifetime.</param>
        /// <returns>
        /// <para>A <see cref="TrainingOperation"/> to wait on this long-running operation. Its <see cref="TrainingOperation.Value"/> upon successful
        /// completion will contain metadata about the trained model.</para>
        /// <para>Even if training fails, a model is created in the Form Recognizer account with an "invalid" status.
        /// A <see cref="RequestFailedException"/> will be raised containing the modelId to access this invalid model.</para>
        /// </returns>
        public virtual TrainingOperation StartTraining(Uri trainingFilesUri, bool useTrainingLabels, TrainingOptions trainingOptions = default, CancellationToken cancellationToken = default)
        {
            Argument.AssertNotNull(trainingFilesUri, nameof(trainingFilesUri));
            trainingOptions ??= new TrainingOptions();

            using DiagnosticScope scope = Diagnostics.CreateScope($"{nameof(FormTrainingClient)}.{nameof(StartTraining)}");
            scope.Start();

            try
            {
                var trainRequest = new TrainRequest(trainingFilesUri.AbsoluteUri)
                {
                    SourceFilter = trainingOptions.TrainingFileFilter,
                    UseLabelFile = useTrainingLabels
                };

                // Forward the caller's cancellation token to the service call.
                ResponseWithHeaders<ServiceTrainCustomModelAsyncHeaders> response = ServiceClient.TrainCustomModelAsync(trainRequest, cancellationToken);
                return new TrainingOperation(response.Headers.Location, ServiceClient, Diagnostics);
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
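
// Hypothetical usage sketch (not part of the source above): train a custom model
// and wait for the long-running operation to finish. The endpoint, API key, and
// SAS URI are placeholders you must supply; assumes an async context.
var trainingClient = new FormTrainingClient(new Uri("<endpoint>"), new AzureKeyCredential("<api-key>"));
TrainingOperation trainingOperation = trainingClient.StartTraining(new Uri("<sas-uri-to-training-container>"), useTrainingLabels: false);
Response<CustomFormModel> trained = await trainingOperation.WaitForCompletionAsync();
Console.WriteLine($"Model {trained.Value.ModelId} completed with status {trained.Value.Status}");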
        /// <summary>
        /// Retrieves a <see cref="CryptographyClient"/> capable of performing cryptographic operations with the key represented by the specfiied <paramref name="keyId"/>.
        /// </summary>
        /// <param name="keyId">The key idenitifier of the key used by the created <see cref="CryptographyClient"/>.</param>
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> controlling the request lifetime.</param>
        /// <returns>A new <see cref="CryptographyClient"/> capable of performing cryptographic operations with the key represented by the specfiied <paramref name="keyId"/>.</returns>
        /// <exception cref="ArgumentNullException"><paramref name="keyId"/> is null.</exception>
        public virtual CryptographyClient Resolve(Uri keyId, CancellationToken cancellationToken = default)
        {
            Argument.AssertNotNull(keyId, nameof(keyId));

            using DiagnosticScope scope = _clientDiagnostics.CreateScope("Azure.Security.KeyVault.Keys.Cryptography.KeyResolver.Resolve");
            scope.AddAttribute("key", keyId);
            scope.Start();

            try
            {
                KeyVaultKey key = GetKey(keyId, cancellationToken);

                KeyVaultPipeline pipeline = new KeyVaultPipeline(keyId, _apiVersion, _pipeline, _clientDiagnostics);

                return (key != null) ? new CryptographyClient(key, pipeline) : new CryptographyClient(keyId, pipeline);
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
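
// Hypothetical usage sketch: resolve a Key Vault key URI to a CryptographyClient
// and wrap a locally generated AES key. The vault and key names are placeholders;
// assumes Azure.Identity's DefaultAzureCredential can authenticate.
var resolver = new KeyResolver(new DefaultAzureCredential());
CryptographyClient cryptoClient = resolver.Resolve(new Uri("https://<vault-name>.vault.azure.net/keys/<key-name>"));
byte[] aesKey = RandomNumberGenerator.GetBytes(32);
WrapResult wrapped = cryptoClient.WrapKey(KeyWrapAlgorithm.RsaOaep256, aesKey);
Console.WriteLine($"Wrapped key with {wrapped.Algorithm}, key ID {wrapped.KeyId}");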
 /// <summary>
 /// Gets a list of tables from the storage account.
 /// </summary>
 /// <param name="filter">Returns only tables that satisfy the specified filter.</param>
 /// <param name="maxPerPage">The maximum number of tables that will be returned per page.</param>
 /// <param name="cancellationToken">A <see cref="CancellationToken"/> controlling the request lifetime.</param>
 /// <returns>An <see cref="AsyncPageable{T}"/> of <see cref="TableItem"/>s describing the tables in the storage account.</returns>
 public virtual AsyncPageable<TableItem> GetTablesAsync(string filter = null, int? maxPerPage = null, CancellationToken cancellationToken = default)
 {
     using DiagnosticScope scope = _diagnostics.CreateScope($"{nameof(TableServiceClient)}.{nameof(GetTables)}");
     scope.Start();
     try
     {
         return PageableHelpers.CreateAsyncEnumerable(async _ =>
         {
             var response = await _tableOperations.QueryAsync(
                 null,
                 null,
                 new QueryOptions { Filter = filter, Select = null, Top = maxPerPage, Format = _format },
                 cancellationToken).ConfigureAwait(false);
             return Page.FromValues(response.Value.Value, response.Headers.XMsContinuationNextTableName, response.GetRawResponse());
         }, async (nextLink, _) =>
         {
             var response = await _tableOperations.QueryAsync(
                 null,
                 nextTableName: nextLink,
                 new QueryOptions { Filter = filter, Select = null, Top = maxPerPage, Format = _format },
                 cancellationToken).ConfigureAwait(false);
             return Page.FromValues(response.Value.Value, response.Headers.XMsContinuationNextTableName, response.GetRawResponse());
         });
     }
     catch (Exception ex)
     {
         scope.Failed(ex);
         throw;
     }
 }
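
// Hypothetical usage sketch: enumerate tables with the async pageable. The
// connection string and filter are placeholders; assumes an async context.
var tableService = new TableServiceClient("<connection-string>");
await foreach (TableItem table in tableService.GetTablesAsync(filter: "TableName ge 'orders'"))
{
    Console.WriteLine(table.Name);
}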
        /// <summary>
        /// Retrieves a <see cref="CryptographyClient"/> capable of performing cryptographic operations with the key represented by the specfiied <paramref name="keyId"/>.
        /// </summary>
        /// <param name="keyId">The key idenitifier of the key used by the created <see cref="CryptographyClient"/>.</param>
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> controlling the request lifetime.</param>
        /// <returns>A new <see cref="CryptographyClient"/> capable of performing cryptographic operations with the key represented by the specfiied <paramref name="keyId"/>.</returns>
        /// <exception cref="ArgumentNullException"><paramref name="keyId"/> is null.</exception>
        public virtual async Task <CryptographyClient> ResolveAsync(Uri keyId, CancellationToken cancellationToken = default)
        {
            Argument.AssertNotNull(keyId, nameof(keyId));

            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(KeyResolver)}.{nameof(Resolve)}");
            scope.AddAttribute("key", keyId);
            scope.Start();

            try
            {
                KeyVaultKey key = await GetKeyAsync(keyId, cancellationToken).ConfigureAwait(false);

                KeyVaultPipeline pipeline = new KeyVaultPipeline(keyId, _apiVersion, _pipeline, _clientDiagnostics);

                return (key != null) ? new CryptographyClient(key, pipeline) : new CryptographyClient(keyId, pipeline);
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
        public virtual Response<WrapResult> WrapKey(KeyWrapAlgorithm algorithm, byte[] key, CancellationToken cancellationToken = default)
        {
            var parameters = new KeyWrapParameters()
            {
                Algorithm = algorithm.ToString(),
                Key       = key
            };

            using DiagnosticScope scope = Pipeline.CreateScope($"{nameof(RemoteCryptographyClient)}.{nameof(WrapKey)}");
            scope.AddAttribute("key", _keyId);
            scope.Start();

            try
            {
                return Pipeline.SendRequest(RequestMethod.Post, parameters, () => new WrapResult { Algorithm = algorithm }, cancellationToken, "/wrapKey");
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
        /// <summary>
        /// Creates a composed model from a collection of existing models.
        /// A composed model allows multiple models to be called with a single model ID. When a document is
        /// submitted to be analyzed with a composed model ID, a classification step is first performed to
        /// route it to the correct custom model.
        /// </summary>
        /// <param name="modelIds">List of model ids to use in the composed model.</param>
        /// <param name="modelId">A unique ID for your composed model. If not specified, a model ID will be created for you.</param>
        /// <param name="modelDescription">An optional description to add to the model.</param>
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> controlling the request lifetime.</param>
        /// <returns>
        /// <para>A <see cref="BuildModelOperation"/> to wait on this long-running operation. Its Value upon successful
        /// completion will contain metadata about the composed model.</para>
        /// </returns>
        public virtual BuildModelOperation StartCreateComposedModel(IEnumerable<string> modelIds, string modelId = default, string modelDescription = default, CancellationToken cancellationToken = default)
        {
            Argument.AssertNotNull(modelIds, nameof(modelIds));

            using DiagnosticScope scope = Diagnostics.CreateScope($"{nameof(DocumentModelAdministrationClient)}.{nameof(StartCreateComposedModel)}");
            scope.Start();

            try
            {
                modelId ??= Guid.NewGuid().ToString();
                var composeRequest = new ComposeDocumentModelRequest(modelId, ConvertToComponentModelInfo(modelIds))
                {
                    Description = modelDescription
                };

                var response = ServiceClient.ComposeDocumentModel(composeRequest, cancellationToken);
                return new BuildModelOperation(response.Headers.OperationLocation, response.GetRawResponse(), ServiceClient, Diagnostics);
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
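
// Hypothetical usage sketch: compose two previously built custom models into a
// single callable model ID. Endpoint, key, and model IDs are placeholders;
// assumes an async context.
var adminClient = new DocumentModelAdministrationClient(new Uri("<endpoint>"), new AzureKeyCredential("<api-key>"));
BuildModelOperation composeOperation = adminClient.StartCreateComposedModel(
    new[] { "<model-id-1>", "<model-id-2>" },
    modelDescription: "Composed invoice and receipt model");
await composeOperation.WaitForCompletionAsync();
Console.WriteLine($"Composed model ID: {composeOperation.Value.ModelId}");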
        public virtual Response<UnwrapResult> UnwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, CancellationToken cancellationToken = default)
        {
            var parameters = new KeyWrapParameters()
            {
                Algorithm = algorithm.ToString(),
                Key       = encryptedKey
            };

            using DiagnosticScope scope = Pipeline.CreateScope("Azure.Security.KeyVault.Keys.Cryptography.RemoteCryptographyClient.UnwrapKey");
            scope.AddAttribute("key", _keyId);
            scope.Start();

            try
            {
                return Pipeline.SendRequest(RequestMethod.Post, parameters, () => new UnwrapResult { Algorithm = algorithm }, cancellationToken, "/unwrapKey");
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
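
// Hypothetical round-trip sketch: wrap a symmetric key, then unwrap it and check
// that the bytes survive. Assumes "client" is a RemoteCryptographyClient (the
// type whose WrapKey/UnwrapKey are shown above) bound to an RSA Key Vault key.
byte[] symmetricKey = RandomNumberGenerator.GetBytes(32);
Response<WrapResult> wrapResponse = client.WrapKey(KeyWrapAlgorithm.RsaOaep256, symmetricKey);
Response<UnwrapResult> unwrapResponse = client.UnwrapKey(KeyWrapAlgorithm.RsaOaep256, wrapResponse.Value.EncryptedKey);
Debug.Assert(symmetricKey.AsSpan().SequenceEqual(unwrapResponse.Value.Key));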
        public virtual Response<SignResult> Sign(SignatureAlgorithm algorithm, byte[] digest, CancellationToken cancellationToken = default)
        {
            var parameters = new KeySignParameters
            {
                Algorithm = algorithm.ToString(),
                Digest    = digest
            };

            using DiagnosticScope scope = Pipeline.CreateScope("Azure.Security.KeyVault.Keys.Cryptography.RemoteCryptographyClient.Sign");
            scope.AddAttribute("key", _keyId);
            scope.Start();

            try
            {
                return Pipeline.SendRequest(RequestMethod.Post, parameters, () => new SignResult { Algorithm = algorithm }, cancellationToken, "/sign");
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
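
// Hypothetical usage sketch: Sign operates on a precomputed digest, so hash the
// data first with the algorithm matching the signature algorithm (SHA-256 for
// RS256 here). Assumes "client" is bound to an RSA Key Vault key.
byte[] data = Encoding.UTF8.GetBytes("message to sign");
byte[] digest = SHA256.HashData(data);
Response<SignResult> signResponse = client.Sign(SignatureAlgorithm.RS256, digest);
Console.WriteLine($"Signature ({signResponse.Value.Algorithm}): {Convert.ToBase64String(signResponse.Value.Signature)}");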
        /// <summary>
        /// Recognizes layout elements from one or more passed-in forms.
        /// </summary>
        /// <param name="form">The stream containing one or more forms to recognize elements from.</param>
        /// <param name="recognizeContentOptions">A set of options available for configuring the recognize request.</param>
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> controlling the request lifetime.</param>
        /// <returns>A <see cref="RecognizeContentOperation"/> to wait on this long-running operation.  Its <see cref="RecognizeContentOperation.Value"/> upon successful
        /// completion will contain layout elements extracted from the form.</returns>
        public virtual RecognizeContentOperation StartRecognizeContent(Stream form, RecognizeContentOptions recognizeContentOptions = default, CancellationToken cancellationToken = default)
        {
            Argument.AssertNotNull(form, nameof(form));

            recognizeContentOptions ??= new RecognizeContentOptions();

            using DiagnosticScope scope = Diagnostics.CreateScope($"{nameof(FormRecognizerClient)}.{nameof(StartRecognizeContent)}");
            scope.Start();

            try
            {
                FormContentType contentType = recognizeContentOptions.ContentType ?? DetectContentType(form, nameof(form));

                Response response = ServiceClient.AnalyzeLayoutAsync(contentType, form, cancellationToken);
                string   location = ClientCommon.GetResponseHeader(response.Headers, Constants.OperationLocationHeader);

                return new RecognizeContentOperation(ServiceClient, Diagnostics, location);
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
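
// Hypothetical usage sketch: recognize layout elements from a local PDF. The
// endpoint, key, and file path are placeholders; assumes an async context.
var recognizerClient = new FormRecognizerClient(new Uri("<endpoint>"), new AzureKeyCredential("<api-key>"));
using FileStream formStream = File.OpenRead("<path-to-form>.pdf");
RecognizeContentOperation contentOperation = recognizerClient.StartRecognizeContent(formStream);
FormPageCollection formPages = (await contentOperation.WaitForCompletionAsync()).Value;
Console.WriteLine($"Recognized {formPages.Count} page(s)");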
        /// <summary>
        /// Generate authorization for copying a custom model into the target Form Recognizer resource.
        /// </summary>
        /// <param name="modelId">A unique ID for your copied model. If not specified, a model ID will be created for you.</param>
        /// <param name="modelDescription">An optional description to add to the model.</param>
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> controlling the request lifetime.</param>
        /// <returns>A <see cref="Response{T}"/> representing the result of the operation. It can be cast to <see cref="CopyAuthorization"/> containing
        /// the authorization information necessary to copy a custom model into a target Form Recognizer resource.</returns>
        public virtual async Task<Response<CopyAuthorization>> GetCopyAuthorizationAsync(string modelId = default, string modelDescription = default, CancellationToken cancellationToken = default)
        {
            modelId ??= Guid.NewGuid().ToString();

            var request = new AuthorizeCopyRequest(modelId)
            {
                Description = modelDescription
            };

            using DiagnosticScope scope = Diagnostics.CreateScope($"{nameof(DocumentModelAdministrationClient)}.{nameof(GetCopyAuthorization)}");
            scope.Start();

            try
            {
                var response = await ServiceClient.AuthorizeCopyDocumentModelAsync(request, cancellationToken).ConfigureAwait(false);

                return Response.FromValue(response.Value, response.GetRawResponse());
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
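
// Hypothetical usage sketch: in a cross-resource copy, the *target* resource
// issues the authorization, which is then handed to the copy operation started
// against the source resource. Endpoint and key are placeholders.
var targetClient = new DocumentModelAdministrationClient(new Uri("<target-endpoint>"), new AzureKeyCredential("<target-api-key>"));
CopyAuthorization authorization = await targetClient.GetCopyAuthorizationAsync(modelDescription: "Copied model");
Console.WriteLine("Copy authorization acquired for the target resource.");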
        private static async Task UpdateClientsideKeyEncryptionKeyInternal(
            BlobClient client,
            ClientSideEncryptionOptions encryptionOptionsOverride,
            BlobRequestConditions conditions,
            bool async,
            CancellationToken cancellationToken)
        {
            // argument validation
            Argument.AssertNotNull(client, nameof(client));
            ClientSideEncryptionOptions operationEncryptionOptions = encryptionOptionsOverride
                                                                     ?? client.ClientSideEncryption
                                                                     ?? throw new ArgumentException($"{nameof(ClientSideEncryptionOptions)} are not configured on this client and none were provided for the operation.");

            Argument.AssertNotNull(operationEncryptionOptions.KeyEncryptionKey, nameof(ClientSideEncryptionOptions.KeyEncryptionKey));
            Argument.AssertNotNull(operationEncryptionOptions.KeyResolver, nameof(ClientSideEncryptionOptions.KeyResolver));
            Argument.AssertNotNull(operationEncryptionOptions.KeyWrapAlgorithm, nameof(ClientSideEncryptionOptions.KeyWrapAlgorithm));

            using (client.ClientConfiguration.Pipeline.BeginLoggingScope(nameof(BlobClient)))
            {
                client.ClientConfiguration.Pipeline.LogMethodEnter(
                    nameof(BlobBaseClient),
                    message:
                    $"{nameof(Uri)}: {client.Uri}\n" +
                    $"{nameof(conditions)}: {conditions}");

                DiagnosticScope scope = client.ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(BlobClient)}.{nameof(UpdateClientSideKeyEncryptionKey)}");
                scope.Start();

                try
                {
                    // Hold onto the etag, metadata, and encryption data
                    BlobProperties getPropertiesResponse = await client.GetPropertiesInternal(conditions, async, cancellationToken).ConfigureAwait(false);

                    ETag etag = getPropertiesResponse.ETag;
                    IDictionary<string, string> metadata = getPropertiesResponse.Metadata;
                    EncryptionData encryptionData = BlobClientSideDecryptor.GetAndValidateEncryptionDataOrDefault(metadata)
                        ?? throw new InvalidOperationException("Resource has no client-side encryption key to rotate.");

                    // Rotate the key wrapping
                    byte[] newWrappedKey = await WrapKeyInternal(
                        await UnwrapKeyInternal(
                            encryptionData,
                            operationEncryptionOptions.KeyResolver,
                            async,
                            cancellationToken).ConfigureAwait(false),
                        operationEncryptionOptions.KeyWrapAlgorithm,
                        operationEncryptionOptions.KeyEncryptionKey,
                        async,
                        cancellationToken).ConfigureAwait(false);

                    // set new wrapped key info and reinsert into metadata
                    encryptionData.WrappedContentKey = new KeyEnvelope
                    {
                        EncryptedKey = newWrappedKey,
                        Algorithm    = operationEncryptionOptions.KeyWrapAlgorithm,
                        KeyId        = operationEncryptionOptions.KeyEncryptionKey.KeyId
                    };
                    metadata[Constants.ClientSideEncryption.EncryptionDataKey] = EncryptionDataSerializer.Serialize(encryptionData);

                    // update blob ONLY IF ETAG MATCHES (do not take chances encryption info is now out of sync)
                    BlobRequestConditions modifiedRequestConditions = BlobRequestConditions.CloneOrDefault(conditions) ?? new BlobRequestConditions();
                    modifiedRequestConditions.IfMatch = etag;
                    await client.SetMetadataInternal(metadata, modifiedRequestConditions, async, cancellationToken).ConfigureAwait(false);
                }
                catch (Exception ex)
                {
                    client.ClientConfiguration.Pipeline.LogException(ex);
                    scope.Failed(ex);
                    throw;
                }
                finally
                {
                    client.ClientConfiguration.Pipeline.LogMethodExit(nameof(BlobBaseClient));
                    scope.Dispose();
                }
            }
        }
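
// Hypothetical configuration sketch: the options validated above are normally
// attached to the client up front. The Key Vault types shown (CryptographyClient
// as IKeyEncryptionKey, KeyResolver as IKeyEncryptionKeyResolver) and the
// version enum value are assumptions, not the only possible choices.
var encryptionOptions = new ClientSideEncryptionOptions(ClientSideEncryptionVersion.V2_0)
{
    KeyEncryptionKey = keyEncryptionKey, // an IKeyEncryptionKey, e.g. a CryptographyClient
    KeyResolver = keyResolver,           // an IKeyEncryptionKeyResolver, e.g. a KeyResolver
    KeyWrapAlgorithm = "RSA-OAEP-256"
};
BlobClient encryptedBlobClient = new BlobClient(new Uri("<blob-uri>"), new DefaultAzureCredential())
    .WithClientSideEncryptionOptions(encryptionOptions);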
        /// <summary>
        /// The <see cref="BreakInternal"/> operation breaks the files's
        /// previously-acquired lease (if it exists).
        ///
        /// Once a lease is broken, it cannot be renewed.  Any authorized
        /// request can break the lease; the request is not required to
        /// specify a matching lease ID.
        ///
        /// A lease that has been broken can also be released.  A client can
        /// immediately acquire a file lease that has been
        /// released.
        ///
        /// </summary>
        /// <param name="async">
        /// Whether to invoke the operation asynchronously.
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{FileLease}"/> describing the broken lease.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        private async Task<Response<ShareFileLease>> BreakInternal(
            bool async,
            CancellationToken cancellationToken)
        {
            EnsureClient();
            using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(ShareLeaseClient)))
            {
                ClientConfiguration.Pipeline.LogMethodEnter(
                    nameof(ShareLeaseClient),
                    message:
                    $"{nameof(Uri)}: {Uri}\n" +
                    $"{nameof(LeaseId)}: {LeaseId}");

                DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(ShareLeaseClient)}.{nameof(Break)}");

                try
                {
                    scope.Start();

                    if (FileClient != null)
                    {
                        ResponseWithHeaders<FileBreakLeaseHeaders> response;

                        if (async)
                        {
                            response = await FileClient.FileRestClient.BreakLeaseAsync(
                                leaseAccessConditions: null,
                                cancellationToken: cancellationToken)
                                .ConfigureAwait(false);
                        }
                        else
                        {
                            response = FileClient.FileRestClient.BreakLease(
                                leaseAccessConditions: null,
                                cancellationToken: cancellationToken);
                        }

                        return Response.FromValue(
                            response.ToShareFileLease(),
                            response.GetRawResponse());
                    }
                    else
                    {
                        ResponseWithHeaders<ShareBreakLeaseHeaders> response;

                        if (async)
                        {
                            response = await ShareClient.ShareRestClient.BreakLeaseAsync(
                                breakPeriod: null,
                                leaseAccessConditions: null,
                                cancellationToken: cancellationToken)
                                .ConfigureAwait(false);
                        }
                        else
                        {
                            response = ShareClient.ShareRestClient.BreakLease(
                                breakPeriod: null,
                                leaseAccessConditions: null,
                                cancellationToken: cancellationToken);
                        }

                        return Response.FromValue(
                            response.ToShareFileLease(),
                            response.GetRawResponse());
                    }
                }
                catch (Exception ex)
                {
                    ClientConfiguration.Pipeline.LogException(ex);
                    scope.Failed(ex);
                    throw;
                }
                finally
                {
                    ClientConfiguration.Pipeline.LogMethodExit(nameof(ShareLeaseClient));
                    scope.Dispose();
                }
            }
        }
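
// Hypothetical usage sketch: acquire a lease on a share file, do some exclusive
// work, then break it. Connection string and names are placeholders.
ShareFileClient fileClient = new ShareClient("<connection-string>", "<share-name>")
    .GetRootDirectoryClient()
    .GetFileClient("<file-name>");
ShareLeaseClient leaseClient = fileClient.GetShareLeaseClient();
ShareFileLease lease = leaseClient.Acquire();
// ... work that requires exclusive access ...
leaseClient.Break();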
        /// <summary>
        /// Submit a <see cref="BlobBatch"/> of sub-operations.
        /// </summary>
        /// <param name="batch">
        /// A <see cref="BlobBatch"/> of sub-operations.
        /// </param>
        /// <param name="throwOnAnyFailure">
        /// A value indicating whether or not to throw exceptions for
        /// sub-operation failures.
        /// </param>
        /// <param name="async">
        /// Whether to invoke the operation asynchronously.
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate notifications
        /// that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response"/> on successfully submitting.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure to submit the batch occurs.  Individual sub-operation
        /// failures will only be thrown, wrapped in an <see cref="AggregateException"/>,
        /// if <paramref name="throwOnAnyFailure"/> is true.
        /// </remarks>
        private async Task<Response> SubmitBatchInternal(
            BlobBatch batch,
            bool throwOnAnyFailure,
            bool async,
            CancellationToken cancellationToken)
        {
            DiagnosticScope scope = ClientDiagnostics.CreateScope($"{nameof(BlobBatchClient)}.{nameof(SubmitBatch)}");

            try
            {
                scope.Start();

                batch = batch ?? throw new ArgumentNullException(nameof(batch));
                if (batch.Submitted)
                {
                    throw BatchErrors.CannotResubmitBatch(nameof(batch));
                }
                else if (!batch.IsAssociatedClient(this))
                {
                    throw BatchErrors.BatchClientDoesNotMatch(nameof(batch));
                }

                // Get the sub-operation messages to submit
                IList<HttpMessage> messages = batch.GetMessagesToSubmit();
                if (messages.Count == 0)
                {
                    throw BatchErrors.CannotSubmitEmptyBatch(nameof(batch));
                }
                // TODO: Consider validating the upper limit of 256 messages

                // Merge the sub-operations into a single multipart/mixed Stream
                (Stream content, string contentType) =
                    await MergeOperationRequests(
                        messages,
                        async,
                        cancellationToken)
                    .ConfigureAwait(false);

                if (IsContainerScoped)
                {
                    ResponseWithHeaders<Stream, ContainerSubmitBatchHeaders> response;

                    if (async)
                    {
                        response = await _containerRestClient.SubmitBatchAsync(
                            containerName: ContainerName,
                            contentLength: content.Length,
                            multipartContentType: contentType,
                            body: content,
                            cancellationToken: cancellationToken)
                            .ConfigureAwait(false);
                    }
                    else
                    {
                        response = _containerRestClient.SubmitBatch(
                            containerName: ContainerName,
                            contentLength: content.Length,
                            multipartContentType: contentType,
                            body: content,
                            cancellationToken: cancellationToken);
                    }

                    await UpdateOperationResponses(
                        messages,
                        response.GetRawResponse(),
                        response.Value,
                        response.Headers.ContentType,
                        throwOnAnyFailure,
                        async,
                        cancellationToken)
                    .ConfigureAwait(false);

                    return response.GetRawResponse();
                }
                else
                {
                    ResponseWithHeaders<Stream, ServiceSubmitBatchHeaders> response;

                    if (async)
                    {
                        response = await _serviceRestClient.SubmitBatchAsync(
                            contentLength: content.Length,
                            multipartContentType: contentType,
                            body: content,
                            cancellationToken: cancellationToken)
                            .ConfigureAwait(false);
                    }
                    else
                    {
                        response = _serviceRestClient.SubmitBatch(
                            contentLength: content.Length,
                            multipartContentType: contentType,
                            body: content,
                            cancellationToken: cancellationToken);
                    }

                    await UpdateOperationResponses(
                        messages,
                        response.GetRawResponse(),
                        response.Value,
                        response.Headers.ContentType,
                        throwOnAnyFailure,
                        async,
                        cancellationToken)
                    .ConfigureAwait(false);

                    return response.GetRawResponse();
                }
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
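
// Hypothetical usage sketch: batch several blob deletions into a single request
// via the public SubmitBatch API. Connection string and URIs are placeholders.
BlobServiceClient blobService = new BlobServiceClient("<connection-string>");
BlobBatchClient batchClient = blobService.GetBlobBatchClient();
BlobBatch batch = batchClient.CreateBatch();
batch.DeleteBlob(new Uri("<blob-uri-1>"));
batch.DeleteBlob(new Uri("<blob-uri-2>"));
Response batchResponse = batchClient.SubmitBatch(batch, throwOnAnyFailure: true);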
        /// <summary>
        /// Returns a single segment of queues starting from the specified marker.
        ///
        /// For more information, see
        /// <see href="https://docs.microsoft.com/rest/api/storageservices/list-queues1">
        /// List Queues</see>.
        /// </summary>
        /// <param name="marker">
        /// Marker from the previous request.
        /// </param>
        /// <param name="traits">
        /// Optional trait options for shaping the queues.
        /// </param>
        /// <param name="prefix">
        /// Optional string that filters the results to return only queues
        /// whose name begins with the specified <paramref name="prefix"/>.
        /// </param>
        /// <param name="pageSizeHint">
        /// Optional hint to specify the desired size of the page returned.
        /// </param>
        /// <param name="async">
        /// Whether to invoke the operation asynchronously.
        /// </param>
        /// <param name="cancellationToken">
        /// <see cref="CancellationToken"/>
        /// </param>
        /// <returns>
        /// A single segment of queues starting from the specified marker, including the next marker if appropriate.
        /// </returns>
        /// <remarks>
        /// Use an empty marker to start enumeration from the beginning. Queue names are returned in lexicographic order.
        /// After getting a segment, process it, and then call ListQueuesSegmentAsync again (passing in the next marker) to get the next segment.
        /// </remarks>
        internal async Task<Response<ListQueuesSegmentResponse>> GetQueuesInternal(
            string marker,
            QueueTraits traits,
            string prefix,
            int? pageSizeHint,
            bool async,
            CancellationToken cancellationToken)
        {
            using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(QueueServiceClient)))
            {
                ClientConfiguration.Pipeline.LogMethodEnter(
                    nameof(QueueServiceClient),
                    message:
                    $"{nameof(Uri)}: {Uri}\n" +
                    $"{nameof(marker)}: {marker}\n" +
                    $"{nameof(traits)}: {traits}\n" +
                    $"{nameof(prefix)}: {prefix}");

                DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(QueueServiceClient)}.{nameof(GetQueues)}");

                try
                {
                    ResponseWithHeaders<ListQueuesSegmentResponse, ServiceListQueuesSegmentHeaders> response;

                    scope.Start();
                    IEnumerable<string> includeTypes = traits.AsIncludeTypes();
                    if (async)
                    {
                        response = await _serviceRestClient.ListQueuesSegmentAsync(
                            prefix: prefix,
                            marker: marker,
                            maxresults: pageSizeHint,
                            include: includeTypes.Any() ? includeTypes : null,
                            cancellationToken: cancellationToken)
                            .ConfigureAwait(false);
                    }
                    else
                    {
                        response = _serviceRestClient.ListQueuesSegment(
                            prefix: prefix,
                            marker: marker,
                            maxresults: pageSizeHint,
                            include: includeTypes.Any() ? includeTypes : null,
                            cancellationToken: cancellationToken);
                    }

                    if ((traits & QueueTraits.Metadata) != QueueTraits.Metadata)
                    {
                        IEnumerable<QueueItem> queueItems = response.Value.QueueItems;
                        foreach (QueueItem queueItem in queueItems)
                        {
                            queueItem.Metadata = null;
                        }
                    }
                    return response;
                }
                catch (Exception ex)
                {
                    ClientConfiguration.Pipeline.LogException(ex);
                    scope.Failed(ex);
                    throw;
                }
                finally
                {
                    ClientConfiguration.Pipeline.LogMethodExit(nameof(QueueServiceClient));
                    scope.Dispose();
                }
            }
        }
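
// Hypothetical usage sketch: the public pageable API that drives this internal
// segment call. The connection string and prefix are placeholders; assumes an
// async context.
var queueService = new QueueServiceClient("<connection-string>");
await foreach (QueueItem queue in queueService.GetQueuesAsync(QueueTraits.Metadata, prefix: "orders-"))
{
    Console.WriteLine(queue.Name);
}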
        private Response <PathInfo> UploadInSequence(
            Stream content,
            int blockSize,
            PathHttpHeaders httpHeaders,
            DataLakeRequestConditions conditions,
            IProgress<long> progressHandler,
            CancellationToken cancellationToken)
        {
            // Wrap the append and flush calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each append file operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // Partition the stream into individual blocks and stage them
                // We need to keep track of how much data we have appended to
                // calculate offsets for the next appends, and the final
                // position to flush
                long appendedBytes = 0;
                foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                             content, blockSize, async: false, _arrayPool, cancellationToken).EnsureSyncEnumerable())
                {
                    // Dispose the block after the loop iterates and return its memory to our ArrayPool
                    using (block)
                    {
                        // Append the next block
                        _client.Append(
                            new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                            offset: appendedBytes,
                            leaseId: conditions?.LeaseId,
                            progressHandler: progressHandler,
                            cancellationToken: cancellationToken);

                        appendedBytes += block.Length;
                    }
                }

                // Commit the block list after everything has been staged to
                // complete the upload
                return _client.Flush(
                    position: appendedBytes,
                    httpHeaders: httpHeaders,
                    conditions: conditions,
                    cancellationToken: cancellationToken);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
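
// Hypothetical usage sketch: the public Upload API that can take this sequential
// path. The endpoint and file paths are placeholders.
var dataLakeFileClient = new DataLakeFileClient(new Uri("<dfs-endpoint>/<filesystem>/<path>"), new DefaultAzureCredential());
using FileStream localFile = File.OpenRead("<local-file>");
Response<PathInfo> uploadResponse = dataLakeFileClient.Upload(localFile, overwrite: true);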
        /// <summary> Publishes a batch of EventGridEvents to an Azure Event Grid topic. </summary>
        /// <param name="events"> An array of events to be published to Event Grid. </param>
        /// <param name="async">Whether to invoke the operation asynchronously.</param>
        /// <param name="cancellationToken"> The cancellation token to use. </param>
        private async Task<Response> SendEventsInternal(IEnumerable<EventGridEvent> events, bool async, CancellationToken cancellationToken = default)
        {
            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(EventGridPublisherClient)}.{nameof(SendEvents)}");
            scope.Start();

            try
            {
                // List of events cannot be null
                Argument.AssertNotNull(events, nameof(events));

                List<EventGridEventInternal> eventsWithSerializedPayloads = new List<EventGridEventInternal>();
                foreach (EventGridEvent egEvent in events)
                {
                    // Individual events cannot be null
                    Argument.AssertNotNull(egEvent, nameof(egEvent));

                    JsonDocument data;
                    if (egEvent.Data is BinaryData binaryEventData)
                    {
                        try
                        {
                            data = JsonDocument.Parse(binaryEventData);
                        }
                        catch (JsonException)
                        {
                            data = SerializeObjectToJsonDocument(binaryEventData.ToString(), typeof(string), cancellationToken);
                        }
                    }
                    else
                    {
                        data = SerializeObjectToJsonDocument(egEvent.Data, egEvent.Data.GetType(), cancellationToken);
                    }

                    EventGridEventInternal newEGEvent = new EventGridEventInternal(
                        egEvent.Id,
                        egEvent.Subject,
                        data.RootElement,
                        egEvent.EventType,
                        egEvent.EventTime,
                        egEvent.DataVersion)
                    {
                        Topic = egEvent.Topic
                    };

                    eventsWithSerializedPayloads.Add(newEGEvent);
                }
                if (async)
                {
                    // Publish asynchronously if called via an async path
                    return await _serviceRestClient.PublishEventsAsync(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken).ConfigureAwait(false);
                }
                else
                {
                    return _serviceRestClient.PublishEvents(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken);
                }
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
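
// Hypothetical usage sketch: publish one EventGridEvent through the public send
// API. Topic endpoint and key are placeholders; assumes an async context.
var publisher = new EventGridPublisherClient(new Uri("<topic-endpoint>"), new AzureKeyCredential("<topic-key>"));
var orderPlaced = new EventGridEvent(
    subject: "orders/1234",
    eventType: "Contoso.Orders.OrderPlaced",
    dataVersion: "1.0",
    data: new { OrderId = 1234 });
await publisher.SendEventAsync(orderPlaced);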
        private async Task<Response<BlobContentInfo>> UploadInParallelAsync(
            Stream content,
            int blockSize,
            BlobHttpHeaders blobHttpHeaders,
            IDictionary<string, string> metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier,
            CancellationToken cancellationToken)
        {
            // Wrap the staging and commit calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Blobs)}.{nameof(BlobClient)}.{nameof(BlobClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each stage blob operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // The list tracking the block IDs we're going to commit
                List<string> blockIds = new List<string>();

                // A list of tasks that are currently executing, which will
                // always be smaller than _maxWorkerCount
                List<Task> runningTasks = new List<Task>();

                // Partition the stream into individual blocks
                await foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                                   content,
                                   blockSize,
                                   async: true,
                                   _arrayPool,
                                   cancellationToken).ConfigureAwait(false))
                {
                    // Start staging the next block (but don't await the Task!)
                    string blockId = GenerateBlockId(block.AbsolutePosition);
                    Task   task    = StageBlockAsync(
                        block,
                        blockId,
                        conditions,
                        progressHandler,
                        cancellationToken);

                    // Add the block to our task and commit lists
                    runningTasks.Add(task);
                    blockIds.Add(blockId);

                    // If we run out of workers
                    if (runningTasks.Count >= _maxWorkerCount)
                    {
                        // Wait for at least one of them to finish
                        await Task.WhenAny(runningTasks).ConfigureAwait(false);

                        // Clear any completed blocks from the task list
                        for (int i = 0; i < runningTasks.Count; i++)
                        {
                            Task runningTask = runningTasks[i];
                            if (!runningTask.IsCompleted)
                            {
                                continue;
                            }

                            await runningTask.ConfigureAwait(false);

                            runningTasks.RemoveAt(i);
                            i--;
                        }
                    }
                }

                // Wait for all the remaining blocks to finish staging and then
                // commit the block list to complete the upload
                await Task.WhenAll(runningTasks).ConfigureAwait(false);

                return await _client.CommitBlockListAsync(
                    blockIds,
                    blobHttpHeaders,
                    metadata,
                    conditions,
                    accessTier,
                    cancellationToken)
                    .ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
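
// Hypothetical usage sketch: the parallel path above is driven by the public
// Upload API; StorageTransferOptions controls block size and worker count.
// Values and placeholders below are illustrative only.
var blobClient = new BlobClient("<connection-string>", "<container-name>", "<blob-name>");
var uploadOptions = new BlobUploadOptions
{
    TransferOptions = new StorageTransferOptions
    {
        MaximumConcurrency = 8,               // upper bound on concurrently staged blocks
        MaximumTransferSize = 8 * 1024 * 1024 // block size used for each stage request
    }
};
using FileStream uploadStream = File.OpenRead("<local-file>");
await blobClient.UploadAsync(uploadStream, uploadOptions);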
 /// <summary>
 /// Records OpenTelemetry attributes from the given user exception onto the diagnostic scope.
 /// </summary>
 /// <param name="exception">The user exception whose message is recorded.</param>
 /// <param name="scope">The diagnostic scope on which to record the attribute.</param>
 internal static void RecordOtelAttributes(ChangeFeedProcessorUserException exception, DiagnosticScope scope)
 {
     scope.AddAttribute(OpenTelemetryAttributeKeys.ExceptionMessage, exception.Message);
 }
        private Response<BlobContentInfo> UploadInSequence(
            Stream content,
            int blockSize,
            BlobHttpHeaders blobHttpHeaders,
            IDictionary<string, string> metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier,
            CancellationToken cancellationToken)
        {
            // Wrap the staging and commit calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Blobs)}.{nameof(BlobClient)}.{nameof(BlobClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each stage blob operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // The list tracking the block IDs we're going to commit
                List<string> blockIds = new List<string>();

                // Partition the stream into individual blocks and stage them
                foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                             content, blockSize, async: false, _arrayPool, cancellationToken).EnsureSyncEnumerable())
                {
                    // Dispose the block after the loop iterates and return its memory to our ArrayPool
                    using (block)
                    {
                        // Stage the next block
                        string blockId = GenerateBlockId(block.AbsolutePosition);
                        _client.StageBlock(
                            blockId,
                            new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                            conditions: conditions,
                            progressHandler: progressHandler,
                            cancellationToken: cancellationToken);

                        blockIds.Add(blockId);
                    }
                }

                // Commit the block list after everything has been staged to
                // complete the upload
                return _client.CommitBlockList(
                    blockIds,
                    blobHttpHeaders,
                    metadata,
                    conditions,
                    accessTier,
                    cancellationToken);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
        /// <summary>
        ///   The main loop of a partition pump.  It receives events from the Azure Event Hubs service
        ///   and delegates their processing to the inner partition processor.
        /// </summary>
        ///
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> instance to signal the request to cancel the operation.</param>
        ///
        /// <returns>A task to be resolved on when the operation has completed.</returns>
        ///
        private async Task RunAsync(CancellationToken cancellationToken)
        {
            IEnumerable<EventData> receivedEvents;
            Exception unrecoverableException = null;

            // We'll break from the loop upon encountering a non-retriable exception.  The event processor periodically
            // checks its pumps' status, so it should be aware of when one of them stops working.

            while (!cancellationToken.IsCancellationRequested)
            {
                try
                {
                    receivedEvents = await InnerConsumer.ReceiveAsync(Options.MaximumMessageCount, Options.MaximumReceiveWaitTime, cancellationToken).ConfigureAwait(false);

                    using DiagnosticScope diagnosticScope = EventDataInstrumentation.ClientDiagnostics.CreateScope(DiagnosticProperty.EventProcessorProcessingActivityName);
                    diagnosticScope.AddAttribute("kind", "server");

                    if (diagnosticScope.IsEnabled)
                    {
                        foreach (var eventData in receivedEvents)
                        {
                            if (EventDataInstrumentation.TryExtractDiagnosticId(eventData, out string diagnosticId))
                            {
                                diagnosticScope.AddLink(diagnosticId);
                            }
                        }
                    }

                    diagnosticScope.Start();

                    try
                    {
                        await PartitionProcessor.ProcessEventsAsync(Context, receivedEvents, cancellationToken).ConfigureAwait(false);
                    }
                    catch (Exception partitionProcessorException)
                    {
                        diagnosticScope.Failed(partitionProcessorException);
                        unrecoverableException = partitionProcessorException;
                        CloseReason            = PartitionProcessorCloseReason.PartitionProcessorException;

                        break;
                    }
                }
                catch (Exception eventHubException)
                {
                    // Stop running only if it's not a retriable exception.

                    if (s_retryPolicy.CalculateRetryDelay(eventHubException, 1) == null)
                    {
                        unrecoverableException = eventHubException;
                        CloseReason            = PartitionProcessorCloseReason.EventHubException;

                        break;
                    }
                }
            }

            if (unrecoverableException != null)
            {
                // In case an exception is encountered while partition processor is processing the error, don't
                // catch it and let the calling method (StopAsync) handle it.

                await PartitionProcessor.ProcessErrorAsync(Context, unrecoverableException, cancellationToken).ConfigureAwait(false);
            }
        }
        /// <summary>
        ///   The main loop of a partition pump.  It receives events from the Azure Event Hubs service
        ///   and delegates their processing to the event processor processing handlers.
        /// </summary>
        ///
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> instance to signal the request to cancel the operation.</param>
        ///
        /// <returns>A task to be resolved on when the operation has completed.</returns>
        ///
        private async Task RunAsync(CancellationToken cancellationToken)
        {
            List<EventData> receivedEvents;
            Exception unrecoverableException = null;

            // We'll break from the loop upon encountering a non-retriable exception.  The event processor periodically
            // checks its pumps' status, so it should be aware of when one of them stops working.

            while (!cancellationToken.IsCancellationRequested)
            {
                try
                {
                    receivedEvents = (await InnerConsumer.ReceiveAsync(MaximumMessageCount, Options.MaximumReceiveWaitTime, cancellationToken).ConfigureAwait(false)).ToList();

                    using DiagnosticScope diagnosticScope = EventDataInstrumentation.ClientDiagnostics.CreateScope(DiagnosticProperty.EventProcessorProcessingActivityName);
                    diagnosticScope.AddAttribute("kind", "server");

                    if (diagnosticScope.IsEnabled)
                    {
                        foreach (var eventData in receivedEvents)
                        {
                            if (EventDataInstrumentation.TryExtractDiagnosticId(eventData, out string diagnosticId))
                            {
                                diagnosticScope.AddLink(diagnosticId);
                            }
                        }
                    }

                    // Small workaround to make sure we call ProcessEvent with EventData = null when no events have been received.
                    // The code is expected to get simpler when we start using the async enumerator internally to receive events.

                    if (receivedEvents.Count == 0)
                    {
                        receivedEvents.Add(null);
                    }

                    diagnosticScope.Start();

                    foreach (var eventData in receivedEvents)
                    {
                        try
                        {
                            var processorEvent = new EventProcessorEvent(Context, eventData, UpdateCheckpointAsync);
                            await ProcessEventAsync(processorEvent).ConfigureAwait(false);
                        }
                        catch (Exception eventProcessingException)
                        {
                            diagnosticScope.Failed(eventProcessingException);
                            unrecoverableException = eventProcessingException;

                            break;
                        }
                    }
                }
                catch (Exception eventHubException)
                {
                    // Stop running only if it's not a retriable exception.

                    if (RetryPolicy.CalculateRetryDelay(eventHubException, 1) == null)
                    {
                        // Rethrow with "throw;" to preserve the original stack trace.
                        throw;
                    }
                }

                if (unrecoverableException != null)
                {
                    throw unrecoverableException;
                }
            }
        }
        /// <summary>
        /// Attest an Intel SGX enclave.
        /// </summary>
        /// <param name="request">Aggregate type containing the information needed to perform an attestation operation.</param>
        /// <param name="async">true if the API call should be asynchronous, false otherwise.</param>
        /// <param name="cancellationToken">Cancellation token used to cancel the request.</param>
        /// <returns>An <see cref="AttestationResponse{AttestationResult}"/> which contains the validated claims for the supplied <paramref name="request"/></returns>
        /// <remarks>The <see cref="AttestationRequest.Evidence"/> must be an Intel SGX Quote.
        /// See <seealso href="https://software.intel.com/content/www/us/en/develop/articles/code-sample-intel-software-guard-extensions-remote-attestation-end-to-end-example.html"/> for more information.
        /// </remarks>
        private async Task<AttestationResponse<AttestationResult>> AttestSgxEnclaveInternal(AttestationRequest request, bool async, CancellationToken cancellationToken = default)
        {
            Argument.AssertNotNull(request, nameof(request));
            Argument.AssertNotNull(request.Evidence, nameof(request.Evidence));
            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(AttestationClient)}.{nameof(AttestSgxEnclave)}");
            scope.Start();
            try
            {
                var attestSgxEnclaveRequest = new AttestSgxEnclaveRequest
                {
                    Quote = request.Evidence.ToArray(),
                    DraftPolicyForAttestation = request.DraftPolicyForAttestation,
                };

                if (request.InittimeData != null)
                {
                    attestSgxEnclaveRequest.InitTimeData = new InitTimeData
                    {
                        Data     = request.InittimeData.BinaryData.ToArray(),
                        DataType = request.InittimeData.DataIsJson ? DataType.Json : DataType.Binary,
                    };
                }
                else
                {
                    attestSgxEnclaveRequest.InitTimeData = null;
                }

                if (request.RuntimeData != null)
                {
                    attestSgxEnclaveRequest.RuntimeData = new RuntimeData
                    {
                        Data     = request.RuntimeData.BinaryData.ToArray(),
                        DataType = request.RuntimeData.DataIsJson ? DataType.Json : DataType.Binary,
                    };
                }
                else
                {
                    attestSgxEnclaveRequest.RuntimeData = null;
                }

                Response<AttestationResponse> response;
                if (async)
                {
                    response = await _restClient.AttestSgxEnclaveAsync(attestSgxEnclaveRequest, cancellationToken).ConfigureAwait(false);
                }
                else
                {
                    response = _restClient.AttestSgxEnclave(attestSgxEnclaveRequest, cancellationToken);
                }
                var attestationToken = AttestationToken.Deserialize(response.Value.Token, _clientDiagnostics);
                if (_options.TokenOptions.ValidateToken)
                {
                    var signers = await GetSignersAsync(async, cancellationToken).ConfigureAwait(false);

                    if (!await attestationToken.ValidateTokenInternal(_options.TokenOptions, signers, async, cancellationToken).ConfigureAwait(false))
                    {
                        AttestationTokenValidationFailedException.ThrowFailure(signers, attestationToken);
                    }
                }

                return new AttestationResponse<AttestationResult>(response.GetRawResponse(), attestationToken);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
        }
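        // Hedged usage sketch for the public wrapper over AttestSgxEnclaveInternal.
        // The endpoint, sgxQuote, and runtimeData values are placeholders; the
        // client and request shapes follow the Azure.Security.Attestation public API.
        public static async Task AttestSgxUsageSketchAsync(byte[] sgxQuote, byte[] runtimeData)
        {
            var client = new AttestationClient(
                new Uri("https://<your-instance>.attest.azure.net"),
                new DefaultAzureCredential());

            AttestationResponse<AttestationResult> response = await client.AttestSgxEnclaveAsync(
                new AttestationRequest
                {
                    Evidence = BinaryData.FromBytes(sgxQuote),
                    RuntimeData = new AttestationData(BinaryData.FromBytes(runtimeData), dataIsJson: false),
                });

            // The validated claims are exposed on the AttestationResult.
            Console.WriteLine(response.Value.Issuer);
        }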
        /// <summary> Publishes a batch of CloudEvents to an Azure Event Grid topic. </summary>
        /// <param name="events"> An array of events to be published to Event Grid. </param>
        /// <param name="async">Whether to invoke the operation asynchronously.</param>
        /// <param name="cancellationToken"> The cancellation token to use. </param>
        private async Task<Response> SendCloudEventsInternal(IEnumerable<CloudEvent> events, bool async, CancellationToken cancellationToken = default)
        {
            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(EventGridPublisherClient)}.{nameof(SendEvents)}");
            scope.Start();

            try
            {
                // List of events cannot be null
                Argument.AssertNotNull(events, nameof(events));

                List<CloudEventInternal> eventsWithSerializedPayloads = new List<CloudEventInternal>();
                foreach (CloudEvent cloudEvent in events)
                {
                    // Individual events cannot be null
                    Argument.AssertNotNull(cloudEvent, nameof(cloudEvent));

                    CloudEventInternal newCloudEvent = new CloudEventInternal(
                        cloudEvent.Id,
                        cloudEvent.Source,
                        cloudEvent.Type,
                        "1.0")
                    {
                        Time            = cloudEvent.Time,
                        DataBase64      = cloudEvent.DataBase64,
                        Datacontenttype = cloudEvent.DataContentType,
                        Dataschema      = cloudEvent.DataSchema,
                        Subject         = cloudEvent.Subject
                    };

                    foreach (KeyValuePair<string, object> kvp in cloudEvent.ExtensionAttributes)
                    {
                        newCloudEvent.Add(kvp.Key, new CustomModelSerializer(kvp.Value, _dataSerializer, cancellationToken));
                    }

                    // The 'Data' property is optional for CloudEvents
                    // Additionally, if the type of data is binary, 'Data' will not be populated (data will be stored in 'DataBase64' instead)
                    if (cloudEvent.Data != null)
                    {
                        JsonDocument data = SerializeObjectToJsonDocument(cloudEvent.Data, cloudEvent.Data.GetType(), cancellationToken);
                        newCloudEvent.Data = data.RootElement;
                    }
                    eventsWithSerializedPayloads.Add(newCloudEvent);
                }
                if (async)
                {
                    // Publish asynchronously if called via an async path
                    return await _serviceRestClient.PublishCloudEventEventsAsync(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken).ConfigureAwait(false);
                }
                else
                {
                    return _serviceRestClient.PublishCloudEventEvents(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken);
                }
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
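        // Hedged usage sketch for the public SendEventsAsync overload that funnels
        // into SendCloudEventsInternal above. Endpoint and key are placeholders;
        // CloudEvent comes from Azure.Messaging.
        public static async Task SendCloudEventsUsageSketchAsync()
        {
            var client = new EventGridPublisherClient(
                new Uri("https://<topic-name>.<region>.eventgrid.azure.net/api/events"),
                new AzureKeyCredential("<topic-key>"));

            var events = new List<CloudEvent>
            {
                // Object payloads are serialized to JSON and land in 'Data'.
                new CloudEvent("/myapp/orders", "MyApp.Orders.Created", new { OrderId = 42 })
            };

            await client.SendEventsAsync(events);
        }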
        /// <summary> Publishes a batch of CloudEvents to an Azure Event Grid topic. </summary>
        /// <param name="events"> An array of events to be published to Event Grid. </param>
        /// <param name="async">Whether to invoke the operation asynchronously</param>
        /// <param name="cancellationToken"> The cancellation token to use. </param>
        private async Task<Response> PublishCloudEventsInternal(IEnumerable<CloudEvent> events, bool async, CancellationToken cancellationToken = default)
        {
            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(EventGridPublisherClient)}.{nameof(PublishCloudEvents)}");
            scope.Start();

            try
            {
                // List of events cannot be null
                Argument.AssertNotNull(events, nameof(events));

                List<CloudEventInternal> eventsWithSerializedPayloads = new List<CloudEventInternal>();
                foreach (CloudEvent cloudEvent in events)
                {
                    // Individual events cannot be null
                    Argument.AssertNotNull(cloudEvent, nameof(cloudEvent));

                    CloudEventInternal newCloudEvent = new CloudEventInternal(
                        cloudEvent.Id,
                        cloudEvent.Source,
                        cloudEvent.Type,
                        cloudEvent.SpecVersion)
                    {
                        Time            = cloudEvent.Time,
                        Datacontenttype = cloudEvent.DataContentType,
                        Dataschema      = cloudEvent.DataSchema,
                        Subject         = cloudEvent.Subject
                    };

                    foreach (KeyValuePair<string, object> kvp in cloudEvent.ExtensionAttributes)
                    {
                        newCloudEvent.Add(kvp.Key, new EventGridSerializer(kvp.Value, _serializer, cancellationToken));
                    }

                    // The 'Data' property is optional for CloudEvents
                    if (cloudEvent.Data != null)
                    {
                        if (cloudEvent.Data is IEnumerable<byte> enumerable)
                        {
                            newCloudEvent.DataBase64 = Convert.ToBase64String(enumerable.ToArray());
                        }
                        else if (cloudEvent.Data is ReadOnlyMemory<byte> memory)
                        {
                            newCloudEvent.DataBase64 = Convert.ToBase64String(memory.ToArray());
                        }
                        else
                        {
                            newCloudEvent.Data = new EventGridSerializer(
                                cloudEvent.Data,
                                _serializer,
                                cancellationToken);
                        }
                    }
                    eventsWithSerializedPayloads.Add(newCloudEvent);
                }
                if (async)
                {
                    // Publish asynchronously if called via an async path
                    return await _serviceRestClient.PublishCloudEventEventsAsync(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken).ConfigureAwait(false);
                }
                else
                {
                    return _serviceRestClient.PublishCloudEventEvents(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken);
                }
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
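        // Hedged sketch of the binary-payload branch above: with the shipped
        // Azure.Messaging.CloudEvent type, binary data is declared up front and is
        // serialized into 'data_base64' rather than 'data'. The overload and
        // CloudEventDataFormat name are taken from the current public API, an
        // assumption relative to the preview-era types used in this snippet.
        public static CloudEvent CreateBinaryCloudEventSketch(byte[] payload)
        {
            return new CloudEvent(
                source: "/myapp/files",
                type: "MyApp.File.Uploaded",
                data: BinaryData.FromBytes(payload),
                dataContentType: "application/octet-stream",
                dataFormat: CloudEventDataFormat.Binary);
        }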
        private async Task<Response<PathInfo>> UploadInParallelAsync(
            Stream content,
            int blockSize,
            PathHttpHeaders httpHeaders,
            DataLakeRequestConditions conditions,
            IProgress <long> progressHandler,
            CancellationToken cancellationToken)
        {
            // Wrap the staging and commit calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each individual append operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // A list of tasks that are currently executing which will
                // always be smaller than _maxWorkerCount
                List<Task> runningTasks = new List<Task>();

                // We need to keep track of how much data we have appended to
                // calculate offsets for the next appends, and the final
                // position to flush
                long appendedBytes = 0;

                // Partition the stream into individual blocks
                await foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                                   content, blockSize, async: true, _arrayPool, cancellationToken).ConfigureAwait(false))
                {
                    // Start appending the next block (but don't await the Task!)
                    Task task = AppendBlockAsync(
                        block,
                        appendedBytes,
                        conditions?.LeaseId,
                        progressHandler,
                        cancellationToken);

                    // Add the block to our task and commit lists
                    runningTasks.Add(task);

                    appendedBytes += block.Length;

                    // If we run out of workers
                    if (runningTasks.Count >= _maxWorkerCount)
                    {
                        // Wait for at least one of them to finish
                        await Task.WhenAny(runningTasks).ConfigureAwait(false);

                        // Clear any completed blocks from the task list
                        for (int i = 0; i < runningTasks.Count; i++)
                        {
                            Task runningTask = runningTasks[i];
                            if (!runningTask.IsCompleted)
                            {
                                continue;
                            }

                            await runningTask.ConfigureAwait(false);

                            runningTasks.RemoveAt(i);
                            i--;
                        }
                    }
                }

                // Wait for all the remaining blocks to finish staging and then
                // commit the block list to complete the upload
                await Task.WhenAll(runningTasks).ConfigureAwait(false);

                return await _client.FlushAsync(
                    position: appendedBytes,
                    httpHeaders: httpHeaders,
                    conditions: conditions,
                    cancellationToken: cancellationToken)
                    .ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
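        // Hedged usage sketch: driving UploadInParallelAsync through the public
        // DataLakeFileClient.UploadAsync overload. The URI and file name are
        // placeholders; StorageTransferOptions controls the block size and the
        // _maxWorkerCount-style parallelism used above.
        public static async Task UploadUsageSketchAsync()
        {
            var file = new DataLakeFileClient(
                new Uri("https://<account>.dfs.core.windows.net/<filesystem>/data.bin"),
                new DefaultAzureCredential());

            using FileStream content = File.OpenRead("data.bin");
            await file.UploadAsync(content, new DataLakeFileUploadOptions
            {
                TransferOptions = new StorageTransferOptions
                {
                    MaximumTransferSize = 4 * 1024 * 1024, // block size per append
                    MaximumConcurrency  = 8                // cap on concurrent appends
                }
            });
        }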
        public Response DownloadTo(
            Stream destination,
            BlobRequestConditions conditions,
            CancellationToken cancellationToken)
        {
            // Wrap the download range calls in a Download span for distributed
            // tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope($"{nameof(BlobBaseClient)}.{nameof(BlobBaseClient.DownloadTo)}");

            try
            {
                scope.Start();

                // Just start downloading using an initial range.  If it's a
                // small blob, we'll get the whole thing in one shot.  If it's
                // a large blob, we'll get its full size in Content-Range and
                // can keep downloading it in segments.
                var initialRange = new HttpRange(0, _initialRangeSize);
                Response <BlobDownloadInfo> initialResponse = _client.Download(
                    initialRange,
                    conditions,
                    rangeGetContentHash: false,
                    cancellationToken);

                // If the initial request returned no content (i.e., a 304),
                // we'll pass that back to the user immediately
                if (initialResponse.IsUnavailable())
                {
                    return initialResponse.GetRawResponse();
                }

                // Copy the first segment to the destination stream
                CopyTo(initialResponse, destination, cancellationToken);

                // If the first segment was the entire blob, we're finished now
                long initialLength = initialResponse.Value.ContentLength;
                long totalLength   = ParseRangeTotalLength(initialResponse.Value.Details.ContentRange);
                if (initialLength == totalLength)
                {
                    return initialResponse.GetRawResponse();
                }

                // Capture the etag from the first segment and construct
                // conditions to ensure the blob doesn't change while we're
                // downloading the remaining segments
                ETag etag = initialResponse.Value.Details.ETag;
                BlobRequestConditions conditionsWithEtag = CreateConditionsWithEtag(conditions, etag);

                // Download each of the remaining ranges in the blob
                foreach (HttpRange httpRange in GetRanges(initialLength, totalLength))
                {
                    // Don't need to worry about 304s here because the ETag
                    // condition will turn into a 412 and throw a proper
                    // RequestFailedException
                    Response <BlobDownloadInfo> result = _client.Download(
                        httpRange,
                        conditionsWithEtag,
                        rangeGetContentHash: false,
                        cancellationToken);
                    CopyTo(result.Value, destination, cancellationToken);
                }

                return initialResponse.GetRawResponse();
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
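        // GetRanges is not shown in this excerpt; a hedged sketch of one way it
        // could partition the remainder of the blob into fixed-size windows,
        // starting where the initial response left off (names are assumptions):
        private static IEnumerable<HttpRange> GetRangesSketch(long initialLength, long totalLength, long rangeSize)
        {
            for (long offset = initialLength; offset < totalLength; offset += rangeSize)
            {
                // The last window is clipped so we never request past the blob's end.
                yield return new HttpRange(offset, Math.Min(rangeSize, totalLength - offset));
            }
        }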
Example #27
        public static void SetMessageData(this DiagnosticScope scope, IEnumerable<ServiceBusMessage> messages)
        {
            scope.AddLinkedDiagnostics(messages);
        }
        public async Task <Response> DownloadToAsync(
            Stream destination,
            BlobRequestConditions conditions,
            CancellationToken cancellationToken)
        {
            // Wrap the download range calls in a Download span for distributed
            // tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope($"{nameof(BlobBaseClient)}.{nameof(BlobBaseClient.DownloadTo)}");

            try
            {
                scope.Start();

                // Just start downloading using an initial range.  If it's a
                // small blob, we'll get the whole thing in one shot.  If it's
                // a large blob, we'll get its full size in Content-Range and
                // can keep downloading it in segments.
                var initialRange = new HttpRange(0, _initialRangeSize);
                Task<Response<BlobDownloadInfo>> initialResponseTask =
                    _client.DownloadAsync(
                        initialRange,
                        conditions,
                        rangeGetContentHash: false,
                        cancellationToken);
                Response <BlobDownloadInfo> initialResponse =
                    await initialResponseTask.ConfigureAwait(false);

                // If the initial request returned no content (i.e., a 304),
                // we'll pass that back to the user immediately
                if (initialResponse.IsUnavailable())
                {
                    return initialResponse.GetRawResponse();
                }

                // If the first segment was the entire blob, we'll copy that to
                // the output stream and finish now
                long initialLength = initialResponse.Value.ContentLength;
                long totalLength   = ParseRangeTotalLength(initialResponse.Value.Details.ContentRange);
                if (initialLength == totalLength)
                {
                    await CopyToAsync(
                        initialResponse,
                        destination,
                        cancellationToken)
                    .ConfigureAwait(false);

                    return initialResponse.GetRawResponse();
                }

                // Capture the etag from the first segment and construct
                // conditions to ensure the blob doesn't change while we're
                // downloading the remaining segments
                ETag etag = initialResponse.Value.Details.ETag;
                BlobRequestConditions conditionsWithEtag = CreateConditionsWithEtag(conditions, etag);

                // Create a queue of tasks that will each download one segment
                // of the blob.  The queue maintains the order of the segments
                // so we can keep appending to the end of the destination
                // stream when each segment finishes.
                var runningTasks = new Queue<Task<Response<BlobDownloadInfo>>>();
                runningTasks.Enqueue(initialResponseTask);

                // Fill the queue with tasks to download each of the remaining
                // ranges in the blob
                foreach (HttpRange httpRange in GetRanges(initialLength, totalLength))
                {
                    // Add the next Task (which will start the download but
                    // return before it's completed downloading)
                    runningTasks.Enqueue(_client.DownloadAsync(
                                             httpRange,
                                             conditionsWithEtag,
                                             rangeGetContentHash: false,
                                             cancellationToken));

                    // If we have fewer tasks than allotted workers, then just
                    // continue adding tasks until we have _maxWorkerCount
                    // running in parallel
                    if (runningTasks.Count < _maxWorkerCount)
                    {
                        continue;
                    }

                    // Once all the workers are busy, wait for the first
                    // segment to finish downloading before we create more work
                    await ConsumeQueuedTask().ConfigureAwait(false);
                }

                // Wait for all of the remaining segments to download
                while (runningTasks.Count > 0)
                {
                    await ConsumeQueuedTask().ConfigureAwait(false);
                }

                return initialResponse.GetRawResponse();

                // Wait for the first segment in the queue of tasks to complete
                // downloading and copy it to the destination stream
                async Task ConsumeQueuedTask()
                {
                    // Don't need to worry about 304s here because the ETag
                    // condition will turn into a 412 and throw a proper
                    // RequestFailedException
                    using BlobDownloadInfo result =
                              await runningTasks.Dequeue().ConfigureAwait(false);

                    // Even though the BlobDownloadInfo is returned immediately,
                    // CopyToAsync causes ConsumeQueuedTask to wait until the
                    // download is complete
                    await CopyToAsync(
                        result,
                        destination,
                        cancellationToken)
                    .ConfigureAwait(false);
                }
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
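        // Hedged usage sketch: the public BlobBaseClient.DownloadToAsync overload
        // that ends up in the parallel implementation above. The URI and path are
        // placeholders; StorageTransferOptions bounds the _maxWorkerCount-style
        // parallelism.
        public static async Task DownloadUsageSketchAsync()
        {
            var blob = new BlobClient(
                new Uri("https://<account>.blob.core.windows.net/<container>/large.bin"),
                new DefaultAzureCredential());

            using FileStream destination = File.OpenWrite("large.bin");
            await blob.DownloadToAsync(
                destination,
                conditions: default,
                transferOptions: new StorageTransferOptions { MaximumConcurrency = 8 });
        }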
        public async Task<FetchResult> FetchAsync(
            string dtmi, Uri repositoryUri, ModelDependencyResolution dependencyResolution, CancellationToken cancellationToken = default)
        {
            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(HttpModelFetcher)}.{nameof(Fetch)}");
            scope.Start();
            try
            {
                Queue<string> work = PrepareWork(dtmi, repositoryUri, dependencyResolution == ModelDependencyResolution.TryFromExpanded);

                string remoteFetchError = string.Empty;
                RequestFailedException requestFailedExceptionThrown = null;
                Exception genericExceptionThrown = null;

                while (work.Count != 0)
                {
                    cancellationToken.ThrowIfCancellationRequested();

                    string tryContentPath = work.Dequeue();
                    ModelsRepositoryEventSource.Instance.FetchingModelContent(tryContentPath);

                    try
                    {
                        string content = await EvaluatePathAsync(tryContentPath, cancellationToken).ConfigureAwait(false);

                        return new FetchResult()
                        {
                            Definition = content,
                            Path = tryContentPath
                        };
                    }
                    catch (RequestFailedException ex)
                    {
                        requestFailedExceptionThrown = ex;
                    }
                    catch (Exception ex)
                    {
                        genericExceptionThrown = ex;
                    }

                    if (genericExceptionThrown != null || requestFailedExceptionThrown != null)
                    {
                        remoteFetchError =
                            $"{string.Format(CultureInfo.InvariantCulture, StandardStrings.GenericGetModelsError, dtmi)} " +
                            string.Format(CultureInfo.InvariantCulture, StandardStrings.ErrorFetchingModelContent, tryContentPath);
                    }
                }

                if (requestFailedExceptionThrown != null)
                {
                    throw new RequestFailedException(
                              requestFailedExceptionThrown.Status,
                              remoteFetchError,
                              requestFailedExceptionThrown.ErrorCode,
                              requestFailedExceptionThrown);
                }
                else
                {
                    throw new RequestFailedException(
                              (int)HttpStatusCode.BadRequest,
                              remoteFetchError,
                              null,
                              genericExceptionThrown);
                }
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
        }
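        // PrepareWork is not shown in this excerpt; a hedged sketch of the DTMI
        // path convention it follows: "dtmi:com:example:Thermostat;1" maps to
        // "dtmi/com/example/thermostat-1.json", with the ".expanded.json" variant
        // queued first when resolution tries pre-expanded models (names assumed):
        private static Queue<string> PrepareWorkSketch(string dtmi, Uri repositoryUri, bool tryFromExpanded)
        {
            // Lowercase the DTMI, then swap the namespace and version separators.
            string relativePath = dtmi.ToLowerInvariant().Replace(":", "/").Replace(";", "-") + ".json";

            var work = new Queue<string>();
            if (tryFromExpanded)
            {
                work.Enqueue(new Uri(repositoryUri, relativePath.Replace(".json", ".expanded.json")).AbsoluteUri);
            }
            work.Enqueue(new Uri(repositoryUri, relativePath).AbsoluteUri);
            return work;
        }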
Example #30
        /// <summary>
        /// The <see cref="AcquireInternal"/> operation acquires a lease on
        /// the file.
        ///
        /// If the file does not have an active lease, the File service
        /// creates a lease on the file and returns it.  If the
        /// file has an active lease, you can only request a new lease
        /// using the active lease ID as <see cref="LeaseId"/>.
        ///
        /// </summary>
        /// <param name="duration">
        /// Specifies the duration of the lease, in seconds, or specify
        /// <see cref="InfiniteLeaseDuration"/> for a lease that never expires.
        /// A non-infinite lease can be between 15 and 60 seconds.
        /// A lease duration cannot be changed using <see cref="RenewAsync"/>
        /// or <see cref="ChangeAsync"/>.
        /// </param>
        /// <param name="async">
        /// Whether to invoke the operation asynchronously.
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{Lease}"/> describing the lease.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        private async Task<Response<ShareFileLease>> AcquireInternal(
            TimeSpan? duration,
            bool async,
            CancellationToken cancellationToken)
        {
            EnsureClient();
            using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(ShareLeaseClient)))
            {
                ClientConfiguration.Pipeline.LogMethodEnter(
                    nameof(ShareLeaseClient),
                    message:
                    $"{nameof(Uri)}: {Uri}\n" +
                    $"{nameof(LeaseId)}: {LeaseId}\n");

                DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(ShareLeaseClient)}.{nameof(Acquire)}");

                try
                {
                    scope.Start();
                    if (FileClient != null)
                    {
                        // Azure Files file leases are always infinite; the requested
                        // duration only applies to share leases in the branch below.
                        ResponseWithHeaders<FileAcquireLeaseHeaders> response;

                        if (async)
                        {
                            response = await FileClient.FileRestClient.AcquireLeaseAsync(
                                duration: (int)Constants.File.Lease.InfiniteLeaseDuration,
                                proposedLeaseId: LeaseId,
                                cancellationToken: cancellationToken)
                                .ConfigureAwait(false);
                        }
                        else
                        {
                            response = FileClient.FileRestClient.AcquireLease(
                                duration: (int)Constants.File.Lease.InfiniteLeaseDuration,
                                proposedLeaseId: LeaseId,
                                cancellationToken: cancellationToken);
                        }

                        return Response.FromValue(
                            response.ToShareFileLease(),
                            response.GetRawResponse());
                    }
                    else
                    {
                        // Int64 is an overflow-safe cast relative to TimeSpan.MaxValue
                        long serviceDuration;

                        if (duration.HasValue && duration.Value >= TimeSpan.Zero)
                        {
                            serviceDuration = Convert.ToInt64(duration.Value.TotalSeconds);
                        }
                        else
                        {
                            serviceDuration = Constants.File.Lease.InfiniteLeaseDuration;
                        }

                        ResponseWithHeaders<ShareAcquireLeaseHeaders> response;

                        if (async)
                        {
                            response = await ShareClient.ShareRestClient.AcquireLeaseAsync(
                                duration: (int)serviceDuration,
                                proposedLeaseId: LeaseId,
                                cancellationToken: cancellationToken)
                                .ConfigureAwait(false);
                        }
                        else
                        {
                            response = ShareClient.ShareRestClient.AcquireLease(
                                duration: (int)serviceDuration,
                                proposedLeaseId: LeaseId,
                                cancellationToken: cancellationToken);
                        }

                        return Response.FromValue(
                            response.ToShareFileLease(),
                            response.GetRawResponse());
                    }
                }
                catch (Exception ex)
                {
                    ClientConfiguration.Pipeline.LogException(ex);
                    scope.Failed(ex);
                    throw;
                }
                finally
                {
                    ClientConfiguration.Pipeline.LogMethodExit(nameof(ShareLeaseClient));
                    scope.Dispose();
                }
            }
        }
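        // Hedged usage sketch: acquiring a file lease through the public Acquire
        // wrapper backed by AcquireInternal above. File leases are always
        // infinite, so no duration is passed; the connection string and paths are
        // placeholders.
        public static async Task AcquireLeaseUsageSketchAsync(string connectionString)
        {
            var file = new ShareFileClient(connectionString, "share", "dir/file.txt");
            ShareLeaseClient lease = file.GetShareLeaseClient();

            ShareFileLease acquired = await lease.AcquireAsync();
            try
            {
                // ... operate on the file while holding acquired.LeaseId ...
            }
            finally
            {
                await lease.ReleaseAsync();
            }
        }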