private async ValueTask<Response> UpdateStatusAsync(bool async, CancellationToken cancellationToken)
        {
            if (!_hasCompleted)
            {
                using DiagnosticScope? scope = _diagnostics?.CreateScope($"{nameof(SparkSessionOperation)}.{nameof(UpdateStatus)}");
                scope?.Start();

                try
                {
                    // Get the latest status
                    Response<SparkBatchJob> update = async
                        ? await _client.GetSparkBatchJobAsync(_batchId, true, cancellationToken).ConfigureAwait(false)
                        : _client.GetSparkBatchJob(_batchId, true, cancellationToken);

                    // Check if the operation is no longer running
                    _hasCompleted = IsJobComplete(update.Value.Result ?? SparkBatchJobResultType.Uncertain, update.Value.State.Value, _completionType);
                    if (_hasCompleted)
                    {
                        _hasValue = true;
                        _value    = update.Value;
                    }

                    // Update raw response
                    _rawResponse = update.GetRawResponse();
                }
                catch (Exception e)
                {
                    scope?.Failed(e);
                    throw;
                }
            }

            return GetRawResponse();
        }
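        // Illustrative sketch (not part of the original sample; names assumed):
        // the public Operation members typically funnel into the shared bool-async
        // method above. The sync overload drains the synchronously-completing task
        // with the Azure SDK's EnsureCompleted helper (used elsewhere in these samples).
        public override Response UpdateStatus(CancellationToken cancellationToken = default) =>
            UpdateStatusAsync(async: false, cancellationToken).EnsureCompleted();

        public override async ValueTask<Response> UpdateStatusAsync(CancellationToken cancellationToken = default) =>
            await UpdateStatusAsync(async: true, cancellationToken).ConfigureAwait(false);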
        /// <summary> Publishes a batch of CloudEvents to an Azure Event Grid topic. </summary>
        /// <param name="events"> An array of events to be published to Event Grid. </param>
        /// <param name="async">Whether to invoke the operation asynchronously.</param>
        /// <param name="cancellationToken"> The cancellation token to use. </param>
        private async Task<Response> SendCloudEventsInternal(IEnumerable<CloudEvent> events, bool async, CancellationToken cancellationToken = default)
        {
            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(EventGridPublisherClient)}.{nameof(SendEvents)}");
            scope.Start();

            try
            {
                // List of events cannot be null
                Argument.AssertNotNull(events, nameof(events));

                List<CloudEventInternal> eventsWithSerializedPayloads = new List<CloudEventInternal>();
                foreach (CloudEvent cloudEvent in events)
                {
                    // Individual events cannot be null
                    Argument.AssertNotNull(cloudEvent, nameof(cloudEvent));

                    CloudEventInternal newCloudEvent = new CloudEventInternal(
                        cloudEvent.Id,
                        cloudEvent.Source,
                        cloudEvent.Type,
                        "1.0")
                    {
                        Time            = cloudEvent.Time,
                        DataBase64      = cloudEvent.DataBase64,
                        Datacontenttype = cloudEvent.DataContentType,
                        Dataschema      = cloudEvent.DataSchema,
                        Subject         = cloudEvent.Subject
                    };

                    foreach (KeyValuePair<string, object> kvp in cloudEvent.ExtensionAttributes)
                    {
                        newCloudEvent.Add(kvp.Key, new CustomModelSerializer(kvp.Value, _dataSerializer, cancellationToken));
                    }

                    // The 'Data' property is optional for CloudEvents
                    // Additionally, if the type of data is binary, 'Data' will not be populated (data will be stored in 'DataBase64' instead)
                    if (cloudEvent.Data != null)
                    {
                        MemoryStream stream = new MemoryStream();
                        _dataSerializer.Serialize(stream, cloudEvent.Data, cloudEvent.Data.GetType(), cancellationToken);
                        stream.Position = 0;
                        JsonDocument data = JsonDocument.Parse(stream);
                        newCloudEvent.Data = data.RootElement;
                    }
                    eventsWithSerializedPayloads.Add(newCloudEvent);
                }
                if (async)
                {
                    // Publish asynchronously if called via an async path
                    return await _serviceRestClient.PublishCloudEventEventsAsync(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken).ConfigureAwait(false);
                }
                else
                {
                    return _serviceRestClient.PublishCloudEventEvents(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken);
                }
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
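        // Illustrative sketch (assumed public surface): the SendEvents overloads
        // callers see are thin wrappers over the internal method above; the sync
        // overload uses EnsureCompleted to drain the synchronously-completing task.
        public virtual Response SendEvents(IEnumerable<CloudEvent> events, CancellationToken cancellationToken = default) =>
            SendCloudEventsInternal(events, async: false, cancellationToken).EnsureCompleted();

        public virtual async Task<Response> SendEventsAsync(IEnumerable<CloudEvent> events, CancellationToken cancellationToken = default) =>
            await SendCloudEventsInternal(events, async: true, cancellationToken).ConfigureAwait(false);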
        /// <summary> Publishes a batch of EventGridEvents to an Azure Event Grid topic. </summary>
        /// <param name="events"> An array of events to be published to Event Grid. </param>
        /// <param name="async">Whether to invoke the operation asynchronously.</param>
        /// <param name="cancellationToken"> The cancellation token to use. </param>
        private async Task<Response> SendEventsInternal(IEnumerable<EventGridEvent> events, bool async, CancellationToken cancellationToken = default)
        {
            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(EventGridPublisherClient)}.{nameof(SendEvents)}");
            scope.Start();

            try
            {
                // List of events cannot be null
                Argument.AssertNotNull(events, nameof(events));

                List<EventGridEventInternal> eventsWithSerializedPayloads = new List<EventGridEventInternal>();
                foreach (EventGridEvent egEvent in events)
                {
                    // Individual events cannot be null
                    Argument.AssertNotNull(egEvent, nameof(egEvent));

                    JsonDocument data;
                    if (egEvent.Data is BinaryData binaryEventData)
                    {
                        try
                        {
                            data = JsonDocument.Parse(binaryEventData);
                        }
                        catch (JsonException)
                        {
                            data = SerializeObjectToJsonDocument(binaryEventData.ToString(), typeof(string), cancellationToken);
                        }
                    }
                    else
                    {
                        data = SerializeObjectToJsonDocument(egEvent.Data, egEvent.Data.GetType(), cancellationToken);
                    }

                    EventGridEventInternal newEGEvent = new EventGridEventInternal(
                        egEvent.Id,
                        egEvent.Subject,
                        data.RootElement,
                        egEvent.EventType,
                        egEvent.EventTime,
                        egEvent.DataVersion)
                    {
                        Topic = egEvent.Topic
                    };

                    eventsWithSerializedPayloads.Add(newEGEvent);
                }
                if (async)
                {
                    // Publish asynchronously if called via an async path
                    return await _serviceRestClient.PublishEventsAsync(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken).ConfigureAwait(false);
                }
                else
                {
                    return _serviceRestClient.PublishEvents(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken);
                }
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
        /// <summary> Publishes a batch of CloudEvents to an Azure Event Grid topic. </summary>
        /// <param name="events"> An array of events to be published to Event Grid. </param>
        /// <param name="async">Whether to invoke the operation asynchronously</param>
        /// <param name="cancellationToken"> The cancellation token to use. </param>
        private async Task<Response> PublishCloudEventsInternal(IEnumerable<CloudEvent> events, bool async, CancellationToken cancellationToken = default)
        {
            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(EventGridPublisherClient)}.{nameof(PublishCloudEvents)}");
            scope.Start();

            try
            {
                // List of events cannot be null
                Argument.AssertNotNull(events, nameof(events));

                List<CloudEventInternal> eventsWithSerializedPayloads = new List<CloudEventInternal>();
                foreach (CloudEvent cloudEvent in events)
                {
                    // Individual events cannot be null
                    Argument.AssertNotNull(cloudEvent, nameof(cloudEvent));

                    CloudEventInternal newCloudEvent = new CloudEventInternal(
                        cloudEvent.Id,
                        cloudEvent.Source,
                        cloudEvent.Type,
                        cloudEvent.SpecVersion)
                    {
                        Time            = cloudEvent.Time,
                        Datacontenttype = cloudEvent.DataContentType,
                        Dataschema      = cloudEvent.DataSchema,
                        Subject         = cloudEvent.Subject
                    };

                    foreach (KeyValuePair<string, object> kvp in cloudEvent.ExtensionAttributes)
                    {
                        newCloudEvent.Add(kvp.Key, new EventGridSerializer(kvp.Value, _serializer, cancellationToken));
                    }

                    // The 'Data' property is optional for CloudEvents
                    if (cloudEvent.Data != null)
                    {
                        if (cloudEvent.Data is IEnumerable<byte> enumerable)
                        {
                            newCloudEvent.DataBase64 = Convert.ToBase64String(enumerable.ToArray());
                        }
                        else if (cloudEvent.Data is ReadOnlyMemory<byte> memory)
                        {
                            newCloudEvent.DataBase64 = Convert.ToBase64String(memory.ToArray());
                        }
                        else
                        {
                            newCloudEvent.Data = new EventGridSerializer(
                                cloudEvent.Data,
                                _serializer,
                                cancellationToken);
                        }
                    }
                    eventsWithSerializedPayloads.Add(newCloudEvent);
                }
                if (async)
                {
                    // Publish asynchronously if called via an async path
                    return await _serviceRestClient.PublishCloudEventEventsAsync(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken).ConfigureAwait(false);
                }
                else
                {
                    return _serviceRestClient.PublishCloudEventEvents(
                        _hostName,
                        eventsWithSerializedPayloads,
                        cancellationToken);
                }
            }
            catch (Exception e)
            {
                scope.Failed(e);
                throw;
            }
        }
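        // Usage sketch (assumed public surface and a hypothetical payload):
        // publishing a small batch of CloudEvents through the client that hosts
        // the internal method above.
        public static async Task PublishSampleAsync(EventGridPublisherClient client)
        {
            var events = new List<CloudEvent>
            {
                // CloudEvent(source, type, data) serializes 'data' as JSON
                new CloudEvent("/example/source", "Example.ItemReceived", new { itemSku = "512d38b6" })
            };

            await client.SendEventsAsync(events);
        }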
        /// <summary>
        /// Raise an <see cref="Azure.Core.SyncAsyncEventHandler{T}"/>
        /// event by executing each of the handlers sequentially (to avoid
        /// introducing accidental parallelism in customer code) and collecting
        /// any exceptions.
        /// </summary>
        /// <typeparam name="T">Type of the event arguments.</typeparam>
        /// <param name="eventHandler">The event's delegate.</param>
        /// <param name="e">
        /// An <see cref="SyncAsyncEventArgs"/> instance that contains the
        /// event data.
        /// </param>
        /// <param name="declaringTypeName">
        /// The name of the type declaring the event to construct a helpful
        /// exception message and distributed tracing span.
        /// </param>
        /// <param name="eventName">
        /// The name of the event to construct a helpful exception message and
        /// distributed tracing span.
        /// </param>
        /// <param name="clientDiagnostics">
        /// Client diagnostics to wrap all the handlers in a new distributed
        /// tracing span.
        /// </param>
        /// <returns>
        /// A task that represents running all of the event's handlers.
        /// </returns>
        /// <exception cref="AggregateException">
        /// An exception was thrown during the execution of at least one of the
        /// event's handlers.
        /// </exception>
        /// <exception cref="ArgumentNullException">
        /// Thrown when <paramref name="e"/>, <paramref name="declaringTypeName"/>,
        /// <paramref name="eventName"/>, or <paramref name="clientDiagnostics"/>
        /// are null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when <paramref name="declaringTypeName"/> or
        /// <paramref name="eventName"/> are empty.
        /// </exception>
        public static async Task RaiseAsync<T>(
            this SyncAsyncEventHandler<T> eventHandler,
            T e,
            string declaringTypeName,
            string eventName,
            ClientDiagnostics clientDiagnostics)
            where T : SyncAsyncEventArgs
        {
            Argument.AssertNotNull(e, nameof(e));
            Argument.AssertNotNullOrEmpty(declaringTypeName, nameof(declaringTypeName));
            Argument.AssertNotNullOrEmpty(eventName, nameof(eventName));
            Argument.AssertNotNull(clientDiagnostics, nameof(clientDiagnostics));

            // Get the invocation list, but return early if there's no work
            if (eventHandler == null)
            {
                return;
            }
            Delegate[] handlers = eventHandler.GetInvocationList();
            if (handlers == null || handlers.Length == 0)
            {
                return;
            }

            // Wrap handler invocation in a distributed tracing span so it's
            // easy for customers to track and measure
            string eventFullName = declaringTypeName + "." + eventName;

            using DiagnosticScope scope = clientDiagnostics.CreateScope(eventFullName);
            scope.Start();
            try
            {
                // Collect any exceptions raised by handlers
                List<Exception> failures = null;

                // Raise the handlers sequentially so we don't introduce any
                // unintentional parallelism in customer code
                foreach (Delegate handler in handlers)
                {
                    SyncAsyncEventHandler<T> azureHandler = (SyncAsyncEventHandler<T>)handler;
                    try
                    {
                        Task runHandlerTask = azureHandler(e);
                        // We could consider logging something when e.RunSynchronously
                        // is true but runHandlerTask.IsCompleted is false.
                        // (We won't bother checking that in our tests because
                        // EnsureCompleted on the code path that raised the
                        // event will catch it for us.)
                        await runHandlerTask.ConfigureAwait(false);
                    }
                    catch (Exception ex)
                    {
                        failures ??= new List<Exception>();
                        failures.Add(ex);
                    }
                }

                // Wrap any exceptions in an AggregateException
                if (failures?.Count > 0)
                {
                    // Include the event name in the exception for easier debugging
                    throw new AggregateException(
                              "Unhandled exception(s) thrown when raising the " + eventFullName + " event.",
                              failures);
                }
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
        }
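        // Usage sketch (hypothetical client type and event name): declaring a
        // SyncAsyncEventHandler-based event and raising it through the extension
        // above so handlers run sequentially inside a diagnostic scope.
        //
        //   public event SyncAsyncEventHandler<SyncAsyncEventArgs> ConnectionClosed;
        //
        //   private async Task OnConnectionClosedAsync(SyncAsyncEventArgs args) =>
        //       await ConnectionClosed.RaiseAsync(
        //           args, nameof(MyServiceClient), nameof(ConnectionClosed), _clientDiagnostics)
        //           .ConfigureAwait(false);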
        /// <summary>
        ///   The main loop of a partition pump.  It receives events from the Azure Event Hubs service
        ///   and delegates their processing to the inner partition processor.
        /// </summary>
        ///
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> instance to signal the request to cancel the operation.</param>
        ///
        /// <returns>A task to be resolved on when the operation has completed.</returns>
        ///
        private async Task RunAsync(CancellationToken cancellationToken)
        {
            IEnumerable<EventData> receivedEvents;
            Exception unrecoverableException = null;

            // We'll break from the loop upon encountering a non-retriable exception.  The event processor periodically
            // checks its pumps' status, so it should be aware of when one of them stops working.

            while (!cancellationToken.IsCancellationRequested)
            {
                try
                {
                    receivedEvents = await InnerConsumer.ReceiveAsync(Options.MaximumMessageCount, Options.MaximumReceiveWaitTime, cancellationToken).ConfigureAwait(false);

                    using DiagnosticScope diagnosticScope = EventDataInstrumentation.ClientDiagnostics.CreateScope(DiagnosticProperty.EventProcessorProcessingActivityName);
                    diagnosticScope.AddAttribute("kind", "server");

                    if (diagnosticScope.IsEnabled)
                    {
                        foreach (var eventData in receivedEvents)
                        {
                            if (EventDataInstrumentation.TryExtractDiagnosticId(eventData, out string diagnosticId))
                            {
                                diagnosticScope.AddLink(diagnosticId);
                            }
                        }
                    }

                    diagnosticScope.Start();

                    try
                    {
                        await PartitionProcessor.ProcessEventsAsync(Context, receivedEvents, cancellationToken).ConfigureAwait(false);
                    }
                    catch (Exception partitionProcessorException)
                    {
                        diagnosticScope.Failed(partitionProcessorException);
                        unrecoverableException = partitionProcessorException;
                        CloseReason            = PartitionProcessorCloseReason.PartitionProcessorException;

                        break;
                    }
                }
                catch (Exception eventHubException)
                {
                    // Stop running only if it's not a retriable exception.

                    if (s_retryPolicy.CalculateRetryDelay(eventHubException, 1) == null)
                    {
                        unrecoverableException = eventHubException;
                        CloseReason            = PartitionProcessorCloseReason.EventHubException;

                        break;
                    }
                }
            }

            if (unrecoverableException != null)
            {
                // In case an exception is encountered while partition processor is processing the error, don't
                // catch it and let the calling method (StopAsync) handle it.

                await PartitionProcessor.ProcessErrorAsync(Context, unrecoverableException, cancellationToken).ConfigureAwait(false);
            }
        }
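        // Illustrative sketch: the non-retriable check from the catch block above,
        // factored out. CalculateRetryDelay returns null when the policy deems the
        // exception fatal, in which case the pump should stop.
        private static async Task<bool> WaitForRetryAsync(Exception exception, int attemptCount, CancellationToken cancellationToken)
        {
            TimeSpan? retryDelay = s_retryPolicy.CalculateRetryDelay(exception, attemptCount);

            if (retryDelay == null)
            {
                // Non-retriable; the caller records the exception and breaks out.
                return false;
            }

            await Task.Delay(retryDelay.Value, cancellationToken).ConfigureAwait(false);
            return true;
        }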
        private Response<BlobContentInfo> UploadInSequence(
            Stream content,
            int blockSize,
            BlobHttpHeaders blobHttpHeaders,
            IDictionary<string, string> metadata,
            BlobRequestConditions conditions,
            IProgress<long> progressHandler,
            AccessTier? accessTier,
            CancellationToken cancellationToken)
        {
            // Wrap the staging and commit calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Blobs)}.{nameof(BlobClient)}.{nameof(BlobClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each block staging operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // The list tracking blocks IDs we're going to commit
                List<string> blockIds = new List<string>();

                // Partition the stream into individual blocks and stage them
                IAsyncEnumerator<ChunkedStream> enumerator =
                    PartitionedUploadExtensions.GetBlocksAsync(content, blockSize, async: false, _arrayPool, cancellationToken)
                    .GetAsyncEnumerator(cancellationToken);
#pragma warning disable AZC0107
                while (enumerator.MoveNextAsync().EnsureCompleted())
#pragma warning restore AZC0107
                {
                    // Dispose the block after the loop iterates and return its
                    // memory to our ArrayPool
                    using ChunkedStream block = enumerator.Current;

                    // Stage the next block
                    string blockId = GenerateBlockId(block.AbsolutePosition);
                    _client.StageBlock(
                        blockId,
                        new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                        conditions: conditions,
                        progressHandler: progressHandler,
                        cancellationToken: cancellationToken);

                    blockIds.Add(blockId);
                }

                // Commit the block list after everything has been staged to
                // complete the upload
                return _client.CommitBlockList(
                    blockIds,
                    blobHttpHeaders,
                    metadata,
                    conditions,
                    accessTier,
                    cancellationToken);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
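        // Plausible sketch of GenerateBlockId (an assumption; the real helper may
        // differ): the service requires block IDs to be Base64-encoded strings of
        // equal length within a blob, so encoding the fixed-width absolute offset
        // of each block satisfies both constraints.
        private static string GenerateBlockId(long absolutePosition) =>
            Convert.ToBase64String(BitConverter.GetBytes(absolutePosition));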
        /// <summary>
        /// Submit a <see cref="BlobBatch"/> of sub-operations.
        /// </summary>
        /// <param name="batch">
        /// A <see cref="BlobBatch"/> of sub-operations.
        /// </param>
        /// <param name="throwOnAnyFailure">
        /// A value indicating whether or not to throw exceptions for
        /// sub-operation failures.
        /// </param>
        /// <param name="async">
        /// Whether to invoke the operation asynchronously.
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate notifications
        /// that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response"/> on successfully submitting.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure to submit the batch occurs.  Individual sub-operation
        /// failures will only throw if <paramref name="throwOnAnyFailure"/> is
        /// true, and will be wrapped in an <see cref="AggregateException"/>.
        /// </remarks>
        private async Task<Response> SubmitBatchInternal(
            BlobBatch batch,
            bool throwOnAnyFailure,
            bool async,
            CancellationToken cancellationToken)
        {
            DiagnosticScope scope = ClientDiagnostics.CreateScope($"{nameof(BlobBatchClient)}.{nameof(SubmitBatch)}");

            try
            {
                scope.Start();

                batch = batch ?? throw new ArgumentNullException(nameof(batch));
                if (batch.Submitted)
                {
                    throw BatchErrors.CannotResubmitBatch(nameof(batch));
                }
                else if (!batch.IsAssociatedClient(this))
                {
                    throw BatchErrors.BatchClientDoesNotMatch(nameof(batch));
                }

                // Get the sub-operation messages to submit
                IList<HttpMessage> messages = batch.GetMessagesToSubmit();
                if (messages.Count == 0)
                {
                    throw BatchErrors.CannotSubmitEmptyBatch(nameof(batch));
                }
                // TODO: Consider validating the upper limit of 256 messages

                // Merge the sub-operations into a single multipart/mixed Stream
                (Stream content, string contentType) =
                    await MergeOperationRequests(
                        messages,
                        async,
                        cancellationToken)
                    .ConfigureAwait(false);

                if (IsContainerScoped)
                {
                    ResponseWithHeaders<Stream, ContainerSubmitBatchHeaders> response;

                    if (async)
                    {
                        response = await _containerRestClient.SubmitBatchAsync(
                            containerName: ContainerName,
                            contentLength: content.Length,
                            multipartContentType: contentType,
                            body: content,
                            cancellationToken: cancellationToken)
                            .ConfigureAwait(false);
                    }
                    else
                    {
                        response = _containerRestClient.SubmitBatch(
                            containerName: ContainerName,
                            contentLength: content.Length,
                            multipartContentType: contentType,
                            body: content,
                            cancellationToken: cancellationToken);
                    }

                    await UpdateOperationResponses(
                        messages,
                        response.GetRawResponse(),
                        response.Value,
                        response.Headers.ContentType,
                        throwOnAnyFailure,
                        async,
                        cancellationToken)
                    .ConfigureAwait(false);

                    return response.GetRawResponse();
                }
                else
                {
                    ResponseWithHeaders<Stream, ServiceSubmitBatchHeaders> response;

                    if (async)
                    {
                        response = await _serviceRestClient.SubmitBatchAsync(
                            contentLength: content.Length,
                            multipartContentType: contentType,
                            body: content,
                            cancellationToken: cancellationToken)
                            .ConfigureAwait(false);
                    }
                    else
                    {
                        response = _serviceRestClient.SubmitBatch(
                            contentLength: content.Length,
                            multipartContentType: contentType,
                            body: content,
                            cancellationToken: cancellationToken);
                    }

                    await UpdateOperationResponses(
                        messages,
                        response.GetRawResponse(),
                        response.Value,
                        response.Headers.ContentType,
                        throwOnAnyFailure,
                        async,
                        cancellationToken)
                    .ConfigureAwait(false);

                    return response.GetRawResponse();
                }
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
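        // Usage sketch (assumed public wrappers over the internal method above):
        // batching several sub-operations and submitting them in one request.
        public static void DeleteBlobsInOneRequest(BlobServiceClient service, Uri blob1, Uri blob2)
        {
            BlobBatchClient batchClient = service.GetBlobBatchClient();
            using BlobBatch batch = batchClient.CreateBatch();
            batch.DeleteBlob(blob1);
            batch.DeleteBlob(blob2);
            batchClient.SubmitBatch(batch, throwOnAnyFailure: true);
        }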
        /// <summary>
        /// The <see cref="BreakInternal"/> operation breaks the files's
        /// previously-acquired lease (if it exists).
        ///
        /// Once a lease is broken, it cannot be renewed.  Any authorized
        /// request can break the lease; the request is not required to
        /// specify a matching lease ID.
        ///
        /// A lease that has been broken can also be released.  A client can
        /// immediately acquire a file lease that has been
        /// released.
        ///
        /// </summary>
        /// <param name="async">
        /// Whether to invoke the operation asynchronously.
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{FileLease}"/> describing the broken lease.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        private async Task<Response<ShareFileLease>> BreakInternal(
            bool async,
            CancellationToken cancellationToken)
        {
            EnsureClient();
            using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(ShareLeaseClient)))
            {
                ClientConfiguration.Pipeline.LogMethodEnter(
                    nameof(ShareLeaseClient),
                    message:
                    $"{nameof(Uri)}: {Uri}\n" +
                    $"{nameof(LeaseId)}: {LeaseId}");

                DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(ShareLeaseClient)}.{nameof(Break)}");

                try
                {
                    scope.Start();

                    if (FileClient != null)
                    {
                        ResponseWithHeaders<FileBreakLeaseHeaders> response;

                        if (async)
                        {
                            response = await FileClient.FileRestClient.BreakLeaseAsync(
                                leaseAccessConditions: null,
                                cancellationToken: cancellationToken)
                                .ConfigureAwait(false);
                        }
                        else
                        {
                            response = FileClient.FileRestClient.BreakLease(
                                leaseAccessConditions: null,
                                cancellationToken: cancellationToken);
                        }

                        return Response.FromValue(
                            response.ToShareFileLease(),
                            response.GetRawResponse());
                    }
                    else
                    {
                        ResponseWithHeaders<ShareBreakLeaseHeaders> response;

                        if (async)
                        {
                            response = await ShareClient.ShareRestClient.BreakLeaseAsync(
                                breakPeriod: null,
                                leaseAccessConditions: null,
                                cancellationToken: cancellationToken)
                                .ConfigureAwait(false);
                        }
                        else
                        {
                            response = ShareClient.ShareRestClient.BreakLease(
                                breakPeriod: null,
                                leaseAccessConditions: null,
                                cancellationToken: cancellationToken);
                        }

                        return Response.FromValue(
                            response.ToShareFileLease(),
                            response.GetRawResponse());
                    }
                }
                catch (Exception ex)
                {
                    ClientConfiguration.Pipeline.LogException(ex);
                    scope.Failed(ex);
                    throw;
                }
                finally
                {
                    ClientConfiguration.Pipeline.LogMethodExit(nameof(ShareLeaseClient));
                    scope.Dispose();
                }
            }
        }
        /// <summary>
        /// The <see cref="AcquireInternal"/> operation acquires a lease on
        /// the file.
        ///
        /// If the file does not have an active lease, the File service
        /// creates a lease on the file and returns it.  If the
        /// file has an active lease, you can only request a new lease
        /// using the active lease ID as <see cref="LeaseId"/>.
        ///
        /// </summary>
        /// <param name="duration">
        /// Specifies the duration of the lease, in seconds, or specify
        /// <see cref="InfiniteLeaseDuration"/> for a lease that never expires.
        /// A non-infinite lease can be between 15 and 60 seconds.
        /// A lease duration cannot be changed using <see cref="RenewAsync"/>
        /// or <see cref="ChangeAsync"/>.
        /// </param>
        /// <param name="async">
        /// Whether to invoke the operation asynchronously.
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate
        /// notifications that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response{Lease}"/> describing the lease.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure occurs.
        /// </remarks>
        private async Task<Response<ShareFileLease>> AcquireInternal(
            TimeSpan? duration,
            bool async,
            CancellationToken cancellationToken)
        {
            EnsureClient();
            using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(ShareLeaseClient)))
            {
                ClientConfiguration.Pipeline.LogMethodEnter(
                    nameof(ShareLeaseClient),
                    message:
                    $"{nameof(Uri)}: {Uri}\n" +
                    $"{nameof(LeaseId)}: {LeaseId}\n");

                DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(ShareLeaseClient)}.{nameof(Acquire)}");

                try
                {
                    scope.Start();
                    if (FileClient != null)
                    {
                        ResponseWithHeaders<FileAcquireLeaseHeaders> response;

                        if (async)
                        {
                            response = await FileClient.FileRestClient.AcquireLeaseAsync(
                                duration: (int)Constants.File.Lease.InfiniteLeaseDuration,
                                proposedLeaseId: LeaseId,
                                cancellationToken: cancellationToken)
                                .ConfigureAwait(false);
                        }
                        else
                        {
                            response = FileClient.FileRestClient.AcquireLease(
                                duration: (int)Constants.File.Lease.InfiniteLeaseDuration,
                                proposedLeaseId: LeaseId,
                                cancellationToken: cancellationToken);
                        }

                        return Response.FromValue(
                            response.ToShareFileLease(),
                            response.GetRawResponse());
                    }
                    else
                    {
                        // Int64 is an overflow-safe cast relative to TimeSpan.MaxValue
                        long serviceDuration;

                        if (duration.HasValue && duration.Value >= TimeSpan.Zero)
                        {
                            serviceDuration = Convert.ToInt64(duration.Value.TotalSeconds);
                        }
                        else
                        {
                            serviceDuration = Constants.File.Lease.InfiniteLeaseDuration;
                        }

                        ResponseWithHeaders<ShareAcquireLeaseHeaders> response;

                        if (async)
                        {
                            response = await ShareClient.ShareRestClient.AcquireLeaseAsync(
                                duration: (int)serviceDuration,
                                proposedLeaseId: LeaseId,
                                cancellationToken: cancellationToken)
                                .ConfigureAwait(false);
                        }
                        else
                        {
                            response = ShareClient.ShareRestClient.AcquireLease(
                                duration: (int)serviceDuration,
                                proposedLeaseId: LeaseId,
                                cancellationToken: cancellationToken);
                        }

                        return Response.FromValue(
                            response.ToShareFileLease(),
                            response.GetRawResponse());
                    }
                }
                catch (Exception ex)
                {
                    ClientConfiguration.Pipeline.LogException(ex);
                    scope.Failed(ex);
                    throw;
                }
                finally
                {
                    ClientConfiguration.Pipeline.LogMethodExit(nameof(ShareLeaseClient));
                    scope.Dispose();
                }
            }
        }
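        // Usage sketch (assumed public wrappers over AcquireInternal/BreakInternal):
        // taking a lease on a file, then breaking it so others can acquire.
        public static async Task LeaseRoundTripAsync(ShareFileClient file)
        {
            ShareLeaseClient lease = file.GetShareLeaseClient();
            Response<ShareFileLease> acquired = await lease.AcquireAsync();
            try
            {
                // ... perform work guarded by acquired.Value.LeaseId ...
            }
            finally
            {
                await lease.BreakAsync();
            }
        }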
        private static async Task UpdateClientsideKeyEncryptionKeyInternal(
            BlobClient client,
            UpdateClientSideKeyEncryptionKeyOptions options,
            bool async,
            CancellationToken cancellationToken)
        {
            // argument validation
            Argument.AssertNotNull(client, nameof(client));
            ClientSideEncryptionOptions operationEncryptionOptions = options?.EncryptionOptionsOverride
                                                                     ?? client.ClientSideEncryption
                                                                     ?? throw new ArgumentException($"{nameof(ClientSideEncryptionOptions)} are not configured on this client and none were provided for the operation.");

            Argument.AssertNotNull(operationEncryptionOptions.KeyEncryptionKey, nameof(ClientSideEncryptionOptions.KeyEncryptionKey));
            Argument.AssertNotNull(operationEncryptionOptions.KeyResolver, nameof(ClientSideEncryptionOptions.KeyResolver));
            Argument.AssertNotNull(operationEncryptionOptions.KeyWrapAlgorithm, nameof(ClientSideEncryptionOptions.KeyWrapAlgorithm));

            using (client.ClientConfiguration.Pipeline.BeginLoggingScope(nameof(BlobClient)))
            {
                client.ClientConfiguration.Pipeline.LogMethodEnter(
                    nameof(BlobBaseClient),
                    message:
                    $"{nameof(Uri)}: {client.Uri}\n" +
                    $"{nameof(options.Conditions)}: {options?.Conditions}");

                DiagnosticScope scope = client.ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(BlobClient)}.{nameof(UpdateClientSideKeyEncryptionKey)}");

                try
                {
                    scope.Start();

                    // Hold onto the ETag, metadata, and encryption data
                    BlobProperties getPropertiesResponse = await client.GetPropertiesInternal(options?.Conditions, async, cancellationToken).ConfigureAwait(false);

                    ETag etag = getPropertiesResponse.ETag;
                    IDictionary<string, string> metadata = getPropertiesResponse.Metadata;
                    EncryptionData encryptionData = BlobClientSideDecryptor.GetAndValidateEncryptionDataOrDefault(metadata)
                        ?? throw new InvalidOperationException("Resource has no client-side encryption key to rotate.");

                    // Rotate the key wrapping
                    byte[] newWrappedKey = await WrapKeyInternal(
                        await UnwrapKeyInternal(
                            encryptionData,
                            operationEncryptionOptions.KeyResolver,
                            async,
                            cancellationToken).ConfigureAwait(false),
                        operationEncryptionOptions.KeyWrapAlgorithm,
                        operationEncryptionOptions.KeyEncryptionKey,
                        async,
                        cancellationToken).ConfigureAwait(false);

                    // Set the new wrapped key info and reinsert it into the metadata
                    encryptionData.WrappedContentKey = new KeyEnvelope
                    {
                        EncryptedKey = newWrappedKey,
                        Algorithm    = operationEncryptionOptions.KeyWrapAlgorithm,
                        KeyId        = operationEncryptionOptions.KeyEncryptionKey.KeyId
                    };
                    metadata[Constants.ClientSideEncryption.EncryptionDataKey] = EncryptionDataSerializer.Serialize(encryptionData);

                    // Update the blob ONLY IF THE ETAG MATCHES (don't take chances that the encryption info is now out of sync)
                    BlobRequestConditions modifiedRequestConditions = BlobRequestConditions.CloneOrDefault(options?.Conditions) ?? new BlobRequestConditions();
                    modifiedRequestConditions.IfMatch = etag;
                    await client.SetMetadataInternal(metadata, modifiedRequestConditions, async, cancellationToken).ConfigureAwait(false);
                }
                catch (Exception ex)
                {
                    client.ClientConfiguration.Pipeline.LogException(ex);
                    scope.Failed(ex);
                    throw;
                }
                finally
                {
                    client.ClientConfiguration.Pipeline.LogMethodExit(nameof(BlobBaseClient));
                    scope.Dispose();
                }
            }
        }
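        // Minimal sketch of the optimistic-concurrency pattern used above
        // (hypothetical helper): capture the ETag on read, then write back only
        // if the blob is unchanged so a concurrent rotation cannot interleave.
        private static async Task SetMetadataIfUnchangedAsync(
            BlobClient client,
            IDictionary<string, string> metadata,
            ETag etag,
            CancellationToken cancellationToken)
        {
            var conditions = new BlobRequestConditions { IfMatch = etag };
            await client.SetMetadataAsync(metadata, conditions, cancellationToken).ConfigureAwait(false);
        }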
        /// <summary>
        /// Returns a single segment of queues starting from the specified marker.
        ///
        /// For more information, see
        /// <see href="https://docs.microsoft.com/rest/api/storageservices/list-queues1">
        /// List Queues</see>.
        /// </summary>
        /// <param name="marker">
        /// Marker from the previous request.
        /// </param>
        /// <param name="traits">
        /// Optional trait options for shaping the queues.
        /// </param>
        /// <param name="prefix">
        /// Optional string that filters the results to return only queues
        /// whose name begins with the specified <paramref name="prefix"/>.
        /// </param>
        /// <param name="pageSizeHint">
        /// Optional hint to specify the desired size of the page returned.
        /// </param>
        /// <param name="async">
        /// Whether to invoke the operation asynchronously.
        /// </param>
        /// <param name="cancellationToken">
        /// <see cref="CancellationToken"/>
        /// </param>
        /// <returns>
        /// A single segment of queues starting from the specified marker, including the next marker if appropriate.
        /// </returns>
        /// <remarks>
        /// Use an empty marker to start enumeration from the beginning. Queue names are returned in lexicographic order.
        /// After getting a segment, process it, and then call ListQueuesSegmentAsync again (passing in the next marker) to get the next segment.
        /// </remarks>
        internal async Task<Response<ListQueuesSegmentResponse>> GetQueuesInternal(
            string marker,
            QueueTraits traits,
            string prefix,
            int? pageSizeHint,
            bool async,
            CancellationToken cancellationToken)
        {
            using (ClientConfiguration.Pipeline.BeginLoggingScope(nameof(QueueServiceClient)))
            {
                ClientConfiguration.Pipeline.LogMethodEnter(
                    nameof(QueueServiceClient),
                    message:
                    $"{nameof(Uri)}: {Uri}\n" +
                    $"{nameof(marker)}: {marker}\n" +
                    $"{nameof(traits)}: {traits}\n" +
                    $"{nameof(prefix)}: {prefix}");

                DiagnosticScope scope = ClientConfiguration.ClientDiagnostics.CreateScope($"{nameof(QueueServiceClient)}.{nameof(GetQueues)}");

                try
                {
                    ResponseWithHeaders<ListQueuesSegmentResponse, ServiceListQueuesSegmentHeaders> response;

                    scope.Start();
                    IEnumerable<string> includeTypes = traits.AsIncludeTypes();
                    if (async)
                    {
                        response = await _serviceRestClient.ListQueuesSegmentAsync(
                            prefix: prefix,
                            marker: marker,
                            maxresults: pageSizeHint,
                            include: includeTypes.Any() ? includeTypes : null,
                            cancellationToken: cancellationToken)
                            .ConfigureAwait(false);
                    }
                    else
                    {
                        response = _serviceRestClient.ListQueuesSegment(
                            prefix: prefix,
                            marker: marker,
                            maxresults: pageSizeHint,
                            include: includeTypes.Any() ? includeTypes : null,
                            cancellationToken: cancellationToken);
                    }

                    if ((traits & QueueTraits.Metadata) != QueueTraits.Metadata)
                    {
                        IEnumerable<QueueItem> queueItems = response.Value.QueueItems;
                        foreach (QueueItem queueItem in queueItems)
                        {
                            queueItem.Metadata = null;
                        }
                    }
                    return response;
                }
                catch (Exception ex)
                {
                    ClientConfiguration.Pipeline.LogException(ex);
                    scope.Failed(ex);
                    throw;
                }
                finally
                {
                    ClientConfiguration.Pipeline.LogMethodExit(nameof(QueueServiceClient));
                    scope.Dispose();
                }
            }
        }
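        // Usage sketch: callers normally enumerate queues through the public
        // pageable API, which drives GetQueuesInternal and handles continuation
        // markers for you.
        public static async Task ListQueueNamesAsync(QueueServiceClient service)
        {
            await foreach (QueueItem queue in service.GetQueuesAsync(QueueTraits.None))
            {
                Console.WriteLine(queue.Name);
            }
        }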
        /// <summary>
        ///   The main loop of a partition pump.  It receives events from the Azure Event Hubs service
        ///   and delegates their processing to the event processor processing handlers.
        /// </summary>
        ///
        /// <param name="cancellationToken">A <see cref="CancellationToken"/> instance to signal the request to cancel the operation.</param>
        ///
        /// <returns>A task to be resolved on when the operation has completed.</returns>
        ///
        private async Task RunAsync(CancellationToken cancellationToken)
        {
            List<EventData> receivedEvents;
            Exception unrecoverableException = null;

            // We'll break from the loop upon encountering a non-retriable exception.  The event processor periodically
            // checks its pumps' status, so it should be aware of when one of them stops working.

            while (!cancellationToken.IsCancellationRequested)
            {
                await using (var partitionReceiver = InnerConsumer.CreatePartitionReceiver(Context.PartitionId, StartingPosition))
                {
                    try
                    {
                        receivedEvents = (await partitionReceiver.ReceiveAsync(MaximumMessageCount, Options.MaximumReceiveWaitTime, cancellationToken).ConfigureAwait(false)).ToList();

                        using DiagnosticScope diagnosticScope = EventDataInstrumentation.ClientDiagnostics.CreateScope(DiagnosticProperty.EventProcessorProcessingActivityName);
                        diagnosticScope.AddAttribute("kind", "server");

                        if (diagnosticScope.IsEnabled)
                        {
                            foreach (var eventData in receivedEvents)
                            {
                                if (EventDataInstrumentation.TryExtractDiagnosticId(eventData, out string diagnosticId))
                                {
                                    diagnosticScope.AddLink(diagnosticId);
                                }
                            }
                        }

                        // Small workaround to make sure we call ProcessEvent with EventData = null when no events have been received.
                        // The code is expected to get simpler when we start using the async enumerator internally to receive events.

                        if (receivedEvents.Count == 0)
                        {
                            receivedEvents.Add(null);
                        }

                        diagnosticScope.Start();

                        foreach (var eventData in receivedEvents)
                        {
                            try
                            {
                                var processorEvent = new EventProcessorEvent(Context, eventData, UpdateCheckpointAsync);
                                await ProcessEventAsync(processorEvent).ConfigureAwait(false);
                            }
                            catch (Exception eventProcessingException)
                            {
                                diagnosticScope.Failed(eventProcessingException);
                                unrecoverableException = eventProcessingException;

                                break;
                            }
                        }
                    }
                    catch (Exception eventHubException)
                    {
                        // Stop running only if it's not a retriable exception.

                        if (RetryPolicy.CalculateRetryDelay(eventHubException, 1) == null)
                        {
                            throw;
                        }
                    }

                    if (unrecoverableException != null)
                    {
                        throw unrecoverableException;
                    }
                }
            }
        }
        /// <summary>
        /// Submit a <see cref="BlobBatch"/> of sub-operations.
        /// </summary>
        /// <param name="batch">
        /// A <see cref="BlobBatch"/> of sub-operations.
        /// </param>
        /// <param name="throwOnAnyFailure">
        /// A value indicating whether or not to throw exceptions for
        /// sub-operation failures.
        /// </param>
        /// <param name="async">
        /// Whether to invoke the operation asynchronously.
        /// </param>
        /// <param name="cancellationToken">
        /// Optional <see cref="CancellationToken"/> to propagate notifications
        /// that the operation should be cancelled.
        /// </param>
        /// <returns>
        /// A <see cref="Response"/> on successfully submitting.
        /// </returns>
        /// <remarks>
        /// A <see cref="RequestFailedException"/> will be thrown if
        /// a failure to submit the batch occurs.  Individual sub-operation
        /// failures will only throw if <paramref name="throwOnAnyFailure"/> is
        /// true, and will be wrapped in an <see cref="AggregateException"/>.
        /// </remarks>
        private async Task<Response> SubmitBatchInternal(
            BlobBatch batch,
            bool throwOnAnyFailure,
            bool async,
            CancellationToken cancellationToken)
        {
            DiagnosticScope scope = ClientDiagnostics.CreateScope($"{nameof(BlobBatchClient)}.{nameof(SubmitBatch)}");

            try
            {
                scope.Start();

                batch = batch ?? throw new ArgumentNullException(nameof(batch));
                if (batch.Submitted)
                {
                    throw BatchErrors.CannotResubmitBatch(nameof(batch));
                }
                else if (!batch.IsAssociatedClient(this))
                {
                    throw BatchErrors.BatchClientDoesNotMatch(nameof(batch));
                }

                // Get the sub-operation messages to submit
                IList<HttpMessage> messages = batch.GetMessagesToSubmit();
                if (messages.Count == 0)
                {
                    throw BatchErrors.CannotSubmitEmptyBatch(nameof(batch));
                }
                // TODO: Consider validating the upper limit of 256 messages

                // Merge the sub-operations into a single multipart/mixed Stream
                (Stream content, string contentType) =
                    await MergeOperationRequests(
                        messages,
                        async,
                        cancellationToken)
                    .ConfigureAwait(false);

                // Send the batch request
                Response<BlobBatchResult> batchResult;

                if (_isContainerScoped)
                {
                    batchResult = await BatchRestClient.Container.SubmitBatchAsync(
                        clientDiagnostics: ClientDiagnostics,
                        pipeline: Pipeline,
                        resourceUri: Uri,
                        body: content,
                        contentLength: content.Length,
                        multipartContentType: contentType,
                        version: Version.ToVersionString(),
                        async: async,
                        operationName: $"{nameof(BlobBatchClient)}.{nameof(SubmitBatch)}",
                        cancellationToken: cancellationToken)
                        .ConfigureAwait(false);
                }
                else
                {
                    batchResult = await BatchRestClient.Service.SubmitBatchAsync(
                        clientDiagnostics: ClientDiagnostics,
                        pipeline: Pipeline,
                        resourceUri: Uri,
                        body: content,
                        contentLength: content.Length,
                        multipartContentType: contentType,
                        version: Version.ToVersionString(),
                        async: async,
                        operationName: $"{nameof(BlobBatchClient)}.{nameof(SubmitBatch)}",
                        cancellationToken: cancellationToken)
                        .ConfigureAwait(false);
                }

                // Split the responses apart and update the sub-operation responses
                Response raw = batchResult.GetRawResponse();
                await UpdateOperationResponses(
                    messages,
                    raw,
                    batchResult.Value.Content,
                    batchResult.Value.ContentType,
                    throwOnAnyFailure,
                    async,
                    cancellationToken)
                .ConfigureAwait(false);

                // Return the batch result
                return(raw);
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
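        // A minimal, hypothetical caller-side sketch of the path that drives
        // SubmitBatchInternal, assuming the public Azure.Storage.Blobs.Batch
        // API; the connection string and blob URIs are placeholders.
        private static void SubmitBatchUsageSketch()
        {
            BlobServiceClient service = new BlobServiceClient("<connection-string>");
            BlobBatchClient batchClient = service.GetBlobBatchClient();
            BlobBatch batch = batchClient.CreateBatch();
            batch.DeleteBlob(new Uri("https://<account>.blob.core.windows.net/container/blob1"));
            batch.DeleteBlob(new Uri("https://<account>.blob.core.windows.net/container/blob2"));
            batchClient.SubmitBatch(batch, throwOnAnyFailure: true);
        }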
        public Response DownloadTo(
            Stream destination,
            BlobRequestConditions conditions,
            CancellationToken cancellationToken)
        {
            // Wrap the download range calls in a Download span for distributed
            // tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope($"{nameof(BlobBaseClient)}.{nameof(BlobBaseClient.DownloadTo)}");

            try
            {
                scope.Start();

                // Just start downloading using an initial range.  If it's a
                // small blob, we'll get the whole thing in one shot.  If it's
                // a large blob, we'll get its full size in Content-Range and
                // can keep downloading it in segments.
                var initialRange = new HttpRange(0, _initialRangeSize);
                Response <BlobDownloadInfo> initialResponse = _client.Download(
                    initialRange,
                    conditions,
                    rangeGetContentHash: false,
                    cancellationToken);

                // If the initial request returned no content (i.e., a 304),
                // we'll pass that back to the user immediately
                if (initialResponse.IsUnavailable())
                {
                    return(initialResponse.GetRawResponse());
                }

                // Copy the first segment to the destination stream
                CopyTo(initialResponse, destination, cancellationToken);

                // If the first segment was the entire blob, we're finished now
                long initialLength = initialResponse.Value.ContentLength;
                long totalLength   = ParseRangeTotalLength(initialResponse.Value.Details.ContentRange);
                if (initialLength == totalLength)
                {
                    return(initialResponse.GetRawResponse());
                }

                // Capture the etag from the first segment and construct
                // conditions to ensure the blob doesn't change while we're
                // downloading the remaining segments
                ETag etag = initialResponse.Value.Details.ETag;
                BlobRequestConditions conditionsWithEtag = CreateConditionsWithEtag(conditions, etag);

                // Download each of the remaining ranges in the blob
                foreach (HttpRange httpRange in GetRanges(initialLength, totalLength))
                {
                    // Don't need to worry about 304s here because the ETag
                    // condition will turn into a 412 and throw a proper
                    // RequestFailedException
                    Response <BlobDownloadInfo> result = _client.Download(
                        httpRange,
                        conditionsWithEtag,
                        rangeGetContentHash: false,
                        cancellationToken);
                    CopyTo(result.Value, destination, cancellationToken);
                }

                return(initialResponse.GetRawResponse());
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
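        // ParseRangeTotalLength is not shown in this excerpt; a plausible
        // sketch, assuming the standard Content-Range format
        // ("bytes 0-4194303/41943040"), is to take the value after '/'
        // (CultureInfo comes from System.Globalization):
        private static long ParseRangeTotalLength(string range)
        {
            if (range == null)
            {
                return 0;
            }
            int lengthSeparator = range.IndexOf('/');
            if (lengthSeparator == -1)
            {
                throw new InvalidOperationException($"Could not obtain the total length from {range}");
            }
            return long.Parse(range.Substring(lengthSeparator + 1), CultureInfo.InvariantCulture);
        }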
        public async Task <Response> DownloadToAsync(
            Stream destination,
            BlobRequestConditions conditions,
            CancellationToken cancellationToken)
        {
            // Wrap the download range calls in a Download span for distributed
            // tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope($"{nameof(BlobBaseClient)}.{nameof(BlobBaseClient.DownloadTo)}");

            try
            {
                scope.Start();

                // Just start downloading using an initial range.  If it's a
                // small blob, we'll get the whole thing in one shot.  If it's
                // a large blob, we'll get its full size in Content-Range and
                // can keep downloading it in segments.
                var initialRange = new HttpRange(0, _initialRangeSize);
                Task <Response <BlobDownloadInfo> > initialResponseTask =
                    _client.DownloadAsync(
                        initialRange,
                        conditions,
                        rangeGetContentHash: false,
                        cancellationToken);
                Response <BlobDownloadInfo> initialResponse =
                    await initialResponseTask.ConfigureAwait(false);

                // If the initial request returned no content (i.e., a 304),
                // we'll pass that back to the user immediately
                if (initialResponse.IsUnavailable())
                {
                    return(initialResponse.GetRawResponse());
                }

                // If the first segment was the entire blob, we'll copy that to
                // the output stream and finish now
                long initialLength = initialResponse.Value.ContentLength;
                long totalLength   = ParseRangeTotalLength(initialResponse.Value.Details.ContentRange);
                if (initialLength == totalLength)
                {
                    await CopyToAsync(
                        initialResponse,
                        destination,
                        cancellationToken)
                    .ConfigureAwait(false);

                    return(initialResponse.GetRawResponse());
                }

                // Capture the etag from the first segment and construct
                // conditions to ensure the blob doesn't change while we're
                // downloading the remaining segments
                ETag etag = initialResponse.Value.Details.ETag;
                BlobRequestConditions conditionsWithEtag = CreateConditionsWithEtag(conditions, etag);

                // Create a queue of tasks that will each download one segment
                // of the blob.  The queue maintains the order of the segments
                // so we can keep appending to the end of the destination
                // stream when each segment finishes.
                var runningTasks = new Queue <Task <Response <BlobDownloadInfo> > >();
                runningTasks.Enqueue(initialResponseTask);

                // Fill the queue with tasks to download each of the remaining
                // ranges in the blob
                foreach (HttpRange httpRange in GetRanges(initialLength, totalLength))
                {
                    // Add the next Task (which will start the download but
                    // return before it's completed downloading)
                    runningTasks.Enqueue(_client.DownloadAsync(
                                             httpRange,
                                             conditionsWithEtag,
                                             rangeGetContentHash: false,
                                             cancellationToken));

                    // If we have fewer tasks than allotted workers, then just
                    // continue adding tasks until we have _maxWorkerCount
                    // running in parallel
                    if (runningTasks.Count < _maxWorkerCount)
                    {
                        continue;
                    }

                    // Once all the workers are busy, wait for the first
                    // segment to finish downloading before we create more work
                    await ConsumeQueuedTask().ConfigureAwait(false);
                }

                // Wait for all of the remaining segments to download
                while (runningTasks.Count > 0)
                {
                    await ConsumeQueuedTask().ConfigureAwait(false);
                }

                return(initialResponse.GetRawResponse());

                // Wait for the first segment in the queue of tasks to complete
                // downloading and copy it to the destination stream
                async Task ConsumeQueuedTask()
                {
                    // Don't need to worry about 304s here because the ETag
                    // condition will turn into a 412 and throw a proper
                    // RequestFailedException
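                    // (Awaiting the queued Task<Response<BlobDownloadInfo>>
                    // binds directly to BlobDownloadInfo here via Response<T>'s
                    // implicit conversion operator.)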
                    using BlobDownloadInfo result =
                              await runningTasks.Dequeue().ConfigureAwait(false);

                    // Even though the BlobDownloadInfo is returned immediately,
                    // CopyToAsync causes ConsumeQueuedTask to wait until the
                    // download is complete
                    await CopyToAsync(
                        result,
                        destination,
                        cancellationToken)
                    .ConfigureAwait(false);
                }
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
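        // GetRanges, used by both download paths above, is also not shown; a
        // sketch consistent with its use, assuming a _rangeSize field holding
        // the segment size, would carve the remainder of the blob into
        // fixed-size ranges:
        private IEnumerable<HttpRange> GetRanges(long initialLength, long totalLength)
        {
            for (long offset = initialLength; offset < totalLength; offset += _rangeSize)
            {
                // The last range may be shorter than _rangeSize
                yield return new HttpRange(offset, Math.Min(totalLength - offset, _rangeSize));
            }
        }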
        /// <summary>
        ///   Starts running a task responsible for receiving and processing events in the context of a specified partition.
        /// </summary>
        ///
        /// <param name="partitionId">The identifier of the Event Hub partition the task is associated with.  Events will be read only from this partition.</param>
        /// <param name="startingPosition">The position within the partition where the task should begin reading events.</param>
        /// <param name="maximumReceiveWaitTime">The maximum amount of time to wait for an event to be available before emitting an empty item; if <c>null</c>, empty items will not be published.</param>
        /// <param name="retryOptions">The set of options to use for determining whether a failed operation should be retried and, if so, the amount of time to wait between retry attempts.</param>
        /// <param name="trackLastEnqueuedEventProperties">Indicates whether or not the task should request information on the last enqueued event on the partition associated with a given event, and track that information as events are received.</param>
        /// <param name="cancellationToken">An optional <see cref="CancellationToken"/> instance to signal the request to cancel the operation.</param>
        ///
        /// <returns>The running task that is currently receiving and processing events in the context of the specified partition.</returns>
        ///
        protected virtual Task RunPartitionProcessingAsync(string partitionId,
                                                           EventPosition startingPosition,
                                                           TimeSpan? maximumReceiveWaitTime,
                                                           EventHubsRetryOptions retryOptions,
                                                           bool trackLastEnqueuedEventProperties,
                                                           CancellationToken cancellationToken = default)
        {
            // TODO: should the retry options used here be the same for the abstract RetryPolicy property?

            Argument.AssertNotNullOrEmpty(partitionId, nameof(partitionId));
            Argument.AssertNotNull(retryOptions, nameof(retryOptions));

            return(Task.Run(async () =>
            {
                // TODO: should we double check if a previous run already exists and close it?  We have a race condition.  Maybe we should throw in case another task exists.

                var cancellationSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
                var taskCancellationToken = cancellationSource.Token;

                ActivePartitionProcessorTokenSources[partitionId] = cancellationSource;

                // The context is left at its default value if the lookup fails.  This shouldn't happen unless the
                // user tries processing a partition they don't own.

                PartitionContexts.TryGetValue(partitionId, out var context);

                var clientOptions = new EventHubConsumerClientOptions
                {
                    RetryOptions = retryOptions
                };

                var readOptions = new ReadEventOptions
                {
                    MaximumWaitTime = maximumReceiveWaitTime,
                    TrackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties
                };

                await using var connection = CreateConnection();

                await using (var consumer = new EventHubConsumerClient(ConsumerGroup, connection, clientOptions))
                {
                    await foreach (var partitionEvent in consumer.ReadEventsFromPartitionAsync(partitionId, startingPosition, readOptions, taskCancellationToken))
                    {
                        using DiagnosticScope diagnosticScope = EventDataInstrumentation.ClientDiagnostics.CreateScope(DiagnosticProperty.EventProcessorProcessingActivityName);
                        diagnosticScope.AddAttribute("kind", "server");

                        if (diagnosticScope.IsEnabled &&
                            partitionEvent.Data != null &&
                            EventDataInstrumentation.TryExtractDiagnosticId(partitionEvent.Data, out string diagnosticId))
                        {
                            diagnosticScope.AddLink(diagnosticId);
                        }

                        diagnosticScope.Start();

                        try
                        {
                            await ProcessEventAsync(partitionEvent, context).ConfigureAwait(false);
                        }
                        catch (Exception eventProcessingException)
                        {
                            diagnosticScope.Failed(eventProcessingException);
                            throw;
                        }
                    }
                }
            }));
        }
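        // A hypothetical illustration of how a processor subclass might start
        // partition processing; the partition id, starting position, and
        // options below are placeholders.
        private Task StartPartitionProcessingSketch() =>
            RunPartitionProcessingAsync(
                partitionId: "0",
                startingPosition: EventPosition.Earliest,
                maximumReceiveWaitTime: TimeSpan.FromSeconds(30),
                retryOptions: new EventHubsRetryOptions(),
                trackLastEnqueuedEventProperties: true,
                cancellationToken: CancellationToken.None);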
Example #18
        public async Task <FetchResult> FetchAsync(
            string dtmi, Uri repositoryUri, DependencyResolutionOption resolutionOption, CancellationToken cancellationToken = default)
        {
            using DiagnosticScope scope = _clientDiagnostics.CreateScope("RemoteModelFetcher.Fetch");
            scope.Start();
            try
            {
                Queue <string> work = PrepareWork(dtmi, repositoryUri, resolutionOption == DependencyResolutionOption.TryFromExpanded);

                string remoteFetchError = string.Empty;
                RequestFailedException requestFailedExceptionThrown = null;
                Exception genericExceptionThrown = null;

                while (work.Count != 0)
                {
                    cancellationToken.ThrowIfCancellationRequested();

                    string tryContentPath = work.Dequeue();
                    ModelsRepositoryEventSource.Instance.FetchingModelContent(tryContentPath);

                    try
                    {
                        string content = await EvaluatePathAsync(tryContentPath, cancellationToken).ConfigureAwait(false);

                        return(new FetchResult()
                        {
                            Definition = content,
                            Path = tryContentPath
                        });
                    }
                    catch (RequestFailedException ex)
                    {
                        requestFailedExceptionThrown = ex;
                    }
                    catch (Exception ex)
                    {
                        genericExceptionThrown = ex;
                    }

                    if (genericExceptionThrown != null || requestFailedExceptionThrown != null)
                    {
                        remoteFetchError =
                            $"{string.Format(CultureInfo.InvariantCulture, StandardStrings.GenericGetModelsError, dtmi)} " +
                            string.Format(CultureInfo.InvariantCulture, StandardStrings.ErrorFetchingModelContent, tryContentPath);
                    }
                }

                if (requestFailedExceptionThrown != null)
                {
                    throw new RequestFailedException(
                              requestFailedExceptionThrown.Status,
                              remoteFetchError,
                              requestFailedExceptionThrown.ErrorCode,
                              requestFailedExceptionThrown);
                }
                else
                {
                    throw new RequestFailedException(
                              (int)HttpStatusCode.BadRequest,
                              remoteFetchError,
                              null,
                              genericExceptionThrown);
                }
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
        }
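        // PrepareWork builds candidate content paths from the DTMI.  A sketch
        // of the underlying DTMI-to-path convention (the helper name here is
        // hypothetical): lowercase the DTMI, turn ':' into '/' and ';' into
        // '-', then append ".json" (or ".expanded.json" when trying
        // pre-expanded models), so dtmi:com:example:Thermostat;1 becomes
        // dtmi/com/example/thermostat-1.json.
        private static string DtmiToQualifiedPath(string dtmi, bool expanded)
        {
            string path = dtmi.ToLowerInvariant().Replace(":", "/").Replace(";", "-");
            return path + (expanded ? ".expanded.json" : ".json");
        }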
Example #19
        /// <summary>
        /// Attest an Intel SGX enclave.
        /// </summary>
        /// <param name="request">Aggregate type containing the information needed to perform an attestation operation.</param>
        /// <param name="async">true if the API call should be asynchronous, false otherwise.</param>
        /// <param name="cancellationToken">Cancellation token used to cancel the request.</param>
        /// <returns>An <see cref="AttestationResponse{AttestationResult}"/> which contains the validated claims for the supplied <paramref name="request"/></returns>
        /// <remarks>The <see cref="AttestationRequest.Evidence"/> must be an Intel SGX Quote.
        /// <seealso href="https://software.intel.com/content/www/us/en/develop/articles/code-sample-intel-software-guard-extensions-remote-attestation-end-to-end-example.html"/>  for more information.
        ///</remarks>
        private async Task <AttestationResponse <AttestationResult> > AttestSgxEnclaveInternal(AttestationRequest request, bool async, CancellationToken cancellationToken = default)
        {
            Argument.AssertNotNull(request, nameof(request));
            Argument.AssertNotNull(request.Evidence, nameof(request.Evidence));
            using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(AttestationClient)}.{nameof(AttestSgxEnclave)}");
            scope.Start();
            try
            {
                var attestSgxEnclaveRequest = new AttestSgxEnclaveRequest
                {
                    Quote = request.Evidence.ToArray(),
                    DraftPolicyForAttestation = request.DraftPolicyForAttestation,
                };

                if (request.InittimeData != null)
                {
                    attestSgxEnclaveRequest.InitTimeData = new InitTimeData
                    {
                        Data     = request.InittimeData.BinaryData.ToArray(),
                        DataType = request.InittimeData.DataIsJson ? DataType.Json : DataType.Binary,
                    };
                }
                else
                {
                    attestSgxEnclaveRequest.InitTimeData = null;
                }

                if (request.RuntimeData != null)
                {
                    attestSgxEnclaveRequest.RuntimeData = new RuntimeData
                    {
                        Data     = request.RuntimeData.BinaryData.ToArray(),
                        DataType = request.RuntimeData.DataIsJson ? DataType.Json : DataType.Binary,
                    };
                }
                else
                {
                    attestSgxEnclaveRequest.RuntimeData = null;
                }

                Response <AttestationResponse> response;
                if (async)
                {
                    response = await _restClient.AttestSgxEnclaveAsync(attestSgxEnclaveRequest, cancellationToken).ConfigureAwait(false);
                }
                else
                {
                    response = _restClient.AttestSgxEnclave(attestSgxEnclaveRequest, cancellationToken);
                }
                var attestationToken = AttestationToken.Deserialize(response.Value.Token, _clientDiagnostics);
                if (_options.TokenOptions.ValidateToken)
                {
                    var signers = await GetSignersAsync(async, cancellationToken).ConfigureAwait(false);

                    if (!await attestationToken.ValidateTokenInternal(_options.TokenOptions, signers, async, cancellationToken).ConfigureAwait(false))
                    {
                        AttestationTokenValidationFailedException.ThrowFailure(signers, attestationToken);
                    }
                }

                return(new AttestationResponse <AttestationResult>(response.GetRawResponse(), attestationToken));
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
        }
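        // A minimal, hypothetical caller-side sketch for the SGX attestation
        // path above.  The endpoint is a placeholder, DefaultAzureCredential
        // comes from Azure.Identity, and GetSgxQuote/GetRuntimeData are
        // hypothetical helpers standing in for enclave output.
        private static AttestationResponse<AttestationResult> AttestSgxSketch()
        {
            var client = new AttestationClient(
                new Uri("https://<instance>.attest.azure.net"),
                new DefaultAzureCredential());

            return client.AttestSgxEnclave(new AttestationRequest
            {
                Evidence = BinaryData.FromBytes(GetSgxQuote()),
                RuntimeData = new AttestationData(BinaryData.FromBytes(GetRuntimeData()), false), // false: binary, not JSON
            });
        }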
Example #20
        private async Task <Response <BlobContentInfo> > UploadInParallelAsync(
            Stream content,
            int blockSize,
            BlobHttpHeaders blobHttpHeaders,
            IDictionary <string, string> metadata,
            BlobRequestConditions conditions,
            IProgress <long> progressHandler,
            AccessTier?accessTier,
            CancellationToken cancellationToken)
        {
            // Wrap the staging and commit calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Blobs)}.{nameof(BlobClient)}.{nameof(BlobClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each stage blob operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // The list tracking the block IDs we're going to commit
                List <string> blockIds = new List <string>();

                // A list of the tasks that are currently executing; it is
                // never allowed to grow beyond _maxWorkerCount entries
                List <Task> runningTasks = new List <Task>();

                // Partition the stream into individual blocks
                await foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                                   content,
                                   blockSize,
                                   async: true,
                                   _arrayPool,
                                   cancellationToken).ConfigureAwait(false))
                {
                    // Start staging the next block (but don't await the Task!)
                    string blockId = GenerateBlockId(block.AbsolutePosition);
                    Task   task    = StageBlockAsync(
                        block,
                        blockId,
                        conditions,
                        progressHandler,
                        cancellationToken);

                    // Add the block to our task and commit lists
                    runningTasks.Add(task);
                    blockIds.Add(blockId);

                    // If we run out of workers
                    if (runningTasks.Count >= _maxWorkerCount)
                    {
                        // Wait for at least one of them to finish
                        await Task.WhenAny(runningTasks).ConfigureAwait(false);

                        // Clear any completed blocks from the task list
                        for (int i = 0; i < runningTasks.Count; i++)
                        {
                            Task runningTask = runningTasks[i];
                            if (!runningTask.IsCompleted)
                            {
                                continue;
                            }

                            await runningTask.ConfigureAwait(false);

                            runningTasks.RemoveAt(i);
                            i--;
                        }
                    }
                }

                // Wait for all the remaining blocks to finish staging and then
                // commit the block list to complete the upload
                await Task.WhenAll(runningTasks).ConfigureAwait(false);

                return(await _client.CommitBlockListAsync(
                           blockIds,
                           blobHttpHeaders,
                           metadata,
                           conditions,
                           accessTier,
                           cancellationToken)
                       .ConfigureAwait(false));
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
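        // A sketch of the GenerateBlockId helper used above: block IDs must be
        // Base64-encoded, and every ID within a blob must have the same
        // length, so the block's position is written into a fixed-size buffer
        // before encoding.  (The buffer size here is an assumption.)
        private static string GenerateBlockId(long position)
        {
            byte[] blockId = new byte[48];
            BitConverter.GetBytes(position).CopyTo(blockId, 0);
            return Convert.ToBase64String(blockId);
        }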
Example #21
        private Response <PathInfo> UploadInSequence(
            Stream content,
            int blockSize,
            PathHttpHeaders httpHeaders,
            DataLakeRequestConditions conditions,
            IProgress <long> progressHandler,
            CancellationToken cancellationToken)
        {
            // Wrap the append and flush calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each append file operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // Partition the stream into individual blocks and stage them
                IAsyncEnumerator <ChunkedStream> enumerator =
                    PartitionedUploadExtensions.GetBlocksAsync(
                        content, blockSize, async: false, _arrayPool, cancellationToken)
                    .GetAsyncEnumerator(cancellationToken);

                // We need to keep track of how much data we have appended to
                // calculate offsets for the next appends, and the final
                // position to flush
                long appendedBytes = 0;

                while (enumerator.MoveNextAsync().EnsureCompleted())
                {
                    // Dispose the block after the loop iterates and return its
                    // memory to our ArrayPool
                    using ChunkedStream block = enumerator.Current;

                    // Append the next block
                    _client.Append(
                        new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                        offset: appendedBytes,
                        leaseId: conditions?.LeaseId,
                        progressHandler: progressHandler,
                        cancellationToken: cancellationToken);

                    appendedBytes += block.Length;
                }

                // Commit the block list after everything has been staged to
                // complete the upload
                return(_client.Flush(
                           position: appendedBytes,
                           httpHeaders: httpHeaders,
                           conditions: conditions,
                           cancellationToken: cancellationToken));
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
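        // Worked example of the offset bookkeeping above: with a 10 MiB stream
        // and a 4 MiB blockSize, the loop appends at offsets 0, 4 MiB, and
        // 8 MiB (the final block is 2 MiB), and Flush is then called at
        // position 10 MiB; appendedBytes always equals the bytes sent so far.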
Example #22
        private async Task <Response <PathInfo> > UploadInParallelAsync(
            Stream content,
            int blockSize,
            PathHttpHeaders httpHeaders,
            DataLakeRequestConditions conditions,
            IProgress <long> progressHandler,
            CancellationToken cancellationToken)
        {
            // Wrap the staging and commit calls in an Upload span for
            // distributed tracing
            DiagnosticScope scope = _client.ClientDiagnostics.CreateScope(
                _operationName ?? $"{nameof(Azure)}.{nameof(Storage)}.{nameof(Files)}.{nameof(DataLake)}.{nameof(DataLakeFileClient)}.{nameof(DataLakeFileClient.Upload)}");

            try
            {
                scope.Start();

                // Wrap progressHandler in an AggregatingProgressIncrementer to prevent
                // progress from being reset with each stage blob operation.
                if (progressHandler != null)
                {
                    progressHandler = new AggregatingProgressIncrementer(progressHandler);
                }

                // A list of the tasks that are currently executing; it is
                // never allowed to grow beyond _maxWorkerCount entries
                List <Task> runningTasks = new List <Task>();

                // We need to keep track of how much data we have appended to
                // calculate offsets for the next appends, and the final
                // position to flush
                long appendedBytes = 0;

                // Partition the stream into individual blocks
                await foreach (ChunkedStream block in PartitionedUploadExtensions.GetBlocksAsync(
                                   content, blockSize, async: true, _arrayPool, cancellationToken).ConfigureAwait(false))
                {
                    // Start appending the next block (but don't await the Task!)
                    Task task = AppendBlockAsync(
                        block,
                        appendedBytes,
                        conditions?.LeaseId,
                        progressHandler,
                        cancellationToken);

                    // Add the block to our task and commit lists
                    runningTasks.Add(task);

                    appendedBytes += block.Length;

                    // If we run out of workers
                    if (runningTasks.Count >= _maxWorkerCount)
                    {
                        // Wait for at least one of them to finish
                        await Task.WhenAny(runningTasks).ConfigureAwait(false);

                        // Clear any completed blocks from the task list
                        for (int i = 0; i < runningTasks.Count; i++)
                        {
                            Task runningTask = runningTasks[i];
                            if (!runningTask.IsCompleted)
                            {
                                continue;
                            }

                            await runningTask.ConfigureAwait(false);

                            runningTasks.RemoveAt(i);
                            i--;
                        }
                    }
                }

                // Wait for all the remaining blocks to finish staging and then
                // commit the block list to complete the upload
                await Task.WhenAll(runningTasks).ConfigureAwait(false);

                return(await _client.FlushAsync(
                           position : appendedBytes,
                           httpHeaders : httpHeaders,
                           conditions : conditions,
                           cancellationToken : cancellationToken)
                       .ConfigureAwait(false));
            }
            catch (Exception ex)
            {
                scope.Failed(ex);
                throw;
            }
            finally
            {
                scope.Dispose();
            }
        }
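        // AppendBlockAsync is not shown in this excerpt; a sketch consistent
        // with its use above would append one block at the given offset and
        // dispose the block afterwards to return its memory to the ArrayPool:
        private async Task AppendBlockAsync(
            ChunkedStream block,
            long offset,
            string leaseId,
            IProgress<long> progressHandler,
            CancellationToken cancellationToken)
        {
            using (block)
            {
                await _client.AppendAsync(
                    new MemoryStream(block.Bytes, 0, block.Length, writable: false),
                    offset: offset,
                    leaseId: leaseId,
                    progressHandler: progressHandler,
                    cancellationToken: cancellationToken).ConfigureAwait(false);
            }
        }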
        private void RegisterFailed(Exception ex)
        {
            AzureIdentityEventSource.Singleton.GetTokenFailed(_name, _context, ex);
            _scope.Failed(ex);
        }