public virtual async Task<(Result Result, TimeSpan Elapsed)> InvokeAsync(string actionName, string job)
{
    var actionWatch = ValueStopwatch.StartNew();

    Result result;
    try
    {
        var actionType = typeNameRegistry.GetType(actionName);
        var actionHandler = ruleActionHandlers[actionType];

        var deserialized = jsonSerializer.Deserialize<object>(job, actionHandler.DataType);

        using (var cts = new CancellationTokenSource(GetTimeoutInMs()))
        {
            result = await actionHandler.ExecuteJobAsync(deserialized, cts.Token).WithCancellation(cts.Token);
        }
    }
    catch (Exception ex)
    {
        result = Result.Failed(ex);
    }

    var elapsed = TimeSpan.FromMilliseconds(actionWatch.Stop());

    result.Enrich(elapsed);

    return (result, elapsed);
}
public void ConnectionStop()
{
    // Arrange
    var expectedEventId = 2;
    var eventListener = new TestEventListener(expectedEventId);
    var httpConnectionsEventSource = GetHttpConnectionEventSource();
    eventListener.EnableEvents(httpConnectionsEventSource, EventLevel.Informational);

    // Act
    var stopWatch = ValueStopwatch.StartNew();
    httpConnectionsEventSource.ConnectionStop("1", stopWatch);

    // Assert
    var eventData = eventListener.EventData;
    Assert.NotNull(eventData);
    Assert.Equal(expectedEventId, eventData.EventId);
    Assert.Equal("ConnectionStop", eventData.EventName);
    Assert.Equal(EventLevel.Informational, eventData.Level);
    Assert.Same(httpConnectionsEventSource, eventData.EventSource);
    Assert.Equal("Stopped connection '{0}'.", eventData.Message);
    Assert.Collection(eventData.Payload, arg =>
    {
        Assert.Equal("1", arg);
    });
}
public async Task LargeDirectory_NoScannerMatch()
{
    var stopwatch = ValueStopwatch.StartNew();
    await using var directory = TemporaryDirectory.Create();
    const int FileCount = 10_000;
    for (var i = 0; i < FileCount; i++)
    {
        await File.WriteAllTextAsync(directory.GetFullPath($"text{i.ToStringInvariant()}.txt"), "");
    }

    _testOutputHelper.WriteLine("File generated in " + stopwatch.GetElapsedTime());

    stopwatch = ValueStopwatch.StartNew();
    var items = new List<Dependency>(FileCount);
    await foreach (var item in DependencyScanner.ScanDirectoryAsync(directory.FullPath, new ScannerOptions { Scanners = new[] { new DummyScannerNeverMatch() } }))
    {
        items.Add(item);
    }

    _testOutputHelper.WriteLine("File scanned in " + stopwatch.GetElapsedTime());
    Assert.Empty(items);
}
private async Task<CloudBlobContainer> CreateCloudBlobContainer(
    DataProtectionAzureStorageOptions options,
    CloudStorageAccount cloudStorageAccount,
    CancellationToken cancellationToken = default)
{
    var sw = ValueStopwatch.StartNew();

    var cloudBlobClient = cloudStorageAccount.CreateCloudBlobClient();
    var cloudBlobContainer = cloudBlobClient.GetContainerReference(options.ContainerName);

    var created = await cloudBlobContainer.CreateIfNotExistsAsync(cancellationToken);
    if (created)
    {
        _logger.LogInformation("[Azure Blob][DataProtection] No Azure Blob [{blobName}] found - so one was auto created.", options.ContainerName);
    }
    else
    {
        _logger.LogInformation("[Azure Blob][DataProtection] Using existing Azure Blob:[{blobName}].", options.ContainerName);
    }

    _logger.LogInformation("[Azure Blob][DataProtection] Completed: {methodName}; Elapsed: {elapsed}sec", nameof(CreateCloudBlobContainer), sw.GetElapsedTime().TotalSeconds);

    return cloudBlobContainer;
}
/// <summary>
/// Gets the IDs of all partitions of a service. If the partitions do not fit in a single page,
/// one page of results is returned together with a continuation token that can be used to get the next page.
/// The partition ID filter is left null because all partitions are being enumerated.
/// </summary>
public async Task<IEnumerable<Guid>> GetPartitionListAsync(Uri serviceName, TimeSpan timeout, CancellationToken cancellationToken)
{
    var partitionList = new List<Guid>();
    ServicePartitionList previousResult = null;

    // Set up the stopwatch that tracks the elapsed time.
    var stopWatch = ValueStopwatch.StartNew();
    do
    {
        cancellationToken.ThrowIfCancellationRequested();
        var remaining = timeout - stopWatch.Elapsed;
        if (remaining.Ticks < 0)
        {
            // The elapsed time has exceeded the timeout budget.
            throw new TimeoutException($"Unable to enumerate all partition pages in the allotted time budget of {timeout.TotalSeconds} seconds");
        }

        previousResult = await ExceptionsHelper.TranslateCancellations(
            () => _queryClient.GetPartitionListAsync(
                serviceName: serviceName,
                partitionIdFilter: null,
                continuationToken: previousResult?.ContinuationToken,
                timeout: remaining,
                cancellationToken: cancellationToken),
            cancellationToken);

        foreach (var partition in previousResult)
        {
            partitionList.Add(partition.PartitionInformation.Id);
        }
    }
    while (!string.IsNullOrEmpty(previousResult?.ContinuationToken));

    return partitionList;
}
public async Task<IndirectProbeResponse> ProbeIndirectly(SiloAddress target, TimeSpan probeTimeout, int probeNumber)
{
    IndirectProbeResponse result;
    var healthScore = this.ActivationServices.GetRequiredService<LocalSiloHealthMonitor>().GetLocalHealthDegradationScore(DateTime.UtcNow);
    var probeResponseTimer = ValueStopwatch.StartNew();
    try
    {
        var probeTask = this.ProbeInternal(target, probeNumber);
        await probeTask.WithTimeout(probeTimeout, exceptionMessage: $"Requested probe timeout {probeTimeout} exceeded");

        result = new IndirectProbeResponse
        {
            Succeeded = true,
            IntermediaryHealthScore = healthScore,
            ProbeResponseTime = probeResponseTimer.Elapsed,
        };
    }
    catch (Exception exception)
    {
        result = new IndirectProbeResponse
        {
            Succeeded = false,
            IntermediaryHealthScore = healthScore,
            FailureMessage = $"Encountered exception {LogFormatter.PrintException(exception)}",
            ProbeResponseTime = probeResponseTimer.Elapsed,
        };
    }

    return result;
}
internal async void ConnectToPeer(TorrentManager manager, Peer peer)
{
    // Connect to the peer.
    IConnection connection = ConnectionFactory.Create(peer.ConnectionUri);
    if (connection == null)
    {
        return;
    }

    var state = new AsyncConnectState(manager, connection, ValueStopwatch.StartNew());
    PendingConnects.Add(state);
    manager.Peers.ConnectingToPeers.Add(peer);

    bool succeeded;
    try
    {
        await NetworkIO.ConnectAsync(connection);
        succeeded = true;
    }
    catch
    {
        succeeded = false;
    }

    PendingConnects.Remove(state);
    manager.Peers.ConnectingToPeers.Remove(peer);
    if (manager.Engine == null || !manager.Mode.CanAcceptConnections)
    {
        manager.Peers.AvailablePeers.Add(peer);
        connection.Dispose();
        return;
    }

    try
    {
        if (!succeeded)
        {
            peer.FailedConnectionAttempts++;
            connection.Dispose();
            manager.Peers.BusyPeers.Add(peer);
            manager.RaiseConnectionAttemptFailed(new ConnectionAttemptFailedEventArgs(peer, ConnectionFailureReason.Unreachable, manager));
        }
        else
        {
            PeerId id = new PeerId(peer, connection, manager.Bitfield?.Clone().SetAll(false));
            id.LastMessageReceived.Restart();
            id.LastMessageSent.Restart();

            Logger.Log(id.Connection, "ConnectionManager - Connection opened");

            ProcessNewOutgoingConnection(manager, id);
        }
    }
    catch
    {
        // FIXME: Do nothing now?
    }
    finally
    {
        // Try to connect to another peer
        TryConnect();
    }
}
/// <summary>
/// Gets the details for all applications or for a specific application created in the system.
/// Also takes a timeout interval, which is the maximum amount of time the system will allow this operation to run before returning.
/// </summary>
public async Task<IEnumerable<ApplicationWrapper>> GetApplicationListAsync(Uri applicationNameFilter, TimeSpan timeout, CancellationToken cancellationToken)
{
    var applicationList = new List<ApplicationWrapper>();
    ApplicationList previousResult = null;

    // Set up the stopwatch that tracks the elapsed time.
    var stopWatch = ValueStopwatch.StartNew();
    do
    {
        cancellationToken.ThrowIfCancellationRequested();
        var remaining = timeout - stopWatch.Elapsed;
        if (remaining.Ticks < 0)
        {
            // The elapsed time has exceeded the timeout budget.
            throw new TimeoutException($"Unable to enumerate all application pages in the allotted time budget of {timeout.TotalSeconds} seconds");
        }

        previousResult = await ExceptionsHelper.TranslateCancellations(
            () => _queryClient.GetApplicationListAsync(
                applicationNameFilter: applicationNameFilter,
                continuationToken: previousResult?.ContinuationToken,
                timeout: remaining,
                cancellationToken: cancellationToken),
            cancellationToken);

        applicationList.AddRange(previousResult.Select(MapApp));
    }
    while (!string.IsNullOrEmpty(previousResult?.ContinuationToken));

    return applicationList;

    ApplicationWrapper MapApp(Application app) =>
        new ApplicationWrapper
        {
            ApplicationName = app.ApplicationName,
            ApplicationTypeName = app.ApplicationTypeName,
            ApplicationTypeVersion = app.ApplicationTypeVersion,
            ApplicationParameters = MapAppParameters(app),
        };

    IDictionary<string, string> MapAppParameters(Application app)
    {
        // NOTE: App Params in Service Fabric are case insensitive (verified on version 7.0.457.9590).
        // Since this is not documented behavior, the code below tries to play it safe by ignoring
        // duplicated app params instead of throwing and preventing such a service from working at all
        // behind the Proxy.
        var result = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
        foreach (var param in app.ApplicationParameters)
        {
            if (!result.TryAdd(param.Name, param.Value))
            {
                Log.DuplicateAppParameter(_logger, param.Name, app.ApplicationName);
            }
        }

        return result;
    }
}
public async Task InvokeAsync(HttpContext context, RequestDelegate next)
{
    var usageBody = SetUsageBody(context);

    var watch = ValueStopwatch.StartNew();
    try
    {
        await next(context);
    }
    finally
    {
        if (context.Response.StatusCode != StatusCodes.Status429TooManyRequests)
        {
            var appId = context.Features.Get<IAppFeature>()?.App.Id;

            if (appId != null)
            {
                var bytes = usageBody.BytesWritten;

                if (context.Request.ContentLength != null)
                {
                    bytes += context.Request.ContentLength.Value;
                }

                var (_, clientId) = context.User.GetClient();

                var request = default(RequestLog);

                request.Bytes = bytes;
                request.CacheStatus = "MISS";
                request.CacheHits = 0;
                request.Costs = context.Features.Get<IApiCostsFeature>()?.Costs ?? 0;
                request.ElapsedMs = watch.Stop();
                request.RequestMethod = context.Request.Method;
                request.RequestPath = context.Request.Path;
                request.Timestamp = clock.GetCurrentInstant();
                request.StatusCode = context.Response.StatusCode;
                request.UserId = context.User.OpenIdSubject();
                request.UserClientId = clientId;

#pragma warning disable MA0040 // Flow the cancellation token
                await usageLog.LogAsync(appId.Value, request);

                if (request.Costs > 0)
                {
                    var date = request.Timestamp.ToDateTimeUtc().Date;

                    await usageTracker.TrackAsync(date, appId.Value.ToString(), request.UserClientId, request.Costs, request.ElapsedMs, request.Bytes);
                }
#pragma warning restore MA0040 // Flow the cancellation token
            }
        }
    }
}
public static async Task<double> ProfileAsync<T1, T2, T3>(Func<T1, T2, T3, Task> func, T1 t1, T2 t2, T3 t3)
{
    var stopwatch = ValueStopwatch.StartNew();
    await func(t1, t2, t3);
    return stopwatch.Elapsed.TotalMilliseconds;
}
public static async Task<double> ProfileAsync<T>(Func<T, Task> func, T t)
{
    var stopwatch = ValueStopwatch.StartNew();
    await func(t);
    return stopwatch.Elapsed.TotalMilliseconds;
}
public static async Task<double> ProfileAsync(Func<Task> action)
{
    var stopwatch = ValueStopwatch.StartNew();
    await action();
    return stopwatch.Elapsed.TotalMilliseconds;
}
protected override void ResetCore(TimeSpan? interval)
{
    this.stopwatch = ValueStopwatch.StartNew();

    if (interval != null)
    {
        this.intervalTicks = interval.Value.Ticks;
    }
}
public static double Profile(Action action)
{
    Guard.NotNull(action, nameof(action));

    var stopwatch = ValueStopwatch.StartNew();
    action();
    return stopwatch.Elapsed.TotalMilliseconds;
}
public static double Profile<T1, T2>(Action<T1, T2> action, T1 t1, T2 t2)
{
    Guard.NotNull(action, nameof(action));

    var stopwatch = ValueStopwatch.StartNew();
    action(t1, t2);
    return stopwatch.Elapsed.TotalMilliseconds;
}
/// <summary>
/// Executed each tick of the client engine.
/// </summary>
public void UnchokeReview()
{
    int interestedCount = 0;
    int unchokedCount = 0;

    chokedInterestedPeers.Clear();

    // Run a review even if we can unchoke all the peers who are currently choked. If more
    // peers become interested in the future we will need the results of a review to
    // choose the 'best' one.
    if (!timeSinceLastReview.IsRunning || timeSinceLastReview.Elapsed >= minimumTimeBetweenReviews)
    {
        // Based on the time of the last review, a new review is due.
        // There are more interested peers than available upload slots.
        // If we're downloading, the download rate is insufficient to skip the review.
        // If we're seeding, the upload rate is insufficient to skip the review.
        // So, we need a review.
        ExecuteReview();
        timeSinceLastReview = ValueStopwatch.StartNew();
    }

    // The review may have already unchoked peers. Bail early
    // if all the slots are full.
    foreach (var peer in Unchokeable.Peers)
    {
        // Choke any unchoked peers which are no longer interested
        if (!peer.IsInterested && !peer.AmChoking)
        {
            Choke(peer);
        }
        else if (peer.IsInterested)
        {
            interestedCount++;
            if (peer.AmChoking)
            {
                chokedInterestedPeers.Add(peer);
            }
            else
            {
                unchokedCount++;
            }
        }
    }

    if (interestedCount > 0 && interestedCount <= Unchokeable.UploadSlots || Unchokeable.UploadSlots == 0)
    {
        // We have enough slots to satisfy everyone, so unchoke them all
        foreach (var peer in chokedInterestedPeers)
        {
            Unchoke(peer);
        }
    }
    else
    {
        // Allocate slots based off the most recent review
        AllocateSlots(unchokedCount);
    }
}
async void ConnectToPeer(TorrentManager manager, Peer peer)
{
    // Connect to the peer.
    var connection = Factories.CreatePeerConnection(peer.ConnectionUri);
    if (connection == null || peer.AllowedEncryption.Count == 0)
    {
        return;
    }

    var state = new AsyncConnectState(manager, connection, ValueStopwatch.StartNew());
    PendingConnects.Add(state);
    manager.Peers.ConnectingToPeers.Add(peer);

    bool succeeded;
    try
    {
        await NetworkIO.ConnectAsync(connection);
        succeeded = true;
    }
    catch
    {
        succeeded = false;
    }

    PendingConnects.Remove(state);
    manager.Peers.ConnectingToPeers.Remove(peer);
    if (manager.Disposed || !manager.Mode.CanAcceptConnections)
    {
        manager.Peers.AvailablePeers.Add(peer);
        connection.Dispose();
        return;
    }

    try
    {
        if (!succeeded)
        {
            peer.FailedConnectionAttempts++;
            connection.Dispose();
            manager.RaiseConnectionAttemptFailed(new ConnectionAttemptFailedEventArgs(peer, ConnectionFailureReason.Unreachable, manager));
        }
        else
        {
            var id = new PeerId(peer, connection, new MutableBitField(manager.Bitfield.Length).SetAll(false));
            id.LastMessageReceived.Restart();
            id.LastMessageSent.Restart();

            logger.Info(id.Connection, "Connection opened");

            ProcessNewOutgoingConnection(manager, id);
        }
    }
    catch
    {
        // FIXME: Do nothing now?
    }
    finally
    {
        // Try to connect to another peer
        TryConnect();
    }
}
private async Task<IViewComponentResult> InvokeAsyncCore(ObjectMethodExecutor executor, object component, ViewComponentContext context)
{
    using (Log.ViewComponentScope(_logger, context))
    {
        var arguments = PrepareArguments(context.Arguments, executor);

        _diagnosticListener.BeforeViewComponent(context, component);
        Log.ViewComponentExecuting(_logger, context, arguments);

        var stopwatch = ValueStopwatch.StartNew();

        object resultAsObject;
        var returnType = executor.MethodReturnType;

        if (returnType == typeof(Task<IViewComponentResult>))
        {
            var task = executor.Execute(component, arguments);
            if (task is null)
            {
                throw new InvalidOperationException(Resources.ViewComponent_MustReturnValue);
            }

            resultAsObject = await (Task<IViewComponentResult>)task;
        }
        else if (returnType == typeof(Task<string>))
        {
            var task = executor.Execute(component, arguments);
            if (task is null)
            {
                throw new InvalidOperationException(Resources.ViewComponent_MustReturnValue);
            }

            resultAsObject = await (Task<string>)task;
        }
        else if (returnType == typeof(Task<IHtmlContent>))
        {
            var task = executor.Execute(component, arguments);
            if (task is null)
            {
                throw new InvalidOperationException(Resources.ViewComponent_MustReturnValue);
            }

            resultAsObject = await (Task<IHtmlContent>)task;
        }
        else
        {
            resultAsObject = await executor.ExecuteAsync(component, arguments);
        }

        var viewComponentResult = CoerceToViewComponentResult(resultAsObject);
        Log.ViewComponentExecuted(_logger, context, stopwatch.GetElapsedTime(), viewComponentResult);
        _diagnosticListener.AfterViewComponent(context, viewComponentResult, component);

        return viewComponentResult;
    }
}
private async Task RunAsync()
{
    var sw = ValueStopwatch.StartNew();

    CancellationTokenSource? cancellation = null;
    try
    {
        cancellation = CancellationTokenSource.CreateLinkedTokenSource(_stopping.Token);
        cancellation.CancelAfter(TimeoutMilliseconds);

        var blob = await _storageBlob.GetBlobAsync(Options.ModelName, Options.ModelFileName, cancellation.Token);

        var etag = string.Empty;
        if (blob != null)
        {
            await blob.FetchAttributesAsync();
            etag = blob.Properties.ETag;
        }

        if (_eTag != etag)
        {
            var previousToken = Interlocked.Exchange(ref _reloadToken, new ReloadToken());

            await Task.Delay(100, cancellation.Token);

            _logger.LogInformation(
                "[{loader}][Reloaded] Model Name: {modelName} Elapsed: {elapsed}ms",
                nameof(AzureStorageModelLoader),
                Options.ModelName,
                sw.GetElapsedTime().TotalMilliseconds);

            _eTag = etag;

            previousToken?.OnReload();
        }
    }
    catch (OperationCanceledException) when (!_stopping.IsCancellationRequested)
    {
        // This is a cancellation - if the app is shutting down we want to ignore it.
    }
    catch (Exception ex)
    {
        _logger.LogError(ex, "Azure Storage Model Loader failed for Model: {modelName}", Options.ModelName);
    }
    finally
    {
        cancellation?.Dispose();
    }

    // schedule a polling task only if none exists and a valid delay is specified
    if (_pollingTask == null)
    {
        _pollingTask = PollForChangesAsync();
    }
}
/// <summary>
/// Performs all required initialization on the host.
/// Must be called before the host is started.
/// </summary>
public async Task InitializeAsync(CancellationToken cancellationToken = default)
{
    _stopwatch = ValueStopwatch.StartNew();
    using (_metricsLogger.LatencyEvent(MetricEventNames.HostStartupLatency))
    {
        PreInitialize();
        HostInitializing?.Invoke(this, EventArgs.Empty);

        _workerRuntime = _workerRuntime ?? _environment.GetEnvironmentVariable(EnvironmentSettingNames.FunctionWorkerRuntime);

        // get worker config information and check to see if worker should index or not
        var workerConfigs = _languageWorkerOptions.Value.WorkerConfigs;
        bool workerIndexing = Utility.CanWorkerIndex(workerConfigs, _environment);

        // Generate Functions
        IEnumerable<FunctionMetadata> functionMetadataList = GetFunctionsMetadata(workerIndexing);

        if (!_environment.IsPlaceholderModeEnabled())
        {
            string runtimeStack = _workerRuntime;

            if (!string.IsNullOrEmpty(runtimeStack))
            {
                // Appending the runtime version is currently only enabled for linux consumption. This will be eventually enabled for
                // Windows Consumption as well.
                string runtimeVersion = _environment.GetEnvironmentVariable(RpcWorkerConstants.FunctionWorkerRuntimeVersionSettingName);

                if (!string.IsNullOrEmpty(runtimeVersion))
                {
                    runtimeStack = string.Concat(runtimeStack, "-", runtimeVersion);
                }
            }

            _metricsLogger.LogEvent(string.Format(MetricEventNames.HostStartupRuntimeLanguage, Sanitizer.Sanitize(runtimeStack)));

            Utility.LogAutorestGeneratedJsonIfExists(ScriptOptions.RootScriptPath, _logger);
        }

        IsFunctionDataCacheEnabled = GetIsFunctionDataCacheEnabled();

        await InitializeFunctionDescriptorsAsync(functionMetadataList, cancellationToken);

        if (!workerIndexing)
        {
            // Initialize the worker function invocation dispatcher only for valid functions after creating function descriptors.
            // The dispatcher is not needed for dotnet codeless functions, but it is needed for non-dotnet codeless functions.
            var filteredFunctionMetadata = functionMetadataList.Where(m => !Utility.IsCodelessDotNetLanguageFunction(m));
            await _functionDispatcher.InitializeAsync(Utility.GetValidFunctions(filteredFunctionMetadata, Functions), cancellationToken);
        }

        GenerateFunctions();

        ScheduleFileSystemCleanup();
    }
}
public async Task GetElapsedTimeReturnsTimeElapsedSinceStart()
{
    var stopwatch = ValueStopwatch.StartNew();
    await Task.Delay(200);
    var elapsed = stopwatch.GetElapsedTime();
    Assert.True(elapsed.TotalMilliseconds > 0);
    Assert.True(elapsed.TotalMilliseconds < 5000);
}
/// <inheritdoc/>
public virtual TResult Evaluate(Func<IDataView, IEstimator<ITransformer>, TResult> builder)
{
    var sw = ValueStopwatch.StartNew();

    var result = builder(TestDataView, TrainingPipeLine);

    result.ElapsedMilliseconds = (long)sw.GetElapsedTime().TotalMilliseconds;

    return result;
}
protected override async Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
{
    var stopwatch = ValueStopwatch.StartNew();
    var sb = new StringBuilder();
    sb.Append(request.Method).Append(' ').Append(request.RequestUri).AppendLine();
    LogHeaders(request.Headers, sb);
    if (request.Content != null)
    {
        LogHeaders(request.Content.Headers, sb);
        var requestBody = await request.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false);
        sb.AppendLine().AppendLine(requestBody);
    }

    try
    {
        var response = await base.SendAsync(request, cancellationToken).ConfigureAwait(false);
        sb.AppendLine(new string('-', 60));
        sb.Append((int)response.StatusCode).Append(' ').AppendLine(response.ReasonPhrase);
        LogHeaders(response.Headers, sb);
        if (response.Content != null)
        {
            LogHeaders(response.Content.Headers, sb);
            var contentType = response.Content.Headers.ContentType?.MediaType;
            if (contentType != "application/octet-stream")
            {
                var responseContent = await response.Content.ReadAsStringAsync(cancellationToken).ConfigureAwait(false);
                if (string.Equals(response.Content.Headers.ContentType?.MediaType, "application/json", StringComparison.OrdinalIgnoreCase))
                {
                    sb.AppendLine().AppendLine(JsonConvert.SerializeObject(JsonConvert.DeserializeObject(responseContent), Formatting.Indented));
                }
                else
                {
                    sb.AppendLine().AppendLine(responseContent);
                }
            }
            else
            {
                sb.AppendLine().AppendLine("**Content omitted for 'application/octet-stream'**");
            }
        }

        return response;
    }
    finally
    {
        sb.Append("Executed in ").Append(stopwatch.GetElapsedTime()).AppendLine();
        Logs.Add(sb.ToString());
    }
}
public async Task OnActionExecutionAsync(ActionExecutingContext context, ActionExecutionDelegate next)
{
    context.HttpContext.Features.Set<IApiCostsFeature>(FilterDefinition);

    var app = context.HttpContext.Context().App;

    if (app != null)
    {
        var appId = app.Id.ToString();

        if (FilterDefinition.Weight > 0)
        {
            using (Profiler.Trace("CheckUsage"))
            {
                var plan = appPlansProvider.GetPlanForApp(app);

                var usage = await usageTracker.GetMonthlyCallsAsync(appId, DateTime.Today);

                if (plan?.MaxApiCalls >= 0 && usage > plan.MaxApiCalls * 1.1)
                {
                    context.Result = new StatusCodeResult(429);
                    return;
                }
            }
        }

        var watch = ValueStopwatch.StartNew();

        try
        {
            await next();
        }
        finally
        {
            var elapsedMs = watch.Stop();

            await appLogStore.LogAsync(app.Id, clock.GetCurrentInstant(),
                context.HttpContext.Request.Method,
                context.HttpContext.Request.Path,
                context.HttpContext.User.OpenIdSubject(),
                context.HttpContext.User.OpenIdClientId(),
                elapsedMs,
                FilterDefinition.Weight);

            if (FilterDefinition.Weight > 0)
            {
                await usageTracker.TrackAsync(appId, context.HttpContext.User.OpenIdClientId(), FilterDefinition.Weight, elapsedMs);
            }
        }
    }
    else
    {
        await next();
    }
}
private async Task<HealthReportEntry> RunCheckAsync(IServiceScope scope, HealthCheckRegistration registration, CancellationToken cancellationToken)
{
    await Task.Yield();

    cancellationToken.ThrowIfCancellationRequested();

    var healthCheck = registration.Factory(scope.ServiceProvider);

    // If the health check does things like make Database queries using EF or backend HTTP calls,
    // it may be valuable to know that logs it generates are part of a health check. So we start a scope.
    using (_logger.BeginScope(new HealthCheckLogScope(registration.Name)))
    {
        var stopwatch = ValueStopwatch.StartNew();
        var context = new HealthCheckContext { Registration = registration };

        Log.HealthCheckBegin(_logger, registration);

        HealthReportEntry entry;
        try
        {
            var result = await healthCheck.CheckHealthAsync(context, cancellationToken);
            var duration = stopwatch.GetElapsedTime();

            entry = new HealthReportEntry(
                status: result.Status,
                description: result.Description,
                duration: duration,
                exception: result.Exception,
                data: result.Data);

            Log.HealthCheckEnd(_logger, registration, entry, duration);
            Log.HealthCheckData(_logger, registration, entry);
        }
        // Allow cancellation to propagate.
        catch (Exception ex) when (ex as OperationCanceledException == null)
        {
            var duration = stopwatch.GetElapsedTime();
            entry = new HealthReportEntry(
                status: HealthStatus.Unhealthy,
                description: ex.Message,
                duration: duration,
                exception: ex,
                data: null);

            Log.HealthCheckError(_logger, registration, ex, duration);
        }

        return entry;
    }
}
public ValueStopwatch ConnectionStart(string connectionId)
{
    Interlocked.Increment(ref _connectionsStarted);
    Interlocked.Increment(ref _currentConnections);

    if (IsEnabled(EventLevel.Informational, EventKeywords.None))
    {
        WriteEvent(1, connectionId);
        return ValueStopwatch.StartNew();
    }

    return default;
}
public ValueStopwatch? ProcessMessageDispatchStart()
{
    if (!IsEnabled())
    {
        return null;
    }

    MessageDispatchStart();
    return ValueStopwatch.StartNew();
}
public QueueFrame QueueTimer()
{
    Interlocked.Increment(ref _queueLength);

    if (IsEnabled())
    {
        return new QueueFrame(ValueStopwatch.StartNew(), this);
    }

    return CachedNonTimerResult;
}
public void TestParse()
{
    ValueStopwatch stopwatch = ValueStopwatch.StartNew();
    for (int i = 0; i < 100000; i++)
    {
        BackendMetricsParser.TryParse(delimitedString, out BackendMetrics backendMetrics);
    }

    stopwatch.Stop();
    Console.WriteLine(stopwatch.ElapsedMilliseconds);
}
/// <inheritdoc/>
public virtual TrainModelResult TrainModel(Func<IDataView, TrainModelResult> builder)
{
    var sw = ValueStopwatch.StartNew();

    var result = builder(TrainingDataView);
    Model = result.Model;
    result.ElapsedMilliseconds = (long)sw.GetElapsedTime().TotalMilliseconds;

    return result;
}
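// Note: the examples above come from different codebases, each carrying its own internal copy of
// ValueStopwatch, which is why some call GetElapsedTime() while others use Elapsed, Stop(), or
// ElapsedMilliseconds. The sketch below is a minimal, hypothetical illustration of the common
// allocation-free pattern these copies share - a readonly struct that records
// Stopwatch.GetTimestamp() in StartNew() and converts the timestamp delta to a TimeSpan on
// demand. It is illustrative only and not the exact source of any project shown here.
internal readonly struct ValueStopwatchSketch
{
    private static readonly double TimestampToTicks = TimeSpan.TicksPerSecond / (double)System.Diagnostics.Stopwatch.Frequency;

    private readonly long _startTimestamp;

    private ValueStopwatchSketch(long startTimestamp) => _startTimestamp = startTimestamp;

    // A default-constructed instance was never started and holds a zero timestamp.
    public bool IsActive => _startTimestamp != 0;

    public static ValueStopwatchSketch StartNew() => new ValueStopwatchSketch(System.Diagnostics.Stopwatch.GetTimestamp());

    public TimeSpan GetElapsedTime()
    {
        if (!IsActive)
        {
            throw new InvalidOperationException("An uninitialized, or 'default', ValueStopwatch cannot be used to get elapsed time.");
        }

        var timestampDelta = System.Diagnostics.Stopwatch.GetTimestamp() - _startTimestamp;
        return new TimeSpan((long)(TimestampToTicks * timestampDelta));
    }
}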