public ResolvedEvent[] Flush() { if (!Live || _waitingEvents.Count == 0) { return new ResolvedEvent[] { }; } var age = DateTime.UtcNow - _waitingEvents.First().Event.Created; if (_waitingEvents.Count < 100 && age < TimeSpan.FromSeconds(30)) { return new ResolvedEvent[] { }; } List<ResolvedEvent> discovered; lock (_lock) { discovered = _waitingEvents.GetRange(0, Math.Min(500, _waitingEvents.Count)); _waitingEvents.RemoveRange(0, Math.Min(500, _waitingEvents.Count)); } Queued.Decrement(Id, discovered.Count); _idleContext = Idle.NewContext(Id); return discovered.ToArray(); }
private static async Task ProduceModMetric() {
    while (true) {
        using (_timerTesting.NewContext("X")) {
            int delay = (int)(100 * (_stopper.Elapsed.TotalSeconds % 50)); // up to 5 seconds
            await Task.Delay(delay);
            string tag = "low";
            if (delay > 2500) { tag = "high"; } else if (delay > 100) { tag = "mid"; }
            await Task.Delay(delay);
            //_modTesting.Mark(tag, delay);
            //_modTesting.Mark(tag, 1);
            _modTesting.Increment(tag);
            Console.Write(".");
        }
    }
}
private async Task Commit() {
    Guid eventId;
    EventIds.TryRemove(CommitId, out eventId);
    var headers = new Dictionary<string, string> {
        [CommitHeader] = CommitId.ToString(),
        [TerminatingEventIdHeader] = eventId.ToString()
        // Todo: what else can we put in here?
    };
    var allRepos = _repositories.Values.Concat(_entityRepositories.Values).Concat(_pocoRepositories.Values).ToArray();
    var changedStreams = _repositories.Sum(x => x.Value.ChangedStreams) + _entityRepositories.Sum(x => x.Value.ChangedStreams);
    Logger.Write(LogLevel.Debug, () => $"Detected {changedStreams} changed streams in commit {CommitId}");
    if (changedStreams > 1) {
        Logger.Write(LogLevel.Info, () => $"Starting prepare for commit id {CommitId} with {_repositories.Count + _entityRepositories.Count + _pocoRepositories.Count} tracked repositories");
        using (PrepareTime.NewContext()) {
            // First check all streams read but not modified - if the store has a different version a VersionException will be thrown
            await allRepos.SelectAsync(x => x.Prepare(CommitId)).ConfigureAwait(false);
        }
    }
    // This log message can be expensive as the list is computed for a check,
    // so only warn users about multiple-stream commits when debugging
    Logger.Write(LogLevel.Debug, () => {
        var orderedRepos = _repositories.Select(x => new Tuple<int, IRepository>(x.Value.ChangedStreams, x.Value))
            .Concat(_entityRepositories.Select(x => new Tuple<int, IRepository>(x.Value.ChangedStreams, x.Value)));
        if (orderedRepos.Count(x => x.Item1 != 0) > 1) {
            return $"Starting commit id {CommitId} with {_repositories.Count + _entityRepositories.Count + _pocoRepositories.Count} tracked repositories. You changed {orderedRepos.Sum(x => x.Item1)} streams. We highly discourage this https://github.com/volak/Aggregates.NET/wiki/Changing-Multiple-Streams";
        }
        return $"Starting commit id {CommitId} with {_repositories.Count + _entityRepositories.Count + _pocoRepositories.Count} tracked repositories";
    });
    using (var ctx = CommitTime.NewContext()) {
        await allRepos.SelectAsync(x => x.Commit(CommitId, headers)).ConfigureAwait(false);
        if (ctx.Elapsed > TimeSpan.FromSeconds(1)) {
            SlowLogger.Write(LogLevel.Warn, () => $"Commit id {CommitId} took {ctx.Elapsed.TotalSeconds} seconds!");
        }
        Logger.Write(LogLevel.Info, () => $"Commit id {CommitId} took {ctx.Elapsed.TotalMilliseconds} ms");
    }
}
static void CollectData() { while (true) { myCounter.Increment(); using (myTimer.NewContext()) { Thread.Sleep(1000); } } }
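// myCounter and myTimer in the example above are assumed to be Metrics.NET instruments created
// once elsewhere; a minimal sketch of those declarations (instrument names are illustrative):
static readonly Counter myCounter = Metric.Counter("MyWorkItems", Unit.Items);
static readonly Timer myTimer = Metric.Timer("MyWorkTimer", Unit.Requests);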
private async Task Commit() {
    var headers = new Dictionary<string, string> {
        [CommitHeader] = CommitId.ToString(),
        // Todo: what else can we put in here?
    };
    var allRepos = _repositories.Values.Concat(_pocoRepositories.Values).ToArray();
    var changedStreams = _repositories.Sum(x => x.Value.ChangedStreams) + _pocoRepositories.Sum(x => x.Value.ChangedStreams);
    Logger.Write(LogLevel.Debug, () => $"Detected {changedStreams} changed streams in commit {CommitId}");
    // Only prepare if there are multiple changed streams; prepare quickly checks all changed streams to see if they are still the same version as when we read them.
    // Not 100% guaranteed to eliminate writing 1 stream then failing the other one, but it will help - and we also tell the user not to do this..
    if (changedStreams > 1) {
        Logger.Write(LogLevel.Warn, $"Starting prepare for commit id {CommitId} with {_repositories.Count + _pocoRepositories.Count} tracked repositories. You changed {changedStreams} streams. We highly discourage this https://github.com/volak/Aggregates.NET/wiki/Changing-Multiple-Streams");
        using (PrepareTime.NewContext()) {
            // First check all streams read but not modified - if the store has a different version a VersionException will be thrown
            await allRepos.WhenAllAsync(x => x.Prepare(CommitId)).ConfigureAwait(false);
        }
    }
    Logger.Write(LogLevel.Debug, () => $"Starting commit id {CommitId} with {_repositories.Count + _pocoRepositories.Count} tracked repositories");
    try {
        using (var ctx = CommitTime.NewContext()) {
            await allRepos.WhenAllAsync(x => x.Commit(CommitId, headers)).ConfigureAwait(false);
            if (ctx.Elapsed > TimeSpan.FromSeconds(1)) {
                SlowLogger.Write(LogLevel.Warn, () => $"Commit id {CommitId} took {ctx.Elapsed.TotalSeconds} seconds!");
            }
            Logger.Write(LogLevel.Debug, () => $"Commit id {CommitId} took {ctx.Elapsed.TotalMilliseconds} ms");
        }
    } finally {
        Guid eventId;
        EventIds.TryRemove(CommitId, out eventId);
    }
}
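// WhenAllAsync above (and SelectAsync in the earlier Commit example) are not BCL methods; they
// are presumably small extensions that project each element to a task and await them together.
// A plausible sketch of such a helper (an assumption, not the library's actual code):
public static Task WhenAllAsync<T>(this IEnumerable<T> source, Func<T, Task> selector) {
    return Task.WhenAll(source.Select(selector));
}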
private static void ConfigureNES() {
    Wireup wireup = Wireup.Init();
    //PersistenceWireup persistenceWireup = ConfigureSql(wireup);
    PersistenceWireup persistenceWireup = ConfigureMongo(wireup);
    counter.Increment(Iterations);
    using (var storeEvents = persistenceWireup.InitializeStorageEngine().UsingBinarySerialization().Build()) {
        storeEvents.Advanced.Purge();
        Parallel.For(1, Iterations, new ParallelOptions { MaxDegreeOfParallelism = 4 }, x => {
            using (timer.NewContext()) {
                do {
                    try {
                        var streamId = x % 10;
                        var stream = storeEvents.OpenStream("default", streamId.ToString(), 0, int.MaxValue);
                        stream.Add(new EventMessage() { Body = "abc" });
                        stream.CommitChanges(Guid.NewGuid());
                        break;
                    } catch (ConcurrencyException) {
                        concurrency.Increment();
                    }
                } while (true);
            }
            counter.Decrement();
        });
        Console.WriteLine("Press any key to exit...");
        Console.ReadLine();
    }
}
private async Task Commit() {
    // Insert all command headers into the commit
    var headers = new Dictionary<string, string>(CurrentHeaders);
    Logger.Write(LogLevel.Debug, () => $"Starting commit id {CommitId} for {_repositories.Count + _entityRepositories.Count + _pocoRepositories.Count} tracked repositories");
    using (CommitTime.NewContext()) {
        var startingEventId = CommitId;
        foreach (var repo in _repositories.Values) {
            try {
                startingEventId = await repo.Commit(CommitId, startingEventId, headers).ConfigureAwait(false);
            } catch (StorageException e) {
                throw new PersistenceException(e.Message, e);
            }
        }
        foreach (var repo in _entityRepositories.Values) {
            try {
                startingEventId = await repo.Commit(CommitId, startingEventId, headers).ConfigureAwait(false);
            } catch (StorageException e) {
                throw new PersistenceException(e.Message, e);
            }
        }
        foreach (var repo in _pocoRepositories.Values) {
            try {
                startingEventId = await repo.Commit(CommitId, startingEventId, headers).ConfigureAwait(false);
            } catch (StorageException e) {
                throw new PersistenceException(e.Message, e);
            }
        }
    }
    Logger.Write(LogLevel.Debug, () => $"Commit id {CommitId} complete");
}
public void Update() { try { using (_nodeUpdateTimer.NewContext()) { for (int i = 0; i < _config.MaxMessageToProcessPerTick; i++) { if (!Net.PumpEvents()) { break; } } OnNodeUpdate(); } } catch (Exception ex) { _logger.NodeUpdateLoopError(this, ex); } }
public ResolvedEvent[] Flush() { if (!Live) { return new ResolvedEvent[] { }; } List<ResolvedEvent> discovered; lock (_lock) { discovered = _waitingEvents.GetRange(0, Math.Min(100, _waitingEvents.Count)); _waitingEvents.RemoveRange(0, Math.Min(100, _waitingEvents.Count)); } Queued.Decrement(Id, discovered.Count); QueuedEvents.Decrement(Id, discovered.Count); _idleContext = Idle.NewContext(Id); return discovered.ToArray(); }
private static async void Run(Uri serverUri, string[] equations) { var httpClient = new HttpClient(); while (true) { foreach (var equation in equations) { using (timer.NewContext()) { try { var response = await httpClient.GetAsync(serverUri + "calc?eq=" + equation); responseCodeCounter.Increment(response.StatusCode.ToString()); } catch (Exception) { /* ignore transport errors in this load-generation sample */ } } } } }
public override async Task Invoke(IIncomingLogicalMessageContext context, Func<Task> next) {
    MessagesConcurrent.Increment();
    // Only SEND messages deserve a UnitOfWork
    if (context.MessageHeaders[Headers.MessageIntent] != MessageIntentEnum.Send.ToString()) {
        await next().ConfigureAwait(false);
        return;
    }
    Logger.Write(LogLevel.Info, () => $"Starting UOW for message {context.MessageId} type {context.Message.MessageType.FullName}");
    var uows = new Stack<IApplicationUnitOfWork>();
    try {
        MessagesMeter.Mark();
        using (MessagesTimer.NewContext()) {
            using (BeginTimer.NewContext()) {
                var listOfUows = context.Builder.BuildAll<IApplicationUnitOfWork>();
                // Trick to put ILastApplicationUnitOfWork at the bottom of the stack to be uow.End'd last
                foreach (var uow in listOfUows.Where(x => x is ILastApplicationUnitOfWork).Concat(listOfUows.Where(x => !(x is ILastApplicationUnitOfWork)))) {
                    uow.Builder = context.Builder;
                    int retries;
                    if (!context.Extensions.TryGet(Defaults.Retries, out retries)) { retries = 0; }
                    uow.Retries = retries;
                    var savedBag = await _persistence.Remove(context.MessageId, uow.GetType()).ConfigureAwait(false);
                    uow.Bag = savedBag ?? new ContextBag();
                    Logger.Write(LogLevel.Debug, () => $"Running UOW.Begin for message {context.MessageId} on {uow.GetType().FullName}");
                    await uow.Begin().ConfigureAwait(false);
                    uows.Push(uow);
                }
            }
            using (ProcessTimer.NewContext()) {
                DelayedMessage[] delayed;
                // Special case for delayed messages read from the delayed stream
                if (context.Headers.ContainsKey(Defaults.BulkHeader) && context.Extensions.TryGet(Defaults.BulkHeader, out delayed)) {
                    foreach (var x in delayed) {
                        // Replace all headers with the original headers to preserve CorrId etc.
                        context.Headers.Clear();
                        foreach (var header in x.Headers) { context.Headers[header.Key] = header.Value; }
                        context.Headers["Delayed Channel"] = x.ChannelKey;
                        context.Extensions.Set(Defaults.ChannelKey, x.ChannelKey);
                        context.UpdateMessageInstance(x.Message);
                        await next().ConfigureAwait(true);
                    }
                } else {
                    await next().ConfigureAwait(false);
                }
            }
            using (EndTimer.NewContext()) {
                foreach (var uow in uows.PopAll()) {
                    Logger.Write(LogLevel.Debug, () => $"Running UOW.End for message {context.MessageId} on {uow.GetType().FullName}");
                    try {
                        // ConfigureAwait true because we don't want uow.End running in parallel
                        await uow.End().ConfigureAwait(true);
                    } finally {
                        await _persistence.Save(context.MessageId, uow.GetType(), uow.Bag).ConfigureAwait(true);
                    }
                }
            }
            // Only clear context bags once all UOWs complete successfully
            await _persistence.Clear(context.MessageId).ConfigureAwait(false);
        }
    } catch (Exception e) {
        Logger.Warn($"Caught exception '{e.GetType().FullName}' while executing message {context.MessageId} {context.Message.MessageType.FullName}");
        ErrorsMeter.Mark(context.Message.MessageType.FullName);
        var trailingExceptions = new List<Exception>();
        using (EndTimer.NewContext()) {
            foreach (var uow in uows.PopAll()) {
                try {
                    Logger.Write(LogLevel.Debug, () => $"Running UOW.End with exception [{e.GetType().Name}] for message {context.MessageId} on {uow.GetType().FullName}");
                    await uow.End(e).ConfigureAwait(true);
                } catch (Exception endException) {
                    trailingExceptions.Add(endException);
                }
                await _persistence.Save(context.MessageId, uow.GetType(), uow.Bag).ConfigureAwait(true);
            }
        }
        if (trailingExceptions.Any()) {
            trailingExceptions.Insert(0, e);
            throw new System.AggregateException(trailingExceptions);
        }
        throw;
    } finally {
        MessagesConcurrent.Decrement();
    }
}
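// PopAll above is not a member of Stack<T>; presumably it is a small extension that drains the
// stack in LIFO order so UOW.End runs in the reverse of Begin order. A plausible sketch (an
// assumption, not the library's actual code):
public static IEnumerable<T> PopAll<T>(this Stack<T> stack) {
    while (stack.Count > 0) { yield return stack.Pop(); }
}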
private static void Threaded(object state) {
    var param = (ThreadParam)state;
    var random = new Random();
    while (!Bus.BusOnline) {
        Logger.Warn("Could not find NSB's onMessage handler yet - if this persists there is a problem.");
        Thread.Sleep(500);
    }
    try {
        while (true) {
            param.Token.ThrowIfCancellationRequested();
            if (WaitingEvents.Keys.Count == 0) { Thread.Sleep(100); continue; }
            List<IFullEvent> flushedEvents;
            // Pull a random delayed stream for processing
            try {
                if (!WaitingEvents.TryRemove(WaitingEvents.Keys.ElementAt(random.Next(WaitingEvents.Keys.Count)), out flushedEvents)) { continue; }
            } catch (ArgumentOutOfRangeException) {
                // Keys.Count is not thread safe and this can throw very occasionally
                continue;
            }
            DelayedQueued.Decrement(flushedEvents.Count());
            try {
                Task.Run(async () => {
                    using (var ctx = DelayedExecution.NewContext()) {
                        // Same stream ids should modify the same models; processing this way reduces write collisions on commit
                        await ProcessEvents(param, flushedEvents.ToArray()).ConfigureAwait(false);
                        if (ctx.Elapsed > TimeSpan.FromSeconds(5)) {
                            SlowLogger.Warn($"Processing {flushedEvents.Count()} bulked events took {ctx.Elapsed.TotalSeconds} seconds!");
                        }
                        Logger.Write(LogLevel.Info, () => $"Processing {flushedEvents.Count()} bulked events took {ctx.Elapsed.TotalMilliseconds} ms");
                    }
                }, param.Token).Wait();
            } catch (System.AggregateException e) {
                if (e.InnerException is OperationCanceledException) { throw e.InnerException; }
                // If not a cancellation exception, just write to the log and continue -
                // we don't want some random unknown exception to kill the whole event loop
                Logger.Error($"Received exception in main event thread: {e.InnerException.GetType()}: {e.InnerException.Message}", e);
            }
        }
    } catch (OperationCanceledException) { }
}
public async Task<IEnumerable<IFullEvent>> GetEvents(string stream, long? start = null, int? count = null) {
    var settings = new JsonSerializerSettings {
        TypeNameHandling = TypeNameHandling.Auto,
        SerializationBinder = new EventSerializationBinder(_mapper),
        ContractResolver = new EventContractResolver(_mapper),
        Converters = new[] { new Newtonsoft.Json.Converters.StringEnumConverter() }
    };
    var bucket = Math.Abs(stream.GetHashCode() % _clients.Count());
    var sliceStart = start ?? StreamPosition.Start;
    StreamEventsSlice current;
    Logger.Write(LogLevel.Debug, () => $"Reading events from stream [{stream}] starting at {sliceStart}");
    var events = new List<ResolvedEvent>();
    using (var ctx = ReadTime.NewContext()) {
        do {
            var readsize = _readsize;
            if (count.HasValue) { readsize = Math.Min(count.Value - events.Count, _readsize); }
            current = await _clients[bucket].ReadStreamEventsForwardAsync(stream, sliceStart, readsize, false).ConfigureAwait(false);
            Logger.Write(LogLevel.Debug, () => $"Read {current.Events.Length} events from position {sliceStart}. Status: {current.Status} LastEventNumber: {current.LastEventNumber} NextEventNumber: {current.NextEventNumber}");
            events.AddRange(current.Events);
            sliceStart = current.NextEventNumber;
        } while (!current.IsEndOfStream && (!count.HasValue || (events.Count != count.Value)));
        if (ctx.Elapsed > TimeSpan.FromSeconds(1)) {
            SlowLogger.Write(LogLevel.Warn, () => $"Reading {events.Count} events of total size {events.Sum(x => x.Event.Data.Length)} from stream [{stream}] took {ctx.Elapsed.TotalSeconds} seconds!");
        }
        Logger.Write(LogLevel.Info, () => $"Reading {events.Count} events of total size {events.Sum(x => x.Event.Data.Length)} from stream [{stream}] took {ctx.Elapsed.TotalMilliseconds} ms");
    }
    Logger.Write(LogLevel.Debug, () => $"Finished reading {events.Count} events from stream [{stream}]");
    if (current.Status == SliceReadStatus.StreamNotFound) {
        Logger.Write(LogLevel.Info, () => $"Stream [{stream}] does not exist!");
        throw new NotFoundException($"Stream [{stream}] does not exist!");
    }
    ReadEvents.Update(events.Count);
    ReadEventsSize.Update(events.Sum(x => x.Event.Data.Length));
    var translatedEvents = events.Select(e => {
        var metadata = e.Event.Metadata;
        var data = e.Event.Data;
        var descriptor = metadata.Deserialize(settings);
        if (descriptor.Compressed) { data = data.Decompress(); }
        var @event = data.Deserialize(e.Event.EventType, settings);
        // Special case if the event was written without a version - substitute the position from the store
        if (descriptor.Version == 0) { descriptor.Version = e.Event.EventNumber; }
        return (IFullEvent)new WritableEvent { Descriptor = descriptor, Event = @event, EventId = e.Event.EventId };
    }).ToList();
    Logger.Write(LogLevel.Info, () => $"Read {translatedEvents.Count} events from stream [{stream}]");
    return translatedEvents;
}
public void Call() { getTimer = metrics.Timer("getTimer", Unit.Requests); putTimer = metrics.Timer("putTimer", Unit.Requests); setTimer = metrics.Timer("setTimer", Unit.Requests); var endTime = DateTime.Now.AddSeconds(duration); while (DateTime.Now.CompareTo(endTime) < 0) { var map = client.GetMap<object, object>(baseMapName + random.Next(totalMaps)); double chance = random.NextDouble(); if ((chance -= putProb) <= 0) { using (putTimer.NewContext()) { map.Put(random.Next(totalKeys), value); } } else if ((chance -= getProb) <= 0) { using (getTimer.NewContext()) { map.Get(random.Next(totalKeys)); } } else { using (setTimer.NewContext()) { map.Set(random.Next(totalKeys), value); } } } Console.WriteLine("Call DONE..."); }
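// The "chance -= prob" chain above is a cumulative-probability pick: draw one uniform number in
// [0, 1), then subtract each branch's weight until the remainder goes non-positive. For example,
// with putProb = 0.3 and getProb = 0.5, draws below 0.3 hit Put, draws in [0.3, 0.8) hit Get,
// and the remaining 0.2 of draws fall through to Set.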
// Todo: all the logging and timing can be moved into a "Debug Dispatcher" which can be registered as the IDispatcher if the user wants
private async Task Process(Object @event, IEventDescriptor descriptor = null, long? position = null) {
    var eventType = _mapper.GetMappedTypeFor(@event.GetType());
    Stopwatch s = null;
    var handleContext = new HandleContext { Bus = _bus, EventDescriptor = descriptor };
    using (_eventsTimer.NewContext()) {
        if (Logger.IsDebugEnabled) {
            Logger.DebugFormat("Processing event {0} at position {1}. Size of queue: {2}/{3}", eventType.FullName, position, _processingQueueSize, _maxQueueSize);
        }
        using (var childBuilder = _builder.CreateChildBuilder()) {
            var handlerGenericType = typeof(IHandleMessagesAsync<>).MakeGenericType(eventType);
            List<dynamic> handlers = childBuilder.BuildAll(handlerGenericType).ToList();
            if (handlers.Count == 0) { return; }
            var success = false;
            var retry = 0;
            do {
                var uows = new ConcurrentStack<IEventUnitOfWork>();
                var mutators = childBuilder.BuildAll<IEventMutator>();
                if (Logger.IsDebugEnabled) { s = Stopwatch.StartNew(); }
                if (mutators != null && mutators.Any()) {
                    Parallel.ForEach(mutators, _parallelOptions, mutate => {
                        //if (Logger.IsDebugEnabled) Logger.DebugFormat("Mutating incoming event {0} with mutator {1}", eventType.FullName, mutate.GetType().FullName);
                        @event = mutate.MutateIncoming(@event, descriptor, position);
                    });
                }
                await childBuilder.BuildAll<IEventUnitOfWork>().ForEachAsync(2, async (uow) => {
                    uows.Push(uow);
                    uow.Builder = childBuilder;
                    await uow.Begin();
                });
                if (Logger.IsDebugEnabled) {
                    s.Stop();
                    Logger.DebugFormat("UOW.Begin for event {0} took {1} ms", eventType.FullName, s.ElapsedMilliseconds);
                }
                try {
                    if (Logger.IsDebugEnabled) { s.Restart(); }
                    Func<dynamic, Task> processor = async (handler) => {
                        using (_handlerTimer.NewContext()) {
                            var handlerRetries = 0;
                            var handlerSuccess = false;
                            do {
                                try {
                                    Stopwatch handlerWatch = null;
                                    if (Logger.IsDebugEnabled) {
                                        Logger.DebugFormat("Executing event {0} on handler {1}", eventType.FullName, handler.GetType().FullName);
                                        handlerWatch = Stopwatch.StartNew();
                                    }
                                    var lambda = _objectInvoker.Invoker(handler, eventType);
                                    await lambda(handler, @event, handleContext);
                                    if (Logger.IsDebugEnabled) {
                                        handlerWatch.Stop();
                                        Logger.DebugFormat("Executing event {0} on handler {1} took {2} ms", eventType.FullName, handler.GetType().FullName, handlerWatch.ElapsedMilliseconds);
                                    }
                                    handlerSuccess = true;
                                } catch (RetryException e) {
                                    Logger.InfoFormat("Received retry signal while dispatching event {0} to {1}. Retry: {2}/3\nException: {3}", eventType.FullName, handler.FullName, handlerRetries, e);
                                    handlerRetries++;
                                }
                            } while (!handlerSuccess && (_maxRetries == -1 || handlerRetries <= _maxRetries));
                            if (!handlerSuccess) {
                                Logger.ErrorFormat("Failed executing event {0} on handler {1}", eventType.FullName, handler.FullName);
                                throw new RetryException(String.Format("Failed executing event {0} on handler {1}", eventType.FullName, handler.FullName));
                            }
                        }
                    };
                    // Run each handler in parallel (or not) (if a handler is ever async we can't use Parallel)
                    if (_parallelHandlers) {
                        await handlers.ForEachAsync(_parallelOptions.MaxDegreeOfParallelism, processor);
                    } else {
                        foreach (var handler in handlers) { await processor(handler); }
                    }
                    if (Logger.IsDebugEnabled) {
                        s.Stop();
                        Logger.DebugFormat("Processing event {0} took {1} ms", eventType.FullName, s.ElapsedMilliseconds);
                    }
                } catch (Exception e) {
                    var trailingExceptions = new ConcurrentBag<Exception>();
                    await uows.Generate().ForEachAsync(2, async (uow) => {
                        try {
                            await uow.End(e);
                        } catch (Exception endException) {
                            trailingExceptions.Add(endException);
                        }
                    });
                    if (trailingExceptions.Any()) {
                        var exceptions = trailingExceptions.ToList();
                        exceptions.Insert(0, e);
                        e = new System.AggregateException(exceptions);
                    }
                    // Only log if the event has failed more than half of max retries, indicating a possible non-transient error
                    if (retry > (_maxRetries / 2)) {
                        Logger.InfoFormat("Encountered an error while processing {0}. Retry {1}/{2}\nPayload: {3}\nException details:\n{4}", eventType.FullName, retry, _maxRetries, JsonConvert.SerializeObject(@event), e);
                    } else {
                        Logger.DebugFormat("Encountered an error while processing {0}. Retry {1}/{2}\nPayload: {3}\nException details:\n{4}", eventType.FullName, retry, _maxRetries, JsonConvert.SerializeObject(@event), e);
                    }
                    _errorsMeter.Mark();
                    retry++;
                    Thread.Sleep(10);
                    continue;
                }
                // Failures when executing UOW.End `could` be transient (network or disk hiccup).
                // A failure of 1 uow in a chain of 5 is a problem as it could create a mangled DB (partial update via one uow then crash).
                // So we'll just keep retrying the failing UOW forever until it succeeds.
                if (Logger.IsDebugEnabled) { s.Restart(); }
                var endSuccess = false;
                var endRetry = 0;
                while (!endSuccess) {
                    try {
                        await uows.Generate().ForEachAsync(2, async (uow) => {
                            try {
                                await uow.End();
                            } catch {
                                // If it failed it needs to go back on the stack
                                uows.Push(uow);
                                throw;
                            }
                        });
                        endSuccess = true;
                    } catch (Exception e) {
                        if (endRetry > (_maxRetries / 2)) {
                            Logger.ErrorFormat("UOW.End failure while processing event {0} - retry {1}/{3}\nException:\n{2}", eventType.FullName, retry, e, _maxRetries);
                        } else {
                            Logger.DebugFormat("UOW.End failure while processing event {0} - retry {1}/{3}\nException:\n{2}", eventType.FullName, retry, e, _maxRetries);
                        }
                        endRetry++;
                        Thread.Sleep(50);
                    }
                }
                if (Logger.IsDebugEnabled) {
                    s.Stop();
                    Logger.DebugFormat("UOW.End for event {0} took {1} ms", eventType.FullName, s.ElapsedMilliseconds);
                }
                success = true;
            } while (!success && (_maxRetries == -1 || retry < _maxRetries));
            if (!success) {
                var message = String.Format("Encountered an error while processing {0}. Ran out of retries, dropping event.\nPayload: {1}", eventType.FullName, JsonConvert.SerializeObject(@event));
                if (_dropEventFatal) {
                    Logger.Fatal(message);
                    throw new SubscriptionCanceled(message);
                }
                Logger.Error(message);
            }
        }
    }
    _eventsMeter.Mark();
}
async Task IRepository.Commit(Guid commitId, IDictionary<string, string> commitHeaders) {
    Logger.Write(LogLevel.Debug, () => $"Repository {typeof(T).FullName} starting commit {commitId}");
    var written = 0;
    using (CommitTime.NewContext()) {
        await Tracked.Values.Where(x => x.Stream.Dirty).ToArray().SelectAsync(async (tracked) => {
            var headers = new Dictionary<string, string>(commitHeaders);
            var stream = tracked.Stream;
            Interlocked.Add(ref written, stream.TotalUncommitted);
            if (stream.StreamVersion != stream.CommitVersion && tracked is ISnapshotting && (tracked as ISnapshotting).ShouldTakeSnapshot()) {
                Logger.Write(LogLevel.Debug, () => $"Taking snapshot of [{tracked.GetType().FullName}] id [{tracked.StreamId}] version {tracked.Version}");
                var memento = (tracked as ISnapshotting).TakeSnapshot();
                stream.AddSnapshot(memento);
            }
            try {
                await _store.Evict<T>(stream.Bucket, stream.StreamId).ConfigureAwait(false);
                await stream.Commit(commitId, headers).ConfigureAwait(false);
            } catch (VersionException e) {
                Conflicts.Mark();
                // If we expected no stream, there's no reason to try to resolve the conflict
                if (stream.CommitVersion == -1) {
                    Logger.Warn($"New stream [{tracked.StreamId}] entity {tracked.GetType().FullName} already exists in store");
                    throw new ConflictResolutionFailedException($"New stream [{tracked.StreamId}] entity {tracked.GetType().FullName} already exists in store");
                }
                try {
                    Logger.Write(LogLevel.Info, () => $"Stream [{tracked.StreamId}] entity {tracked.GetType().FullName} stream version {stream.StreamVersion} commit version {stream.CommitVersion} has version conflicts with store - Message: {e.Message} Store: {e.InnerException?.Message}");
                    using (ConflictResolutionTime.NewContext()) {
                        var uncommitted = stream.Uncommitted.ToList();
                        stream.Flush(false);
                        // Make a new clean entity
                        var clean = await GetUntracked(stream).ConfigureAwait(false);
                        try {
                            Logger.Write(LogLevel.Debug, () => $"Attempting to resolve conflict with strategy {_conflictResolution.Conflict}");
                            var strategy = _conflictResolution.Conflict.Build(_builder, _conflictResolution.Resolver);
                            await strategy.Resolve(clean, uncommitted, commitId, commitHeaders).ConfigureAwait(false);
                        } catch (ConflictingCommandException) {
                            throw new ConflictResolutionFailedException("Failed to resolve stream conflict");
                        }
                    }
                    Logger.WriteFormat(LogLevel.Info, "Stream [{0}] entity {1} version {2} had version conflicts with store - successfully resolved", tracked.StreamId, tracked.GetType().FullName, stream.StreamVersion);
                    ConflictsResolved.Mark();
                } catch (AbandonConflictException abandon) {
                    ConflictsUnresolved.Mark();
                    Logger.WriteFormat(LogLevel.Error, "Stream [{0}] entity {1} has version conflicts with store - abandoning resolution", tracked.StreamId, tracked.GetType().FullName);
                    throw new ConflictResolutionFailedException($"Aborted conflict resolution for stream [{tracked.StreamId}] entity {tracked.GetType().FullName}", abandon);
                } catch (Exception ex) {
                    ConflictsUnresolved.Mark();
                    Logger.WriteFormat(LogLevel.Error, "Stream [{0}] entity {1} has version conflicts with store - FAILED to resolve due to: {3}: {2}", tracked.StreamId, tracked.GetType().FullName, ex.Message, ex.GetType().Name);
                    throw new ConflictResolutionFailedException($"Failed to resolve conflict for stream [{tracked.StreamId}] entity {tracked.GetType().FullName} due to exception", ex);
                }
            } catch (PersistenceException e) {
                Logger.WriteFormat(LogLevel.Warn, "Failed to commit events to store for stream: [{0}] bucket [{1}] Exception: {3}: {2}", stream.StreamId, stream.Bucket, e.Message, e.GetType().Name);
                WriteErrors.Mark(e.Message);
                throw;
            } catch (DuplicateCommitException) {
                Logger.WriteFormat(LogLevel.Warn, "Detected a double commit for stream: [{0}] bucket [{1}] - discarding changes for this stream", stream.StreamId, stream.Bucket);
                WriteErrors.Mark("Duplicate");
                // I was throwing this, but if this happens it means the events for this message have already been committed, possibly as a partial message failure earlier.
                // I'm changing this to just discard the changes; perhaps take a deeper look later if this ever bites me on the ass
                //throw;
            }
        });
    }
    WrittenEvents.Mark(typeof(T).FullName, written);
    Logger.Write(LogLevel.Debug, () => $"Repository {typeof(T).FullName} finished commit {commitId} wrote {written} events");
}
public override async Task Invoke(IIncomingLogicalMessageContext context, Func<Task> next) {
    MessagesConcurrent.Increment();
    // Only SEND and PUBLISH messages deserve a UnitOfWork
    if (context.MessageHeaders[Headers.MessageIntent] != MessageIntentEnum.Send.ToString() && context.MessageHeaders[Headers.MessageIntent] != MessageIntentEnum.Publish.ToString()) {
        await next().ConfigureAwait(false);
        return;
    }
    Logger.Write(LogLevel.Debug, () => $"Starting UOW for message {context.MessageId} type {context.Message.MessageType.FullName}");
    var uows = new Stack<IApplicationUnitOfWork>();
    var bagId = $"contextbags.{context.MessageId}";
    Dictionary<Type, ContextBag> savedBags = null;
    if (!context.Extensions.TryGet<Dictionary<Type, ContextBag>>(bagId, out savedBags)) { savedBags = new Dictionary<Type, ContextBag>(); }
    try {
        MessagesMeter.Mark();
        using (MessagesTimer.NewContext()) {
            using (BeginTimer.NewContext()) {
                var listOfUows = context.Builder.BuildAll<IApplicationUnitOfWork>();
                // Trick to put ILastApplicationUnitOfWork at the bottom of the stack to be uow.End'd last
                foreach (var uow in listOfUows.Where(x => x is ILastApplicationUnitOfWork).Concat(listOfUows.Where(x => !(x is ILastApplicationUnitOfWork)))) {
                    uow.Builder = context.Builder;
                    int retries;
                    if (!context.Extensions.TryGet(Defaults.Retries, out retries)) { retries = 0; }
                    uow.Retries = retries;
                    if (savedBags.ContainsKey(uow.GetType())) { uow.Bag = savedBags[uow.GetType()]; } else { uow.Bag = new ContextBag(); }
                    Logger.Write(LogLevel.Debug, () => $"Running UOW.Begin for message {context.MessageId} on {uow.GetType().FullName}");
                    await uow.Begin().ConfigureAwait(false);
                    uows.Push(uow);
                }
            }
            using (ProcessTimer.NewContext()) {
                DelayedMessage[] delayed;
                // Special case for delayed messages read from the delayed stream
                if (context.Headers.ContainsKey(Defaults.BulkHeader) && context.Extensions.TryGet(Defaults.BulkHeader, out delayed)) {
                    Logger.Write(LogLevel.Debug, () => $"Bulk processing {delayed.Count()} messages, bulk id {context.MessageId}");
                    var index = 1;
                    foreach (var x in delayed) {
                        // Replace all headers with the original headers to preserve CorrId etc.
                        context.Headers.Clear();
                        foreach (var header in x.Headers) { context.Headers[header.Key] = header.Value; }
                        context.Headers[Defaults.BulkHeader] = delayed.Count().ToString();
                        context.Headers[Defaults.DelayedId] = x.MessageId;
                        context.Headers[Defaults.ChannelKey] = x.ChannelKey;
                        Logger.Write(LogLevel.Debug, () => $"Processing {index}/{delayed.Count()} message, bulk id {context.MessageId}. MessageId: {x.MessageId} ChannelKey: {x.ChannelKey}");
                        //context.Extensions.Set(Defaults.ChannelKey, x.ChannelKey);
                        context.UpdateMessageInstance(x.Message);
                        await next().ConfigureAwait(true);
                        index++;
                    }
                } else {
                    await next().ConfigureAwait(false);
                }
            }
            using (EndTimer.NewContext()) {
                foreach (var uow in uows.PopAll()) {
                    Logger.Write(LogLevel.Debug, () => $"Running UOW.End for message {context.MessageId} on {uow.GetType().FullName}");
                    try {
                        // ConfigureAwait true because we don't want uow.End running in parallel
                        await uow.End().ConfigureAwait(true);
                    } finally {
                        savedBags[uow.GetType()] = uow.Bag;
                    }
                }
            }
        }
    } catch (Exception e) {
        Logger.Warn($"Caught exception '{e.GetType().FullName}' while executing message {context.MessageId} {context.Message.MessageType.FullName}", e);
        ErrorsMeter.Mark(context.Message.MessageType.FullName);
        var trailingExceptions = new List<Exception>();
        using (EndTimer.NewContext()) {
            foreach (var uow in uows.PopAll()) {
                try {
                    Logger.Write(LogLevel.Debug, () => $"Running UOW.End with exception [{e.GetType().Name}] for message {context.MessageId} on {uow.GetType().FullName}");
                    await uow.End(e).ConfigureAwait(true);
                } catch (Exception endException) {
                    trailingExceptions.Add(endException);
                }
                // If we're here one UOW threw an exception; save all the other context bags in case they did some work too
                savedBags[uow.GetType()] = uow.Bag;
            }
        }
        // Save uow bags into context for retries
        context.Extensions.Set(bagId, savedBags);
        if (trailingExceptions.Any()) {
            trailingExceptions.Insert(0, e);
            throw new System.AggregateException(trailingExceptions);
        }
        throw;
    } finally {
        MessagesConcurrent.Decrement();
    }
}
public void Invoke(IncomingContext context, Action next) {
    Stopwatch s = null;
    var uows = new ConcurrentStack<ICommandUnitOfWork>();
    try {
        _commandsMeter.Mark();
        using (_commandsTimer.NewContext()) {
            context.Builder.BuildAll<ICommandUnitOfWork>().ForEachAsync(2, async (uow) => {
                uows.Push(uow);
                uow.Builder = context.Builder;
                await uow.Begin();
            }).Wait();
            if (Logger.IsDebugEnabled) { s = Stopwatch.StartNew(); }
            next();
            if (Logger.IsDebugEnabled) {
                s.Stop();
                Logger.DebugFormat("Processing command {0} took {1} ms", context.IncomingLogicalMessage.MessageType.FullName, s.ElapsedMilliseconds);
                s.Restart();
            }
            uows.Generate().ForEachAsync(2, async (uow) => {
                try {
                    await uow.End();
                } catch {
                    // If it failed it needs to go back on the stack
                    uows.Push(uow);
                    throw;
                }
            }).Wait();
            if (Logger.IsDebugEnabled) {
                s.Stop();
                Logger.DebugFormat("UOW.End for command {0} took {1} ms", context.IncomingLogicalMessage.MessageType.FullName, s.ElapsedMilliseconds);
            }
        }
    } catch (System.AggregateException e) {
        _errorsMeter.Mark();
        var trailingExceptions = new List<Exception>();
        uows.Generate().ForEachAsync(2, async (uow) => {
            try {
                await uow.End(e);
            } catch (Exception endException) {
                trailingExceptions.Add(endException);
            }
        }).Wait();
        if (trailingExceptions.Any()) {
            trailingExceptions.Insert(0, e);
            e = new System.AggregateException(trailingExceptions);
        }
        throw;
    }
}
static void Main(string[] args) {
    Metric.Config
        //.WithAllCounters()
        .WithReporting(config => config
            .WithConsoleReport(TimeSpan.FromSeconds(30))
            .WithApplicationInsights(ConfigurationManager.AppSettings["AI_InstrumentationKey"].ToString(), Guid.NewGuid().ToString(), TimeSpan.FromSeconds(30)));
    // create some metrics
    Counter counter = Metric.Counter("MyItemCounter", Unit.Items);
    counter.Increment();
    counter.Increment();
    counter.Increment();
    Counter counterSuppressed = Metric.Counter("MySuppressedCounter", Unit.Items, ApplicationInsightsReport.DoNotReport);
    counterSuppressed.Increment();
    Metric.Gauge("MyPercentageGauge", () => DateTime.Now.Second, Unit.Percent);
    Meter meter = Metric.Meter("MyErrorsPerSecMeter", Unit.Errors, TimeUnit.Seconds);
    meter.Mark(); meter.Mark(); meter.Mark(); meter.Mark(); meter.Mark(); meter.Mark(); meter.Mark(); meter.Mark(); meter.Mark(); meter.Mark();
    Histogram histogram = Metric.Histogram("MyItemsHistogram", Unit.Items);
    histogram.Update(456);
    histogram.Update(123);
    histogram.Update(789);
    HealthChecks.RegisterHealthCheck("Seconds", () => {
        if (DateTime.Now.Second > 55) { return HealthCheckResult.Unhealthy("Seconds > 55"); }
        else { return HealthCheckResult.Healthy("Seconds <= 55"); }
    });
    HealthChecks.RegisterHealthCheck("AlwaysTrue", () => HealthCheckResult.Healthy("Always True check"));
    var rnd = new Random();
    Metrics.Timer timer = Metric.Timer("MyEventsTimer", Unit.Events);
    for (int i = 0; i < 3; i++) {
        using (var context = timer.NewContext("TimerCtx")) {
            Thread.Sleep(132 + rnd.Next(15));
        }
    }
    Console.WriteLine("Press any key to end...");
    Console.ReadKey();
}
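// Several examples in this listing also read the elapsed time off the timer context before it is
// disposed, e.g. to flag slow operations. A minimal sketch of that pattern (DoWork is a
// hypothetical placeholder):
using (var ctx = timer.NewContext()) {
    DoWork();
    if (ctx.Elapsed > TimeSpan.FromSeconds(1)) { Console.WriteLine($"slow: {ctx.Elapsed.TotalMilliseconds} ms"); }
}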
private static async Task ProcessEvent(MessageMetadataRegistry messageMeta, JsonSerializerSettings settings, RecordedEvent @event, CancellationToken token) {
    var transportTransaction = new TransportTransaction();
    var contextBag = new ContextBag();
    var metadata = @event.Metadata;
    var data = @event.Data;
    var descriptor = metadata.Deserialize(settings);
    if (descriptor.Compressed) { data = data.Decompress(); }
    var messageId = Guid.NewGuid().ToString();
    var headers = new Dictionary<string, string>(descriptor.Headers) {
        [Headers.MessageIntent] = MessageIntentEnum.Send.ToString(),
        [Headers.EnclosedMessageTypes] = SerializeEnclosedMessageTypes(messageMeta, Type.GetType(@event.EventType)),
        [Headers.MessageId] = messageId,
        ["EventId"] = @event.EventId.ToString(),
        ["EventStreamId"] = @event.EventStreamId,
        ["EventNumber"] = @event.EventNumber.ToString()
    };
    using (var tokenSource = new CancellationTokenSource()) {
        var processed = false;
        var numberOfDeliveryAttempts = 0;
        while (!processed) {
            using (var ctx = EventExecution.NewContext()) {
                try {
                    // If canceled, this will throw the number of times immediate retry requires to send the message to the error queue
                    token.ThrowIfCancellationRequested();
                    // Don't re-use the event id for the message id
                    var messageContext = new MessageContext(messageId, headers, data ?? new byte[0], transportTransaction, tokenSource, contextBag);
                    await Bus.OnMessage(messageContext).ConfigureAwait(false);
                    EventCount.Increment();
                    processed = true;
                } catch (ObjectDisposedException) {
                    // NSB transport has been disconnected
                    break;
                } catch (Exception ex) {
                    EventErrors.Mark($"{ex.GetType().Name} {ex.Message}");
                    ++numberOfDeliveryAttempts;
                    var errorContext = new ErrorContext(ex, headers, messageId, data ?? new byte[0], transportTransaction, numberOfDeliveryAttempts);
                    if (await Bus.OnError(errorContext).ConfigureAwait(false) == ErrorHandleResult.Handled) { break; }
                }
            }
        }
    }
}
public virtual async Task<object> Invoke(HttpServiceRequest request, Type resultReturnType, JsonSerializerSettings jsonSettings) { using (_roundtripTime.NewContext()) return await InvokeCore(request, resultReturnType, jsonSettings).ConfigureAwait(false); }
private static void ParseLoop(XMLLogParser parser, TransientParserConfigs config, Stream input, CancellationToken cancellationToken) {
    using (var xmlReader = XmlReader.Create(input)) {
        // Don't just use XML's natural tree-creation setup, because we don't want the root element to have children.
        // For large or streaming logs, it would become a resource hog.
        var elements = new Stack<XElement>();
        bool exitLoop = false;
        while (!exitLoop && !cancellationToken.IsCancellationRequested && xmlReader.Read()) {
            switch (xmlReader.NodeType) {
                case XmlNodeType.Element:
                    var name = XName.Get(xmlReader.Name, xmlReader.NamespaceURI);
                    var element = new XElement(name);
                    if (elements.Count > 1) { // Don't add children to root to ensure it doesn't become massive
                        elements.Peek().Add(element);
                    }
                    elements.Push(element);
                    if (xmlReader.HasAttributes) {
                        while (xmlReader.MoveToNextAttribute()) {
                            var attName = XName.Get(xmlReader.Name, xmlReader.NamespaceURI);
                            var att = new XAttribute(attName, xmlReader.Value);
                            elements.Peek().Add(att);
                        }
                        xmlReader.MoveToElement();
                    }
                    break;
                case XmlNodeType.EndElement:
                    xmlElementCounter.Increment();
                    if (xmlReader.Name == elements.Peek().Name) {
                        var finishedElement = elements.Pop();
                        if (elements.Count == 1) { // Don't process elements unless they're children of root; anything else is a child element of a log element
                            using (processElementTimer.NewContext()) {
                                if (ProcessElement(parser, config, finishedElement) != LogParserErrors.OK) {
                                    exitLoop = true;
                                    break;
                                }
                            }
                        }
                    } else {
                        Console.Error.WriteLine($"Element {elements.Peek().Name} ended, but the name of the ending element {xmlReader.Name} doesn't match. Possibly out of sync...");
                    }
                    break;
                case XmlNodeType.CDATA:
                    xmlCDATACounter.Increment();
                    elements.Peek().Add(new XCData(xmlReader.Value));
                    break;
                case XmlNodeType.Text:
                    xmlTextCounter.Increment();
                    elements.Peek().Add(new XText(xmlReader.Value));
                    break;
                case XmlNodeType.Whitespace:
                    break;
                default:
                    unknownXmlElementCounter.Increment(xmlReader.NodeType.ToString());
                    break;
            }
        }
        // Only check when cancellation wasn't requested: then it means we ran to completion but never closed root. Cancellation means we never will.
        if (elements.Count != 0 && !cancellationToken.IsCancellationRequested) {
            xmlRootUnfinishedCounter.Increment();
            Console.Error.WriteLine("Root element didn't end");
        }
    }
}
private async Task HandleRequest(HttpListenerContext context, long ticks, long timeFromLastReq) {
    try {
        var deltaDelayTicks = DateTime.UtcNow.Ticks - ticks;
        var sw = Stopwatch.StartNew();
        RequestTimings.ClearCurrentTimings();
        using (context.Response) {
            // Special endpoints should not be logged/measured/traced like regular endpoints.
            // Access is allowed without HTTPS verifications since they don't expose anything sensitive (e.g. config values are encrypted)
            if (await TryHandleSpecialEndpoints(context)) { return; }
            // Regular endpoint handling
            using (_activeRequestsCounter.NewContext("Request")) {
                RequestTimings.GetOrCreate(); // initialize request timing context
                string methodName = null;
                // Initialize with empty object for protocol backwards-compatibility.
                var requestData = new HttpServiceRequest { TracingData = new TracingData() };
                object[] argumentsWithDefaults = null;
                ServiceMethod serviceMethod = null;
                ServiceCallEvent callEvent = _serverRequestPublisher.GetNewCallEvent();
                try {
                    try {
                        await CheckSecureConnection(context);
                        ValidateRequest(context);
                        requestData = await ParseRequest(context);
                        //-----------------------------------------------------------------------------------------
                        // Don't move the TracingContext writes out of the main flow; they have to be here to avoid side changes
                        //-----------------------------------------------------------------------------------------
                        TracingContext.SetRequestID(requestData.TracingData.RequestID);
                        TracingContext.SpanStartTime = requestData.TracingData.SpanStartTime;
                        TracingContext.AbandonRequestBy = requestData.TracingData.AbandonRequestBy;
                        TracingContext.SetParentSpan(requestData.TracingData.SpanID ?? Guid.NewGuid().ToString("N"));
                        TracingContext.SetOverrides(requestData.Overrides);
                        if (requestData.TracingData.Tags != null) { TracingContext.Tags = new ContextTags(requestData.TracingData.Tags); }
                        TracingContext.AdditionalProperties = requestData.TracingData.AdditionalProperties;
                        callEvent.ServiceMethodSchema = context.Request.IsSecureConnection ? "HTTPS" : "HTTP";
                        SetCallEventRequestData(callEvent, requestData);
                        serviceMethod = ServiceEndPointDefinition.Resolve(requestData.Target);
                        callEvent.CalledServiceName = serviceMethod.GrainInterfaceType.Name;
                        methodName = serviceMethod.ServiceInterfaceMethod.Name;
                        var arguments = requestData.Target.IsWeaklyTyped ? GetParametersByName(serviceMethod, requestData.Arguments) : requestData.Arguments.Values.Cast<object>().ToArray();
                        argumentsWithDefaults = GetConvertedAndDefaultArguments(serviceMethod.ServiceInterfaceMethod, arguments);
                        if (_extendedDelayTimeLogging) {
                            callEvent.RecvDateTicks = ticks;
                            callEvent.ReqStartupDeltaTicks = deltaDelayTicks;
                            callEvent.TimeFromLastReq = timeFromLastReq;
                            var outstandingReqs = Interlocked.Read(ref OutstandingRequests);
                            callEvent.OutstandingRequests = outstandingReqs;
                            if (deltaDelayTicks > 10_000_000) {
                                callEvent.CollectionCountGen0 = GC.CollectionCount(0);
                                callEvent.CollectionCountGen1 = GC.CollectionCount(1);
                                callEvent.CollectionCountGen2 = GC.CollectionCount(2);
                            }
                        }
                    } catch (Exception e) {
                        callEvent.Exception = e;
                        if (e is RequestException) { throw; }
                        throw new RequestException("Invalid request", e);
                    }
                    RejectRequestIfLateOrOverloaded();
                    var responseJson = await GetResponse(context, serviceMethod, requestData, argumentsWithDefaults);
                    if (await TryWriteResponse(context, responseJson, serviceCallEvent: callEvent)) {
                        callEvent.ErrCode = 0;
                        _successCounter.Increment();
                    } else {
                        _failureCounter.Increment();
                    }
                } catch (Exception e) {
                    callEvent.Exception = callEvent.Exception ?? e;
                    _failureCounter.Increment();
                    Exception ex = GetRelevantException(e);
                    string json = _serializationTime.Time(() => ExceptionSerializer.Serialize(ex));
                    await TryWriteResponse(context, json, GetExceptionStatusCode(ex), serviceCallEvent: callEvent);
                } finally {
                    sw.Stop();
                    callEvent.ActualTotalTime = sw.Elapsed.TotalMilliseconds;
                    _roundtripTime.Record((long)(sw.Elapsed.TotalMilliseconds * 1000000), TimeUnit.Nanoseconds);
                    if (methodName != null) {
                        _endpointContext.Timer(methodName, Unit.Requests).Record((long)(sw.Elapsed.TotalMilliseconds * 1000000), TimeUnit.Nanoseconds);
                    }
                    _serverRequestPublisher.TryPublish(callEvent, argumentsWithDefaults, serviceMethod);
                }
            }
        }
    } finally {
        Interlocked.Decrement(ref OutstandingRequests);
    }
}
private static async Task ProcessEvent(IMessaging messaging, string stream, long position, IFullEvent @event, CancellationToken token) {
    Logger.Write(LogLevel.Debug, () => $"Processing event from stream [{@event.Descriptor.StreamId}] bucket [{@event.Descriptor.Bucket}] entity [{@event.Descriptor.EntityType}] event id {@event.EventId}");
    var contextBag = new ContextBag();
    // Hack to get all the events to the invoker without NSB deserializing
    contextBag.Set(Defaults.EventHeader, @event.Event);
    using (var tokenSource = CancellationTokenSource.CreateLinkedTokenSource(token)) {
        var processed = false;
        var numberOfDeliveryAttempts = 0;
        var messageId = Guid.NewGuid().ToString();
        var corrId = "";
        if (@event.Descriptor?.Headers?.ContainsKey(Headers.CorrelationId) ?? false) { corrId = @event.Descriptor.Headers[Headers.CorrelationId]; }
        while (!processed) {
            var transportTransaction = new TransportTransaction();
            var headers = new Dictionary<string, string>(@event.Descriptor.Headers ?? new Dictionary<string, string>()) {
                [Headers.MessageIntent] = MessageIntentEnum.Send.ToString(),
                [Headers.EnclosedMessageTypes] = SerializeEnclosedMessageTypes(messaging, @event.Event.GetType()),
                [Headers.MessageId] = messageId,
                [Headers.CorrelationId] = corrId,
                [Defaults.EventHeader] = "",
                [$"{Defaults.EventPrefixHeader}.EventId"] = @event.EventId.ToString(),
                [$"{Defaults.EventPrefixHeader}.EventStream"] = stream,
                [$"{Defaults.EventPrefixHeader}.EventPosition"] = position.ToString()
            };
            using (var ctx = EventExecution.NewContext()) {
                try {
                    // If canceled, this will throw the number of times immediate retry requires to send the message to the error queue
                    token.ThrowIfCancellationRequested();
                    // Don't re-use the event id for the message id
                    var messageContext = new MessageContext(messageId, headers, Marker, transportTransaction, tokenSource, contextBag);
                    await Bus.OnMessage(messageContext).ConfigureAwait(false);
                    EventsHandled.Increment();
                    processed = true;
                } catch (ObjectDisposedException) {
                    // NSB transport has been disconnected
                    throw new OperationCanceledException();
                } catch (Exception ex) {
                    EventErrors.Mark($"{ex.GetType().Name} {ex.Message}");
                    ++numberOfDeliveryAttempts;
                    // Don't retry a cancellation
                    if (tokenSource.IsCancellationRequested) { numberOfDeliveryAttempts = Int32.MaxValue; }
                    var errorContext = new ErrorContext(ex, headers, messageId, Marker, transportTransaction, numberOfDeliveryAttempts);
                    if (await Bus.OnError(errorContext).ConfigureAwait(false) == ErrorHandleResult.Handled || tokenSource.IsCancellationRequested) { break; }
                }
            }
        }
    }
}
public async Task End(Exception ex = null) {
    if (ex != null) { return; }
    Logger.Debug("UOW End - {0} saves, {1} updates, {2} deletions", _saves.Count, _updates.Count, _deletions.Count);
    // New and update options commented out for now... they were causing too many events to be dropped because Riak was doing weird things.
    // - Don't comment them out; figure out the Riak weirdness!
    IEnumerable<RiakResult> exceptions;
    using (CommitTimer.NewContext()) {
        var options = new RiakPutOptions { IfNoneMatch = true, IfNotModified = false, ReturnHead = true, ReturnBody = false };
        options.SetW(Quorum.WellKnown.Quorum);
        var saved = _client.Async.Put(_saves.Values, options);
        var uptOptions = new RiakPutOptions { IfNoneMatch = false, IfNotModified = true, ReturnHead = true, ReturnBody = false };
        uptOptions.SetW(Quorum.WellKnown.Quorum);
        var updated = _client.Async.Put(_updates.Values, uptOptions);
        var deleteOpt = new RiakDeleteOptions();
        deleteOpt.SetR(Quorum.WellKnown.Quorum);
        var deleted = _client.Async.Delete(_deletions, deleteOpt);
        await Task.WhenAll(saved, updated, deleted).ConfigureAwait(false);
        exceptions = saved.Result.Where(x => !x.IsSuccess).Concat(updated.Result.Where(x => !x.IsSuccess)).Concat(deleted.Result.Where(x => !x.IsSuccess));
    }
    // Remove updated and deleted from cache (regardless of success)
    foreach (var update in _updates) { MemCache.Evict(update.Key.ToString()); }
    foreach (var delete in _deletions) { MemCache.Evict(delete.Key.ToString()); }
    NLog.MappedDiagnosticsLogicalContext.Clear();
    if (exceptions.Any()) {
        ExceptionsMeter.Mark();
        var grouped = exceptions.GroupBy(x => x.ErrorMessage).Select(x => x.First());
        Logger.Warn("Exceptions when saving. Details: {0}", grouped.Select(x => $"{x.ResultCode} {x.ErrorMessage}").Aggregate((cur, next) => $"{cur}, {next}"));
        foreach (var tracked in _tracked) { MemCache.Evict(tracked.Key.ToString()); }
        // Back out saves, because if the event created new objects, running the event again will cause more errors due to match_found
        await BackOutSaves(_saves.Keys).ConfigureAwait(false);
        throw new StorageException("Failed to commit", grouped.Select(x => x.Exception));
    }
}
private static void Threaded(object state) {
    var param = (ThreadParam)state;
    param.Clients.SelectAsync(x => x.Connect()).Wait();
    var threadIdle = Metric.Timer("Delayed Events Idle", Unit.None, tags: "debug");
    var tasks = new Task[param.Clients.Count()];
    TimerContext? idleContext = threadIdle.NewContext();
    while (true) {
        param.Token.ThrowIfCancellationRequested();
        var noEvents = true;
        for (var i = 0; i < param.Clients.Count(); i++) {
            // Check if the current task is complete
            if (tasks[i] != null && !tasks[i].IsCompleted) { continue; }
            var client = param.Clients.ElementAt(i);
            var events = client.Flush();
            DelayedHandled.Update(events.Count());
            if (!events.Any()) { continue; }
            noEvents = false;
            idleContext?.Dispose();
            idleContext = null;
            tasks[i] = Task.Run(async () => {
                using (var ctx = DelayedExecution.NewContext()) {
                    // Group delayed events by stream id and process in chunks.
                    // Same stream ids should modify the same models; processing this way reduces write collisions on commit
                    await events.GroupBy(x => x.Event.EventStreamId).SelectAsync(x => ProcessEvents(param, client, x.ToArray())).ConfigureAwait(false);
                    if (ctx.Elapsed > TimeSpan.FromSeconds(5)) {
                        SlowLogger.Warn($"Processing {events.Count()} bulked events took {ctx.Elapsed.TotalSeconds} seconds!");
                    }
                    Logger.Write(LogLevel.Info, () => $"Processing {events.Count()} bulked events took {ctx.Elapsed.TotalMilliseconds} ms");
                }
            }, param.Token);
        }
        if (idleContext == null) { idleContext = threadIdle.NewContext(); }
        // Cheap hack to not burn CPU in case there are no events
        if (noEvents) { Thread.Sleep(50); }
    }
}
private async Task<long> DoWrite(string stream, IEnumerable<EventData> events, long? expectedVersion = null) {
    var bucket = Math.Abs(stream.GetHashCode() % _clients.Count());
    long nextVersion;
    using (var ctx = WriteTime.NewContext()) {
        EventStoreTransaction transaction = null;
        try {
            if (events.Count() > _readsize) {
                transaction = await _clients[bucket].StartTransactionAsync(stream, expectedVersion ?? ExpectedVersion.Any).ConfigureAwait(false);
            }
            if (transaction != null) {
                Logger.Write(LogLevel.Debug, () => $"Using a transaction; {events.Count()} events is over max {_readsize} for a single write to stream id [{stream}]");
                var page = 0;
                while (page < events.Count()) {
                    await transaction.WriteAsync(events.Skip(page).Take(_readsize)).ConfigureAwait(false);
                    page += _readsize;
                }
                var result = await transaction.CommitAsync().ConfigureAwait(false);
                nextVersion = result.NextExpectedVersion;
            } else {
                var result = await _clients[bucket].AppendToStreamAsync(stream, expectedVersion ?? ExpectedVersion.Any, events).ConfigureAwait(false);
                nextVersion = result.NextExpectedVersion;
            }
        } catch (WrongExpectedVersionException e) {
            transaction?.Rollback();
            throw new VersionException($"We expected version {expectedVersion ?? ExpectedVersion.Any}", e);
        } catch (CannotEstablishConnectionException e) {
            transaction?.Rollback();
            throw new PersistenceException(e.Message, e);
        } catch (OperationTimedOutException e) {
            transaction?.Rollback();
            throw new PersistenceException(e.Message, e);
        } catch (EventStoreConnectionException e) {
            transaction?.Rollback();
            throw new PersistenceException(e.Message, e);
        }
        WrittenEvents.Update(events.Count());
        WrittenEventsSize.Update(events.Sum(x => x.Data.Length));
        if (ctx.Elapsed > TimeSpan.FromSeconds(1)) {
            SlowLogger.Write(LogLevel.Warn, () => $"Writing {events.Count()} events of total size {events.Sum(x => x.Data.Length)} to stream [{stream}] version {expectedVersion} took {ctx.Elapsed.TotalSeconds} seconds!");
        }
        Logger.Write(LogLevel.Debug, () => $"Writing {events.Count()} events of total size {events.Sum(x => x.Data.Length)} to stream [{stream}] version {expectedVersion} took {ctx.Elapsed.TotalMilliseconds} ms");
    }
    return nextVersion;
}
private static void Threaded(object state) {
    var param = (ThreadParam)state;
    // A fake message that will travel through the pipeline in order to bulk process messages from the context bag
    var bulkMarker = new BulkMessage().Serialize(param.JsonSettings).AsByteArray();
    param.Clients.SelectAsync(x => x.Connect()).Wait();
    var threadIdle = Metric.Timer("Delayed Events Idle", Unit.None, tags: "debug");
    TimerContext? idleContext = threadIdle.NewContext();
    while (true) {
        param.Token.ThrowIfCancellationRequested();
        var noEvents = true;
        for (var i = 0; i < param.Clients.Count(); i++) {
            var client = param.Clients.ElementAt(i);
            var events = client.Flush();
            if (!events.Any()) { continue; }
            noEvents = false;
            idleContext?.Dispose();
            idleContext = null;
            var delayed = events.Select(x => x.Event.Data.Deserialize<DelayedMessage>(param.JsonSettings)).ToArray();
            Logger.Write(LogLevel.Info, () => $"Processing {delayed.Count()} bulk events");
            var transportTransaction = new TransportTransaction();
            var contextBag = new ContextBag();
            // Hack to get all the delayed messages to the bulk invoker without NSB deserializing and processing each one
            contextBag.Set(Defaults.BulkHeader, delayed);
            // Need to supply EnclosedMessageTypes to trick the NSB pipeline into processing our fake message
            var messageId = Guid.NewGuid().ToString();
            var headers = new Dictionary<string, string>() {
                [Headers.EnclosedMessageTypes] = typeof(BulkMessage).AssemblyQualifiedName,
                [Headers.MessageIntent] = MessageIntentEnum.Send.ToString(),
                [Headers.MessageId] = messageId,
                [Defaults.BulkHeader] = delayed.Count().ToString(),
            };
            // Run the bulk process on this thread
            using (var tokenSource = new CancellationTokenSource()) {
                var success = false;
                var retry = 0;
                do {
                    using (var ctx = DelayedExecution.NewContext()) {
                        try {
                            // If canceled, this will throw the number of times immediate retry requires to send the message to the error queue
                            param.Token.ThrowIfCancellationRequested();
                            // Don't re-use the event id for the message id
                            var messageContext = new NServiceBus.Transport.MessageContext(messageId, headers, bulkMarker, transportTransaction, tokenSource, contextBag);
                            Bus.OnMessage(messageContext).Wait(param.Token);
                            Logger.Write(LogLevel.Debug, () => $"Scheduling acknowledge of {delayed.Count()} bulk events");
                            DelayedHandled.Update(delayed.Count());
                            client.Acknowledge(events);
                            success = true;
                        } catch (ObjectDisposedException) {
                            // NSB transport has been disconnected
                            break;
                        } catch (Exception) {
                            retry++; // count the failed attempt so the loop below can eventually give up
                        }
                        if (ctx.Elapsed > TimeSpan.FromSeconds(5)) {
                            SlowLogger.Warn($"Processing {delayed.Count()} bulked events took {ctx.Elapsed.TotalSeconds} seconds!");
                        }
                        Logger.Write(LogLevel.Info, () => $"Processing {delayed.Count()} bulked events took {ctx.Elapsed.TotalMilliseconds} ms");
                    }
                    // Todo: use max retry setting
                } while (!success && retry < 10);
                if (!success) {
                    // Don't run through NSB's error handler - it expects a single serialized message, which
                    // is not compatible here. Just tell EventStore to retry it.
                    // Todo: ES will park messages that fail - develop something to monitor parked messages
                    client.Nack(events);
                }
            }
        }
        if (idleContext == null) { idleContext = threadIdle.NewContext(); }
        // Cheap hack to not burn CPU in case there are no events
        if (noEvents) { Thread.Sleep(50); }
    }
}