public static RealTimeInventoryFinalResult ProcessAndSendResult(
    this OperationResult<IRealTimeInventory> result,
    IRequestMessage requestMessage,
    Func<IRealTimeInventory, IInventoryServiceCompletedMessage> successResponseCompletedMessage,
    ILoggingAdapter logger,
    IRealTimeInventory realTimeInventory,
    IActorRef sender,
    IActorRef notificationActorRef,
    IPerformanceService performanceService)
{
    logger?.Info(requestMessage.GetType().Name + " request was " + (!result.IsSuccessful ? "NOT " : "") +
                 "successful. Current inventory: " + realTimeInventory.GetCurrentQuantitiesReport());

    IInventoryServiceCompletedMessage response;
    if (!result.IsSuccessful)
    {
        response = result.ToInventoryOperationErrorMessage(requestMessage.ProductId);
        var message = "Error while trying to " + requestMessage.GetType() + " - the sender of the message is " +
                      sender?.Path + " - " + result.Exception.ErrorMessage;
        notificationActorRef?.Tell(message);
        logger?.Error(message, requestMessage, result, realTimeInventory.GetCurrentQuantitiesReport());
    }
    else
    {
        realTimeInventory = result.Data as RealTimeInventory;
        response = successResponseCompletedMessage(realTimeInventory);
        logger?.Info(response.GetType().Name + " response was sent back. Current inventory: " +
                     realTimeInventory.GetCurrentQuantitiesReport() + " - the sender of the message is " + sender?.Path);
    }

    sender?.Tell(response);
    notificationActorRef?.Tell(new RealTimeInventoryChangeMessage(realTimeInventory));
    performanceService?.Increment("Completed " + requestMessage.GetType().Name);
    return new RealTimeInventoryFinalResult(realTimeInventory as RealTimeInventory, response, result);
}
private void Handle(ReceiveTimeout timeoutInfo)
{
    _logger.Info(timeoutInfo.ToString());
    _logger.Info("Recipe failed to transfer");
}
public void InitFSM()
{
    StartWith(State.Connecting, new Data(null, null));

    When(State.Connecting, @event =>
    {
        if (@event.FsmEvent is IClientOp)
        {
            return Stay().Replying(new Status.Failure(new IllegalStateException("not connected yet")));
        }
        var connected = @event.FsmEvent as Connected;
        if (connected != null)
        {
            connected.Channel.WriteAndFlushAsync(new Hello(_name.Name, TestConductor.Get(Context.System).Address));
            return GoTo(State.AwaitDone).Using(new Data(connected.Channel, null));
        }
        if (@event.FsmEvent is ConnectionFailure)
        {
            return GoTo(State.Failed);
        }
        if (@event.FsmEvent is StateTimeout)
        {
            _log.Error($"Failed to connect to test conductor within {_settings.ConnectTimeout.TotalMilliseconds} ms.");
            return GoTo(State.Failed);
        }
        return null;
    }, _settings.ConnectTimeout);

    When(State.AwaitDone, @event =>
    {
        if (@event.FsmEvent is Done)
        {
            _log.Debug("received Done: starting test");
            return GoTo(State.Connected);
        }
        if (@event.FsmEvent is INetworkOp)
        {
            _log.Error("Received {0} instead of Done", @event.FsmEvent);
            return GoTo(State.Failed);
        }
        if (@event.FsmEvent is IServerOp)
        {
            return Stay().Replying(new Failure(new IllegalStateException("not connected yet")));
        }
        if (@event.FsmEvent is StateTimeout)
        {
            _log.Error("connect timeout to TestConductor");
            return GoTo(State.Failed);
        }
        return null;
    }, _settings.BarrierTimeout);

    When(State.Connected, @event =>
    {
        if (@event.FsmEvent is Disconnected)
        {
            _log.Info("disconnected from TestConductor");
            throw new ConnectionFailure("disconnect");
        }
        if (@event.FsmEvent is ToServer<Done> && @event.StateData.Channel != null)
        {
            @event.StateData.Channel.WriteAndFlushAsync(Done.Instance);
            return Stay();
        }
        var toServer = @event.FsmEvent as IToServer;
        if (toServer != null && @event.StateData.Channel != null && @event.StateData.RunningOp == null)
        {
            @event.StateData.Channel.WriteAndFlushAsync(toServer.Msg);
            string token = null;
            var enterBarrier = @event.FsmEvent as ToServer<EnterBarrier>;
            if (enterBarrier != null)
            {
                token = enterBarrier.Msg.Name;
            }
            else
            {
                var getAddress = @event.FsmEvent as ToServer<GetAddress>;
                if (getAddress != null)
                {
                    token = getAddress.Msg.Node.Name;
                }
            }
            return Stay().Using(@event.StateData.Copy(runningOp: Tuple.Create(token, Sender)));
        }
        if (toServer != null && @event.StateData.Channel != null && @event.StateData.RunningOp != null)
        {
            _log.Error("cannot write {0} while waiting for {1}", toServer.Msg, @event.StateData.RunningOp);
            return Stay();
        }
        if (@event.FsmEvent is IClientOp && @event.StateData.Channel != null)
        {
            var barrierResult = @event.FsmEvent as BarrierResult;
            if (barrierResult != null)
            {
                if (@event.StateData.RunningOp == null)
                {
                    _log.Warning("did not expect {0}", @event.FsmEvent);
                }
                else
                {
                    object response;
                    if (barrierResult.Name != @event.StateData.RunningOp.Item1)
                    {
                        response = new Failure(new Exception("wrong barrier " + barrierResult +
                            " received while waiting for " + @event.StateData.RunningOp.Item1));
                    }
                    else if (!barrierResult.Success)
                    {
                        response = new Failure(new Exception("barrier failed: " + @event.StateData.RunningOp.Item1));
                    }
                    else
                    {
                        response = barrierResult.Name;
                    }
                    @event.StateData.RunningOp.Item2.Tell(response);
                }
                return Stay().Using(@event.StateData.Copy(runningOp: null));
            }
            var addressReply = @event.FsmEvent as AddressReply;
            if (addressReply != null)
            {
                if (@event.StateData.RunningOp == null)
                {
                    _log.Warning("did not expect {0}", @event.FsmEvent);
                }
                else
                {
                    @event.StateData.RunningOp.Item2.Tell(addressReply.Addr);
                }
                return Stay().Using(@event.StateData.Copy(runningOp: null));
            }
            var throttleMsg = @event.FsmEvent as ThrottleMsg;
            if (throttleMsg != null)
            {
                ThrottleMode mode;
                if (throttleMsg.RateMBit < 0.0f)
                {
                    mode = Unthrottled.Instance;
                }
                else if (throttleMsg.RateMBit == 0.0f)
                {
                    mode = Blackhole.Instance;
                }
                else
                {
                    mode = new Transport.TokenBucket(1000, throttleMsg.RateMBit * 125000, 0, 0);
                }

                var cmdTask = TestConductor.Get(Context.System)
                    .Transport.ManagementCommand(new SetThrottle(throttleMsg.Target, throttleMsg.Direction, mode));
                var self = Self;
                cmdTask.ContinueWith(t =>
                {
                    if (t.IsFaulted)
                    {
                        throw new ConfigurationException("Throttle was requested from the TestConductor, but no transport " +
                            "adapters available that support throttling. Specify 'testTransport(on=true)' in your MultiNodeConfig");
                    }
                    self.Tell(new ToServer<Done>(Done.Instance));
                });
                return Stay();
            }
            if (@event.FsmEvent is DisconnectMsg)
            {
                return Stay(); // FIXME: is this the right EC for the future below?
            }
            var terminateMsg = @event.FsmEvent as TerminateMsg;
            if (terminateMsg != null)
            {
                _log.Info("Received TerminateMsg - shutting down...");
                if (terminateMsg.ShutdownOrExit.IsLeft && terminateMsg.ShutdownOrExit.ToLeft().Value == false)
                {
                    Context.System.Terminate();
                    return Stay();
                }
                if (terminateMsg.ShutdownOrExit.IsLeft && terminateMsg.ShutdownOrExit.ToLeft().Value == true)
                {
                    Context.System.AsInstanceOf<ActorSystemImpl>().Abort();
                    return Stay();
                }
                if (terminateMsg.ShutdownOrExit.IsRight)
                {
                    Environment.Exit(terminateMsg.ShutdownOrExit.ToRight().Value);
                    return Stay();
                }
            }
            if (@event.FsmEvent is Done)
            {
                return Stay(); // FIXME: what should happen here?
            }
        }
        return null;
    });

    When(State.Failed, @event =>
    {
        if (@event.FsmEvent is IClientOp)
        {
            return Stay().Replying(new Status.Failure(new Exception("cannot do " + @event.FsmEvent + " while failed")));
        }
        if (@event.FsmEvent is INetworkOp)
        {
            _log.Warning("ignoring network message {0} while Failed", @event.FsmEvent);
            return Stay();
        }
        return null;
    });

    OnTermination(e =>
    {
        _log.Info("Terminating connection to multi-node test controller due to [{0}]", e.Reason);
        if (e.StateData.Channel != null)
        {
            var disconnectTimeout = TimeSpan.FromSeconds(2); // TODO: make into a setting loaded from HOCON
            if (!e.StateData.Channel.CloseAsync().Wait(disconnectTimeout))
            {
                _log.Warning("Failed to disconnect from conductor within {0}", disconnectTimeout);
            }
        }
    });

    Initialize();
}
public object ToJournal(object evt)
{
    Log.Info("On its way to the journal: {0}", evt);
    return evt;
}
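For context, a ToJournal method like the one above typically lives on an Akka.Persistence IEventAdapter. A minimal sketch of the complete adapter, assuming that interface; the class name, the constructor injection, and the pass-through FromJournal are illustrative, not taken from the original source:

// Sketch of a pass-through event adapter that logs traffic in both directions.
// Akka.Persistence can inject the actor system via an adapter's constructor.
public class LoggingEventAdapter : Akka.Persistence.Journal.IEventAdapter
{
    private readonly ILoggingAdapter _log;

    public LoggingEventAdapter(ExtendedActorSystem system)
    {
        _log = Logging.GetLogger(system, GetType());
    }

    public string Manifest(object evt) => string.Empty; // no manifest needed for a pass-through

    public object ToJournal(object evt)
    {
        _log.Info("On its way to the journal: {0}", evt);
        return evt;
    }

    // Read-side counterpart: here the event is returned unchanged.
    public IEventSequence FromJournal(object evt, string manifest)
    {
        _log.Info("Recovered from the journal: {0}", evt);
        return EventSequence.Single(evt);
    }
}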
public void OnProjection(Album album)
{
    _mongo.UpsertAlbum(album);
    _log.Info($"Updated projection for id: {album.Id}");
}
public override Task CloseAsync(IChannelHandlerContext context)
{
    _log.Info("Server: disconnecting {0} from {1}", context.Channel.LocalAddress, context.Channel.RemoteAddress);
    return base.CloseAsync(context);
}
private void HandleMessage(string message)
{
    Log.Info($"Child got msg: {message}");
    Sender.Tell($"Message '{message}' was successfully processed in Child");
}
public MyJobsActor()
{
    // Recovery: events replayed from the journal
    Recover<string>(job =>
    {
        _log.Info($"Load Journal : Job loaded={job}_{_nextJobId}");
        _jobs.Add(new Job(_nextJobId, job));
    });

    Recover<Job>(job =>
    {
        _log.Info($"Load Journal : Job loaded={job.Id}");
        _jobs.Add(job);
    });

    // Recovery: state restored from a snapshot
    Recover<SnapshotOffer>(offer =>
    {
        var jobs = offer.Snapshot as List<Job>;
        if (jobs != null)
        {
            _log.Info($"Load Snapshot : Jobs loaded={jobs.Count}");
            _log.Info(string.Join(",", jobs.Select(x => x.Name)));
            _jobs = _jobs.Concat(jobs).ToList();
        }
    });

    // Commands
    Command<StartJob>(job => Persist(job.Name, s =>
    {
        var name = $"{job.Name}_{_nextJobId}";
        _jobs.Add(new Job(_nextJobId, name)); // add to the in-memory store after persisting to the data store
        _log.Info($"Job:{name}");
        if (++_msgsSinceLastSnapshot % 5 == 0)
        {
            // time to save a snapshot
            _log.Info("Save Snapshot");
            SaveSnapshot(_jobs);
        }
    }));

    Command<SaveSnapshotSuccess>(success =>
    {
        // soft-delete the journal up until the sequence # at which the snapshot was taken
        _log.Info("Save Snapshot Success; Deleting Journal Messages");
        DeleteMessages(success.Metadata.SequenceNr);
    });

    Command<SaveSnapshotFailure>(failure =>
    {
        // handle snapshot save failure...
        _log.Info("Save Snapshot Failure");
    });

    Command<DeleteMessagesFailure>(failure =>
    {
        // handle journal message deletion failure...
        _log.Info("Delete Messages Failure");
    });

    Command<DeleteMessagesSuccess>(success =>
    {
        // handle journal message deletion success...
        _log.Info("Delete Messages Success");
    });

    Command<GetJobs>(get => Sender.Tell(_jobs.ToImmutableList()));
}
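A minimal driving sketch for the persistent actor above, assuming an async context and that StartJob/GetJobs are simple message classes matching their use here; the system and actor names are illustrative:

// Start the actor, issue a job, then query the in-memory job list.
var system = ActorSystem.Create("jobs-demo");
var jobs = system.ActorOf(Props.Create(() => new MyJobsActor()), "jobs");

jobs.Tell(new StartJob("index-rebuild"));
var current = await jobs.Ask<ImmutableList<Job>>(new GetJobs(), TimeSpan.FromSeconds(3));
// After a restart, the actor rebuilds _jobs from its latest snapshot plus any
// journaled events recorded since that snapshot.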
private void OnDisconnect()
{
    Context.Stop(Self);
    _logger.Info($"Stopped, client {_nick} disconnected.");
}
public void LogInfo(string message)
{
    _log.Info("Cluster Node [{0}] - {1}", SelfAddress, message);
}
private void Processing()
{
    Receive<GatherStats>(gs =>
    {
        var stat = new Stat(_hospital.Id, StatisticType.EstimatedTimeToSeeADoctor, _avgDuration);
        foreach (var sub in _subscriptions)
        {
            sub.Tell(stat);
        }
    });

    Receive<SubscribeStatistic>(sc => { _subscriptions.Add(sc.Subscriber); });

    Receive<UnsubscribeStatistic>(uc => { _subscriptions.Remove(uc.Subscriber); });

    Receive<RegisterPatient>(rp =>
    {
        var sw = Stopwatch.StartNew();
        _patients[rp.Disease.Priority].Add(rp.PatientId, rp);
        if (_doctors.Count == 0)
        {
            // No doctor is registered yet, so the waiting time is undefined
            return;
        }

        // Collect the remaining busy time of every doctor
        _remainingTimeToSeeADoctor.Clear();
        foreach (var doctor in _doctors)
        {
            var patient = doctor.Value;
            if (patient == null)
            {
                // Doctor without a patient
                _remainingTimeToSeeADoctor.Add(0);
            }
            else
            {
                var diseaseInCharge = patient.Disease;
                var requiredTimeForDisease = ConvertTimeToMilliSec(diseaseInCharge.RequiredTime, diseaseInCharge.TimeUnit);
                var elapsedTime = (DateTime.Now - patient.StartTime).TotalMilliseconds;
                if (elapsedTime > requiredTimeForDisease)
                {
                    // The doctor is done with this patient
                    _remainingTimeToSeeADoctor.Add(0);
                }
                else
                {
                    // The time left before the doctor becomes free
                    _remainingTimeToSeeADoctor.Add((long)(requiredTimeForDisease - elapsedTime));
                }
            }
        }

        // Estimate the waiting time of each patient
        for (var diseasePriority = DiseasePriority.VeryHigh; diseasePriority < DiseasePriority.Invalid; ++diseasePriority)
        {
            foreach (var patient in _patients[diseasePriority])
            {
                // Sort the doctors' remaining busy times
                _remainingTimeToSeeADoctor.Sort();

                // The first doctor to become free
                var waitingTime = _remainingTimeToSeeADoctor[0];
                _avgDuration = ((_avgDuration * _statCount) + waitingTime) / (++_statCount);
                _counter.RawValue = (long)_avgDuration;

                // Add to that doctor the time needed for this new patient
                _remainingTimeToSeeADoctor[0] += ConvertTimeToMilliSec(patient.Value.Disease.RequiredTime, patient.Value.Disease.TimeUnit);
            }
        }
        _log.Info($"(H{_hospital.Id}) RegisterPatient for patient ID={rp.PatientId} took {sw.ElapsedTicks} ticks");
    });

    Receive<BeginAppointmentWithDoctor>(bawd =>
    {
        var sw = Stopwatch.StartNew();
        if ((_doctors.Count >= _hospital.AssignedDoctors) && !_doctors.ContainsKey(bawd.DoctorId))
        {
            // A new shift is starting :)
            _doctors.Clear();
            for (var diseasePriority = DiseasePriority.VeryHigh; diseasePriority < DiseasePriority.Invalid; ++diseasePriority)
            {
                _patients[diseasePriority].Clear();
            }
        }
        _doctors[bawd.DoctorId] = bawd;
        RemovePatient(bawd.PatientId);
        _log.Info($"(H{_hospital.Id}) BeginAppointmentWithDoctor for patient ID={bawd.PatientId} took {sw.ElapsedTicks} ticks");
    });

    Receive<UnregisterPatient>(urp =>
    {
        var sw = Stopwatch.StartNew();
        foreach (var doctor in _doctors.Where(doctor => (doctor.Value != null) && doctor.Value.PatientId.Equals(urp.PatientId)))
        {
            _doctors[doctor.Key] = null;
            break;
        }
        RemovePatient(urp.PatientId);
        _log.Info($"(H{_hospital.Id}) UnregisterPatient for patient ID={urp.PatientId} took {sw.ElapsedTicks} ticks");
    });
}
public NeuronActor()
{
    #region [ Setup Initial Actor State ]

    // Assign a unique Guid
    this.Id = Guid.NewGuid();

    // Create and dispose a NeuronActorActivator so that we don't have to keep repeating ourselves.
    using (var activator = new NeuronActorActivator())
    {
        activator.InitializeNeuronActor(this);
    }

    #endregion

    #region [ Property Receivers ]

    // Receive and process IEnumerable<IActorRef> for either InputActors or OutputActors.
    // If we can't process the message, log a warning and send an Enums.NeuronSignals.SignalFault.
    Receive<Tuple<Enums.NeuronSignals, IEnumerable<IActorRef>>>(m =>
    {
        switch (m.Item1)
        {
            case Enums.NeuronSignals.InputActorsReceived:
                this.InputActors = this.InputActors.Concat(m.Item2).ToList();
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.InputActorsReceived), Self);
                break;
            case Enums.NeuronSignals.OutputActorsReceived:
                this.OutputActors = this.OutputActors.Concat(m.Item2).ToList();
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.OutputActorsReceived), Self);
                break;
            default:
                _log.Warning($"[{DateTime.Now}] Invalid NeuronSignal Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                break;
        }
    });

    Receive<Tuple<Enums.NeuronSignals, float?>>(m =>
    {
        switch (m.Item1)
        {
            case Enums.NeuronSignals.BiasReceived:
                this.Bias = m.Item2;
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.BiasReceived), Self);
                break;
            case Enums.NeuronSignals.ThresholdReceived:
                this.Threshold = m.Item2;
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.ThresholdReceived), Self);
                break;
            case Enums.NeuronSignals.AccumulatorReceived:
                this.Accumulator = (float)m.Item2;
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.AccumulatorReceived), Self);
                break;
            case Enums.NeuronSignals.InputReceived:
                if (this.InputActors.ToList().Contains(Sender))
                {
                    this.Input = this.Input.Concat(new[] { (float)m.Item2 }).ToList();
                    _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                    Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.InputReceived), Self);
                }
                break;
            default:
                _log.Warning($"[{DateTime.Now}] Invalid NeuronSignal Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                break;
        }
    });

    Receive<Tuple<Enums.NeuronSignals, IEnumerable<float>>>(m =>
    {
        switch (m.Item1)
        {
            case Enums.NeuronSignals.WeightsReceived:
                this.Weights = m.Item2;
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.WeightsReceived), Self);
                break;
            default:
                _log.Warning($"[{DateTime.Now}] Invalid NeuronSignal Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                break;
        }
    });

    Receive<Enums.NeuronSignals>(m =>
    {
        switch (m)
        {
            case Enums.NeuronSignals.CalculateDotProduct:
                if (this.Input != null && this.Weights != null)
                {
                    try
                    {
                        this.DotProduct = this.Input.DotProduct(this.Weights, this.Accumulator) + this.Bias;
                        Sender.Tell(new Tuple<Enums.NeuronSignals, float?>(Enums.NeuronSignals.CalculateDotProduct, this.DotProduct), Self);
                    }
                    catch (Exception e)
                    {
                        _log.Error(e, $"[{DateTime.Now}] Sending of {m} threw an exception when instantiated by: {Sender}");
                        Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                    }
                }
                else
                {
                    _log.Warning($"[{DateTime.Now}] One of the inputs of the DotProduct function is null when invoked by {Sender}. this.Input: {this.Input} this.Weights: {this.Weights}");
                    Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                }
                break;
            case Enums.NeuronSignals.InvokeActivationFunction:
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m)} from: {Sender}");
                this.Output = this.ActivationFunction(this.DotProduct, this.Threshold);
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.InvokeActivationFunction), Self);
                break;
            case Enums.NeuronSignals.ForwardOutput:
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m)} from: {Sender}");
                ForwardOutputToOutputNeurons();
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.ForwardOutput), Self);
                break;
            default:
                _log.Warning($"[{DateTime.Now}] Unprocessable NeuronSignal Received: {Enum.GetName(typeof(Enums.NeuronSignals), m)} from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                break;
        }
    });

    #endregion
}
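A minimal sketch of messaging the NeuronActor above, assuming an async context; the system and actor names are illustrative. Each property message is acknowledged with an (Ack, signal) tuple, so Ask can confirm delivery before asking the neuron to compute:

var system = ActorSystem.Create("neural-net");
var neuron = system.ActorOf(Props.Create(() => new NeuronActor()), "neuron-1");

// Send weights and await the acknowledgement tuple defined by the receivers above.
var ack = await neuron.Ask<Tuple<Enums.NeuronSignals, Enums.NeuronSignals>>(
    Tuple.Create(Enums.NeuronSignals.WeightsReceived, (IEnumerable<float>)new[] { 0.5f, -0.25f }),
    TimeSpan.FromSeconds(1));
// ack.Item2 == Enums.NeuronSignals.WeightsReceived on success, SignalFault otherwise.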
public override void Start()
{
    log.Info("Starting remoting");

    if (_endpointManager == null)
    {
        _endpointManager = System.SystemActorOf(RARP.For(System).ConfigureDispatcher(
            Props.Create(() => new EndpointManager(System.Settings.Config, log)).WithDeploy(Deploy.Local)), EndpointManagerName);

        try
        {
            var addressPromise = new TaskCompletionSource<IList<ProtocolTransportAddressPair>>();

            // Tell the EndpointManager to start all transports and bind them to listenable addresses,
            // then set the results of this promise to include them.
            _endpointManager.Tell(new EndpointManager.Listen(addressPromise));
            addressPromise.Task.Wait(Provider.RemoteSettings.StartupTimeout);

            var akkaProtocolTransports = addressPromise.Task.Result;
            if (akkaProtocolTransports.Count == 0)
            {
                throw new Exception("No transports enabled");
            }

            IEnumerable<IGrouping<string, ProtocolTransportAddressPair>> tmp =
                akkaProtocolTransports.GroupBy(t => t.ProtocolTransport.SchemeIdentifier);
            _transportMapping = new Dictionary<string, HashSet<ProtocolTransportAddressPair>>();
            foreach (var g in tmp)
            {
                var set = new HashSet<ProtocolTransportAddressPair>(g);
                _transportMapping.Add(g.Key, set);
            }

            _defaultAddress = akkaProtocolTransports.Head().Address;
            _addresses = new HashSet<Address>(akkaProtocolTransports.Select(x => x.Address));

            log.Info("Remoting started; listening on addresses : [{0}]", string.Join(",", _addresses.Select(x => x.ToString())));

            _endpointManager.Tell(new EndpointManager.StartupFinished());
            _eventPublisher.NotifyListeners(new RemotingListenEvent(_addresses.ToList()));
        }
        catch (TaskCanceledException ex)
        {
            NotifyError("Startup was cancelled due to timeout", ex);
            throw;
        }
        catch (TimeoutException ex)
        {
            NotifyError("Startup timed out", ex);
            throw;
        }
        catch (Exception ex)
        {
            NotifyError("Startup failed", ex);
            throw;
        }
    }
    else
    {
        log.Warning("Remoting was already started. Ignoring start attempt.");
    }
}
private void LogToEverything(IUntypedActorContext context, string message)
{
    _mediator.Tell(new Publish(Topics.Status,
        new SignalRMessage($"{DateTime.Now}: {StaticMethods.GetSystemUniqueName()}", "LineReader", message)), context.Self);
    _logger.Info(message);
}
public void Handle(NewEntityCreated message)
{
    _log.Info("Entity {0}/{1:n} created.", message.Entity.GetType().Name, message.Key);
}
protected override void PreStart() => _log.Info($"Device actor {SystemId}-{DownloaderId} started");
private void Commands()
{
    Command<ConfirmableMessage<Ask>>(a =>
    {
        // For the sake of efficiency - update the orderbook and then persist all events
        var events = _matchingEngine.WithAsk(a.Message);
        var persistableEvents = new ITradeEvent[] { a.Message }.Concat<ITradeEvent>(events); // ask needs to go before Fill / Match

        PersistAll(persistableEvents, @event =>
        {
            _log.Info("[{0}][{1}] - {2} units @ {3} per unit", TickerSymbol, @event.ToTradeEventType(), a.Message.AskQuantity, a.Message.AskPrice);
            if (@event is Ask)
            {
                // need to use the ID of the original sender to satisfy the PersistenceSupervisor
                //_confirmationActor.Tell(new Confirmation(a.ConfirmationId, a.SenderId));
            }
            _publisher.Publish(TickerSymbol, @event);

            // Take a snapshot every N messages to optimize recovery time
            if (LastSequenceNr % SnapshotInterval == 0)
            {
                SaveSnapshot(_matchingEngine.GetSnapshot());
            }
        });
    });

    Command<ConfirmableMessage<Bid>>(b =>
    {
        // For the sake of efficiency - update the orderbook and then persist all events
        var events = _matchingEngine.WithBid(b.Message);
        var persistableEvents = new ITradeEvent[] { b.Message }.Concat<ITradeEvent>(events); // bid needs to go before Fill / Match

        PersistAll(persistableEvents, @event =>
        {
            _log.Info("[{0}][{1}] - {2} units @ {3} per unit", TickerSymbol, @event.ToTradeEventType(), b.Message.BidQuantity, b.Message.BidPrice);
            if (@event is Bid)
            {
                //_confirmationActor.Tell(new Confirmation(b.ConfirmationId, PersistenceId));
            }
            _publisher.Publish(TickerSymbol, @event);

            // Take a snapshot every N messages to optimize recovery time
            if (LastSequenceNr % SnapshotInterval == 0)
            {
                SaveSnapshot(_matchingEngine.GetSnapshot());
            }
        });
    });

    /*
     * Handle subscriptions directly in case we're using in-memory, local pub-sub.
     */
    CommandAsync<TradeSubscribe>(async sub =>
    {
        try
        {
            var ack = await _subscriptionManager.Subscribe(sub.TickerSymbol, sub.Events, sub.Subscriber);
            Context.Watch(sub.Subscriber);
            sub.Subscriber.Tell(ack);
        }
        catch (Exception ex)
        {
            _log.Error(ex, "Error while processing subscription {0}", sub);
            sub.Subscriber.Tell(new TradeSubscribeNack(sub.TickerSymbol, sub.Events, ex.Message));
        }
    });

    CommandAsync<TradeUnsubscribe>(async unsub =>
    {
        try
        {
            var ack = await _subscriptionManager.Unsubscribe(unsub.TickerSymbol, unsub.Events, unsub.Subscriber);
            // leave DeathWatch intact, in case the actor is still subscribed to additional topics
            unsub.Subscriber.Tell(ack);
        }
        catch (Exception ex)
        {
            _log.Error(ex, "Error while processing unsubscribe {0}", unsub);
            unsub.Subscriber.Tell(new TradeUnsubscribeNack(unsub.TickerSymbol, unsub.Events, ex.Message));
        }
    });

    CommandAsync<Terminated>(async t =>
    {
        try
        {
            var ack = await _subscriptionManager.Unsubscribe(TickerSymbol, t.ActorRef);
        }
        catch (Exception ex)
        {
            _log.Error(ex, "Error while processing unsubscribe for terminated subscriber {0} for symbol {1}", t.ActorRef, TickerSymbol);
        }
    });

    Command<GetOrderBookSnapshot>(s => Sender.Tell(_matchingEngine.GetSnapshot()));
}
public static async Task<int> Main(string[] args)
{
    // Setup
    await SetupKafkaAsync();
    await SetupAkkaAsync();

    List<CpuUsage> usageBeforeLoad;
    List<CpuUsage> usageAfterLoad;

    try
    {
        _log = Logging.GetLogger(ConsumerSystem, nameof(Program));

        // Create the topic on the Kafka server
        var builder = new AdminClientBuilder(new AdminClientConfig
        {
            BootstrapServers = Benchmark.Docker.KafkaAddress
        });
        using (var client = builder.Build())
        {
            await client.CreateTopicsAsync(new[]
            {
                new TopicSpecification { Name = KafkaTopic, NumPartitions = 3, ReplicationFactor = 1 }
            });
        }

        // Set up the consumer
        var consumerSettings = ConsumerSettings<string, string>.Create(ConsumerSystem, null, null)
            .WithBootstrapServers(Benchmark.Docker.KafkaAddress)
            .WithStopTimeout(TimeSpan.FromSeconds(1))
            .WithProperty("auto.offset.reset", "earliest")
            .WithGroupId(KafkaGroup);

        var control = KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(KafkaTopic))
            .GroupBy(3, tuple => tuple.Item1)
            .SelectAsync(8, async tuple =>
            {
                var (topicPartition, source) = tuple;
                _log.Info($"Sub-source for {topicPartition}");
                var sourceMessages = await source
                    .Scan(0, (i, message) => i + 1)
                    .Select(i =>
                    {
                        ReceivedMessage.IncrementAndGet();
                        return LogReceivedMessages(topicPartition, i);
                    })
                    .RunWith(Sink.Last<long>(), ConsumerSystem.Materializer());

                _log.Info($"{topicPartition}: Received {sourceMessages} messages in total");
                return sourceMessages;
            })
            .MergeSubstreams()
            .AsInstanceOf<Source<long, IControl>>()
            .Scan(0L, (i, subValue) => i + subValue)
            .ToMaterialized(Sink.Last<long>(), Keep.Both)
            .MapMaterializedValue(tuple => DrainingControl<long>.Create(tuple.Item1, tuple.Item2))
            .Run(ConsumerSystem.Materializer());

        // Delay before the benchmark
        await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

        // Warmup
        await CollectSamplesAsync(DefaultWarmUpRepeat, DefaultSampleDuration, "[Warmup]");

        // Collect CPU usage before load
        usageBeforeLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage Before Load]");

        // Create load
        var producerSettings = ProducerSettings<string, string>.Create(ConsumerSystem, null, null)
            .WithBootstrapServers(Benchmark.Docker.KafkaAddress);

        await Source
            .From(Enumerable.Range(1, DefaultMessageCount))
            .Select(elem => new ProducerRecord<string, string>(KafkaTopic, "key", elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(producerSettings), ConsumerSystem.Materializer());

        // Wait until the consumer has consumed all messages
        var stopwatch = Stopwatch.StartNew();
        while (stopwatch.Elapsed.TotalSeconds < DefaultTimeout && ReceivedMessage.Current < DefaultMessageCount)
        {
            await Task.Delay(100);
        }
        stopwatch.Stop();
        if (stopwatch.Elapsed.TotalSeconds > DefaultTimeout)
        {
            throw new Exception($"Timed out while waiting for the consumer to process {DefaultMessageCount} messages");
        }

        // Delay before the benchmark
        await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

        // Collect CPU usage after load
        usageAfterLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage After Load]");
    }
    finally
    {
        // Tear down
        await TearDownAkkaAsync();
        await TearDownKafkaAsync();
    }

    Console.WriteLine("CPU Benchmark complete.");
    await GenerateReportAsync(usageBeforeLoad, "BeforeLoad", DefaultSampleDuration, DefaultRepeat);
    await GenerateReportAsync(usageAfterLoad, "AfterLoad", DefaultSampleDuration, DefaultRepeat);
    return 0;
}
public ChildActor()
{
    _log.Info($">>> Current: {Self}");
    Receive<int>(i => Handle(i));
}
private void HandleFlush(Flush flush)
{
    _log.Info("{@QueueStats}", JObject.Parse(flush.QueueStats));
}
private void HandlePaymentReceipt(PaymentResponse message)
{
    message.Path = $"{this.Self.Path}--{message.Path}--HashCode:{this._articleGateway.GetHashCode()}";
    logging.Info($"Reply handled: {message.Message}");
    Sender.Tell(message);
}
private void Receiving()
{
    Receive<PublishStatsTick>(stats =>
    {
        if (!Stats.IsEmpty)
        {
            _logger.Info("Publishing {0} to parent", Stats);
            Commander.Tell(Stats.Copy());

            // reset our stats after publishing
            Stats = Stats.Reset();
        }
    });

    // Received word from a ParseWorker that we need to check for new documents
    Receive<CheckDocuments>(documents =>
    {
        // forward this on to the downloads tracker, but have it reply back to us
        DownloadsTracker.Tell(documents);
    });

    // Update our local stats
    Receive<DiscoveredDocuments>(discovered => { Stats = Stats.WithDiscovered(discovered); });

    // Received word from the DownloadsTracker that we need to process some docs
    Receive<ProcessDocuments>(process =>
    {
        foreach (var doc in process.Documents)
        {
            // Context.Parent is the router between the coordinators and the Commander
            if (doc.IsImage)
            {
                Context.Parent.Tell(new DownloadWorker.DownloadImage(doc));
            }
            else
            {
                Context.Parent.Tell(new DownloadWorker.DownloadHtmlDocument(doc));
            }
        }
    });

    // Hand the work off to the downloaders
    Receive<DownloadWorker.IDownloadDocument>(download => { DownloaderRouter.Tell(download); });

    Receive<CompletedDocument>(completed =>
    {
        // TODO: send verbose status messages to commander here?
        Stats = Stats.WithCompleted(completed);
    });

    /* Set all of our local downloaders to message our local parsers */
    Receive<DownloadWorker.RequestParseActor>(request => { Sender.Tell(new DownloadWorker.SetParseActor(ParserRouter)); });

    /* Set all of our local parsers to message our local downloaders */
    Receive<ParseWorker.RequestDownloadActor>(request => { Sender.Tell(new ParseWorker.SetDownloadActor(DownloaderRouter)); });
}
protected void InitFSM()
{
    StartWith(State.Initial, null);

    WhenUnhandled(@event =>
    {
        var clientDisconnected = @event.FsmEvent as Controller.ClientDisconnected;
        if (clientDisconnected != null)
        {
            if (@event.StateData != null)
            {
                @event.StateData.Tell(new Failure(new Controller.ClientDisconnectedException(
                    "client disconnected in state " + StateName + ": " + _channel)));
            }
            return Stop();
        }
        return null;
    });

    OnTermination(@event =>
    {
        _controller.Tell(new Controller.ClientDisconnected(_roleName));
        _channel.CloseAsync();
    });

    When(State.Initial, @event =>
    {
        var hello = @event.FsmEvent as Hello;
        if (hello != null)
        {
            _roleName = new RoleName(hello.Name);
            _controller.Tell(new Controller.NodeInfo(_roleName, hello.Address, Self));
            return GoTo(State.Ready);
        }
        if (@event.FsmEvent is INetworkOp)
        {
            _log.Warning("client {0} did not send Hello as its first message (instead: {1}), disconnecting", _channel.RemoteAddress, @event.FsmEvent);
            _channel.CloseAsync();
            return Stop();
        }
        if (@event.FsmEvent is IToClient)
        {
            _log.Warning("cannot send {0} in state Initial", @event.FsmEvent);
            return Stay();
        }
        if (@event.FsmEvent is StateTimeout)
        {
            _log.Info("closing channel to {0} because of Hello timeout", _channel.RemoteAddress);
            _channel.CloseAsync();
            return Stop();
        }
        return null;
    }, TimeSpan.FromSeconds(10));

    When(State.Ready, @event =>
    {
        if (@event.FsmEvent is Done && @event.StateData != null)
        {
            @event.StateData.Tell(@event.FsmEvent);
            return Stay().Using(null);
        }
        if (@event.FsmEvent is IServerOp)
        {
            _controller.Tell(@event.FsmEvent);
            return Stay();
        }
        if (@event.FsmEvent is INetworkOp)
        {
            _log.Warning("client {0} sent unsupported message {1}", _channel.RemoteAddress, @event.FsmEvent);
            return Stop();
        }
        var toClient = @event.FsmEvent as IToClient;
        if (toClient != null)
        {
            if (toClient.Msg is IUnconfirmedClientOp)
            {
                _channel.WriteAndFlushAsync(toClient.Msg);
                return Stay();
            }
            if (@event.StateData == null)
            {
                _channel.WriteAndFlushAsync(toClient.Msg);
                return Stay().Using(Sender);
            }
            _log.Warning("cannot send {0} while waiting for previous ACK", toClient.Msg);
            return Stay();
        }
        return null;
    });

    Initialize();
}
private void LogToEverything(IUntypedActorContext context, string message)
{
    _mediator.Tell(new Publish(Topics.Status,
        new SignalRMessage($"{DateTime.Now}: {StaticMethods.GetSystemUniqueName()}", "LineReader", message)), context.Self);
    _logger.Info(message);
}
public void Handle(Shared.Messages.Category.CategoryCreated message)
{
    _log.Info("CategoryCreated is handled.");
    _storage.Tell(new CreateNewCategory(message.AggregateId, message.Name, message.Status));
}
public BatchWriterActor()
{
    ReceiveAsync<object>(async message =>
    {
        if (message is Batch batchMessage)
        {
            Context.IncrementMessagesReceived();

            var bulkItemsReserved = new List<MessageReseved>();
            var bulkItemsCompleted = new List<MessageCompleted>();

            foreach (var item in batchMessage.Obj)
            {
                if (item is DelayMsg delayMsg)
                {
                    if (delayMsg.State == DelayMsgState.Reserved)
                    {
                        bulkItemsReserved.Add(new MessageReseved { Seq = delayMsg.Seq, Message = delayMsg.Message, updateTime = DateTime.Now });
                    }
                    else if (delayMsg.State == DelayMsgState.Completed)
                    {
                        bulkItemsCompleted.Add(new MessageCompleted { Seq = delayMsg.Seq, Message = delayMsg.Message, updateTime = DateTime.Now });
                    }
                }
            }

            var batchType = "";
            if (bulkItemsReserved.Count > 0 && IsWriteDB)
            {
                batchType = "reserved";
                EntityFrameworkManager.ContextFactory = context => new BatchRepository(Startup.AppSettings);
                using (var context = new BatchRepository(Startup.AppSettings))
                {
                    await context.BulkInsertAsync(bulkItemsReserved, options => { options.BatchSize = BatchSize; });
                    Context.IncrementCounter("akka.custom.received1", bulkItemsReserved.Count);
                }
            }

            if (bulkItemsCompleted.Count > 0 && IsWriteDB)
            {
                batchType = "completed";
                EntityFrameworkManager.ContextFactory = context => new BatchRepository(Startup.AppSettings);
                using (var context = new BatchRepository(Startup.AppSettings))
                {
                    await context.BulkInsertAsync(bulkItemsCompleted, options => { options.BatchSize = BatchSize; });
                    Context.IncrementCounter("akka.custom.received1", bulkItemsCompleted.Count);
                }
            }

            logger.Info($"========= Bulk Type:{batchType} Count:{batchMessage.Obj.Count}");
        }
    });
}
public CatalogRegistryActor()
{
    _log.Info(Self.Path.ToString());
    SetUpReceivers();
}
//#region Actor States

/// <summary>
/// This method sets up the recovering state. It remains active until the actor is fully recovered from the
/// journal/snapshot stores, and switches to the command-processing state once it receives the RecoveryCompleted message.
/// </summary>
private void Recovering()
{
    try
    {
        _logger.Debug($"Setting up persistence actor with persistence id: {PersistenceId}");

        //// ******** IMPORTANT ***********
        //// For each command there should be a handler defined
        //// ******************************
        //UseRecoveryCommandHandler.Add("ClientListInsertCommand", command => InsertNewClientListItemRecoveryCommand(command as ClientListInsertCommand));
        //UseRecoveryCommandHandler.Add("ClientListUpdateCommand", command => UpdateClientListRecoveryCommand(command as ClientListUpdateCommand));
        //UseRecoveryCommandHandler.Add("ClientListDeleteCommand", command => DeleteClientListRecoveryCommand(command as ClientListDeleteCommand));
        //UseRecoveryCommandHandler.Add("ClientListUnDeleteCommand", command => UnDeleteClientListRecoveryCommand(command as ClientListUnDeleteCommand));

        //// Process any snapshots recovered from the data store
        //Recover<SnapshotOffer>(snapshotOffer => ProcessSnapshot(snapshotOffer));

        //// These will be commands restored from the data store.
        //Recover<JObject>(jo =>
        //{
        //    try
        //    {
        //        string commandName = jo["CommandClass"].Value<string>();
        //        Type commandType = Type.GetType(typeof(Command).Namespace + "." + commandName);
        //        Command cmd = jo.ToObject(commandType) as Command;
        //        UseRecoveryCommandHandler[commandName](cmd);
        //    }
        //    catch (Exception e)
        //    {
        //        _logger.Error("Error:{0}\nFailed to process journal entry:{1}", e.Message, jo);
        //    }
        //});

        Recover<RecoveryCompleted>(rc =>
        {
            _logger.Info("Recovery complete for {0}.", Self.Path.ToStringWithAddress());
        });

        //_logger.Info("Setting Up Command Handlers.");
        //// Commands
        //Command<SaveSnapshotSuccess>(c => HandleSuccessfulSnapshotSave(c));
        //Command<SaveSnapshotFailure>(c => HandleUnSuccessfulSnapshotSave(c));
        //// Events
        //Command<ClientInsertedEvent>(e => HandleClientInsertedEvent(e));
        //Command<ClientDeletedEvent>(e => HandleClientDeletedEvent(e));
        //Command<ClientUnDeletedEvent>(e => HandleClientUnDeletedEvent(e));
        //Command<ClientUpdatedEvent>(e => HandleClientUpdatedEvent(e));
        //Command<SubscribedForCommandEvents>(e => { _logger.Info("Now listening to:{0}", e.Id); });
        //// Requests
        //Command<ClientGetStateRequest>(r => { Sender.Tell(new ClientGetStateResponse(Sender, null, r)); });
        //Command<ClientIdGetListRequest>(r => HandleClientIdGetListRequest(r));
        //// Configuration
        //Command<SetSnapshotTriggerCount>(c => { Persist<SetSnapshotTriggerCount>(c, SetSnapshotTriggerConfigurationValue); });
        //Command<SetInactivityFlushSec>(c => { Persist<SetInactivityFlushSec>(c, SetInactivityFlushSecConfigurationValue); });
        //// Handle any string commands
        //Command<string>(s => HandleStringCommand(s));

        // This catch-all will log any unexpected unhandled messages.
        CommandAny(o => { _logger.Debug(o.ToString()); });

        _logger.Debug($"Completed set-up of persistence actor persistence id: {PersistenceId}");
    }
    catch (Exception ex)
    {
        _logger.Error(ex, "Something went really wrong during recovery.");
    }
}
private void Handle(string msg)
{
    _log.Info($">>> Received message: \"{msg}\", Sender: {Sender}");
}
private void HandleMemberUp(ClusterEvent.MemberUp up)
{
    Log.Info("Member is up: {0}", up.Member);
}
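A handler like HandleMemberUp only fires if the actor has subscribed to cluster domain events. A minimal sketch of the usual wiring, assuming the surrounding class is a ReceiveActor; the Receive registration shown is an assumption about how this particular actor is set up:

protected override void PreStart()
{
    // Subscribe to MemberUp events; InitialStateAsEvents replays the current cluster
    // state as events so already-up members are also delivered to this actor.
    Cluster.Get(Context.System).Subscribe(Self, ClusterEvent.InitialStateAsEvents, typeof(ClusterEvent.MemberUp));
}

protected override void PostStop()
{
    Cluster.Get(Context.System).Unsubscribe(Self);
}

// e.g. in the ReceiveActor's constructor:
// Receive<ClusterEvent.MemberUp>(HandleMemberUp);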