public IEnumerable<ITradeEvent> WithAsk(Ask a)
{
    if (AskTrades.ContainsKey(a.OrderId))
    {
        _logger?.Warning("Already have trade with ID {0} recorded for symbol {1}. Ignoring duplicate Ask.", a.OrderId, a.StockId);
        return EmptyTradeEvents;
    }

    var order = a.ToOrder();
    var (hasMatch, matches) = HasMatches(order, BidsByPrice);
    if (!hasMatch) // no matches
    {
        // Save the order into our matching system and rebuild the index.
        _asks[order.OrderId] = order;
        RebuildAskIndex();
        return EmptyTradeEvents; // no new events
    }

    var events = new List<ITradeEvent>();
    var time = _timestamper.Now;

    // process all matches
    foreach (var e in matches)
    {
        var (bidFill, askFill) = FillOrders(e, order, _timestamper);
        events.Add(askFill);
        events.Add(bidFill);
        order = order.WithFill(askFill);

        // Update bid-side matching engine state
        UpdateOrder(e, bidFill, _bids);

        // generate match notification
        var match = new Match(order.StockId, e.OrderId, order.OrderId, askFill.Price, askFill.Quantity, time);
        events.Add(match);
    }

    // need to rebuild the bids that have been modified
    RebuildBidIndex();

    if (!order.Completed) // Ask was not completely filled
    {
        // need to save it back into the matching engine
        _asks[order.OrderId] = order;
        RebuildAskIndex();
    }

    return events;
}
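The core of the loop above is a standard fill pattern. A minimal, self-contained toy (hypothetical types and quantities, not the engine's real API) showing the shape: consume liquidity match by match until the incoming order is exhausted, and let any remainder rest on the book.

// Toy sketch of the fill loop's shape (assumed semantics, not the real engine):
static int FillAgainst(int incomingQty, IEnumerable<int> restingQtys)
{
    foreach (var resting in restingQtys)
    {
        incomingQty -= Math.Min(incomingQty, resting); // fill as much as this level allows
        if (incomingQty == 0) break;                   // fully filled - stop matching
    }
    return incomingQty; // > 0 means the remainder rests on the book
}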
protected override async Task<IImmutableList<Exception>> WriteMessagesAsync(IEnumerable<AtomicWrite> atomicWrites)
{
    try
    {
        var taggedEntries = ImmutableDictionary<string, List<EventTagEntry>>.Empty;
        var exceptions = ImmutableList<Exception>.Empty;
        var highSequenceNumbers = ImmutableDictionary<string, long>.Empty;

        using (var currentWrites = atomicWrites.GetEnumerator())
        {
            while (currentWrites.MoveNext())
            {
                Debug.Assert(currentWrites.Current != null, "atomicWrites.Current != null");
                var list = currentWrites.Current.Payload.AsInstanceOf<IImmutableList<IPersistentRepresentation>>();
                var batchItems = ImmutableList<ITableEntity>.Empty;

                foreach (var t in list)
                {
                    var item = t;
                    Debug.Assert(item != null, nameof(item) + " != null");
                    byte[] payloadBytes = null;
                    string[] tags = { };

                    // If the payload is a tagged payload, reset to a non-tagged payload
                    if (item.Payload is Tagged tagged)
                    {
                        item = item.WithPayload(tagged.Payload);
                        payloadBytes = _serialization.PersistentToBytes(item);
                        if (tagged.Tags.Count > 0)
                            tags = tagged.Tags.ToArray();
                    }

                    if (payloadBytes == null)
                        payloadBytes = _serialization.PersistentToBytes(item);

                    var newItem = new PersistentJournalEntry(item.PersistenceId, item.SequenceNr, payloadBytes, item.Manifest, tags);
                    batchItems = batchItems.Add(newItem);

                    foreach (var tag in tags)
                    {
                        if (!taggedEntries.ContainsKey(tag))
                            taggedEntries = taggedEntries.SetItem(tag, new List<EventTagEntry>());

                        taggedEntries[tag].Add(new EventTagEntry(newItem.PartitionKey, tag, newItem.SeqNo, newItem.Payload, newItem.Manifest, newItem.UtcTicks));
                    }

                    highSequenceNumbers = highSequenceNumbers.SetItem(item.PersistenceId, item.SequenceNr);
                }

                try
                {
                    var persistenceBatch = new TableBatchOperation();
                    highSequenceNumbers.ForEach(x => batchItems = batchItems.Add(new HighestSequenceNrEntry(x.Key, x.Value)));

                    // Encode partition keys for writing
                    foreach (var tableEntity in batchItems)
                        tableEntity.PartitionKey = PartitionKeyEscapeHelper.Escape(tableEntity.PartitionKey);

                    batchItems.ForEach(x => persistenceBatch.InsertOrReplace(x));

                    if (_log.IsDebugEnabled && _settings.VerboseLogging)
                        _log.Debug("Attempting to write batch of {0} messages to Azure storage", persistenceBatch.Count);

                    var persistenceResults = await Table.ExecuteBatchAsLimitedBatches(persistenceBatch);

                    if (_log.IsDebugEnabled && _settings.VerboseLogging)
                        foreach (var r in persistenceResults)
                            _log.Debug("Azure table storage wrote entity [{0}] with status code [{1}]", r.Etag, r.HttpStatusCode);

                    exceptions = exceptions.Add(null); // success marker for this AtomicWrite
                }
                catch (Exception ex)
                {
                    _log.Warning(ex, "Failure while writing messages to Azure table storage");
                    exceptions = exceptions.Add(ex);
                }
            }
        }

        if (exceptions.All(ex => ex == null))
        {
            var allPersistenceIdsBatch = new TableBatchOperation();
            highSequenceNumbers.ForEach(x =>
            {
                var encodedKey = PartitionKeyEscapeHelper.Escape(x.Key);
                allPersistenceIdsBatch.InsertOrReplace(new AllPersistenceIdsEntry(encodedKey));
            });

            var allPersistenceResults = await Table.ExecuteBatchAsLimitedBatches(allPersistenceIdsBatch);

            if (_log.IsDebugEnabled && _settings.VerboseLogging)
                foreach (var r in allPersistenceResults)
                    _log.Debug("Azure table storage wrote entity [{0}] with status code [{1}]", r.Etag, r.HttpStatusCode);

            if (HasPersistenceIdSubscribers || HasAllPersistenceIdSubscribers)
                highSequenceNumbers.ForEach(x => NotifyNewPersistenceIdAdded(x.Key));

            if (taggedEntries.Count > 0)
            {
                var eventTagsBatch = new TableBatchOperation();
                foreach (var kvp in taggedEntries)
                {
                    eventTagsBatch.Clear();
                    foreach (var item in kvp.Value)
                    {
                        item.PartitionKey = PartitionKeyEscapeHelper.Escape(item.PartitionKey);
                        eventTagsBatch.InsertOrReplace(item);
                    }

                    var eventTagsResults = await Table.ExecuteBatchAsLimitedBatches(eventTagsBatch);

                    if (_log.IsDebugEnabled && _settings.VerboseLogging)
                        foreach (var r in eventTagsResults)
                            _log.Debug("Azure table storage wrote entity [{0}] with status code [{1}]", r.Etag, r.HttpStatusCode);

                    if (HasTagSubscribers && taggedEntries.Count != 0)
                        foreach (var tag in taggedEntries.Keys)
                            NotifyTagChange(tag);
                }
            }
        }

        /*
         * Part of the Akka.Persistence design: return null when every AtomicWrite
         * succeeded, or a list with one entry per AtomicWrite (null for success,
         * the exception for failure) when any of them failed.
         */
        return exceptions.Any(ex => ex != null) ? exceptions : null;
    }
    catch (Exception ex)
    {
        _log.Error(ex, "Error during WriteMessagesAsync");
        throw;
    }
}
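For orientation, a sketch of the Akka.Persistence result contract the comment above describes (hypothetical helper, not library code; assumes System.Linq and System.Collections.Immutable):

// One slot per AtomicWrite: null on success, the exception on failure;
// the whole list collapses to null when nothing failed.
static IImmutableList<Exception> ToProtocolResult(IEnumerable<Exception> perWriteOutcomes)
{
    var list = perWriteOutcomes.ToImmutableList();  // one entry per AtomicWrite
    return list.Any(e => e != null) ? list : null;  // null means "all succeeded"
}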
/// <summary>
/// Run tasks of all phases including and after the given phase.
/// </summary>
/// <param name="fromPhase">Optional. The phase to start the run from.</param>
/// <returns>A task that is completed when all such tasks have been completed, or
/// there is a failure when <see cref="Phase.Recover"/> is disabled.</returns>
/// <remarks>
/// It is safe to call this method multiple times. It will only run once.
/// </remarks>
public Task<Done> Run(string fromPhase = null)
{
    if (_runStarted.CompareAndSet(false, true))
    {
        var debugEnabled = Log.IsDebugEnabled;
        Func<List<string>, Task<Done>> loop = null;
        loop = remainingPhases =>
        {
            var phase = remainingPhases.FirstOrDefault();
            if (phase == null)
                return TaskEx.Completed;

            var remaining = remainingPhases.Skip(1).ToList();
            Task<Done> phaseResult = null;
            ImmutableList<Tuple<string, Func<Task<Done>>>> phaseTasks;
            if (!_tasks.TryGetValue(phase, out phaseTasks))
            {
                if (debugEnabled)
                    Log.Debug("Performing phase [{0}] with [0] tasks.", phase);
                phaseResult = TaskEx.Completed;
            }
            else
            {
                if (debugEnabled)
                    Log.Debug("Performing phase [{0}] with [{1}] tasks: [{2}]", phase, phaseTasks.Count, string.Join(",", phaseTasks.Select(x => x.Item1)));

                // note that tasks within the same phase are performed in parallel
                var recoverEnabled = Phases[phase].Recover;
                var result = Task.WhenAll<Done>(phaseTasks.Select(x =>
                {
                    var taskName = x.Item1;
                    var task = x.Item2;
                    try
                    {
                        // need to begin execution of the task
                        var r = task();

                        if (recoverEnabled)
                        {
                            return r.ContinueWith(tr =>
                            {
                                if (tr.IsCanceled || tr.IsFaulted)
                                    Log.Warning("Task [{0}] failed in phase [{1}]: {2}", taskName, phase, tr.Exception?.Flatten().Message);
                                return Done.Instance;
                            });
                        }

                        return r;
                    }
                    catch (Exception ex)
                    {
                        // in case task() throws synchronously
                        if (recoverEnabled)
                        {
                            Log.Warning("Task [{0}] failed in phase [{1}]: {2}", taskName, phase, ex.Message);
                            return TaskEx.Completed;
                        }

                        return TaskEx.FromException<Done>(ex);
                    }
                })).ContinueWith(tr =>
                {
                    // forces downstream error propagation if recover is disabled
                    var force = tr.Result;
                    return Done.Instance;
                });

                var timeout = Phases[phase].Timeout;
                var deadLine = MonotonicClock.Elapsed + timeout;
                Task<Done> timeoutFunction = null;
                try
                {
                    timeoutFunction = After(timeout, System.Scheduler, () =>
                    {
                        if (phase == CoordinatedShutdown.PhaseActorSystemTerminate && MonotonicClock.ElapsedHighRes < deadLine)
                        {
                            // too early, i.e. triggered by system termination
                            return result;
                        }

                        if (result.IsCompleted)
                            return TaskEx.Completed;

                        if (recoverEnabled)
                        {
                            Log.Warning("Coordinated shutdown phase [{0}] timed out after {1}", phase, timeout);
                            return TaskEx.Completed;
                        }

                        return TaskEx.FromException<Done>(new TimeoutException($"Coordinated shutdown phase [{phase}] timed out after {timeout}"));
                    });
                }
                catch (SchedulerException)
                {
                    // The call to `After` threw SchedulerException, triggered by system termination
                    timeoutFunction = result;
                }
                catch (InvalidOperationException)
                {
                    // The call to `After` threw InvalidOperationException, triggered by the Scheduler being in an unset state
                    timeoutFunction = result;
                }

                phaseResult = Task.WhenAny<Done>(result, timeoutFunction).Unwrap();
            }

            if (!remaining.Any())
                return phaseResult;

            return phaseResult.ContinueWith(tr =>
            {
                // force any exceptions to be rethrown so the next phase stops
                // and the failure gets propagated back to the caller
                var r = tr.Result;
                return loop(remaining);
            }).Unwrap();
        };

        var runningPhases = (fromPhase == null
            ? OrderedPhases // all phases
            : OrderedPhases.From(fromPhase)).ToList();

        var done = loop(runningPhases);
        done.ContinueWith(tr =>
        {
            if (!tr.IsFaulted && !tr.IsCanceled)
                _runPromise.SetResult(tr.Result);
            else
                // ReSharper disable once PossibleNullReferenceException
                _runPromise.SetException(tr.Exception.Flatten());
        });
    }

    return _runPromise.Task;
}
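The run-once semantics above come from the atomic CompareAndSet on _runStarted plus the shared _runPromise that every caller receives. A minimal sketch of that pattern in plain C# (hypothetical names, using Interlocked instead of Akka's AtomicBoolean):

// Run-once guard: the first caller drives the work, later callers share the result.
class RunOnce<T>
{
    private int _started; // 0 = not started, 1 = started
    private readonly TaskCompletionSource<T> _promise = new TaskCompletionSource<T>();

    public Task<T> Run(Func<Task<T>> body)
    {
        if (Interlocked.CompareExchange(ref _started, 1, 0) == 0)
        {
            body().ContinueWith(t =>
            {
                if (t.IsFaulted) _promise.SetException(t.Exception.Flatten());
                else if (t.IsCanceled) _promise.SetCanceled();
                else _promise.SetResult(t.Result);
            });
        }
        return _promise.Task; // every caller awaits the same completion
    }
}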
private void Ready() { _log.Debug("AggregateRootCoordinatorActor entering Ready state"); Receive <IAccountMessage>(msg => { //forward on if (!_accountWorkerRefs.ContainsKey(msg.AccountId)) { var accountProjection = Context.ActorOf(Props.Create <AccountProjection>(msg.AccountId)); var parms = new AggregateRootCreationParameters( msg.AccountId, new List <IActorRef>() { _accountIndexProjection, accountProjection }, snapshotThreshold: 5, receiveTimeout: TimeSpan.FromMinutes(2) ); //register our projection _registry.Tell(new RegisterProjection(msg.AccountId, accountProjection)); _accountWorkerRefs.Add(msg.AccountId, Context.ActorOf(Props.Create <AccountActor>(parms), "aggregates(account)" + msg.AccountId.ToString())); _log.Debug("Account:{0}, Added to Agg Root Coordinator Cache", msg.AccountId); } _accountWorkerRefs[msg.AccountId].Forward(msg); }); Receive <PassivateMessage>(msg => { _log.Debug("Account:{0}, timed out, due to inactivity", msg.Id); var actorToUnload = Context.Sender; actorToUnload.GracefulStop(TimeSpan.FromSeconds(10)).ContinueWith((success) => { if (success.Result) { _accountWorkerRefs.Remove(msg.Id); _log.Debug("Account:{0}, removed", msg.Id); } else { _log.Warning("Account:{0}, failed to removed", msg.Id); } }); // the time between the above and below lines, we need to intercept messages to the child that is being // removed from memory - how to do this? //task.Wait(); // dont block thread, use pipeto instead? }); Receive <GetProjection>(msg => { _log.Debug("GetProjection requested ", msg.Key); _registry.Forward(msg); }); Receive <GetIndexProjection>(msg => { _log.Debug("GetIndexProjection requested ", msg.Key); _registry.Forward(msg); }); }
public void InitFSM()
{
    StartWith(State.Connecting, new Data(null, null));

    When(State.Connecting, @event =>
    {
        if (@event.FsmEvent is IClientOp)
            return Stay().Replying(new Status.Failure(new IllegalStateException("not connected yet")));

        var connected = @event.FsmEvent as Connected;
        if (connected != null)
        {
            connected.Channel.WriteAndFlushAsync(new Hello(_name.Name, TestConductor.Get(Context.System).Address));
            return GoTo(State.AwaitDone).Using(new Data(connected.Channel, null));
        }

        if (@event.FsmEvent is ConnectionFailure)
            return GoTo(State.Failed);

        if (@event.FsmEvent is StateTimeout)
        {
            _log.Error($"Failed to connect to test conductor within {_settings.ConnectTimeout.TotalMilliseconds} ms.");
            return GoTo(State.Failed);
        }

        return null;
    }, _settings.ConnectTimeout);

    When(State.AwaitDone, @event =>
    {
        if (@event.FsmEvent is Done)
        {
            _log.Debug("received Done: starting test");
            return GoTo(State.Connected);
        }

        if (@event.FsmEvent is INetworkOp)
        {
            _log.Error("Received {0} instead of Done", @event.FsmEvent);
            return GoTo(State.Failed);
        }

        if (@event.FsmEvent is IServerOp)
            return Stay().Replying(new Failure(new IllegalStateException("not connected yet")));

        if (@event.FsmEvent is StateTimeout)
        {
            _log.Error("connect timeout to TestConductor");
            return GoTo(State.Failed);
        }

        return null;
    }, _settings.BarrierTimeout);

    When(State.Connected, @event =>
    {
        if (@event.FsmEvent is Disconnected)
        {
            _log.Info("disconnected from TestConductor");
            throw new ConnectionFailure("disconnect");
        }

        if (@event.FsmEvent is ToServer<Done> && @event.StateData.Channel != null)
        {
            @event.StateData.Channel.WriteAndFlushAsync(Done.Instance);
            return Stay();
        }

        var toServer = @event.FsmEvent as IToServer;
        if (toServer != null && @event.StateData.Channel != null && @event.StateData.RunningOp == null)
        {
            @event.StateData.Channel.WriteAndFlushAsync(toServer.Msg);
            string token = null;
            var enterBarrier = @event.FsmEvent as ToServer<EnterBarrier>;
            if (enterBarrier != null)
            {
                token = enterBarrier.Msg.Name;
            }
            else
            {
                var getAddress = @event.FsmEvent as ToServer<GetAddress>;
                if (getAddress != null)
                    token = getAddress.Msg.Node.Name;
            }

            return Stay().Using(@event.StateData.Copy(runningOp: Tuple.Create(token, Sender)));
        }

        if (toServer != null && @event.StateData.Channel != null && @event.StateData.RunningOp != null)
        {
            _log.Error("cannot write {0} while waiting for {1}", toServer.Msg, @event.StateData.RunningOp);
            return Stay();
        }

        if (@event.FsmEvent is IClientOp && @event.StateData.Channel != null)
        {
            var barrierResult = @event.FsmEvent as BarrierResult;
            if (barrierResult != null)
            {
                if (@event.StateData.RunningOp == null)
                {
                    _log.Warning("did not expect {0}", @event.FsmEvent);
                }
                else
                {
                    object response;
                    if (barrierResult.Name != @event.StateData.RunningOp.Item1)
                        response = new Failure(new Exception("wrong barrier " + barrierResult + " received while waiting for " + @event.StateData.RunningOp.Item1));
                    else if (!barrierResult.Success)
                        response = new Failure(new Exception("barrier failed: " + @event.StateData.RunningOp.Item1));
                    else
                        response = barrierResult.Name;

                    @event.StateData.RunningOp.Item2.Tell(response);
                }

                return Stay().Using(@event.StateData.Copy(runningOp: null));
            }

            var addressReply = @event.FsmEvent as AddressReply;
            if (addressReply != null)
            {
                if (@event.StateData.RunningOp == null)
                    _log.Warning("did not expect {0}", @event.FsmEvent);
                else
                    @event.StateData.RunningOp.Item2.Tell(addressReply.Addr);

                return Stay().Using(@event.StateData.Copy(runningOp: null));
            }

            var throttleMsg = @event.FsmEvent as ThrottleMsg;
            if (throttleMsg != null)
            {
                ThrottleMode mode;
                if (throttleMsg.RateMBit < 0.0f)
                    mode = Unthrottled.Instance;
                else if (throttleMsg.RateMBit == 0.0f)
                    mode = Blackhole.Instance;
                else
                    mode = new Transport.TokenBucket(1000, throttleMsg.RateMBit * 125000, 0, 0);

                var cmdTask = TestConductor.Get(Context.System)
                    .Transport.ManagementCommand(new SetThrottle(throttleMsg.Target, throttleMsg.Direction, mode));

                var self = Self;
                cmdTask.ContinueWith(t =>
                {
                    if (t.IsFaulted)
                        throw new ConfigurationException("Throttle was requested from the TestConductor, but no transport " +
                                                         "adapters available that support throttling. Specify 'testTransport(on=true)' in your MultiNodeConfig");
                    self.Tell(new ToServer<Done>(Done.Instance));
                });
                return Stay();
            }

            if (@event.FsmEvent is DisconnectMsg)
                return Stay(); // FIXME: is this the right execution context for the future below?

            var terminateMsg = @event.FsmEvent as TerminateMsg;
            if (terminateMsg != null)
            {
                _log.Info("Received TerminateMsg - shutting down...");
                if (terminateMsg.ShutdownOrExit.IsLeft && terminateMsg.ShutdownOrExit.ToLeft().Value == false)
                {
                    Context.System.Terminate();
                    return Stay();
                }

                if (terminateMsg.ShutdownOrExit.IsLeft && terminateMsg.ShutdownOrExit.ToLeft().Value == true)
                {
                    Context.System.AsInstanceOf<ActorSystemImpl>().Abort();
                    return Stay();
                }

                if (terminateMsg.ShutdownOrExit.IsRight)
                {
                    Environment.Exit(terminateMsg.ShutdownOrExit.ToRight().Value);
                    return Stay();
                }
            }

            if (@event.FsmEvent is Done)
                return Stay(); // FIXME: what should happen here?
        }

        return null;
    });

    When(State.Failed, @event =>
    {
        if (@event.FsmEvent is IClientOp)
            return Stay().Replying(new Status.Failure(new Exception("cannot do " + @event.FsmEvent + " while failed")));

        if (@event.FsmEvent is INetworkOp)
        {
            _log.Warning("ignoring network message {0} while Failed", @event.FsmEvent);
            return Stay();
        }

        return null;
    });

    OnTermination(e =>
    {
        _log.Info("Terminating connection to multi-node test controller due to [{0}]", e.Reason);
        if (e.StateData.Channel != null)
        {
            var disconnectTimeout = TimeSpan.FromSeconds(2); // TODO: make into a setting loaded from HOCON
            if (!e.StateData.Channel.CloseAsync().Wait(disconnectTimeout))
                _log.Warning("Failed to disconnect from conductor within {0}", disconnectTimeout);
        }
    });

    Initialize();
}
protected void InitFSM()
{
    StartWith(State.Initial, null);

    WhenUnhandled(@event =>
    {
        var clientDisconnected = @event.FsmEvent as Controller.ClientDisconnected;
        if (clientDisconnected != null)
        {
            if (@event.StateData != null)
                @event.StateData.Tell(new Failure(new Controller.ClientDisconnectedException("client disconnected in state " + StateName + ": " + _channel)));
            return Stop();
        }

        return null;
    });

    OnTermination(@event =>
    {
        _controller.Tell(new Controller.ClientDisconnected(_roleName));
        _channel.CloseAsync();
    });

    When(State.Initial, @event =>
    {
        var hello = @event.FsmEvent as Hello;
        if (hello != null)
        {
            _roleName = new RoleName(hello.Name);
            _controller.Tell(new Controller.NodeInfo(_roleName, hello.Address, Self));
            return GoTo(State.Ready);
        }

        if (@event.FsmEvent is INetworkOp)
        {
            _log.Warning("client {0} did not send Hello as its first message (instead {1}), disconnecting", _channel.RemoteAddress, @event.FsmEvent);
            _channel.CloseAsync();
            return Stop();
        }

        if (@event.FsmEvent is IToClient)
        {
            _log.Warning("cannot send {0} in state Initial", @event.FsmEvent);
            return Stay();
        }

        if (@event.FsmEvent is StateTimeout)
        {
            _log.Info("closing channel to {0} because of Hello timeout", _channel.RemoteAddress);
            _channel.CloseAsync();
            return Stop();
        }

        return null;
    }, TimeSpan.FromSeconds(10));

    When(State.Ready, @event =>
    {
        if (@event.FsmEvent is Done && @event.StateData != null)
        {
            @event.StateData.Tell(@event.FsmEvent);
            return Stay().Using(null);
        }

        if (@event.FsmEvent is IServerOp)
        {
            _controller.Tell(@event.FsmEvent);
            return Stay();
        }

        if (@event.FsmEvent is INetworkOp)
        {
            _log.Warning("client {0} sent unsupported message {1}", _channel.RemoteAddress, @event.FsmEvent);
            return Stop();
        }

        var toClient = @event.FsmEvent as IToClient;
        if (toClient != null)
        {
            if (toClient.Msg is IUnconfirmedClientOp)
            {
                _channel.WriteAndFlushAsync(toClient.Msg);
                return Stay();
            }

            if (@event.StateData == null)
            {
                _channel.WriteAndFlushAsync(toClient.Msg);
                return Stay().Using(Sender);
            }

            _log.Warning("cannot send {0} while waiting for previous ACK", toClient.Msg);
            return Stay();
        }

        return null;
    });

    Initialize();
}
/// <summary>
/// Interprets the given blocks: verifies signatures, validates coin rules and hash
/// chains, and stores the blocks that pass.
/// </summary>
/// <param name="message">The batch of block IDs to interpret.</param>
/// <returns>True when all blocks were interpreted successfully.</returns>
public virtual async Task<bool> Interpret(InterpretBlocksMessage message)
{
    if (message == null) throw new ArgumentNullException(nameof(message));
    if (unitOfWork == null) throw new NullReferenceException(nameof(unitOfWork));
    if (signingActorProvider == null) throw new NullReferenceException(nameof(signingActorProvider));
    if (logger == null) throw new NullReferenceException(nameof(logger));

    foreach (var block in message.BlockIDs)
    {
        var coinExists = await unitOfWork.BlockID.HasCoin(block.SignedBlock.Coin.Stamp, block.SignedBlock.Coin.Version);
        if (coinExists)
        {
            logger.Warning($"<<< InterpretBlocksProvider.InterpretBlocks >>>: Coin exists for block {block.Round} from node {block.Node}");
            continue;
        }

        var blockIdProto = new BlockIDProto
        {
            Hash = block.Hash,
            Node = block.Node,
            Round = block.Round,
            SignedBlock = block.SignedBlock
        };

        if (!await signingActorProvider.VerifiyBlockSignature(new VerifiyBlockSignatureMessage(blockIdProto)))
        {
            logger.Error($"<<< InterpretBlocksProvider.InterpretBlocks >>>: unable to verify signature for block {block.Round} from node {block.Node}");
            continue;
        }

        if (!await signingActorProvider.ValidateCoinRule(new ValidateCoinRuleMessage(blockIdProto.SignedBlock.Coin)))
        {
            logger.Error($"<<< InterpretBlocksProvider.InterpretBlocks >>>: unable to validate coin rule for block {block.Round} from node {block.Node}");
            continue;
        }

        var coins = await unitOfWork.BlockID
            .GetWhere(x => x.SignedBlock.Coin.Stamp.Equals(blockIdProto.SignedBlock.Coin.Stamp) && x.Node.Equals(message.Node));

        if (coins?.Any() == true)
        {
            var list = coins.ToList();
            for (int i = 0; i < list.Count; i++)
            {
                // Pick neighbours with explicit guards instead of catching a
                // DivideByZeroException from `% (list.Count - 1)`: with a single
                // element there is no distinct neighbour, and for i == 0 the old
                // expression `(i - 1) % (list.Count - 1)` evaluates to -1 and would
                // throw an ArgumentOutOfRangeException that the old catch never handled.
                CoinProto previous = (list.Count == 1 || i == 0)
                    ? list[i].SignedBlock.Coin
                    : list[(i - 1) % (list.Count - 1)].SignedBlock.Coin;

                if (!await signingActorProvider.ValidateCoinRule(new ValidateCoinRuleMessage(previous)))
                {
                    logger.Error($"<<< InterpretBlocksProvider.InterpretBlocks >>>: unable to validate coin rule for block {block.Round} from node {block.Node}");
                    return false;
                }

                CoinProto next;
                if (list.Count == 1)
                {
                    // No distinct successor: fall back to the block's own coin,
                    // as the original DivideByZeroException handler did.
                    next = blockIdProto.SignedBlock.Coin;
                }
                else
                {
                    next = list[(i + 1) % (list.Count - 1)].SignedBlock.Coin;
                    if (!await signingActorProvider.ValidateCoinRule(new ValidateCoinRuleMessage(next)))
                    {
                        logger.Error($"<<< InterpretBlocksProvider.InterpretBlocks >>>: unable to validate coin rule for block {block.Round} from node {block.Node}");
                        return false;
                    }
                }

                if (!await signingActorProvider.VerifiyHashChain(new VerifiyHashChainMessage(previous, next)))
                {
                    logger.Error($"<<< InterpretBlocksProvider.InterpretBlocks >>>: Could not verify hash chain for Interpreted BlockID");
                    return false;
                }
            }

            using var pedersen = new Pedersen();
            var sum = coins.Select(c => c.SignedBlock.Coin.Commitment.FromHex());
            var success = pedersen.VerifyCommitSum(new List<byte[]> { sum.First() }, sum.Skip(1));
            if (!success)
            {
                logger.Error($"<<< InterpretBlocksProvider.InterpretBlocks >>>: Could not verify committed sum for Interpreted BlockID");
                return false;
            }
        }

        var blockId = await unitOfWork.BlockID.StoreOrUpdate(blockIdProto);
        if (blockId == null)
        {
            logger.Error($"<<< InterpretBlocksProvider.InterpretBlocks >>>: Could not save block for {blockIdProto.Node} and round {blockIdProto.Round}");
            return false;
        }
    }

    return true;
}
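A generic helper, purely illustrative (hypothetical, not part of the provider), that makes wrap-around neighbour selection explicit and sidesteps both the modulo-by-zero and negative-index pitfalls the original exception handling was working around:

// Hypothetical neighbour lookup for a non-empty list using canonical modulo.
static T Neighbour<T>(IReadOnlyList<T> items, int i, int offset)
{
    var n = items.Count;
    return items[((i + offset) % n + n) % n]; // always lands in [0, n)
}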
public NeuronActor()
{
    #region [ Setup Initial Actor State ]

    // Assign a unique Guid
    this.Id = Guid.NewGuid();

    // Create and dispose a NeuronActorActivator so that we don't have to keep repeating ourselves.
    using (var activator = new NeuronActorActivator())
    {
        activator.InitializeNeuronActor(this);
    }

    #endregion

    #region [ Property Receivers ]

    // Receive and process IEnumerable<IActorRef> for either InputActors or OutputActors.
    // If we can't process the message, log a warning and send an Enums.NeuronSignals.SignalFault.
    Receive<Tuple<Enums.NeuronSignals, IEnumerable<IActorRef>>>(m =>
    {
        switch (m.Item1)
        {
            case Enums.NeuronSignals.InputActorsReceived:
                // Reassign rather than `InputActors.ToList().AddRange(...)`, which only
                // appended to a throw-away copy (assumes the property is settable).
                this.InputActors = this.InputActors.Concat(m.Item2).ToList();
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.InputActorsReceived), Self);
                break;
            case Enums.NeuronSignals.OutputActorsReceived:
                this.OutputActors = this.OutputActors.Concat(m.Item2).ToList();
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.OutputActorsReceived), Self);
                break;
            default:
                _log.Warning($"[{DateTime.Now}] Invalid NeuronSignal Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                break;
        }
    });

    //Receive<Tuple<Enums.NeuronSignals, float>>(m =>
    //{
    //});

    Receive<Tuple<Enums.NeuronSignals, float?>>(m =>
    {
        switch (m.Item1)
        {
            case Enums.NeuronSignals.BiasReceived:
                this.Bias = m.Item2;
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.BiasReceived), Self);
                break;
            case Enums.NeuronSignals.ThresholdReceived:
                this.Threshold = m.Item2;
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.ThresholdReceived), Self);
                break;
            case Enums.NeuronSignals.AccumulatorReceived:
                this.Accumulator = (float)m.Item2;
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.AccumulatorReceived), Self);
                break;
            case Enums.NeuronSignals.InputReceived:
                if (this.InputActors.ToList().Contains(Sender))
                {
                    // Same fix as above: `Input.ToList().Add(...)` modified a copy that was discarded.
                    this.Input = this.Input.Concat(new[] { (float)m.Item2 }).ToList();
                    _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                    Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.InputReceived), Self);
                }
                break;
            default:
                _log.Warning($"[{DateTime.Now}] Invalid NeuronSignal Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                break;
        }
    });

    //Receive<Tuple<Enums.NeuronSignals, Tuple<float?, float?, float?>>>(m =>
    Receive<Tuple<Enums.NeuronSignals, IEnumerable<float>>>(m =>
    {
        switch (m.Item1)
        {
            case Enums.NeuronSignals.WeightsReceived:
                this.Weights = m.Item2;
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.WeightsReceived), Self);
                break;
            default:
                _log.Warning($"[{DateTime.Now}] Invalid NeuronSignal Received: {Enum.GetName(typeof(Enums.NeuronSignals), m.Item1)} with [{JsonConvert.SerializeObject(m.Item2)}] from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                break;
        }
    });

    Receive<Enums.NeuronSignals>(m =>
    {
        switch (m)
        {
            case Enums.NeuronSignals.CalculateDotProduct:
                if (this.Input != null && this.Weights != null)
                {
                    try
                    {
                        this.DotProduct = this.Input.DotProduct(this.Weights, this.Accumulator) + this.Bias;
                        Sender.Tell(new Tuple<Enums.NeuronSignals, float?>(Enums.NeuronSignals.CalculateDotProduct, this.DotProduct), Self);
                    }
                    catch (Exception e)
                    {
                        _log.Error(e, $"[{DateTime.Now}] Sending of {m} threw an exception when instantiated by: {Sender}");
                        Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                    }
                }
                else
                {
                    _log.Warning($"[{DateTime.Now}] One of the inputs of the DotProduct function is null when invoked by {Sender}. this.Input: {this.Input} this.Weights: {this.Weights}");
                    Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                }
                break;
            case Enums.NeuronSignals.InvokeActivationFunction:
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m)} from: {Sender}");
                this.Output = this.ActivationFunction(this.DotProduct, this.Threshold);
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.InvokeActivationFunction), Self);
                break;
            case Enums.NeuronSignals.ForwardOutput:
                _log.Info($"[{DateTime.Now}] Received: {Enum.GetName(typeof(Enums.NeuronSignals), m)} from: {Sender}");
                ForwardOutputToOutputNeurons();
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.ForwardOutput), Self);
                break;
            default:
                _log.Warning($"[{DateTime.Now}] Unprocessable NeuronSignal Received: {Enum.GetName(typeof(Enums.NeuronSignals), m)} from: {Sender}");
                Sender.Tell(new Tuple<Enums.NeuronSignals, Enums.NeuronSignals>(Enums.NeuronSignals.Ack, Enums.NeuronSignals.SignalFault), Self);
                break;
        }
    });

    #endregion
}
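The DotProduct extension used above is project-specific; as an assumption-flagged sketch, the computation a neuron typically performs before activation is just a bias-plus-weighted-sum:

// Plain weighted sum (assumed to approximate what Input.DotProduct(Weights, ...) computes):
static float WeightedSum(IReadOnlyList<float> inputs, IReadOnlyList<float> weights, float bias)
{
    var sum = bias;
    for (var i = 0; i < Math.Min(inputs.Count, weights.Count); i++)
        sum += inputs[i] * weights[i]; // accumulate input_i * weight_i
    return sum;
}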
public KoteActor()
{
    _hungerControl = Context.System.Scheduler.ScheduleTellRepeatedlyCancelable(
        TimeSpan.FromSeconds(5), TimeSpan.FromSeconds(5), Self, new GrowHungry(10), Self);

    StartWith(KoteState.Idle, new VitalSigns(5));

    When(KoteState.Idle, state =>
    {
        switch (state.FsmEvent)
        {
            case FallAsleep _:
                return GoTo(KoteState.Sleeping);
            case Initialize init:
                return Born(init.Name);
        }

        return null;
    });

    When(KoteState.Sleeping, state =>
    {
        switch (state.FsmEvent)
        {
            case WakeUp _:
                return GoTo(KoteState.Idle);
        }

        return null;
    });

    When(KoteState.Hunger, state => null);

    When(KoteState.Walking, state =>
    {
        SetTimer("walkingTimer", $"Kote {_name} walking", TimeSpan.FromSeconds(1), true);
        return null;
    });

    WhenUnhandled(DefaultHandle);

    OnTransition((state, nextState) =>
    {
        switch (state)
        {
            case KoteState.Sleeping:
                _log.Warning($"Kote {_name} awake");
                break;
            case KoteState.Walking:
                CancelTimer("walkingTimer");
                break;
        }

        if (nextState == KoteState.Sleeping)
            _log.Warning($"Kote {_name} is now sleeping");
    });

    Initialize();
}
private void Commands()
{
    Command<EventEnvelope>(e =>
    {
        if (e.Event is Match m)
        {
            // update the offset
            if (e.Offset is Sequence s)
                QueryOffset = s.Value;

            if (_matchAggregate == null)
            {
                _matchAggregate = new MatchAggregate(TickerSymbol, m.SettlementPrice, m.Quantity);
                return;
            }

            if (!_matchAggregate.WithMatch(m))
                _log.Warning("Received Match for ticker symbol [{0}] - but we only accept symbols for [{1}]", m.StockId, TickerSymbol);
        }
    });

    // Command sent by a PriceViewActor to pull down a complete snapshot of active pricing data
    Command<FetchPriceAndVolume>(f =>
    {
        // no price data yet
        if (_priceUpdates.Count == 0 || _volumeUpdates.Count == 0)
            Sender.Tell(PriceAndVolumeSnapshot.Empty(TickerSymbol));
        else
            Sender.Tell(new PriceAndVolumeSnapshot(TickerSymbol, _priceUpdates.ToArray(), _volumeUpdates.ToArray()));
    });

    Command<PublishEvents>(p =>
    {
        if (_matchAggregate == null)
            return;

        var (latestPrice, latestVolume) = _matchAggregate.FetchMetrics(_timestamper);

        // Need to update pricing records prior to persisting our state, since this data
        // is included in the output of SaveAggregateData()
        _priceUpdates.Add(latestPrice);
        _volumeUpdates.Add(latestVolume);

        PersistAsync(SaveAggregateData(), snapshot =>
        {
            _log.Info("Saved latest price {0} and volume {1}", snapshot.AvgPrice, snapshot.AvgVolume);
            if (LastSequenceNr % SnapshotEveryN == 0)
                SaveSnapshot(snapshot);
        });

        // publish updates to in-memory replicas
        _mediator.Tell(new Publish(_priceTopic, latestPrice));
        _mediator.Tell(new Publish(_volumeTopic, latestVolume));
    });

    Command<Ping>(p =>
    {
        if (_log.IsDebugEnabled)
            _log.Debug("pinged via {0}", Sender);
    });

    Command<SaveSnapshotSuccess>(s =>
    {
        // clean up prior snapshots and journal events
        DeleteSnapshots(new SnapshotSelectionCriteria(s.Metadata.SequenceNr - 1));
        DeleteMessages(s.Metadata.SequenceNr);
    });
}
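The _mediator above is assumed to be the Akka.Cluster.Tools DistributedPubSub mediator; for reference, the subscribe/publish pairing looks roughly like this (a sketch, not this actor's code):

// DistributedPubSub usage sketch: subscribers register for a topic, publishers fan out to it.
var mediator = DistributedPubSub.Get(Context.System).Mediator;
mediator.Tell(new Subscribe(_priceTopic, Self));      // subscriber side
mediator.Tell(new Publish(_priceTopic, latestPrice)); // publisher side, as above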
private void Playing()
{
    Receive<PlayMovieMessage>(message => _logger.Warning($"UserActor {_id} cannot start playing another movie before stopping the existing one"));
    Receive<StopMovieMessage>(message => StopPlayingCurrentMovie());

    _logger.Info($"UserActor {_id} behavior has now become Playing");
}
/// <summary>
/// Obsolete. Use <see cref="M:Akka.Event.ILoggingAdapter.Warning(System.String,System.Object[])" /> instead!
/// </summary>
/// <param name="format">N/A</param>
/// <param name="args">N/A</param>
public virtual void Warn(string format, params object[] args)
{
    adapter.Warning(format, BuildArgs(args));
}
protected override void PreStart()
{
    _logger.Warning("ChildActor PreStart....");
}
private void Commands()
{
    AwaitingSubscription();

    Command<Match>(m => TickerSymbol.Equals(m.StockId), m =>
    {
        _log.Info("Received MATCH for {0} - price: {1} quantity: {2}", TickerSymbol, m.SettlementPrice, m.Quantity);
        if (_matchAggregate == null)
        {
            _matchAggregate = new MatchAggregate(TickerSymbol, m.SettlementPrice, m.Quantity);
            return;
        }

        if (!_matchAggregate.WithMatch(m))
        {
            // should never get here
            _log.Warning("Received Match for ticker symbol [{0}] - but we only accept symbols for [{1}]", m.StockId, TickerSymbol);
        }
    });

    // Matches for a different ticker symbol
    Command<Match>(m =>
    {
        _log.Warning("Received Match for ticker symbol [{0}] - but we only accept symbols for [{1}]", m.StockId, TickerSymbol);
    });

    // Command sent to pull down a complete snapshot of active pricing data for this ticker symbol
    Command<FetchPriceAndVolume>(f =>
    {
        // no price data yet
        if (_priceUpdates.Count == 0 || _volumeUpdates.Count == 0)
            Sender.Tell(PriceAndVolumeSnapshot.Empty(TickerSymbol));
        else
            Sender.Tell(new PriceAndVolumeSnapshot(TickerSymbol, _priceUpdates.ToArray(), _volumeUpdates.ToArray()));
    });

    Command<PublishEvents>(p =>
    {
        if (_matchAggregate == null)
            return;

        var (latestPrice, latestVolume) = _matchAggregate.FetchMetrics(_timestamper);

        // Need to update pricing records prior to persisting our state, since this data
        // is included in the output of SaveAggregateData()
        _priceUpdates.Add(latestPrice);
        _volumeUpdates.Add(latestVolume);

        PersistAsync(SaveAggregateData(), snapshot =>
        {
            _log.Info("Saved latest price {0} and volume {1}", snapshot.RecentAvgPrice, snapshot.RecentAvgVolume);
            if (LastSequenceNr % SnapshotEveryN == 0)
                SaveSnapshot(snapshot);
        });

        // publish updates to price and volume subscribers
        _marketEventPublisher.Publish(TickerSymbol, latestPrice);
        _marketEventPublisher.Publish(TickerSymbol, latestVolume);
    });

    Command<Ping>(p =>
    {
        if (_log.IsDebugEnabled)
            _log.Debug("pinged via {0}", Sender);
    });

    Command<SaveSnapshotSuccess>(s =>
    {
        // clean up prior snapshots and journal events
        DeleteSnapshots(new SnapshotSelectionCriteria(s.Metadata.SequenceNr - 1));
        DeleteMessages(s.Metadata.SequenceNr);
    });

    /*
     * Handle subscriptions directly in case we're using in-memory, local pub-sub.
     */
    CommandAsync<MarketSubscribe>(async sub =>
    {
        try
        {
            var ack = await _marketEventSubscriptionManager.Subscribe(TickerSymbol, sub.Events, sub.Subscriber);
            Context.Watch(sub.Subscriber);
            sub.Subscriber.Tell(ack);
            Sender.Tell(ack); // need this for ASK operations.
        }
        catch (Exception ex)
        {
            _log.Error(ex, "Error while processing subscription {0}", sub);
            sub.Subscriber.Tell(new MarketSubscribeNack(TickerSymbol, sub.Events, ex.Message));
        }
    });

    CommandAsync<MarketUnsubscribe>(async unsub =>
    {
        try
        {
            var ack = await _marketEventSubscriptionManager.Unsubscribe(PersistenceId, unsub.Events, unsub.Subscriber);
            // leave DeathWatch intact, in case the actor is still subscribed to additional topics
            unsub.Subscriber.Tell(ack);
            Sender.Tell(ack); // need this for ASK operations.
        }
        catch (Exception ex)
        {
            _log.Error(ex, "Error while processing unsubscribe {0}", unsub);
            unsub.Subscriber.Tell(new MarketUnsubscribeNack(TickerSymbol, unsub.Events, ex.Message));
        }
    });

    CommandAsync<Terminated>(async t =>
    {
        try
        {
            await _marketEventSubscriptionManager.Unsubscribe(TickerSymbol, t.ActorRef);
        }
        catch (Exception ex)
        {
            _log.Error(ex, "Error while processing unsubscribe for terminated subscriber {0} for symbol {1}", t.ActorRef, TickerSymbol);
        }
    });
}
private void Handle(PongTimeout msg)
{
    _log.Warning($">>> Attempt to send message [{msg.OriginalMsg}] timed out after {msg.Timeout} - no nodes responded in time");
}
private void Ready()
{
    Receive<UpdateCharacterMessage>(message => ProcessNotifyCandidateMessage(message));
    ReceiveAny(message => _logger.Warning("Unhandled message while ready {@message}", message));
}
public override void Start()
{
    log.Info("Starting remoting");

    if (_endpointManager == null)
    {
        _endpointManager = System.SystemActorOf(
            RARP.For(System).ConfigureDispatcher(
                Props.Create(() => new EndpointManager(System.Settings.Config, log)).WithDeploy(Deploy.Local)),
            EndpointManagerName);

        try
        {
            var addressPromise = new TaskCompletionSource<IList<ProtocolTransportAddressPair>>();

            // tells the EndpointManager to start all transports and bind them to listenable
            // addresses, and then set the results of this promise to include them.
            _endpointManager.Tell(new EndpointManager.Listen(addressPromise));

            addressPromise.Task.Wait(Provider.RemoteSettings.StartupTimeout);
            var akkaProtocolTransports = addressPromise.Task.Result;
            if (akkaProtocolTransports.Count == 0)
                throw new Exception("No transports enabled");

            _addresses = new HashSet<Address>(akkaProtocolTransports.Select(a => a.Address));

            // this.transportMapping = akkaProtocolTransports
            //     .ToDictionary(p => p.ProtocolTransport.Transport.SchemeIdentifier);
            IEnumerable<IGrouping<string, ProtocolTransportAddressPair>> tmp =
                akkaProtocolTransports.GroupBy(t => t.ProtocolTransport.SchemeIdentifier);
            _transportMapping = new Dictionary<string, HashSet<ProtocolTransportAddressPair>>();
            foreach (var g in tmp)
            {
                var set = new HashSet<ProtocolTransportAddressPair>(g);
                _transportMapping.Add(g.Key, set);
            }

            _defaultAddress = akkaProtocolTransports.Head().Address;
            _addresses = new HashSet<Address>(akkaProtocolTransports.Select(x => x.Address));

            log.Info("Remoting started; listening on addresses : [{0}]", string.Join(",", _addresses.Select(x => x.ToString())));

            _endpointManager.Tell(new EndpointManager.StartupFinished());
            _eventPublisher.NotifyListeners(new RemotingListenEvent(_addresses.ToList()));
        }
        catch (TaskCanceledException ex)
        {
            NotifyError("Startup was cancelled due to timeout", ex);
            throw;
        }
        catch (TimeoutException ex)
        {
            NotifyError("Startup timed out", ex);
            throw;
        }
        catch (Exception ex)
        {
            NotifyError("Startup failed", ex);
            throw;
        }
    }
    else
    {
        log.Warning("Remoting was already started. Ignoring start attempt.");
    }
}
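One sharp edge worth noting: Task.Wait(TimeSpan) returns false on timeout rather than throwing, so the code above only surfaces a slow startup later, when .Result blocks. A hedged sketch of an explicit check (hypothetical helper, not the remoting code):

// Hypothetical guard: surface a startup timeout explicitly instead of relying on
// a later .Result access to block.
static T AwaitWithTimeout<T>(Task<T> task, TimeSpan timeout, string what)
{
    if (!task.Wait(timeout))
        throw new TimeoutException($"{what} did not complete within {timeout}");
    return task.Result;
}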
private void Ready()
{
    Receive<GetClusterState>(ic =>
    {
        var clusterRoleLeaders = new List<ClusterRoleLeader>();
        foreach (var member in _clusterState.Members)
        {
            var role = member.Roles.First();
            var address = _clusterState.RoleLeader(role);
            clusterRoleLeaders.Add(new ClusterRoleLeader(role, address));
        }

        var currentClusterStatus = new CurrentClusterStatus(clusterRoleLeaders, Cluster.SelfAddress, _clusterState);
        Sender.Tell(currentClusterStatus);
    });

    Receive<ClusterEvent.CurrentClusterState>(state =>
    {
        _clusterState = state;

        // Check cluster leader
        if (state.Leader == null)
            _logger.Warning("ClusterLeader is null");
        else
            _logger.Debug("ClusterLeader : {0}", state.Leader.ToString());

        // Check role leaders
        var roles = _clusterState.Members.Where(y => y.Status == MemberStatus.Up).Select(x => x.Roles.First()).Distinct().ToList();
        foreach (var role in roles)
        {
            var address = state.RoleLeader(role);
            if (address == null)
                _logger.Warning("RoleLeader: {0}, No leader found!", role);
            else
                _logger.Debug("RoleLeader: {0}, Address:{1}", role, address);
        }

        // Check unreachable members
        foreach (var member in state.Unreachable)
        {
            _logger.Warning("Unreachable Member; Role: {0}, Status: {1}, Address: {2}", member.Roles.Join(";"), member.Status, member.Address.ToString());
        }

        // Check who this node is seen by
        foreach (var seenBy in state.SeenBy)
        {
            if (_clusterState.Members.Any(x => x.Address == seenBy))
            {
                var member = _clusterState.Members.First(x => x.Address == seenBy);
                _logger.Debug("SeenBy Members; Role: {0}, Status: {1}, Address: {2}", member.Roles.Join(";"), member.Status, member.Address.ToString());
            }
            else
            {
                // No matching member: log the address itself. (The original branch
                // called Members.First(...) here, which would have thrown because
                // no member matches in this branch by definition.)
                _logger.Debug("SeenBy Members; Role: null, Status: null, Address: {0}", seenBy.ToString());
            }
        }
    });

    Receive<SendCurrentClusterState>(ic => { Cluster.SendCurrentClusterState(Self); });

    Receive<ClusterEvent.MemberUp>(mem => { _logger.Info("MemberUp: {0}, Role(s): {1}", mem.Member, mem.Member.Roles.Join(",")); });

    Receive<ClusterEvent.UnreachableMember>(mem => { _logger.Info("UnreachableMember: {0}, Role(s): {1}", mem.Member, mem.Member.Roles.Join(",")); });

    Receive<ClusterEvent.ReachableMember>(mem => { _logger.Info("ReachableMember: {0}, Role(s): {1}", mem.Member, mem.Member.Roles.Join(",")); });

    Receive<ClusterEvent.MemberRemoved>(mem =>
    {
        _logger.Info("MemberRemoved: {0}, Role(s): {1}", mem.Member, mem.Member.Roles.Join(","));

        // Check to see if we have been removed
        if (Cluster.SelfAddress.Equals(mem.Member.Address))
            ClusterNeedsToRestart(string.Format("This member has been removed from the cluster. This system needs to be restarted. Address:{0} ", Cluster.SelfAddress));
    });

    Receive<ClusterEvent.IMemberEvent>(mem => { _logger.Info("IMemberEvent: {0}, Role(s): {1}", mem.Member, mem.Member.Roles.Join(",")); });

    Receive<ClusterEvent.ClusterShuttingDown>(cluster => { _logger.Warning("ClusterShuttingDown"); });

    ReceiveAny(task => { _logger.Error("{{EventId:999}} [x] Oh Snap! ClusterStatus.Ready.ReceiveAny: \r\n{0}", task); });
}
protected override void PostRestart(Exception reason)
{
    log.Warning("Restarted because of: {0}", reason.Message);
    base.PostRestart(reason);
}
private void Ready()
{
    // kick off the job
    Receive<IStartJob>(start =>
    {
        _logger.Info("JobWorker.Ready.IStartJob");

        // Need to reset tracking buckets.
        WorkerTracker.Tell(new WorkerTracker.ResetTrackerBuckets());

        RunningStatus = new JobStatusUpdate(Job) { Status = JobStatus.Starting };
        TotalStats = new JobStats(Job);
        RunningStatus.Stats = TotalStats;

        if (!Subscribers.Contains(start.Requestor))
            Subscribers.Add(start.Requestor);

        PublishJobStatus();
        Self.Tell(new JobCanStart(start.Job));
    });

    Receive<JobCanStart>(start =>
    {
        RunningStatus.Status = JobStatus.Running;
        CoordinatorRouter.Tell(new WorkerCoordinator.GetJobData(start.Job.JobInfo));
        Become(Started);
        Stash.UnstashAll();
    });

    // NOTE: this handler is unreachable - the handler above already matches every
    // JobCanStart. It presumably needs a predicate (e.g. "no routees yet") to take effect.
    Receive<JobCanStart>(start => { _logger.Warning("Can't start job yet. No routees."); });

    Receive<CheckJobStatus>(start => { Sender.Tell(new ReceivedJobStatus(Job, RunningStatus), Self); });

    Receive<ReceiveTimeout>(ic => { _logger.Error("JobWorker.Ready.ReceiveTimeout: \r\n{0}", ic); });

    Receive<ISubscribeToJob>(subscribe => { Stash.Stash(); });

    ReceiveAny(o =>
    {
        _logger.Error("JobWorker.Ready.ReceiveAny and stashing: \r\n{0}", o);
        Stash.Stash();
    });
}
protected override void Unhandled(object message)
{
    Log.Warning("Got unhandled message: {0}", message);
    base.Unhandled(message);
}
public TransferOperatorActor()
{
    StartWith(OperatorState.Waiting, new OperatorData());

    When(OperatorState.Waiting, state =>
    {
        switch (state.FsmEvent)
        {
            case TransmitRequest transmit:
                _log.Info("Incoming Receive Request {id} -- {Data}", GetId(state), transmit.Data);
                Parent.Tell(new IncomingDataTransfer(transmit.OperationId, new DataTransferManager(Parent), transmit.Data));
                return GoTo(OperatorState.InitReciving).Using(state.StateData.StartRecdiving(transmit));
            case DataTransferRequest request:
                _log.Info("Incoming Transfer Request {id} -- {Data}", GetId(state), request.Data);
                request.Target.Actor.Tell(new TransmitRequest(request.OperationId, Parent, request.Data), Parent);
                return GoTo(OperatorState.InitSending).Using(state.StateData.StartSending(request, Sender));
            default:
                return null;
        }
    });

    When(OperatorState.InitSending, state =>
    {
        switch (state.FsmEvent)
        {
            case BeginTransfering _:
                _log.Info("Start Transfer {Id}", GetId(state));
                try
                {
                    var newState = GoTo(OperatorState.Sending).Using(state.StateData.Open());
                    Self.Tell(new StartTrensfering(state.StateData.OperationId));
                    return newState;
                }
                catch (Exception e)
                {
                    _log.Error(e, "Open Sending Stream Failed {Id}", GetId(state));
                    return GoTo(OperatorState.Failed).Using(state.StateData.Failed(Parent, FailReason.StreamError, e.Message));
                }
            case RequestDeny _:
                _log.Info("Transfer Request Denied {Id}", state.StateData.OperationId);
                return GoTo(OperatorState.Failed).Using(state.StateData.Failed(Parent, FailReason.Deny, null));
            default:
                return null;
        }
    }, TimeSpan.FromMinutes(5));

    When(OperatorState.InitReciving, state =>
    {
        switch (state.FsmEvent)
        {
            case RequestDeny deny:
                _log.Info("Transfer Request Denied {Id}", state.StateData.OperationId);
                state.StateData.TargetManager.Tell(deny, Parent);
                return GoTo(OperatorState.Failed);
            case RequestAccept accept:
                _log.Info("Request Accepted {Id}", GetId(state));
                try
                {
                    var newState = GoTo(OperatorState.Reciving).Using(state.StateData.SetData(accept.Target, accept.TaskCompletionSource).Open());
                    state.StateData.TargetManager.Tell(new BeginTransfering(state.StateData.OperationId));
                    return newState;
                }
                catch (Exception e)
                {
                    _log.Error(e, "Open Receiving Stream Failed {Id}", GetId(state));
                    return GoTo(OperatorState.Failed).Using(state.StateData.Failed(Parent, FailReason.StreamError, e.Message));
                }
            default:
                return null;
        }
    }, TimeSpan.FromMinutes(2));

    When(OperatorState.Sending, state =>
    {
        switch (state.FsmEvent)
        {
            case SendNextChunk _:
            case StartTrensfering _:
                _outgoningBytes ??= ArrayPool<byte>.Shared.Rent(1024 * 1024);
                try
                {
                    _sendingAttempts = 0;
                    var count = state.StateData.TransferStrem.Read(_outgoningBytes, 0, _outgoningBytes.Length);
                    var last = count == 0;
                    var crc = OperatorData.Crc32.ComputeChecksum(_outgoningBytes, count);
                    state.StateData.TargetManager.Tell(
                        _lastChunk = new NextChunk(state.StateData.OperationId, _outgoningBytes, count, last, crc, state.StateData.TransferStrem.ReadCrc),
                        Parent);
                    return Stay();
                }
                catch (Exception e)
                {
                    _log.Error(e, "Error on Read Stream or Sending");
                    return GoTo(OperatorState.Failed).Using(state.StateData.Failed(Parent, FailReason.ReadError, e.Message));
                }
            case SendingCompled _:
                state.StateData.TransferStrem.Dispose();
                ArrayPool<byte>.Shared.Return(_outgoningBytes);
                _outgoningBytes = null;

                var comp = new TransferCompled(state.StateData.OperationId, state.StateData.Metadata);
                Parent.Tell(comp);
                if (state.StateData.SendBack)
                    state.StateData.Sender.Tell(comp);
                return GoTo(OperatorState.Compled);
            case RepeadChunk _:
                _sendingAttempts += 1;
                if (_sendingAttempts > 5)
                    return GoTo(OperatorState.Failed).Using(state.StateData.Failed(Parent, FailReason.ToManyResends, null));

                state.StateData.TargetManager.Tell(_lastChunk, Parent);
                return Stay();
            default:
                return null;
        }
    }, TimeSpan.FromSeconds(10));

    When(OperatorState.Reciving, state =>
    {
        switch (state.FsmEvent)
        {
            case NextChunk chunk:
                try
                {
                    var reciveCrc = OperatorData.Crc32.ComputeChecksum(chunk.Data, chunk.Count);
                    if (reciveCrc != chunk.Crc)
                    {
                        state.StateData.TargetManager.Tell(new RepeadChunk(state.StateData.OperationId), Parent);
                    }
                    else
                    {
                        if (chunk.Count > 0)
                            state.StateData.TransferStrem.Write(chunk.Data, 0, chunk.Count);

                        if (chunk.Finish)
                        {
                            var data = state.StateData;
                            try
                            {
                                if (data.TransferStrem.WriteCrc != chunk.FinishCrc)
                                    return GoTo(OperatorState.Failed).Using(state.StateData.Failed(Parent, FailReason.ComunicationError, null));

                                data.TargetManager.Tell(new SendingCompled(state.StateData.OperationId));

                                var msg = new TransferCompled(state.StateData.OperationId, state.StateData.Metadata);
                                state.StateData.Completion?.SetResult(msg);
                                Parent.Tell(msg);
                                return GoTo(OperatorState.Compled);
                            }
                            finally
                            {
                                data.TransferStrem.Dispose();
                            }
                        }

                        state.StateData.TargetManager.Tell(new SendNextChunk(state.StateData.OperationId));
                    }

                    return Stay();
                }
                catch (Exception e)
                {
                    _log.Error(e, "Error on Write Stream");
                    return GoTo(OperatorState.Failed).Using(state.StateData.Failed(Parent, FailReason.WriteError, e.Message));
                }
            case RequestDeny _:
                return Stay();
            default:
                return null;
        }
    }, TimeSpan.FromSeconds(10));

    When(OperatorState.Failed, state =>
    {
        _log.Warning("Transfer Failed {Id}", GetId(state));

        void Set(TransferMessages.TransferCompled failed)
        {
            state.StateData.Completion?.SetResult(failed);
            if (state.StateData.SendBack)
                state.StateData.Sender.Tell(failed);
            Parent.Tell(failed);
        }

        switch (state.FsmEvent)
        {
            case TransferError error:
                Set(error.ToFailed());
                break;
            case StateTimeout _:
                Set(new TransferFailed(GetId(state), FailReason.Timeout, null));
                break;
            default:
            {
                var manmesg = state.StateData.Error
                              ?? new TransferError((state.FsmEvent as TransferMessage)?.OperationId ?? state.StateData.OperationId, FailReason.CorruptState, null);
                state.StateData.TargetManager.Tell(manmesg, Parent);
                Set(manmesg.ToFailed());
                break;
            }
        }

        return Stay();
    }, TimeSpan.FromSeconds(30));

    When(OperatorState.Compled, s => null);

    OnTransition((state, nextState) =>
    {
        if (nextState != OperatorState.Failed && nextState != OperatorState.Compled)
            return;

        NextStateData.TransferStrem.Dispose();
        if (_outgoningBytes != null)
            ArrayPool<byte>.Shared.Return(_outgoningBytes);
    });

    WhenUnhandled(state =>
    {
        switch (state.FsmEvent)
        {
            case StateTimeout _:
                _log.Error("Transmission Timeout {Id}", GetId(state));
                return GoTo(OperatorState.Failed).Using(state.StateData.Failed(Parent, FailReason.Timeout, null));
            case TransferError error:
                _log.Warning("Incoming Transfer Failed {Id}", GetId(state));
                return GoTo(OperatorState.Failed).Using(state.StateData.InComingError(error));
            case DataTranfer _:
                _log.Warning("Incorrect DataTransfer Event {Id}", GetId(state));
                return GoTo(OperatorState.Failed).Using(state.StateData.Failed(Parent, FailReason.ComunicationError, null));
            default:
                _log.Warning("Unknown or incorrect message incoming {Type}", state.FsmEvent.GetType());
                return Stay();
        }
    });

    Initialize();
}
protected override SupervisorStrategy SupervisorStrategy()
{
    return new OneForOneStrategy(ex =>
    {
        var directive = Directive.Stop;

        ex.Match()
            .With<InvalidAssociation>(ia =>
            {
                KeepQuarantinedOr(ia.RemoteAddress, () =>
                {
                    var causedBy = ia.InnerException == null ? "" : string.Format("Caused by: [{0}]", ia.InnerException);
                    _log.Warning("Tried to associate with unreachable remote address [{0}]. Address is now gated for {1} ms, all messages to this address will be delivered to dead letters. Reason: [{2}] {3}",
                        ia.RemoteAddress, _settings.RetryGateClosedFor.TotalMilliseconds, ia.Message, causedBy);
                    _endpoints.MarkAsFailed(Sender, Deadline.Now + _settings.RetryGateClosedFor);
                });

                if (ia.DisassociationInfo.HasValue && ia.DisassociationInfo == DisassociateInfo.Quarantined)
                {
                    // TODO: add context.system.eventStream.publish(ThisActorSystemQuarantinedEvent(localAddress, remoteAddress))
                }

                directive = Directive.Stop;
            })
            .With<ShutDownAssociation>(shutdown =>
            {
                KeepQuarantinedOr(shutdown.RemoteAddress, () =>
                {
                    _log.Debug("Remote system with address [{0}] has shut down. Address is now gated for {1}ms, all messages to this address will be delivered to dead letters.",
                        shutdown.RemoteAddress, _settings.RetryGateClosedFor.TotalMilliseconds);
                    _endpoints.MarkAsFailed(Sender, Deadline.Now + _settings.RetryGateClosedFor);
                });

                directive = Directive.Stop;
            })
            .With<HopelessAssociation>(hopeless =>
            {
                if (hopeless.Uid.HasValue)
                {
                    _log.Error("Association to [{0}] with UID [{1}] is irrecoverably failed. Quarantining address.", hopeless.RemoteAddress, hopeless.Uid);
                    if (_settings.QuarantineDuration.HasValue)
                    {
                        _endpoints.MarkAsQuarantined(hopeless.RemoteAddress, hopeless.Uid.Value, Deadline.Now + _settings.QuarantineDuration.Value);
                        _eventPublisher.NotifyListeners(new QuarantinedEvent(hopeless.RemoteAddress, hopeless.Uid.Value));
                    }
                }
                else
                {
                    _log.Warning("Association to [{0}] with unknown UID is irrecoverably failed. Address cannot be quarantined without knowing the UID, gating instead for {1} ms.",
                        hopeless.RemoteAddress, _settings.RetryGateClosedFor.TotalMilliseconds);
                    _endpoints.MarkAsFailed(Sender, Deadline.Now + _settings.RetryGateClosedFor);
                }

                directive = Directive.Stop;
            })
            .Default(msg =>
            {
                if (msg is EndpointDisassociatedException || msg is EndpointAssociationException)
                {
                    // no logging
                }
                else
                {
                    _log.Error(ex, ex.Message);
                }

                _endpoints.MarkAsFailed(Sender, Deadline.Now + _settings.RetryGateClosedFor);
                directive = Directive.Stop;
            });

        return directive;
    });
}
protected override void PostStop()
{
    _logger.Warning("AccountActor stopped!");
    base.PostStop();
}
public Lock(string code, TimeSpan timeout, Latches latches)
{
    var code1 = code;
    _latches = latches;

    StartWith(LockState.Locked, new CodeState("", code1));

    When(LockState.Locked, evt =>
    {
        if (evt.FsmEvent is char)
        {
            var codeState = evt.StateData;
            if (codeState.Code == code1)
            {
                DoUnlock();
                return GoTo(LockState.Open).Using(new CodeState("", codeState.Code)).ForMax(timeout);
            }
        }
        else if (evt.FsmEvent.Equals("hello"))
        {
            return Stay().Replying("world");
        }
        else if (evt.FsmEvent.Equals("bey"))
        {
            return Stop(Shutdown.Instance);
        }

        return null;
    });

    When(LockState.Open, evt =>
    {
        if (evt.FsmEvent is StateTimeout)
        {
            DoLock();
            return GoTo(LockState.Locked);
        }

        return null;
    });

    WhenUnhandled(evt =>
    {
        var msg = evt.FsmEvent;
        Log.Warning($"unhandled event {msg} in state {StateName} with data {StateData}");
        latches.UnhandledLatch.Open();
        return Stay();
    });

    OnTransition((state, nextState) =>
    {
        if (state == LockState.Locked && nextState == LockState.Open)
            _latches.TransitionLatch.Open();
    });

    OnTermination(evt =>
    {
        if (evt.Reason == Shutdown.Instance && evt.TerminatedState == LockState.Locked)
        {
            // Stop() is called from the Locked state with Shutdown as the reason...
            latches.TerminatedLatch.Open();
        }
    });

    Initialize();
}
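A hypothetical driver for the Lock FSM above (surrounding system and latches assumed): feed it the code one character at a time and let the latches observe the transition.

// Assumed usage: chars are the FSM's events; "hello" exercises the Replying path.
var lockActor = system.ActorOf(Props.Create(() => new Lock("33221", TimeSpan.FromSeconds(1), latches)), "lock");
foreach (var c in "33221")
    lockActor.Tell(c); // each char is evaluated against the configured code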
/// <summary>
/// Creates a new Akka.Persistence extension.
/// </summary>
/// <param name="system">The ActorSystem that will be using Akka.Persistence</param>
/// <exception cref="NullReferenceException">
/// This exception is thrown when the default journal plugin, <c>journal.plugin</c>, is not configured.
/// </exception>
/// <remarks>
/// DO NOT CALL DIRECTLY. Will be instantiated automatically by Akka.Persistence actors.
/// </remarks>
public PersistenceExtension(ExtendedActorSystem system)
{
    _system = system;
    _system.Settings.InjectTopLevelFallback(Persistence.DefaultConfig());
    _config = system.Settings.Config.GetConfig("akka.persistence");
    _log = Logging.GetLogger(_system, this);

    _defaultJournalPluginId = new Lazy<string>(() =>
    {
        var configPath = _config.GetString("journal.plugin");
        if (string.IsNullOrEmpty(configPath))
            throw new NullReferenceException("Default journal plugin is not configured");
        return configPath;
    }, LazyThreadSafetyMode.ExecutionAndPublication);

    _defaultSnapshotPluginId = new Lazy<string>(() =>
    {
        var configPath = _config.GetString("snapshot-store.plugin");
        if (string.IsNullOrEmpty(configPath))
        {
            if (_log.IsWarningEnabled)
                _log.Warning("No default snapshot store configured! " +
                             "To configure a default snapshot-store plugin set the `akka.persistence.snapshot-store.plugin` key. " +
                             "For details see 'persistence.conf'");
            return NoSnapshotStorePluginId;
        }

        return configPath;
    }, LazyThreadSafetyMode.ExecutionAndPublication);

    _defaultInternalStashOverflowStrategy = new Lazy<IStashOverflowStrategy>(() =>
    {
        var configuratorTypeName = _config.GetString("internal-stash-overflow-strategy");
        var configuratorType = Type.GetType(configuratorTypeName);
        return ((IStashOverflowStrategyConfigurator)Activator.CreateInstance(configuratorType)).Create(_system.Settings.Config);
    });

    Settings = new PersistenceSettings(_system, _config);

    _config.GetStringList("journal.auto-start-journals").ForEach(id =>
    {
        if (_log.IsInfoEnabled)
            _log.Info("Auto-starting journal plugin `{0}`", id);
        JournalFor(id);
    });

    _config.GetStringList("snapshot-store.auto-start-snapshot-stores").ForEach(id =>
    {
        if (_log.IsInfoEnabled)
            _log.Info("Auto-starting snapshot store `{0}`", id);
        SnapshotStoreFor(id);
    });

    _recoveryPermitter = new Lazy<IActorRef>(() =>
    {
        var maxPermits = _config.GetInt("max-concurrent-recoveries");
        return _system.SystemActorOf(Akka.Persistence.RecoveryPermitter.Props(maxPermits), "recoveryPermitter");
    });
}
private void NotifyMeOnOpen()
{
    _log.Warning("My CircuitBreaker is now open, and will not close for one minute");
}
private bool Establishing(object message)
{
    ICancelable connectTimerCancelable = null;
    if (_settings.ReconnectTimeout.HasValue)
    {
        connectTimerCancelable = Context.System.Scheduler.ScheduleTellOnceCancelable(
            _settings.ReconnectTimeout.Value, Self, ReconnectTimeout.Instance, Self);
    }

    if (message is ClusterReceptionist.Contacts)
    {
        var contacts = (ClusterReceptionist.Contacts)message;
        if (contacts.ContactPoints.Count > 0)
        {
            _contactPaths = contacts.ContactPoints.Select(ActorPath.Parse).ToImmutableHashSet();
            _contacts = _contactPaths.Select(Context.ActorSelection).ToArray();
            _contacts.ForEach(c => c.Tell(new Identify(null)));
        }

        PublishContactPoints();
    }
    else if (message is ActorIdentity)
    {
        var actorIdentity = (ActorIdentity)message;
        var receptionist = actorIdentity.Subject;
        if (receptionist != null)
        {
            _log.Info("Connected to [{0}]", receptionist.Path);
            ScheduleRefreshContactsTick(_settings.RefreshContactsInterval);
            SendBuffered(receptionist);
            Context.Become(Active(receptionist));
            connectTimerCancelable?.Cancel();
            _failureDetector.HeartBeat();
        }
        else
        {
            // ok, use another instead
        }
    }
    else if (message is HeartbeatTick)
    {
        _failureDetector.HeartBeat();
    }
    else if (message is RefreshContactsTick)
    {
        SendGetContacts();
    }
    else if (message is Send)
    {
        var send = (Send)message;
        Buffer(new PublishSubscribe.Send(send.Path, send.Message, send.LocalAffinity));
    }
    else if (message is SendToAll)
    {
        var sendToAll = (SendToAll)message;
        Buffer(new PublishSubscribe.SendToAll(sendToAll.Path, sendToAll.Message));
    }
    else if (message is Publish)
    {
        var publish = (Publish)message;
        Buffer(new PublishSubscribe.Publish(publish.Topic, publish.Message));
    }
    else if (message is ReconnectTimeout)
    {
        _log.Warning("Receptionist reconnect not successful within {0}, stopping cluster client", _settings.ReconnectTimeout);
        Context.Stop(Self);
    }
    else
    {
        return ContactPointMessages(message);
    }

    return true;
}
public void OnException(Exception ex, IConnection erroredChannel)
{
    _log.Warning("handled network error from {0}: {1}", erroredChannel.RemoteHost, ex.Message);
}
public BuildingActor(DataTransferManager fileHandler)
{
    StartWith(BuildState.Waiting, new BuildData());

    When(BuildState.Waiting, evt =>
    {
        switch (evt.FsmEvent)
        {
            case TransferFailed:
                return Stay();
            case TransferMessages.TransferCompled:
                return Stay();
            case BuildRequest request:
            {
                _log.Info("Incoming Build Request {Apps}", request.AppData.Id);
                var newData = evt.StateData.Set(request);
                newData.Api.Send(new TransferRepository(newData.AppData.Repository), TimeSpan.FromMinutes(5), fileHandler, newData.Reporter.Send,
                        () => newData.Paths.RepoFile.Stream)
                    .PipeTo(Self);
                return GoTo(BuildState.Repository)
                    .Using(newData.SetListner(ActorRefs.Nobody));
            }
            default:
                return null;
        }
    });

    When(BuildState.Repository, evt =>
    {
        switch (evt.FsmEvent)
        {
            case TransferFailed fail:
                _log.Warning("Repository Transfer Failed {Name} -- {Reason}", evt.StateData.AppData.Id, fail.Reason);
                //if (fail.OperationId != evt.StateData.OperationId)
                //    return Stay();
                return GoTo(BuildState.Failing)
                    .Using(evt.StateData.SetError(fail.Reason.ToString()));
            case TransferMessages.TransferCompled c:
                _log.Info("Repository Transfer Completed {Name}", evt.StateData.AppData.Id);
                //if (c.OperationId != evt.StateData.OperationId)
                //    return Stay();
                evt.StateData.Commit = c.Data ?? "Unknown";
                return GoTo(BuildState.Extracting)
                    .ReplyingSelf(Trigger.Inst);
            default:
                return null;
        }
    }, TimeSpan.FromMinutes(5));

    When(BuildState.Extracting, evt =>
    {
        switch (evt.FsmEvent)
        {
            case Trigger:
                _log.Info("Extract Repository {Name}", evt.StateData.AppData.Id);
                var paths = evt.StateData.Paths;
                evt.StateData.Reporter.Send(DeploymentMessages.BuildExtractingRepository);
                Task.Run(() =>
                {
                    var stream = paths.RepoFile.Stream;
                    stream.Seek(0, SeekOrigin.Begin);
                    using var archive = ZipFile.Read(stream);
                    archive.ExtractAll(paths.RepoPath.FullPath, ExtractExistingFileAction.OverwriteSilently);
                }).PipeTo(Self, success: () => new Status.Success(null), failure: e => new Status.Failure(e));
                return Stay();