/// <summary>
/// Runs the connection broker process. The broker owns the table of active
/// peer connections and multiplexes connection requests onto them, creating
/// new connections on demand and evicting the least-recently-used connection
/// when the limit is exceeded.
/// </summary>
/// <param name="node">This nodes information</param>
/// <param name="maxconnections">The maximum number of connections to allow</param>
/// <returns>An awaitable task.</returns>
public static Task RunAsync(PeerInfo node, int maxconnections = 50)
{
    // The primary table for finding peers: endpoint -> (connection task, request channel).
    // Note: a null tuple value is a valid state (a connection being registered externally).
    var peers = new Dictionary<EndPoint, Tuple<Task, IWriteChannel<ConnectionRequest>>>();
    // The peers listed by key
    var peersbykey = new Dictionary<Key, EndPoint>();
    // The MRU cache of peers; an overflow on Add tells us which connection to close
    var mrucache = new MRUCache<EndPoint, Key>(maxconnections, TimeSpan.FromDays(10));

    return AutomationExtensions.RunTask(
        new
        {
            Request = Channels.ConnectionBrokerRequests.ForRead,
            Registrations = Channels.ConnectionBrokerRegistrations.ForRead,
            Stats = Channels.ConnectionBrokerStats.ForRead,
            SelfHandler = Channels.RemoteRequests.ForWrite,
            Routing = Channels.RoutingTableRequests.ForWrite
        },

        async self =>
        {
            log.Debug($"Broker is now running");
            while (true)
            {
                log.Debug($"Broker is waiting for requests ...");
                var mreq = await MultiChannelAccess.ReadFromAnyAsync(
                    self.Stats.RequestRead(),
                    self.Registrations.RequestRead(),
                    self.Request.RequestRead()
                );

                if (mreq.Channel == self.Stats)
                {
                    // Stat request: report table sizes and channel profiling data
                    log.Debug($"Broker got stat request");
                    var req = (IWriteChannel<ConnectionStatsResponse>)mreq.Value;
                    await req.WriteAsync(new ConnectionStatsResponse()
                    {
                        EndPoints = peers.Count,
                        Keys = peersbykey.Count,
                        Stats = (Channels.ConnectionBrokerRequests.Get() as ProfilingChannel<ConnectionRequest>)?.ReportStats()
                    });
                }
                else if (mreq.Channel == self.Registrations)
                {
                    var req = (ConnectionRegistrationRequest)mreq.Value;
                    log.Debug($"Broker got {(req.IsTerminate ? "termination" : "registration")} request");

                    if (req.IsTerminate)
                    {
                        // BUGFIX: all uses of req.Peer are now inside the null guard;
                        // the original dereferenced req.Peer.Address after only
                        // guarding the MRU-cache removal
                        if (req.Peer != null)
                        {
                            if (req.Peer.Address != null)
                            {
                                // Make sure we do not have stale stuff in the MRU cache
                                mrucache.Remove(req.Peer.Address);

                                // Only drop the table entries if the terminating channel
                                // is the one we actually have registered
                                if (peers.TryGetValue(req.Peer.Address, out var c) && c != null && c.Item2 == req.Channel)
                                {
                                    peers.Remove(req.Peer.Address);
                                    if (req.Peer.Key != null)
                                        peersbykey.Remove(req.Peer.Key);
                                }
                            }

                            if (req.UpdateRouting)
                            {
                                log.Debug($"Removing peer in routing table due to termination of connection {req.Peer.Key} - {req.Peer.Address}");
                                await self.Routing.RemovePeerAsync(req.Peer.Key);
                            }
                        }
                    }
                    else
                    {
                        // BUGFIX: test c == null BEFORE touching c.Item2; the original
                        // wrote (c.Item2 == req.Channel || c == null), which throws a
                        // NullReferenceException for the very null entries the body
                        // below is designed to repair
                        if (req.Peer != null && req.Peer.Address != null
                            && peers.TryGetValue(req.Peer.Address, out var c)
                            && (c == null || c.Item2 == req.Channel))
                        {
                            // Fill in a placeholder entry with the registered channel
                            if (c == null)
                                peers[req.Peer.Address] = new Tuple<Task, IWriteChannel<ConnectionRequest>>(null, req.Channel);

                            // BUGFIX: guard against a null key before using it as a
                            // dictionary key (ContainsKey(null) throws)
                            if (req.Peer.Key != null && !peersbykey.ContainsKey(req.Peer.Key))
                                peersbykey[req.Peer.Key] = req.Peer.Address;
                        }

                        if (req.UpdateRouting)
                        {
                            log.Debug($"Adding new peer to routing table {req.Peer.Key} - {req.Peer.Address}");
                            await self.Routing.AddPeerAsync(req.Peer.Key, req.Peer);
                        }
                    }
                }
                else
                {
                    var req = (ConnectionRequest)mreq.Value;
                    log.Debug($"Broker got connection request for {req.EndPoint}");

                    // Check if we request ourselves
                    if (node.Key.Equals(req.Key) || node.Address.Equals(req.EndPoint))
                    {
                        log.Debug($"Broker got self-request, forwarding to owner");
                        await self.SelfHandler.WriteAsync(req);
                        continue;
                    }

                    Tuple<Task, IWriteChannel<ConnectionRequest>> peer = null;
                    try
                    {
                        // Existing connection, update MRU; a non-null overflow is the
                        // endpoint that was evicted to make room
                        var overflow = mrucache.Add(req.EndPoint, req.Key);

                        // If we have too many connections, kill one now
                        if (overflow != null)
                        {
                            // We could make this also take the closest k peers into account
                            // BUGFIX: log the evicted endpoint (the original logged
                            // req.EndPoint) and guard the lookup so an MRU entry
                            // without a matching peer table entry cannot throw
                            log.Debug($"Broker has too many connections, closing {overflow}");
                            if (peers.TryGetValue(overflow, out var victim) && victim != null)
                                await victim.Item2.RetireAsync();
                        }

                        if (!peers.TryGetValue(req.EndPoint, out peer))
                        {
                            log.Debug($"Broker is starting a connection to {req.EndPoint}");
                            // NOTE: the endpoint was already added to the MRU cache
                            // above, so the original's second Add here was redundant
                            peer = peers[req.EndPoint] = PeerConnection.CreatePeer(
                                node,
                                new PeerInfo(req.Key, req.EndPoint),
                                () => ConnectToPeerAsync(req.EndPoint),
                                REQ_BUFFER_SIZE
                            );
                            if (req.Key != null)
                                peersbykey[req.Key] = req.EndPoint;
                        }

                        await peer.Item2.WriteAsync(req);
                    }
                    catch (Exception ex)
                    {
                        log.Warn("Failed to send request to peer", ex);

                        // Best-effort: tell the requester why the connection failed
                        try { await req.Response.WriteAsync(new ConnectionResponse() { Exception = ex }); }
                        catch (Exception ex2) { log.Warn("Failed to write failure response", ex2); }

                        if (peer != null)
                        {
                            try { peer.Item2.AsWriteOnly().Dispose(); }
                            catch (Exception ex2) { log.Warn("Failed to terminate write channel", ex2); }

                            // BUGFIX: peer.Item1 can legitimately be null for entries
                            // created through registration; do not await a null task
                            try { if (peer.Item1 != null) await peer.Item1; }
                            catch (Exception ex2) { log.Warn("Peer connection stopped with error", ex2); }
                        }

                        // BUGFIX: also drop the secondary lookup entries so the broker
                        // state stays consistent (the original left stale peersbykey
                        // and MRU entries behind)
                        peers.Remove(req.EndPoint);
                        mrucache.Remove(req.EndPoint);
                        if (req.Key != null)
                            peersbykey.Remove(req.Key);
                    }
                }
            }
        }
    );
}
/// <summary>
/// Runs the MRU cache process, which serves add/get/expire requests from a
/// short-term bounded cache backed by an unbounded long-term store.
/// </summary>
/// <returns>An awaitable task.</returns>
/// <param name="selfinfo">This peer's information</param>
/// <param name="storesize">The size of the MRU store</param>
/// <param name="maxage">The maximum amount of time items are stored</param>
/// <param name="buffersize">The size of the parallel processing buffer</param>
private static Task RunMRUAsync(PeerInfo selfinfo, int storesize, TimeSpan maxage, int buffersize)
{
    // Internal channel used by StoreLongTermAsync to hand values back
    // to this process for long-term storage
    var storechan = Channel.Create<MRUInternalStore>();

    return (AutomationExtensions.RunTask(new
    {
        Request = Channels.MRURequests.ForRead,
        Routing = Channels.RoutingTableRequests.ForWrite,
        Stats = Channels.MRUStats.ForRead,
        Store = storechan.AsRead()
    },
    async self =>
    {
        // Short-term cache, bounded by storesize; "store" is the long-term
        // holder and is bounded only by the max age
        var cache = new MRUCache<Key, byte[]>(storesize, maxage);
        var store = new MRUCache<Key, byte[]>(int.MaxValue, maxage);

        log.Debug($"Store is now running");

        // Set up a shared error handler for logging and reporting errors
        Func<MRURequest, Exception, Task> errorHandler = async (req, ex) =>
        {
            log.Warn("Failed to process request, sending error", ex);
            try { await req.SendResponseAsync(ex); }
            catch (Exception ex2) { log.Warn("Failed to forward error report", ex2); }
        };

        // The task pool limits how many requests are processed in parallel
        using (var tp = new TaskPool<MRURequest>(buffersize, errorHandler))
            while (true)
            {
                log.Debug($"Store is waiting for requests ...");

                // Wait for whichever input becomes ready first
                var mreq = await MultiChannelAccess.ReadFromAnyAsync(
                    self.Stats.RequestRead(),
                    self.Store.RequestRead(),
                    self.Request.RequestRead()
                );

                if (mreq.Channel == self.Stats)
                {
                    // Stat request: report combined item count, oldest entry,
                    // total payload size, and channel profiling data
                    log.Debug($"Store got stat request");
                    var r = (IWriteChannel<MRUStatResponse>)mreq.Value;
                    await r.WriteAsync(new MRUStatResponse()
                    {
                        Items = cache.Count + store.Count,
                        Oldest = new DateTime(Math.Min(cache.OldestItem.Ticks, store.OldestItem.Ticks)),
                        Size = cache.Select(x => x.Value.Length).Sum() + store.Select(x => x.Value.Length).Sum(),
                        Stats = (Channels.MRURequests.Get() as ProfilingChannel<MRURequest>)?.ReportStats()
                    });
                    continue;
                }

                if (mreq.Channel == self.Store)
                {
                    // Internal long-term store request (from StoreLongTermAsync)
                    var sreq = (MRUInternalStore)mreq.Value;
                    log.Debug($"Store got internal store request");

                    // Kept for the disabled broadcast path below: true when the key
                    // is new to the long-term store and peers are attached
                    var shouldBroadCast = sreq.Peers != null && !store.TryGetValue(sreq.Key, out _);
                    store.Add(sreq.Key, sreq.Data);

                    // We currently rely on the injector to broadcast,
                    // If we enable this, we need some logic to figure out
                    // the source of the Add, to both allow re-insertion
                    // and avoid repeated broadcasts if two peers determine
                    // they are *the* handling peer
                    //if (shouldBroadCast)
                    //await tp.Run(new MRURequest() { }, () => BroadcastValueAsync(selfinfo, sreq));
                    continue;
                }

                var req = (MRURequest)mreq.Value;
                log.Debug($"Store got request {req.Operation}");
                try
                {
                    switch (req.Operation)
                    {
                        case MRUOperation.Add:
                        {
                            // Always store it in our cache
                            cache.Add(req.Key, req.Data);

                            // Process long-term if needed
                            await tp.Run(req, () => StoreLongTermAsync(selfinfo, self.Routing, storechan.AsWrite(), req.Key, req.Data));

                            // Respond that we completed
                            await tp.Run(req, () => req.SendResponseAsync(req.Key, null));
                            break;
                        }

                        case MRUOperation.Get:
                        {
                            // Look in the short-term cache first, then the long-term store
                            var res = cache.TryGetValue(req.Key, out var data);
                            if (!res)
                            {
                                res = store.TryGetValue(req.Key, out data);
                            }
                            await tp.Run(req, () => req.SendResponseAsync(req.Key, data, res));
                            break;
                        }

                        case MRUOperation.Expire:
                            // Drop anything older than maxage from both holders
                            cache.ExpireOldItems();
                            store.ExpireOldItems();
                            await tp.Run(req, () => req.SendResponseAsync(null, null));
                            break;

                        default:
                            throw new Exception($"Unable to handle request with type {req.Operation}");
                    }

                    log.Debug($"Store completed request {req.Operation}");
                }
                catch (Exception ex)
                {
                    await errorHandler(req, ex);
                }
            }
    }));
}