/// <summary>
/// Functional "Fold". Now parallel! Repeatedly pairs adjacent elements,
/// combines each pair with <paramref name="fn"/> on the worker pool, and
/// recurses until a single value remains.
///
/// WARNING: Use only with commutative/associative functions. Also, fn must be
/// defined in such a way that fn(a, default) == a, because odd-length lists
/// are padded with default(T1).
/// </summary>
/// <typeparam name="T1">Element type.</typeparam>
/// <param name="pool">Worker pool that runs the pairwise combinations.</param>
/// <param name="source">Input values; this method no longer mutates it.</param>
/// <param name="fn">Commutative combining function with fn(a, default) == a.</param>
/// <returns>A single-element list holding the folded value (empty list for empty input).</returns>
public static List<T1> Fold<T1>(this WorkerPool pool, List<T1> source, Func<T1, T1, T1> fn)
{
    var result = new ConcurrentBag<T1>();
    if (source.Count == 1)
    {
        // Single element: combine with default, relying on fn's identity contract.
        result.Add(fn(source[0], default(T1)));
    }
    else if (source.Count > 1)
    {
        if (source.Count % 2 == 0)
        {
            var j = 0;
            while (j < source.Count)
            {
                var j1 = j; // capture a copy so each job sees its own pair index
                pool.AddJob(() => { result.Add(fn(source[j1], source[j1 + 1])); });
                j += 2;
            }
            pool.SpinWaitUntilComplete();
            // Recurse on the half-sized intermediate results as a concrete list.
            return Fold(pool, result.ToList(), fn);
        }
        // BUG FIX: the original called source.Add(default(T1)), mutating the
        // caller's list every time an odd-length list was folded. Pad a copy
        // instead so the input is left untouched.
        var padded = new List<T1>(source) { default(T1) };
        return Fold(pool, padded, fn);
    }
    return result.ToList();
}
// Runs a manager plus a worker pool in a single process, blocking until the
// manager task completes. Ctrl+C forces all workers down.
public static void RunStandalone(RuntimeConfiguration runtimeConfiguration)
{
    // NOTE(review): shouldRun is written but never read in this method — confirm
    // whether it was meant to gate a loop somewhere.
    bool shouldRun = true;
    // Create a manager thread.
    Manager manager = new Manager(runtimeConfiguration);
    // Setup the Manager thread.
    manager.Setup(runtimeConfiguration);
    // Start the Manager service thread.
    Task managerThread = Task.Run(() => manager.RunAsync());
    // Wait a few seconds for the manager to get set up.
    // NOTE(review): a fixed 1s sleep is a startup race — presumably Manager has
    // no readiness signal; confirm before relying on this timing.
    Thread.Sleep(1000);
    // Set up the workers.
    WorkerPool workerPool = new WorkerPool(runtimeConfiguration);
    workerPool.SetupAllWorkers(runtimeConfiguration);
    workerPool.RunAllWorkersAsync();
    // Console kill switch: log, tear down workers, clear the run flag.
    Console.CancelKeyPress += (s, e) =>
    {
        runtimeConfiguration.GetLoggerInstance().Error("Runner", "Overwatch", "Console kill command received. Forcing shutdown.");
        workerPool.KillAllWorkers();
        shouldRun = false;
    };
    // Block until the manager task finishes.
    Task.WaitAll(managerThread);
}
// Integration test: a 10-worker ZMQ pool must serve 100 sequential REQ clients,
// incrementing the shared counter exactly once per request.
public void workerpool_distributes_items_to_workers()
{
    var counter = 0;
    // Worker body: count the request, simulate a little work, reply "done".
    Action<byte[], IZmqSocket> proc = (data, socket) =>
    {
        Interlocked.Increment(ref counter);
        // var data = socket.Recv();
        Thread.Sleep(1); // preparing beverage
        socket.Send("done");
    };
    // Creates a wpool with 10 workers
    using (var pool = new WorkerPool(base.Context, "tcp://0.0.0.0:8000", "inproc://workerp1", proc, 10))
    {
        pool.Start();
        // Sends a 100 request for coffee
        // just like Monday 9am, at any starbucks
        for (int i = 0; i < 100; i++)
        {
            using (var req = base.Context.Req())
            {
                req.Connect("tcp://127.0.0.1:8000");
                req.Send("Venti Mocha please!");
                var res = req.RecvString();
                Assert.AreEqual("done", res);
            }
        }
        // Every one of the 100 requests must have reached a worker.
        Assert.AreEqual(100, counter);
    }
}
// End-to-end pool test: enqueue 40 "delay" jobs, let four workers drain them
// in the background, then verify every job ran.
public async Task WorkerPool()
{
    // Register job types so the service can resolve queued jobs.
    DivvyUp.RegisterJobsFromAssembly(typeof(TestJob));
    var service = new Service(new MockRedisDatabase());
    var pool = new WorkerPool(service);

    // One worker plus three more, all on the "test" queue.
    pool.AddWorker("test");
    pool.AddWorkers(3, "test");
    foreach (var worker in pool.Workers)
    {
        // Tighten the polling intervals so the test completes quickly.
        worker.CheckinInterval = 1;
        worker.DelayAfterInternalError = 0;
        worker.NoWorkCheckInterval = 0;
    }
    pool.WorkInBackground();

    for (int i = 0; i < 40; i++)
    {
        await service.Enqueue(new TestJob("delay"));
    }

    // BUG FIX: the original busy-waited in `while (DateTime.UtcNow - start < ...) { ; }`,
    // pinning a CPU core for 3.5 seconds inside an async method. An awaited
    // delay yields the thread and keeps the same wall-clock budget.
    await Task.Delay(TimeSpan.FromSeconds(3.5));

    await pool.Stop();
    Assert.Equal(40, TestJob.Count("delay"));
}
// Starts the HTTP server: builds the request worker pool, registers all URL
// prefixes (HTTPS port, HTTP port, extra bindings), then accepts contexts on
// a background task until Active is cleared. No-op if already active.
public void Start()
{
    if (Active == false)
    {
        // Request-handler pool; the meaning of the literal 10 is not visible
        // here — presumably a queue/backlog size, confirm against WorkerPool<T>.
        ThreadPool = new WorkerPool<HttpRequest>(WorkerCount, 10);
        _listener = new HttpListener();
        if (SslPort != 0)
        {
            _listener.Prefixes.Add($"https://*:{SslPort}/");
        }
        if (Port != 0)
        {
            _listener.Prefixes.Add($"http://*:{Port}/");
        }
        foreach (string binding in Bindings)
        {
            _listener.Prefixes.Add(binding);
        }
        Active = true;
        _listener.Start();
        Log("Start", "Server Started");
        // Accept loop: BeginGetContext dispatches each request to HandleContext;
        // the wait handle blocks this background task until the accept completes,
        // so only one accept is outstanding at a time.
        Task.Run(() =>
        {
            while (Active)
            {
                var result = _listener.BeginGetContext(HandleContext, _listener);
                result.AsyncWaitHandle.WaitOne();
            }
        });
    }
}
// Verifies parallel Fold with addition on both an even-length list (1..8,
// sum 36) and an odd-length list (1..11, sum 66), exercising the padding path.
public void TestFold()
{
    var eight = new List<int>();
    for (var n = 1; n <= 8; n++)
    {
        eight.Add(n);
    }

    var pool = new WorkerPool();
    pool.Start();

    var sumOfEight = pool.Fold(eight, (x, y) => x + y);
    Assert.IsTrue(sumOfEight[0] == 36);

    var eleven = new List<int>();
    for (var n = 1; n <= 11; n++)
    {
        eleven.Add(n);
    }

    var sumOfEleven = pool.Fold(eleven, (x, y) => x + y);
    Assert.IsTrue(sumOfEleven[0] == 66);
}
// Verifies parallel Zip: pairs [0..9] with its doubled counterpart and checks
// that every pair satisfies second == first * 2.
public void TestZip()
{
    var values = new List<int>();
    var doubled = new List<int>();
    for (var n = 0; n < 10; n++)
    {
        values.Add(n);
        doubled.Add(n * 2);
    }

    var pool = new WorkerPool();
    pool.Start();

    var zipped = pool.Zip(Tuple.Create(values, doubled));
    foreach (var pair in zipped)
    {
        Assert.IsTrue(pair.Item1 * 2 == pair.Item2);
    }
}
/// <summary>
/// Functional Some (any-match). Now in parallel! Returns true as soon as any
/// element satisfies the predicate; the first match clears the remaining
/// queued jobs so later elements are skipped.
/// </summary>
/// <typeparam name="T1">Element type.</typeparam>
/// <param name="pool">Worker pool that evaluates the predicate.</param>
/// <param name="source">Elements to test.</param>
/// <param name="fn">Predicate applied to each element.</param>
/// <returns>True if any element matched; false otherwise (including empty input).</returns>
public static bool Some<T1>(this WorkerPool pool, List<T1> source, Func<T1, bool> fn)
{
    // BUG FIX: the original shared a plain bool across worker threads with no
    // memory barrier, so a match set on one worker was not guaranteed to be
    // visible to the others or to the final read. Use an int flag with
    // Volatile/Interlocked semantics instead.
    var found = 0;
    var j = 0;
    while (j < source.Count)
    {
        var j1 = j; // capture a copy so each job tests its own element
        pool.AddJob(() =>
        {
            if (Volatile.Read(ref found) != 0)
            {
                return; // another worker already found a match
            }
            if (!fn(source[j1]))
            {
                return;
            }
            // First match wins: publish the flag, then drop the queued jobs.
            Interlocked.Exchange(ref found, 1);
            pool.ClearJobs();
        });
        j++;
    }
    pool.SpinWaitUntilComplete();
    return Volatile.Read(ref found) != 0;
}
// Wires the API bootstrapper to its collaborators and marks this server as a
// compute node.
public ApiBootstrapper(ComputeServer computeServer, WorkerPool workerPool, AlgorithmFactory algoFactory)
{
    this.computeServer = computeServer;
    this.workerPool = workerPool;
    this.algoFactory = algoFactory;
    serverType = ServerTypes.Compute;
}
// Simulates multi-worker step scheduling: a step becomes eligible once all its
// predecessors are complete, and free workers pick eligible steps in order.
// Returns the total number of ticks until all work drains.
public static int TotalTime(IList<string> input, int workers, int timeOffset)
{
    var steps = Parse(input);
    // Steps with no predecessors seed the pool, in id order.
    var startSteps = steps.Values.Where(s => s.IsStartStep).OrderBy(s => s.Id).ToList();
    var workerPool = new WorkerPool(workers, timeOffset);
    startSteps.ForEach(s => workerPool.AddTask(s));
    HashSet<Step> allCompleted = new HashSet<Step>();
    HashSet<Step> waitingForWorker = new HashSet<Step>();
    var time = 0;
    while (true)
    {
        // Tick the pool until at least one step completes; each WorkOne call
        // advances simulated time by one unit.
        while (true)
        {
            var completed = workerPool.WorkOne();
            time++;
            if (completed.Any())
            {
                // Successors of the finished steps become candidates.
                waitingForWorker.UnionWith(completed.SelectMany(s => s.Successors));
                foreach (var complete in completed)
                {
                    allCompleted.Add(complete);
                }
                break;
            }
        }
        // Candidates whose predecessors have all finished, in priority order.
        var preconditionsMet = waitingForWorker
            .Where(s => s.Predecessors.All(p => allCompleted.Contains(p)))
            .OrderBy(s => s)
            .ToList();
        // Keep only still-blocked candidates in the waiting set.
        waitingForWorker = waitingForWorker
            .Where(s => !s.Predecessors.All(p => allCompleted.Contains(p)))
            .ToHashSet();
        // Hand eligible steps to free workers, highest priority first.
        while (workerPool.WorkerAvailable && preconditionsMet.Any())
        {
            workerPool.AddTask(preconditionsMet.First());
            preconditionsMet.RemoveAt(0);
        }
        // Anything that did not get a worker goes back to waiting.
        if (preconditionsMet.Any())
        {
            waitingForWorker.UnionWith(preconditionsMet);
        }
        // Done when nothing is waiting and the pool has drained.
        if (!waitingForWorker.Any() && workerPool.Empty)
        {
            break;
        }
    }
    return (time);
}
// State-machine callback for work items: starts scheduled items, forwards
// running/failing notifications, and on completion either pulses waiters
// (everything done) or pulls the next item from the queue.
public void WorkItemStateChanged(IWorkItem workItem, WorkItemState previousState)
{
    OnChangedWorkItemState(workItem, previousState);
    switch (workItem.State)
    {
    case WorkItemState.Scheduled:
        // NOTE(review): lock(this) is an anti-pattern — external code holding a
        // reference to this object can take the same lock; a private lock
        // object would be safer. Confirm no external callers lock on this.
        lock (this)
        {
            // Housekeeping chores.
            ++runningItems;
            // Now start it.
            WorkerPool.BeginWork(workItem);
        }
        break;
    case WorkItemState.Running:
        OnRunningWorkItem(workItem);
        break;
    case WorkItemState.Failing:
        OnFailedWorkItem(workItem);
        break;
    case WorkItemState.Completed:
        bool allDone = false;
        lock (this)
        {
            --runningItems;
            allDone = queue.Count == 0 && runningItems == 0;
        }
        // Tell the world that the workitem has completed.
        // NOTE(review): the completion event is throttled — it fires only when
        // the queue is below half the concurrent limit AND more than 5% of
        // capacity is free, so some completions are silently dropped; also
        // queue.Count and runningItems are read here outside the lock. Confirm
        // both behaviors are intentional.
        if (queue.Count < ConcurrentLimit / 2 && (ConcurrentLimit - runningItems) * 100 / ConcurrentLimit > 5)
        {
            OnCompletedWorkItem(workItem);
        }
        // Find some more work.
        if (allDone)
        {
            // Wakeup.
            //OnAllWorkCompleted(EventArgs.Empty);
            lock (completed)
            {
                Monitor.PulseAll(completed);
            }
        }
        else
        {
            DoNextWorkItem();
        }
        break;
    }
}
// Subscribes this calculator to the pool's lifecycle events so it can hand out
// tasks and collect results.
public PrimeCalculatorImpl(Seeder seeder, WorkerPool pool)
{
    _seeder = seeder;
    _pool = pool;
    // The pool pushes finished results back and takes/frees tasks via these hooks.
    _pool.PushResult += PoolOnPushResult;
    _pool.TakeTask += PoolOnTakeTask;
    _pool.FreeTask += PoolOnFreeTask;
}
// Builds a synchronous consumer engine whose resolver produces a fresh
// IntConsumer for every consumption context.
private IConsumerEngine<int> BuildConsumerEngine()
{
    var resolver = new CallbackConsumerResolver<int>((context) => new IntConsumer());
    var strategy = new DefaultConsumerExecutionStrategy<int>(resolver);
    return new SynchronousConsumerEngine<int>(WorkerPool.Create(), strategy);
}
// Disposes the running pool (if any) and drops the reference, so Stop() is
// idempotent and a subsequent Start() builds a fresh pool.
public void Stop()
{
    if (_workerPool == null)
    {
        return;
    }
    _workerPool.Dispose();
    _workerPool = null;
}
// Test fixture setup: a real pool holding two mocked workers and a
// non-cancelled token shared by the tests.
public WorkerPoolShould()
{
    _workerPool = new WorkerPool();
    _firstWorker = new Mock<IWorker>();
    _secondWorker = new Mock<IWorker>();
    // false => the token starts in the non-cancelled state.
    _cancellationToken = new CancellationToken(false);
    _workerPool.Add(_firstWorker.Object, _secondWorker.Object);
}
// Restart semantics: tear down any running pool, then build and start a new
// one wired to this instance's endpoints and request handler.
public void Start()
{
    Stop();
    _workerPool = new WorkerPool(_context, _endpoint, _localEndpoint, OnRequestReceived, _workers);
    _workerPool.Start();
}
/// <summary>
/// Adds a batch of identical tasks to the pool's job queue.
/// </summary>
/// <param name="pool">Pool to enqueue the jobs into.</param>
/// <param name="task">The action to enqueue repeatedly.</param>
/// <param name="total">How many copies of the task to enqueue.</param>
public static void AddTasks(this WorkerPool pool, Action task, int total)
{
    for (var n = 0; n < total; n++)
    {
        pool.AddJob(task);
    }
}
// Has to be called (by GameManager) directly after building was built
// don't use start here, should not be called for building preview and has nothing to do with instanciation in general
public void EconomyInit(JobManager jobManager, WorkerPool workerPool)
{
    _jobManager = jobManager;
    _workerPool = workerPool;
    // Pre-size the worker roster to this building's capacity.
    workers = new List<Worker>(workerCapacity);
    // call virtual method of specializations
    EconomyInited();
}
// Test setup: both pools are built identically — a VolatileLong event factory,
// a fatal exception handler, and two atomic-long work handlers.
public void SetUp()
{
    WorkerPool<VolatileLong> BuildPool() =>
        new WorkerPool<VolatileLong>(
            () => new VolatileLong(),
            new FatalExceptionHandler(),
            new AtomicLongWorkHandler(),
            new AtomicLongWorkHandler());

    workpool1 = BuildPool();
    workpool2 = BuildPool();
}
// Builds the node's worker pool: one named queue per peer append-log plus the
// fixed internal queues, then enqueues the recurring jobs (log commit,
// heartbeat receive/send, snapshot creation) with their retry policies.
private void SetupPool()
{
    var names = new List<string>();
    foreach (var p in _peerManager.GetPeers())
    {
        names.Add(Queues.PeerAppendLog + p.Address);
    }
    names.Add(Queues.LogCommit);
    names.Add(Queues.HeartBeatReceiveAndCandidacy);
    names.Add(Queues.HeartBeatSend);
    names.Add(Queues.ProcessCommandQueue);
    names.Add(Queues.CreateSnapshot);
    _workers = new WorkerPool(names.ToArray());
    _workers.Start();

    // NOTE(review): the PeerAppendLog and ProcessCommandQueue queues are created
    // above but not enqueued in this method — presumably populated elsewhere;
    // confirm against the callers.

    // LogCommit
    Func<CancellationToken, Task> logCommit = LogCommit;
    _workers.Enqueue(Queues.LogCommit,
        new Job(logCommit,
            TheTrace.LogPolicy(_meAsAPeer.ShortName).RetryForeverAsync(),
            _settings.ElectionTimeoutMin.Multiply(0.2)));

    // receiving heartbeat
    Func<CancellationToken, Task> hbr = HeartBeatReceive;
    _workers.Enqueue(Queues.HeartBeatReceiveAndCandidacy,
        new Job(hbr,
            TheTrace.LogPolicy(_meAsAPeer.ShortName).RetryForeverAsync(),
            _settings.ElectionTimeoutMin.Multiply(0.2)));

    // sending heartbeat
    Func<CancellationToken, Task> hbs = HeartBeatSend;
    _workers.Enqueue(Queues.HeartBeatSend,
        new Job(hbs,
            TheTrace.LogPolicy(_meAsAPeer.ShortName).RetryForeverAsync(),
            _settings.ElectionTimeoutMin.Multiply(0.2)));

    // Applying commands received from the clients
    // Unlike the jobs above, snapshot creation retries only twice with a
    // quadratic backoff instead of retrying forever.
    Func<CancellationToken, Task> cs = CreateSnapshot;
    _workers.Enqueue(Queues.CreateSnapshot,
        new Job(cs,
            TheTrace.LogPolicy(_meAsAPeer.ShortName).WaitAndRetryAsync(2, (i) => TimeSpan.FromMilliseconds(i * i * 50)),
            _settings.ElectionTimeoutMin.Multiply(0.2)));

    TheTrace.TraceInformation($"[{_meAsAPeer.ShortName}] Setup finished.");
}
// Looks a worker pool up by name and maps it to the API model; a missing
// record maps to a null response.
public async Task<ApiWorkerPoolResponseModel> ByName(string name)
{
    WorkerPool record = await this.workerPoolRepository.ByName(name);
    if (record == null)
    {
        return null;
    }
    // EF entity -> business object -> API response model.
    return this.bolWorkerPoolMapper.MapBOToModel(this.dalWorkerPoolMapper.MapEFToBO(record));
}
// Validation helper: the name is unique when no record carries it, or when the
// record that does is the one currently being edited.
private async Task<bool> BeUniqueByName(ApiWorkerPoolRequestModel model, CancellationToken cancellationToken)
{
    WorkerPool record = await this.workerPoolRepository.ByName(model.Name);
    if (record == null)
    {
        return true;
    }
    // An update against an existing id may keep its own name.
    return this.existingRecordId != default(string) && record.Id == this.existingRecordId;
}
// Boots a 4-worker pool, then accepts REST contexts forever, registering and
// dispatching each one. This method never returns normally.
public async Task Start()
{
    // NOTE(review): the constructor flags (true, false, 0) are opaque here —
    // confirm their meaning against the WorkerPool definition.
    pool = new WorkerPool(4, true, false, 0);
    ServerContext.StartFunc();
    while (true)
    {
        RestListenerContext httpContext = await ServerContext.GetContextAsync();
        AddSession(httpContext);
        HandleContext(httpContext);
    }
}
// Copies the EF entity's scalar fields onto a fresh business object.
public virtual BOWorkerPool MapEFToBO(
    WorkerPool ef)
{
    var businessObject = new BOWorkerPool();
    businessObject.SetProperties(
        ef.Id,
        ef.IsDefault,
        ef.JSON,
        ef.Name,
        ef.SortOrder);
    return businessObject;
}
// Copies the business object's scalar fields onto a fresh EF entity.
public virtual WorkerPool MapBOToEF(
    BOWorkerPool bo)
{
    var entity = new WorkerPool();
    entity.SetProperties(
        bo.Id,
        bo.IsDefault,
        bo.JSON,
        bo.Name,
        bo.SortOrder);
    return entity;
}
// Compute-server bootstrap: resolves this server's config by name, connects to
// the (single) mediator server, sizes the worker pool from config, and wires
// the API bootstrapper.
public ComputeServer(SystemConfiguration systemConfig, string name, AlgorithmFactory algoFactory)
{
    uuid = Guid.NewGuid().ToString();
    this.name = name;
    config = systemConfig.Servers[name];
    // The mediator is located by server type among all configured servers;
    // First() throws if none is configured.
    var mediatorConfig = systemConfig.Servers.First(kvp => kvp.Value.Type == ServerTypes.Mediator).Value;
    mediator = new MediatorConnector(mediatorConfig);
    workerPool = new WorkerPool(config.PoolSize, uuid, mediator);
    //bootstrapper = new ComputeBootstrapper(this, workerPool);
    bootstrapper = new ApiBootstrapper(this, workerPool, algoFactory);
}
// Mapping a one-element entity list must yield exactly one business object.
public void MapEFToBOList()
{
    var mapper = new DALWorkerPoolMapper();

    var entity = new WorkerPool();
    entity.SetProperties("A", true, "A", "A", 1);
    var entities = new List<WorkerPool> { entity };

    List<BOWorkerPool> response = mapper.MapEFToBO(entities);

    response.Count.Should().Be(1);
}
// Micro-benchmark comparing pool-dispatched increments against a plain
// single-threaded loop. Results go to Debug output; there are no assertions.
public void TestCalculateOverhead()
{
    const int amount = 1000;
    var pool = new WorkerPool(15);
    // Shared mutable counter; AddLockedTasks presumably serializes access to the
    // closure — confirm against the WorkerPool implementation.
    var c = new Dictionary<string, int>
    {
        ["count"] = 0
    };
    pool.AddLockedTasks(() => { c["count"]++; }, amount);
    var sw = new Stopwatch();
    pool.Start();
    sw.Start();
    // Busy-wait until the queue drains — intentional here, to time completion.
    while (!pool.IsJobQueueEmpty)
    {
    }
    sw.Stop();
    Debug.WriteLine("Multi-threaded Ticks Init (ns): " + sw.ElapsedNs());
    // Second pass measures steady-state cost with the workers already spun up.
    pool.AddLockedTasks(() => { c["count"]++; }, amount);
    sw.Restart();
    while (!pool.IsJobQueueEmpty)
    {
    }
    sw.Stop();
    Debug.WriteLine("Multi-threaded Ticks Spun (ns): " + sw.ElapsedNs());
    // Single-threaded baseline: same work, no pool.
    var x = new Dictionary<string, int>
    {
        ["count"] = 0
    };
    sw.Restart();
    var i = 0;
    while (i < amount)
    {
        x["count"]++;
        i++;
    }
    sw.Stop();
    // NOTE(review): the *100 conversion assumes one Stopwatch tick == 100 ns
    // (i.e. a 10 MHz Stopwatch frequency) — confirm, as Stopwatch.Frequency is
    // hardware-dependent.
    Debug.WriteLine("Single-threaded Ticks (ns): " + sw.ElapsedTicks * 100);
}
// Message-bus composition root: fills in defaults for every optional parameter
// and wires the worker pool, module manager, dispatchers, and topic router.
public MessageBus(MessageBusCreateParameters parameters = null)
{
    parameters = parameters ?? MessageBusCreateParameters.Default;
    Id = parameters.Id ?? Guid.NewGuid().ToString();
    Logger = parameters.GetLogger() ?? new SilentLogger();
    WorkerPool = new WorkerPool(Logger, parameters.NumberOfWorkers, parameters.MaximumQueuedMessages);
    Modules = new ModuleManager(Logger);
    // Envelope ids default to a process-local incrementing generator.
    EnvelopeFactory = new EnvelopeFactory(Id, parameters.IdGenerator ?? new LocalIncrementIdGenerator());
    _subscriptionDispatcher = new SubscriptionDispatcher(Logger, parameters.AllowWildcards);
    _requestDispatcher = new RequestDispatcher(Logger, parameters.AllowWildcards);
    _participantDispatcher = new ParticipantDispatcher(Logger, parameters.AllowWildcards);
    _router = new TopicRouter();
}
// Every scalar set on the business object must round-trip onto the EF entity.
public void MapBOToEF()
{
    var mapper = new DALWorkerPoolMapper();

    var businessObject = new BOWorkerPool();
    businessObject.SetProperties("A", true, "A", "A", 1);

    WorkerPool entity = mapper.MapBOToEF(businessObject);

    entity.Id.Should().Be("A");
    entity.IsDefault.Should().Be(true);
    entity.JSON.Should().Be("A");
    entity.Name.Should().Be("A");
    entity.SortOrder.Should().Be(1);
}
// Every scalar set on the EF entity must round-trip onto the business object.
public void MapEFToBO()
{
    var mapper = new DALWorkerPoolMapper();

    var entity = new WorkerPool();
    entity.SetProperties("A", true, "A", "A", 1);

    BOWorkerPool businessObject = mapper.MapEFToBO(entity);

    businessObject.Id.Should().Be("A");
    businessObject.IsDefault.Should().Be(true);
    businessObject.JSON.Should().Be("A");
    businessObject.Name.Should().Be("A");
    businessObject.SortOrder.Should().Be(1);
}
// Throughput-test fixture: caps the CLR thread pool at the worker count, builds
// one counting-and-releasing handler per worker, and gates the ring buffer on
// the pool's sequences so producers cannot overrun the workers.
public OneToThreeReleasingWorkerPoolThroughputTest() : base(Test_Disruptor, ITERATIONS)
{
    ThreadPool.SetMaxThreads(NUM_WORKERS, NUM_WORKERS);
    for (int i = 0; i < NUM_WORKERS; i++)
    {
        counters[i] = new _Volatile.PaddedLong();
        handlers[i] = new EventCountingAndReleasingWorkHandler(counters, i);
    }
    workerPool = new WorkerPool<ValueEvent>(ringBuffer
                                            , ringBuffer.NewBarrier()
                                            , new FatalExceptionHandler()
                                            , handlers);
    ringBuffer.AddGatingSequences(workerPool.getWorkerSequences());
}
// Throughput-test fixture: per worker slot, a padded counter, a queue-based
// baseline processor, and a disruptor work handler; the ring buffer is gated
// on the pool's sequences so producers cannot overrun the workers.
public OneToThreeWorkerPoolThroughputTest() : base(Test_Disruptor, ITERATIONS)
{
    for (int i = 0; i < NUM_WORKERS; i++)
    {
        counters[i] = new _Volatile.PaddedLong();
        queueWorkers[i] = new EventCountingQueueProcessor(blockingQueue, counters, i);
        handlers[i] = new EventCountingWorkHandler(counters, i);
    }
    workerPool = new WorkerPool<ValueEvent>(ringBuffer, ringBuffer.NewBarrier(), new FatalExceptionHandler(), handlers);
    ringBuffer.AddGatingSequences(workerPool.getWorkerSequences());
}
// Throughput-test fixture (duplicate of the variant above, differing only in
// whitespace): per worker slot, a padded counter, a queue-based baseline
// processor, and a disruptor work handler; the ring buffer is gated on the
// pool's sequences.
public OneToThreeWorkerPoolThroughputTest() : base(Test_Disruptor, ITERATIONS)
{
    for (int i = 0; i < NUM_WORKERS; i++)
    {
        counters[i] = new _Volatile.PaddedLong();
        queueWorkers[i] = new EventCountingQueueProcessor(blockingQueue, counters, i);
        handlers[i] = new EventCountingWorkHandler(counters, i);
    }
    workerPool = new WorkerPool<ValueEvent>(ringBuffer, ringBuffer.NewBarrier(), new FatalExceptionHandler(), handlers);
    ringBuffer.AddGatingSequences(workerPool.getWorkerSequences());
}
// Throughput-test fixture (duplicate of the variant above, differing only in
// whitespace): caps the CLR thread pool at the worker count, builds one
// counting-and-releasing handler per worker, and gates the ring buffer on the
// pool's sequences.
public OneToThreeReleasingWorkerPoolThroughputTest() : base(Test_Disruptor, ITERATIONS)
{
    ThreadPool.SetMaxThreads(NUM_WORKERS, NUM_WORKERS);
    for (int i = 0; i < NUM_WORKERS; i++)
    {
        counters[i] = new _Volatile.PaddedLong();
        handlers[i] = new EventCountingAndReleasingWorkHandler(counters, i);
    }
    workerPool = new WorkerPool<ValueEvent>(ringBuffer
                                            , ringBuffer.NewBarrier()
                                            , new FatalExceptionHandler()
                                            , handlers);
    ringBuffer.AddGatingSequences(workerPool.getWorkerSequences());
}
// Throughput-test fixture: one padded counter, one queue-based baseline worker,
// and one disruptor work handler per worker slot; the ring buffer is gated on
// the pool's sequences so producers cannot overrun the workers.
public OneToThreeWorkerPoolThroughputTest()
{
    // The three arrays are independent, so a single loop fills all of them.
    for (var worker = 0; worker < _numWorkers; worker++)
    {
        _counters[worker] = new PaddedLong();
        _queueWorkers[worker] = new EventCountingQueueProcessor(_blockingQueue, _counters, worker);
        _handlers[worker] = new EventCountingWorkHandler(_counters, worker);
    }

    _workerPool = new WorkerPool<ValueEvent>(
        _ringBuffer,
        _ringBuffer.NewBarrier(),
        new FatalExceptionHandler(),
        _handlers);

    _ringBuffer.AddGatingSequences(_workerPool.GetWorkerSequences());
}
// A worker carries its id and a back-reference to the pool that owns it.
public Worker(string id, WorkerPool workerPool)
{
    Id = id;
    this.workerPool = workerPool;
}
// Node service backed by the shared worker pool.
public NodeServiceImpl(WorkerPool workerPool)
{
    _workerPool = workerPool;
}
/// <summary>
/// Factory method to construct a problem instance from a role specification and an existing pool of workers.
/// </summary>
/// <param name="specification">Role specification keyed by service and role ids.</param>
/// <param name="workers">Pool of workers whose capabilities are extracted.</param>
/// <returns>A ProblemInstance capturing each worker's roles, per-service availability, instruments, and DBS status.</returns>
public static ProblemInstance Construct(Dictionary<ushort, Dictionary<ushort, ushort>> specification, WorkerPool workers)
{
    Dictionary<ushort, Dictionary<ushort, bool>> roles = new Dictionary<ushort, Dictionary<ushort, bool>>(),
        availability = new Dictionary<ushort, Dictionary<ushort, bool>>(),
        instruments = new Dictionary<ushort, Dictionary<ushort, bool>>();
    Dictionary<ushort, bool> dbs = new Dictionary<ushort, bool>();
    foreach (Worker w in workers)
    {
        // BUG FIX: the original indexed roles[w.Id][r] (and availability /
        // instruments) without ever creating the inner dictionaries, which
        // throws KeyNotFoundException on the first worker. Create them first.
        roles[w.Id] = new Dictionary<ushort, bool>();
        availability[w.Id] = new Dictionary<ushort, bool>();
        instruments[w.Id] = new Dictionary<ushort, bool>();

        foreach (Constants.Role r in Constants.Role.All)
        {
            roles[w.Id][r] = w.PerformsRole(r);
        }
        for (ushort s = 0; s < Constants.MAX_SERVICES; s++)
        {
            availability[w.Id][s] = w.IsAvailableOn(s);
        }
        foreach (Constants.Instrument i in Constants.Instrument.All)
        {
            instruments[w.Id][i] = w.PlaysInstrument(i);
        }
        dbs[w.Id] = w.DbsCheck;
    }
    return new ProblemInstance(specification, roles, availability, instruments, dbs);
}
// Wires the compute bootstrapper to the server and its worker pool.
public ComputeBootstrapper(ComputeServer computeServer, WorkerPool workerPool)
{
    this.computeServer = computeServer;
    this.workerPool = workerPool;
}