Example no. 1
0
        /// <summary>
        /// Queues one asynchronous Mantis request that fetches a single page of issues and
        /// completes <paramref name="result"/> with the page contents.
        /// </summary>
        private Result <IssueData[]> ProcessIssueBatch(ElasticThreadPool pool, string projectId, string filterId, int pageNumber, int issuesInBatch, Tuplet <bool> canceled, Result <IssueData[]> result)
        {
            pool.QueueWorkItem(HandlerUtil.WithEnv(delegate {
                // TODO (steveb): use result.IsCanceled instead of shared tuple once cancellation is supported on the result object

                // bail out without issuing the remote call when the caller already canceled
                if (canceled.Item1)
                {
                    // TODO (steveb): throw a more specific exception
                    result.Throw(new Exception("unspecified error"));
                }
                else
                {
                    // a filter id restricts the query to a saved filter; otherwise page through the whole project
                    IssueData[] page = string.IsNullOrEmpty(filterId)
                        ? _service.mc_project_get_issues(_username, _password, projectId, pageNumber.ToString(), issuesInBatch.ToString())
                        : _service.mc_filter_get_issues(_username, _password, projectId, filterId, pageNumber.ToString(), issuesInBatch.ToString());
                    result.Return(page);
                }
            }, TaskEnv.Clone()));
            return result;
        }
Example no. 2
0
        public void New_TryEnqueueItem_x50_TryEnqueueConsumer_x50_ItemCount_ConsumerCount()
        {
            // Stress-test: pair up 10,000 concurrently enqueued items with 10,000 concurrently
            // enqueued consumers and verify each item is produced exactly once, consumed exactly
            // once, and the queue ends up empty on both the item and the consumer side.
            var q   = new LockFreeItemConsumerQueue <int>();
            var etp = new ElasticThreadPool(10, 10);

            // submit enqueue & dequeue work-items
            const int max   = 10000;
            int       count = max + 1;  // max consumer callbacks + 1 sentinel decrement (see below)
            var       e     = new ManualResetEvent(false);

            // checks[i] tracks the produce/consume balance of item i: +1 on produce, -1 on consume
            int[] checks = new int[max];
            for (int i = 0; i < max; ++i)
            {
                int j = i;  // stable per-iteration copy for the closures below
                etp.TryQueueWorkItem(() => {
                    // producer: mark item j produced (must go 0 -> 1), then publish it
                    int k = Interlocked.Increment(ref checks[j]);
                    Assert.AreEqual(1, k, "value for {0} was already increased", j);
                    q.TryEnqueue(j);
                });
                etp.TryQueueWorkItem(() => {
                    // consumer: register a callback; when item x arrives, mark it consumed (1 -> 0)
                    q.TryEnqueue(x => {
                        int k = Interlocked.Decrement(ref checks[x]);
                        Assert.AreEqual(0, k, "value for {0} was already decreased", x);
// ReSharper disable AccessToModifiedClosure
                        if (Interlocked.Decrement(ref count) == 0)
                        {
// ReSharper restore AccessToModifiedClosure
                            e.Set();
                        }
                    });
                });
            }
            // sentinel decrement: prevents the event from firing before all work items were submitted
            if (Interlocked.Decrement(ref count) == 0)
            {
                e.Set();
            }
            if (!e.WaitOne(TimeSpan.FromSeconds(10)))
            {
                Assert.Fail("test timed out");
            }
            // every item must have been produced and consumed exactly once
            for (int i = 0; i < max; ++i)
            {
                Assert.AreEqual(0, checks[i], "entry {0}", i);
            }
            // queue must be fully drained: no leftover items and no waiting consumers
            Assert.AreEqual(0, q.ItemCount);
            Assert.IsTrue(q.ItemIsEmpty);
            Assert.AreEqual(0, q.ConsumerCount);
            Assert.IsTrue(q.ConsumerIsEmpty);
        }
Example no. 3
0
        public void ElasticThreadPool_Fibonacci_Min_0_Max_4()
        {
            // Compute fib(30) on a pool allowed to shrink to zero threads (0 min, 4 max).
            var pool = new ElasticThreadPool(0, 4);

            int result;
            TimeSpan duration;
            FibonacciThreadPool(pool, 30, TimeSpan.Zero, out result, out duration);
            Assert.AreEqual(832040, result);

            _log.Debug("Time: " + duration);
            _log.Debug("Work items processed: " + _counter);

            // after disposal the pool must have drained its queue and released every thread
            pool.Dispose();
            Assert.AreEqual(0, pool.WorkItemCount, "WorkQueue items");
            Assert.AreEqual(0, pool.ThreadCount, "WorkQueue threads");
        }
Example no. 4
0
        public void ElasticThreadPool_Fibonacci_Min_0_Max_100_with_1ms_delay()
        {
            // Compute fib(25) with a 1ms delay per work item on a wide pool (0 min, 100 max).
            var pool = new ElasticThreadPool(0, 100);

            int result;
            TimeSpan duration;
            FibonacciThreadPool(pool, 25, TimeSpan.FromSeconds(0.001), out result, out duration);
            Assert.AreEqual(75025, result);

            _log.Debug("Time: " + duration);
            _log.Debug("Work items processed: " + _counter);

            // after disposal the pool must have drained its queue and released every thread
            pool.Dispose();
            Assert.AreEqual(0, pool.WorkItemCount, "WorkQueue items");
            Assert.AreEqual(0, pool.ThreadCount, "WorkQueue threads");
        }
Example no. 5
0
        //--- Constructors ---
        // Reads thread-pool configuration from app settings and selects the global dispatch queue:
        //   "threadpool-min" / "threadpool-max" - worker thread counts (defaults: 4 / 200)
        //   "max-stacksize"                     - optional per-thread stack size override
        //   "threadpool"                        - implementation to use ("elastic" or "legacy";
        //                                         anything else falls through to "elastic")
        static Async()
        {
            if (!int.TryParse(System.Configuration.ConfigurationManager.AppSettings["threadpool-min"], out _minThreads))
            {
                _minThreads = 4;
            }
            if (!int.TryParse(System.Configuration.ConfigurationManager.AppSettings["threadpool-max"], out _maxThreads))
            {
                _maxThreads = 200;
            }
            int maxStackSize;

            // only override the stack size when the setting parses; otherwise keep the field's default
            if (int.TryParse(System.Configuration.ConfigurationManager.AppSettings["max-stacksize"], out maxStackSize))
            {
                _maxStackSize = maxStackSize;
            }

            // check which global dispatch queue implementation to use
            int dummy;

            switch (System.Configuration.ConfigurationManager.AppSettings["threadpool"])
            {
            // NOTE: default deliberately falls through to the "elastic" case
            default:
            case "elastic":
                // capture the CLR thread pool's completion-port limits; worker threads are managed by ElasticThreadPool
                ThreadPool.GetMinThreads(out dummy, out _minPorts);
                ThreadPool.GetMaxThreads(out dummy, out _maxPorts);
                _log.DebugFormat("Using ElasticThreadPool with {0}min / {1}max", _minThreads, _maxThreads);
                var elasticThreadPool = new ElasticThreadPool(_minThreads, _maxThreads);
                GlobalDispatchQueue       = elasticThreadPool;
                _inplaceActivation        = false;
                // available threads = elastic pool headroom; available ports still come from the CLR pool
                _availableThreadsCallback = delegate(out int threads, out int ports) {
                    int dummy2;
                    ThreadPool.GetAvailableThreads(out dummy2, out ports);
                    threads = elasticThreadPool.MaxParallelThreads - elasticThreadPool.ThreadCount;
                };
                break;

            case "legacy":
                // size the CLR thread pool directly and dispatch through it
                ThreadPool.GetMinThreads(out dummy, out _minPorts);
                ThreadPool.GetMaxThreads(out dummy, out _maxPorts);
                ThreadPool.SetMinThreads(_minThreads, _minPorts);
                ThreadPool.SetMaxThreads(_maxThreads, _maxPorts);
                _log.Debug("Using LegacyThreadPool");
                GlobalDispatchQueue       = LegacyThreadPool.Instance;
                _availableThreadsCallback = ThreadPool.GetAvailableThreads;
                break;
            }
        }
Example no. 6
0
        public void ElasticThreadPool_Fibonacci_from_1_to_33_threads()
        {
            // timings[i] records the elapsed time for a pool with exactly i threads (index 0 unused)
            var timings = new TimeSpan[33];

            for (int threads = 1; threads < timings.Length; ++threads)
            {
                // fixed-size pool: min == max == thread count under test
                using (var pool = new ElasticThreadPool(threads, threads)) {
                    int result;
                    TimeSpan duration;
                    FibonacciThreadPool(pool, 30, TimeSpan.Zero, out result, out duration);
                    Assert.AreEqual(832040, result);
                    timings[threads] = duration;
                }
            }

            // report elapsed time per thread count
            _log.Debug("--- Results ---");
            for (int threads = 1; threads < timings.Length; ++threads)
            {
                _log.DebugFormat("{0,2}: {1}", threads, timings[threads]);
            }
        }
Example no. 7
0
        public void ElasticThreadPool_Multi_Staged_Fibonacci_Min_1_Max_30()
        {
            const int poolCount = 4;

            // one independent pool and one result slot per concurrent fibonacci computation
            var pools    = new ElasticThreadPool[poolCount];
            var outcomes = new Result <int> [poolCount];
            for (int i = 0; i < poolCount; ++i)
            {
                pools[i]    = new ElasticThreadPool(1, 30);
                outcomes[i] = new Result <int>(TimeSpan.MaxValue, TaskEnv.New(pools[i]));
            }

            // kick off the computations staggered one second apart and time the whole run
            var stopwatch = Stopwatch.StartNew();

            for (int i = 0; i < outcomes.Length; ++i)
            {
                _log.DebugFormat("--- FIBONACCI KICK-OFF: {0}", i);
                Fibonacci(pools[i], 30, TimeSpan.Zero, outcomes[i]);
                Thread.Sleep(TimeSpan.FromSeconds(1));
            }
            outcomes.Join(new Result(TimeSpan.MaxValue)).Wait();
            stopwatch.Stop();
            TimeSpan elapsed = stopwatch.Elapsed;

            // every staged computation must have produced fib(30)
            for (int i = 0; i < poolCount; ++i)
            {
                Assert.AreEqual(832040, outcomes[i].Value, "result {0} did not match", i);
            }
            _log.Debug("Time: " + elapsed);
            _log.Debug("Work items processed: " + _counter);

            // dispose each pool and confirm it drained its queue and released its threads
            for (int i = 0; i < poolCount; ++i)
            {
                pools[i].Dispose();
                Assert.AreEqual(0, pools[i].WorkItemCount, "WorkQueue[{0}] items", i);
                Assert.AreEqual(0, pools[i].ThreadCount, "WorkQueue[{0}] threads", i);
            }
        }
Example no. 8
0
        /// <summary>
        /// Fetches up to <paramref name="numberPerPage"/> issues starting at <paramref name="pageNumber"/>,
        /// fanning the page requests out over a small thread pool and de-duplicating the results.
        /// </summary>
        private IssueData[] RetrieveIssueData(string username, string password, string projectId, string filterId, int pageNumber, int numberPerPage)
        {
            using (ElasticThreadPool pool = new ElasticThreadPool(0, 2)) {
                var issues         = new List <IssueData>();
                var pendingBatches = new List <Result <IssueData[]> >();
                var canceled       = new Tuplet <bool>(false);

                // fan out one request per page until the requested issue count is covered
                for (int remaining = numberPerPage; remaining > 0; remaining -= MAX_ISSUES_IN_REQUEST, ++pageNumber)
                {
                    int batchSize = Math.Min(remaining, MAX_ISSUES_IN_REQUEST);
                    pendingBatches.Add(ProcessIssueBatch(pool, projectId, filterId, pageNumber, batchSize, canceled, new Result <IssueData[]>(TimeSpan.FromSeconds(30))));
                }

                // collect the batches in order, skipping any bug id already seen
                var seen = new Dictionary <string, IssueData>();
                foreach (var pending in pendingBatches)
                {
                    IssueData[] batch = pending.Wait();

                    //HACK: Workaround for Mantis's broken paging: Asking for a batch at a page number that doesnt exist
                    // will return the first page's results.
                    // This takes care of the case when the #of tickets is evenly divisible by the batch size. (i.e 100 tix, 20/page)
                    foreach (IssueData bug in batch)
                    {
                        if (!seen.ContainsKey(bug.id))
                        {
                            seen[bug.id] = bug;
                            issues.Add(bug);
                        }
                    }
                    if (batch.Length < MAX_ISSUES_IN_REQUEST)
                    {
                        // a short batch means there are no more pages; tell the remaining workers to stop
                        canceled.Item1 = true;
                        break;
                    }
                }
                return issues.ToArray();
            }
        }