Example #1
0
        public void ElasticThreadPool_Fibonacci_Min_0_Max_1()
        {
            // Compute fib(30) on a pool that may shrink to zero idle threads and
            // never grows beyond a single worker; fib(30) == 832040.
            var pool = new ElasticThreadPool(0, 1);
            int fib;
            TimeSpan duration;

            FibonacciThreadPool(pool, 30, TimeSpan.Zero, out fib, out duration);
            Assert.AreEqual(832040, fib);

            _log.Debug("Time: " + duration);
            _log.Debug("Work items processed: " + _counter);

            // after disposal the pool must have drained its queue and released all threads
            pool.Dispose();
            Assert.AreEqual(0, pool.WorkItemCount, "WorkQueue items");
            Assert.AreEqual(0, pool.ThreadCount, "WorkQueue threads");
        }
Example #2
0
 public void ElasticThreadPool_Fibonacci_from_1_to_33_threads()
 {
     // Measure fib(30) wall-clock time for fixed pool sizes 1..33
     // (min == max pins the pool at exactly 'i' threads).
     //
     // BUG FIX: the array previously held 33 slots and the loop ran while
     // i < Length, so a pool of 33 threads was never exercised despite the
     // test name; sizing the array to 34 makes the loop cover 1..33 inclusive.
     var throughputs = new TimeSpan[34];
     for(int i = 1; i < throughputs.Length; ++i) {
         using(var stp = new ElasticThreadPool(i, i)) {
             int value;
             TimeSpan elapsed;
             FibonacciThreadPool(stp, 30, TimeSpan.Zero, out value, out elapsed);
             Assert.AreEqual(832040, value);
             throughputs[i] = elapsed;
         }
     }

     // report all measurements once every run has completed
     _log.Debug("--- Results ---");
     for(int i = 1; i < throughputs.Length; ++i) {
         _log.DebugFormat("{0,2}: {1}", i, throughputs[i]);
     }
 }
        public void New_TryEnqueueItem_x50_TryEnqueueConsumer_x50_ItemCount_ConsumerCount()
        {
            // Pair 10,000 enqueued items with 10,000 enqueued consumers on a
            // lock-free queue, driven concurrently by a 10-thread pool, and verify
            // that every item is consumed exactly once and the queue ends up empty.
            var q = new LockFreeItemConsumerQueue<int>();

            // FIX: the pool and the wait handle were never disposed; 'using'
            // guarantees cleanup even when an assertion throws.
            using(var etp = new ElasticThreadPool(10, 10))
            using(var e = new ManualResetEvent(false)) {

                // submit enqueue & dequeue work-items
                const int max = 10000;

                // 'max' consumer callbacks plus the final decrement below must
                // together bring this to zero before the event is signaled
                int count = max + 1;
                int[] checks = new int[max];
                for(int i = 0; i < max; ++i) {
                    int j = i;

                    // producer: marks its slot (must go 0 -> 1 exactly once), then enqueues the item
                    etp.TryQueueWorkItem(() => {
                        int k = Interlocked.Increment(ref checks[j]);
                        Assert.AreEqual(1, k, "value for {0} was already increased", j);
                        q.TryEnqueue(j);
                    });

                    // consumer: receives some item x and unmarks its slot (must go 1 -> 0 exactly once)
                    etp.TryQueueWorkItem(() => {
                        q.TryEnqueue(x => {
                            int k = Interlocked.Decrement(ref checks[x]);
                            Assert.AreEqual(0, k, "value for {0} was already decreased", x);
            // ReSharper disable AccessToModifiedClosure
                            if(Interlocked.Decrement(ref count) == 0) {
            // ReSharper restore AccessToModifiedClosure
                                e.Set();
                            }
                        });
                    });
                }

                // the extra initial count prevents a premature Set() while work
                // items are still being submitted; release it now
                if(Interlocked.Decrement(ref count) == 0) {
                    e.Set();
                }
                if(!e.WaitOne(TimeSpan.FromSeconds(10))) {
                    Assert.Fail("test timed out");
                }

                // every slot must have been incremented and decremented exactly once
                for(int i = 0; i < max; ++i) {
                    Assert.AreEqual(0, checks[i], "entry {0}", i);
                }
                Assert.AreEqual(0, q.ItemCount);
                Assert.IsTrue(q.ItemIsEmpty);
                Assert.AreEqual(0, q.ConsumerCount);
                Assert.IsTrue(q.ConsumerIsEmpty);
            }
        }
Example #4
0
        public void ElasticThreadPool_Multi_Staged_Fibonacci_Min_1_Max_30()
        {
            const int test = 4;

            // set up one elastic pool (1..30 threads) and one pending result per stage
            var pools = new ElasticThreadPool[test];
            var outcomes = new Result<int>[test];
            for(int i = 0; i < test; ++i) {
                pools[i] = new ElasticThreadPool(1, 30);
                outcomes[i] = new Result<int>(TimeSpan.MaxValue, TaskEnv.New(pools[i]));
            }

            // kick off the staged fib(30) computations one second apart, then wait for all of them
            var stopwatch = Stopwatch.StartNew();
            for(int i = 0; i < outcomes.Length; ++i) {
                _log.DebugFormat("--- FIBONACCI KICK-OFF: {0}", i);
                Fibonacci(pools[i], 30, TimeSpan.Zero, outcomes[i]);
                Thread.Sleep(TimeSpan.FromSeconds(1));
            }
            outcomes.Join(new Result(TimeSpan.MaxValue)).Wait();
            stopwatch.Stop();
            TimeSpan elapsed = stopwatch.Elapsed;

            // every stage must have produced fib(30) == 832040
            for(int i = 0; i < test; ++i) {
                Assert.AreEqual(832040, outcomes[i].Value, "result {0} did not match", i);
            }
            _log.Debug("Time: " + elapsed);
            _log.Debug("Work items processed: " + _counter);

            // dispose each pool and verify it drained cleanly
            for(int i = 0; i < test; ++i) {
                pools[i].Dispose();
                Assert.AreEqual(0, pools[i].WorkItemCount, "WorkQueue[{0}] items", i);
                Assert.AreEqual(0, pools[i].ThreadCount, "WorkQueue[{0}] threads", i);
            }
        }
Example #5
0
        public void ElasticThreadPool_Fibonacci_Min_0_Max_100_with_1ms_delay()
        {
            // Compute fib(25) with a 1ms artificial delay per work item on a pool
            // that can balloon to 100 threads; fib(25) == 75025.
            var pool = new ElasticThreadPool(0, 100);
            int fib;
            TimeSpan duration;

            FibonacciThreadPool(pool, 25, TimeSpan.FromSeconds(0.001), out fib, out duration);
            Assert.AreEqual(75025, fib);

            _log.Debug("Time: " + duration);
            _log.Debug("Work items processed: " + _counter);

            // after disposal the pool must have drained its queue and released all threads
            pool.Dispose();
            Assert.AreEqual(0, pool.WorkItemCount, "WorkQueue items");
            Assert.AreEqual(0, pool.ThreadCount, "WorkQueue threads");
        }
        private Result<IssueData[]> ProcessIssueBatch(ElasticThreadPool pool,  string projectId, string filterId, int pageNumber, int issuesInBatch, Tuplet<bool> canceled, Result<IssueData[]> result) {

            // Fetch one page of issues asynchronously on the given pool and deliver
            // it through 'result'; a set cancellation flag aborts with an error instead.
            pool.QueueWorkItem(HandlerUtil.WithEnv(delegate {

                // TODO (steveb): use result.IsCanceled instead of shared tuple once cancellation is supported on the result object

                // bail out early if the overall request was canceled
                if(canceled.Item1) {

                    // TODO (steveb): throw a more specific exception
                    result.Throw(new Exception("unspecified error"));
                    return;
                }

                // a filter id selects the filtered query; otherwise fetch the whole project
                IssueData[] issuesForBatch = string.IsNullOrEmpty(filterId)
                    ? _service.mc_project_get_issues(_username, _password, projectId, pageNumber.ToString(), issuesInBatch.ToString())
                    : _service.mc_filter_get_issues(_username, _password, projectId, filterId, pageNumber.ToString(), issuesInBatch.ToString());
                result.Return(issuesForBatch);
            }, TaskEnv.Clone()));
            return result;
        }
        private IssueData[] RetrieveIssueData(string username, string password, string projectId, string filterId, int pageNumber, int numberPerPage) {

            // Retrieve up to 'numberPerPage' issues starting at 'pageNumber',
            // fetching pages concurrently (at most two in flight) and
            // de-duplicating by issue id.
            using(var pool = new ElasticThreadPool(0, 2)) {
                var issues = new List<IssueData>();
                var pending = new List<Result<IssueData[]>>();
                var canceled = new Tuplet<bool>(false);

                // fan out: one batch request per page until the quota is covered
                for(int issuesRemaining = numberPerPage; issuesRemaining > 0; issuesRemaining -= MAX_ISSUES_IN_REQUEST, ++pageNumber) {
                    int issuesInBatch = Math.Min(issuesRemaining, MAX_ISSUES_IN_REQUEST);
                    pending.Add(ProcessIssueBatch(pool, projectId, filterId, pageNumber, issuesInBatch, canceled, new Result<IssueData[]>(TimeSpan.FromSeconds(30))));
                }

                // fan in: collect the batches in request order
                var seen = new Dictionary<string, IssueData>();
                foreach(var pendingBatch in pending) {
                    IssueData[] batch = pendingBatch.Wait();

                    //HACK: Workaround for Mantis's broken paging: Asking for a batch at a page number that doesnt exist
                    // will return the first page's results.
                    // This takes care of the case when the #of tickets is evenly divisible by the batch size. (i.e 100 tix, 20/page)
                    foreach(IssueData bug in batch) {
                        if(!seen.ContainsKey(bug.id)) {
                            seen[bug.id] = bug;
                            issues.Add(bug);
                        }
                    }

                    // a short batch means we ran past the last page; cancel the rest
                    if(batch.Length < MAX_ISSUES_IN_REQUEST) {
                        canceled.Item1 = true;
                        break;
                    }
                }
                return issues.ToArray();
            }
        }