/// <summary>
/// Timer callback: refills the token bucket and works the backlog of throttled
/// requests in the order the requests came in (FIFO).
/// </summary>
/// <param name="o">Timer state object; unused.</param>
private void OnRefill(object o)
{
    // add tokens to the bucket; Add() appears to clamp at capacity and return
    // the new count (null when no token could be taken/added) — TODO confirm
    if (Add(ratePerSecond) < capacity)
    {
        MTurkLog.Debug("Refilled {0} tokens to throttle bucket (Current size: {1})", ratePerSecond, curTokenCount);
    }
    // work backlog in order; lock matches the enqueue lock in StartRequest
    lock (queue)
    {
        if (queue.Count > 0)
        {
            // release at most one waiter per available token
            int num = Math.Min(queue.Count, curTokenCount);
            MTurkLog.Debug("Processing {0} throttled requests from backlog (Size: {1})", num, queue.Count);
            for (int i = 0; i < num; i++)
            {
                // take a token; null means the bucket ran dry mid-loop
                if (Add(-1) != null)
                {
                    // signal waiting thread to resume sending FIFO
                    queue[0].Set();
                    queue.RemoveAt(0);
                }
            }
        }
    }
}
/// <summary>
/// Flushes the buffered SOAP message to the log (up to and including the end of
/// the SOAP envelope) and closes the wrapped sink.
/// </summary>
public override void Close()
{
    // write buffer to ILog (to the end of the soap envelope)
    string s = sb.ToString().Trim();
    int i = s.IndexOf(END_OF_ENVELOPE);
    if (i >= 0)
    {
        MTurkLog.Debug(s.Substring(0, i + END_OF_ENVELOPE.Length));
    }
    else
    {
        // Marker not found: IndexOf returned -1, and the original
        // Substring(0, -1 + END_OF_ENVELOPE.Length) either logged a garbage
        // prefix or threw ArgumentOutOfRangeException on short buffers.
        // Log the whole buffer instead.
        MTurkLog.Debug(s);
    }
    sb = null;
    _sink.Close();
}
/// <summary>
/// Starts a throttled request. If a token can be taken from the bucket the
/// request proceeds immediately; otherwise the calling thread is enqueued and
/// blocks until OnRefill hands it a token in FIFO order.
/// </summary>
public void StartRequest()
{
    // Fast path: a token was available, no throttling needed.
    if (Add(-1) != null)
    {
        return;
    }

    MTurkLog.Debug("Throttling request");
    ManualResetEvent waitHandle = new ManualResetEvent(false);
    // Same lock as OnRefill's backlog processing.
    lock (queue)
    {
        queue.Add(waitHandle);
    }
    // Block here until the refill timer signals this handle.
    waitHandle.WaitOne();
    MTurkLog.Debug("Released throttle on request");
}
/// <summary>
/// Returns a throttler for a specific service endpoint URL. Instances are
/// cached per (endpoint, capacity, rate) triple so all callers with the same
/// configuration share one bucket.
/// </summary>
/// <param name="serviceEndpoint">URL of the mechanical turk service endpoint</param>
/// <param name="capacity">Number of requests the throttler permits all at once
/// (bucket capacity)</param>
/// <param name="rate">Number of requests the throttler allows per second
/// (average long term)</param>
/// <returns>A <see cref="IRequestThrottler"/> instance</returns>
/// <exception cref="ArgumentNullException">If <paramref name="serviceEndpoint"/> is null</exception>
/// <exception cref="ArgumentException">If capacity is not positive or rate exceeds capacity</exception>
public static LeakyBucketRequestThrottler GetInstance(string serviceEndpoint, int capacity, int rate)
{
    if (serviceEndpoint == null)
    {
        throw new ArgumentNullException("serviceEndpoint", "Endpoint URL may not be null");
    }
    if (capacity <= 0)
    {
        throw new ArgumentException("Capacity must be bigger than zero", "capacity");
    }
    if (rate > capacity)
    {
        // Fixed message: this guard fires when rate EXCEEDS capacity, but the
        // original text claimed the opposite ("Rate must be bigger than capacity").
        throw new ArgumentException("Rate must not be bigger than capacity", "rate");
    }
    LeakyBucketRequestThrottler ret = null;
    string key = string.Format("{0}{1}{2}", serviceEndpoint, capacity, rate);
    // All dictionary access happens under the lock: Dictionary is not safe for
    // concurrent read/write, so the original unsynchronized ContainsKey
    // pre-check could race with a writer. TryGetValue also avoids the
    // ContainsKey + indexer double lookup.
    lock (instances)
    {
        if (!instances.TryGetValue(key, out ret))
        {
            MTurkLog.Debug("Throttling requests to {0} (Capacity: {1}. Rate: {2}/sec)", serviceEndpoint, capacity, rate);
            ret = new LeakyBucketRequestThrottler(serviceEndpoint, capacity, rate);
            instances[key] = ret;
        }
    }
    return (ret);
}
/// <summary>
/// Disposes the refill timer and removes this throttler from the shared
/// instance cache.
/// </summary>
/// <param name="disposing">True when called from Dispose(); false from a finalizer,
/// in which case managed state is not touched.</param>
private void Dispose(bool disposing)
{
    if (disposing)
    {
        lock (instances)
        {
            string key = string.Format("{0}{1}{2}", endpoint, capacity, ratePerSecond);
            // TryGetValue instead of the indexer: the original threw
            // KeyNotFoundException when Dispose was called twice (the entry
            // is already gone the second time), and Dispose must not throw.
            LeakyBucketRequestThrottler throttler;
            if (instances.TryGetValue(key, out throttler))
            {
                MTurkLog.Debug("Disposing {0}", throttler);
                instances.Remove(key);
            }
            if (timerRefill != null)
            {
                timerRefill.Dispose();
            }
        }
    }
}