public void PassesTicksStraightThrough()
{
    // Non-blocking enqueueable: MoveNext does not wait for data and yields a null Current when empty.
    var subject = new EnqueueableEnumerator<Tick>();

    var timestamp = new DateTime(2015, 10, 08);

    // With nothing queued, MoveNext still reports true (until Stop is called) with a null Current
    Assert.IsTrue(subject.MoveNext());
    Assert.IsNull(subject.Current);

    // first tick passes straight through
    var firstTick = new Tick(timestamp, Symbols.SPY, 199.55m, 199, 200) { Quantity = 10 };
    subject.Enqueue(firstTick);
    Assert.IsTrue(subject.MoveNext());
    Assert.AreEqual(firstTick, subject.Current);

    // queue drained again: back to true/null
    Assert.IsTrue(subject.MoveNext());
    Assert.IsNull(subject.Current);

    // second tick also passes straight through
    var secondTick = new Tick(timestamp, Symbols.SPY, 199.56m, 199.21m, 200.02m) { Quantity = 5 };
    subject.Enqueue(secondTick);
    Assert.IsTrue(subject.MoveNext());
    Assert.AreEqual(secondTick, subject.Current);

    // after Stop the enumerator reports the end of the stream
    subject.Stop();
    Assert.IsFalse(subject.MoveNext());
    Assert.IsNull(subject.Current);
}
/// <summary>
/// Sets up a new <see cref="Subscription"/> which will consume a blocking <see cref="EnqueueableEnumerator{T}"/>
/// that will be fed by a worker task scheduled on the <see cref="WeightedWorkScheduler"/>
/// </summary>
/// <param name="request">The subscription data request</param>
/// <param name="enumerator">The data enumerator stack</param>
/// <returns>A new subscription instance ready to consume</returns>
public static Subscription CreateAndScheduleWorker(
    SubscriptionRequest request,
    IEnumerator<BaseData> enumerator)
{
    var exchangeHours = request.Security.Exchange.Hours;
    // 'true' => blocking enumerator: the consumer's MoveNext waits until data is enqueued or Stop is called
    var enqueueable = new EnqueueableEnumerator<SubscriptionData>(true);
    var timeZoneOffsetProvider = new TimeZoneOffsetProvider(request.Security.Exchange.TimeZone, request.StartTimeUtc, request.EndTimeUtc);
    var subscription = new Subscription(request, enqueueable, timeZoneOffsetProvider);

    // Producer: returns true while there is (potentially) more work pending (batch limit hit),
    // false when the source is exhausted, errored, or the subscription was removed.
    Func<int, bool> produce = (workBatchSize) =>
    {
        try
        {
            var count = 0;
            while (enumerator.MoveNext())
            {
                // subscription has been removed, no need to continue enumerating
                if (enqueueable.HasFinished)
                {
                    enumerator.DisposeSafely();
                    return (false);
                }

                var subscriptionData = SubscriptionData.Create(subscription.Configuration, exchangeHours,
                    subscription.OffsetProvider, enumerator.Current);

                // drop the data into the back of the enqueueable
                enqueueable.Enqueue(subscriptionData);

                count++;
                // stop executing if added more data than the work batch size, we don't want to fill the ram
                if (count > workBatchSize)
                {
                    return (true);
                }
            }
        }
        catch (Exception exception)
        {
            Log.Error(exception, $"Subscription worker task exception {request.Configuration}.");
        }

        // we made it here because MoveNext returned false or we exploded, stop the enqueueable
        // so the consumer's MoveNext can return false instead of blocking forever
        enqueueable.Stop();
        // we have to dispose of the enumerator
        enumerator.DisposeSafely();
        return (false);
    };

    WeightedWorkScheduler.Instance.QueueWork(produce,
        // weight callback: if the subscription finished we return 0, so the work is prioritized and gets removed
        () => enqueueable.HasFinished ? 0 : enqueueable.Count);

    return (subscription);
}
/// <summary>
/// Schedules a work item on the parallel runner controller that drains <paramref name="enumerator"/>
/// into <paramref name="enqueueable"/> in bounded batches, rescheduling itself while more data remains.
/// </summary>
/// <param name="subscription">The subscription whose configuration/offset provider is used to wrap data</param>
/// <param name="enumerator">The source data enumerator</param>
/// <param name="enqueueable">The destination blocking enumerator consumed elsewhere</param>
/// <param name="lowerThreshold">Work item only runs while the queue holds fewer than this many items; also caps items emitted per run</param>
/// <param name="upperThreshold">Stop the current run once the queue grows past this many items</param>
/// <param name="firstLoopCount">Smaller batch for the very first run so the consumer spools up quickly</param>
private void ScheduleEnumerator(Subscription subscription, IEnumerator<BaseData> enumerator,
    EnqueueableEnumerator<SubscriptionData> enqueueable, int lowerThreshold, int upperThreshold, int firstLoopCount = 5)
{
    // schedule the work on the controller
    var security = subscription.Security;
    var configuration = subscription.Configuration;
    var firstLoop = true;
    // declared before assignment so the lambda below can reschedule the same work item
    FuncParallelRunnerWorkItem workItem = null;
    workItem = new FuncParallelRunnerWorkItem(
        // only run the work while the consumer is close to draining the queue
        () => enqueueable.Count < lowerThreshold,
        () =>
    {
        var count = 0;
        while (enumerator.MoveNext())
        {
            // subscription has been removed, no need to continue enumerating
            if (enqueueable.HasFinished)
            {
                enumerator.Dispose();
                return;
            }

            var subscriptionData = SubscriptionData.Create(configuration, security.Exchange.Hours,
                subscription.OffsetProvider, enumerator.Current);

            // drop the data into the back of the enqueueable
            enqueueable.Enqueue(subscriptionData);

            count++;

            // special behavior for first loop to spool up quickly
            if (firstLoop && count > firstLoopCount)
            {
                // there's more data in the enumerator, reschedule to run again
                firstLoop = false;
                _controller.Schedule(workItem);
                return;
            }

            // stop executing if we've dequeued more than the lower threshold or have
            // more total that upper threshold in the enqueueable's queue
            if (count > lowerThreshold || enqueueable.Count > upperThreshold)
            {
                // there's more data in the enumerator, reschedule to run again
                _controller.Schedule(workItem);
                return;
            }
        }

        // we made it here because MoveNext returned false, stop the enqueueable and don't reschedule
        // NOTE(review): the enumerator is only disposed on the HasFinished path, not here — presumably
        // ownership passes elsewhere on natural completion; confirm against callers
        enqueueable.Stop();
    });

    _controller.Schedule(workItem);
}
/// <summary>
/// Sets up a new <see cref="Subscription"/> which will consume a blocking <see cref="EnqueueableEnumerator{T}"/>
/// that will be fed by a worker task
/// </summary>
/// <param name="request">The subscription data request</param>
/// <param name="enumerator">The data enumerator stack</param>
/// <param name="lowerThreshold">The lower threshold for the worker task, for which the consumer will trigger the worker
/// if it has stopped <see cref="EnqueueableEnumerator{T}.TriggerProducer"/></param>
/// <param name="upperThreshold">The upper threshold for the worker task, after which it will stop producing until requested
/// by the consumer <see cref="EnqueueableEnumerator{T}.TriggerProducer"/></param>
/// <returns>A new subscription instance ready to consume</returns>
public static Subscription CreateAndScheduleWorker(
    SubscriptionRequest request,
    IEnumerator<BaseData> enumerator,
    int lowerThreshold,
    int upperThreshold)
{
    var exchangeHours = request.Security.Exchange.Hours;
    // 'true' => blocking enumerator: the consumer's MoveNext waits until data arrives or Stop is called
    var enqueueable = new EnqueueableEnumerator<SubscriptionData>(true);
    var timeZoneOffsetProvider = new TimeZoneOffsetProvider(request.Security.Exchange.TimeZone, request.StartTimeUtc, request.EndTimeUtc);
    var subscription = new Subscription(request, enqueueable, timeZoneOffsetProvider);

    // Producer action; registered with the enqueueable below, which re-runs it when
    // the consumer drains the queue below lowerThreshold.
    Action produce = () =>
    {
        var count = 0;
        while (enumerator.MoveNext())
        {
            // subscription has been removed, no need to continue enumerating
            if (enqueueable.HasFinished)
            {
                enumerator.Dispose();
                return;
            }

            var subscriptionData = SubscriptionData.Create(subscription.Configuration, exchangeHours,
                subscription.OffsetProvider, enumerator.Current);

            // drop the data into the back of the enqueueable
            enqueueable.Enqueue(subscriptionData);

            count++;
            // stop executing if we have more data than the upper threshold in the enqueueable, we don't want to fill the ram
            if (count > upperThreshold)
            {
                // we use local count for the outside if, for performance, and adjust here
                count = enqueueable.Count;
                if (count > upperThreshold)
                {
                    // we will be re scheduled to run by the consumer, see EnqueueableEnumerator
                    return;
                }
            }
        }

        // we made it here because MoveNext returned false, stop the enqueueable
        enqueueable.Stop();
        // we have to dispose of the enumerator
        enumerator.Dispose();
    };

    enqueueable.SetProducer(produce, lowerThreshold);

    return (subscription);
}
public void MoveNextBlocks()
{
    var producerDone = new ManualResetEvent(false);
    // blocking enumerator: consumer's MoveNext waits for data instead of returning null
    var blockingEnumerator = new EnqueueableEnumerator<Tick>(true);

    // producer: enqueue a tick every 50ms until six have been produced, then stop the stream
    int produced = 0;
    Task.Run(() =>
    {
        while (!producerDone.WaitOne(TimeSpan.FromMilliseconds(50)))
        {
            blockingEnumerator.Enqueue(new Tick(DateTime.Now, Symbols.SPY, 100, 101));
            produced++;

            // 5 data points is plenty
            if (produced > 5)
            {
                producerDone.Set();
                blockingEnumerator.Stop();
            }
        }
    });

    // consumer: because MoveNext blocks until data is available, Current must never be null
    int consumed = 0;
    bool sawNullCurrent = false;
    var consumerDone = new ManualResetEvent(false);
    Task.Run(() =>
    {
        while (blockingEnumerator.MoveNext())
        {
            consumed++;
            if (blockingEnumerator.Current == null)
            {
                sawNullCurrent = true;
            }
        }
        consumerDone.Set();
    });

    producerDone.WaitOne(Timeout.Infinite);
    consumerDone.WaitOne(Timeout.Infinite);

    Assert.IsFalse(blockingEnumerator.MoveNext());
    Assert.IsFalse(sawNullCurrent);
    Assert.AreEqual(produced, consumed);

    blockingEnumerator.Dispose();
}
/// <summary>
/// Schedules a work item on the parallel runner controller that drains <paramref name="enumerator"/>
/// into <paramref name="enqueueable"/> in bounded batches, rescheduling itself while more data remains.
/// </summary>
/// <param name="enumerator">The source data enumerator</param>
/// <param name="enqueueable">The destination blocking enumerator consumed elsewhere</param>
/// <param name="lowerThreshold">Work item only runs while the queue holds fewer than this many items; also caps items emitted per run</param>
/// <param name="upperThreshold">Stop the current run once the queue grows past this many items</param>
/// <param name="firstLoopCount">Smaller batch for the very first run so the consumer spools up quickly</param>
private void ScheduleEnumerator(IEnumerator<BaseData> enumerator, EnqueueableEnumerator<BaseData> enqueueable,
    int lowerThreshold, int upperThreshold, int firstLoopCount = 5)
{
    // schedule the work on the controller
    var firstLoop = true;
    // declared before assignment so the lambda below can reschedule the same work item
    FuncParallelRunnerWorkItem workItem = null;
    workItem = new FuncParallelRunnerWorkItem(
        // only run while the consumer is close to draining the queue
        () => enqueueable.Count < lowerThreshold,
        () =>
    {
        var count = 0;
        while (enumerator.MoveNext())
        {
            // drop the data into the back of the enqueueable
            enqueueable.Enqueue(enumerator.Current);

            count++;

            // special behavior for first loop to spool up quickly
            if (firstLoop && count > firstLoopCount)
            {
                // there's more data in the enumerator, reschedule to run again
                firstLoop = false;
                _controller.Schedule(workItem);
                return;
            }

            // stop executing if we've dequeued more than the lower threshold or have
            // more total that upper threshold in the enqueueable's queue
            if (count > lowerThreshold || enqueueable.Count > upperThreshold)
            {
                // there's more data in the enumerator, reschedule to run again
                _controller.Schedule(workItem);
                return;
            }
        }

        // we made it here because MoveNext returned false, stop the enqueueable and don't reschedule
        enqueueable.Stop();
    });

    _controller.Schedule(workItem);
}
/// <summary>
/// Registers a producer on <paramref name="enqueueable"/> that drains <paramref name="enumerator"/>,
/// wrapping each point via <see cref="SubscriptionData.Create"/>, pausing once the queue exceeds
/// <paramref name="upperThreshold"/>; the consumer re-triggers the producer when the queue drops
/// below <paramref name="lowerThreshold"/>.
/// </summary>
/// <param name="subscription">The subscription whose configuration/offset provider is used to wrap data</param>
/// <param name="enumerator">The source data enumerator</param>
/// <param name="enqueueable">The destination blocking enumerator consumed elsewhere</param>
/// <param name="lowerThreshold">Queue size below which the consumer re-triggers the producer</param>
/// <param name="upperThreshold">Queue size above which the producer stops to bound memory</param>
/// <param name="exchangeHours">The security exchange hours used when wrapping data</param>
/// <param name="firstLoopCount">NOTE(review): declared but never used in this body — confirm whether
/// the first-loop spool-up behavior was intentionally dropped or lost in a refactor</param>
private void ScheduleEnumerator(Subscription subscription, IEnumerator<BaseData> enumerator,
    EnqueueableEnumerator<SubscriptionData> enqueueable, int lowerThreshold, int upperThreshold,
    SecurityExchangeHours exchangeHours, int firstLoopCount = 5)
{
    Action produce = () =>
    {
        var count = 0;
        while (enumerator.MoveNext())
        {
            // subscription has been removed, no need to continue enumerating
            if (enqueueable.HasFinished)
            {
                enumerator.Dispose();
                return;
            }

            var subscriptionData = SubscriptionData.Create(subscription.Configuration, exchangeHours,
                subscription.OffsetProvider, enumerator.Current);

            // drop the data into the back of the enqueueable
            enqueueable.Enqueue(subscriptionData);

            count++;
            // stop executing if we have more data than the upper threshold in the enqueueable
            if (count > upperThreshold)
            {
                // we use local count for the outside if, for performance, and adjust here
                count = enqueueable.Count;
                if (count > upperThreshold)
                {
                    // we will be re-triggered by the consumer once the queue drains
                    return;
                }
            }
        }

        // we made it here because MoveNext returned false, stop the enqueueable
        // NOTE(review): unlike the sibling worker overloads, the enumerator is not disposed on this
        // natural-completion path — verify ownership or this may leak the source enumerator
        enqueueable.Stop();
    };

    enqueueable.SetProducer(produce, lowerThreshold);
}
/// <summary>
/// Calls stop on the internal enqueueable enumerator, signaling the end of the stream to its consumer
/// </summary>
public override void OnEnumeratorFinished() => _enqueueable.Stop();
/// <summary>
/// Sets up a new <see cref="Subscription"/> which will consume a blocking <see cref="EnqueueableEnumerator{T}"/>
/// that will be fed by a worker task
/// </summary>
/// <param name="request">The subscription data request</param>
/// <param name="enumerator">The data enumerator stack</param>
/// <param name="firstLoopLimit">The first loop data point count for which the worker will stop</param>
/// <returns>A new subscription instance ready to consume</returns>
public static Subscription CreateAndScheduleWorker(
    SubscriptionRequest request,
    IEnumerator<BaseData> enumerator,
    int firstLoopLimit = 50)
{
    // thresholds are resolution dependent; overridden below for coarse universe data
    var upperThreshold = GetUpperThreshold(request.Configuration.Resolution);
    var lowerThreshold = GetLowerThreshold(request.Configuration.Resolution);
    if (request.Configuration.Type == typeof(CoarseFundamental))
    {
        // the lower threshold will be when we start the worker again, if he is stopped
        lowerThreshold = 200;
        // the upper threshold will stop the worker from loading more data. This is roughly 1 GB
        upperThreshold = 500;
    }

    var exchangeHours = request.Security.Exchange.Hours;
    // 'true' => blocking enumerator: the consumer's MoveNext waits until data arrives or Stop is called
    var enqueueable = new EnqueueableEnumerator<SubscriptionData>(true);
    var timeZoneOffsetProvider = new TimeZoneOffsetProvider(request.Security.Exchange.TimeZone, request.StartTimeUtc, request.EndTimeUtc);
    var subscription = new Subscription(request, enqueueable, timeZoneOffsetProvider);

    // The first loop of a backtest can load hundreds of subscription feeds, resulting in long delays while the thresholds
    // for the buffer are reached. For the first loop start up with just 50 items in the buffer.
    // Ref wrapper so the flag survives across producer re-invocations
    var firstLoop = Ref.Create(true);
    Action produce = () =>
    {
        try
        {
            var count = 0;
            while (enumerator.MoveNext())
            {
                // subscription has been removed, no need to continue enumerating
                if (enqueueable.HasFinished)
                {
                    enumerator.DisposeSafely();
                    return;
                }

                var subscriptionData = SubscriptionData.Create(subscription.Configuration, exchangeHours,
                    subscription.OffsetProvider, enumerator.Current);

                // drop the data into the back of the enqueueable
                enqueueable.Enqueue(subscriptionData);

                count++;
                // stop executing if we have more data than the upper threshold in the enqueueable, we don't want to fill the ram
                if (count > upperThreshold || count > firstLoopLimit && firstLoop.Value)
                {
                    // we use local count for the outside if, for performance, and adjust here
                    count = enqueueable.Count;
                    if (count > upperThreshold || firstLoop.Value)
                    {
                        firstLoop.Value = false;
                        // we will be re scheduled to run by the consumer, see EnqueueableEnumerator
                        // if the consumer is already waiting for us wake him up, he will rescheduled us if required
                        enqueueable.CancellationTokenSource.Cancel();
                        return;
                    }
                }
            }
        }
        catch (Exception exception)
        {
            Log.Error(exception, $"Subscription worker task exception {request.Configuration}.");
        }

        // we made it here because MoveNext returned false or we exploded, stop the enqueueable
        enqueueable.Stop();
        // we have to dispose of the enumerator
        enumerator.DisposeSafely();
    };

    enqueueable.SetProducer(produce, lowerThreshold);

    return (subscription);
}
/// <summary>
/// Schedules a self-rescheduling work item on the controller that feeds the enqueueable
/// from the source enumerator in bounded batches.
/// </summary>
private void ScheduleEnumerator(IEnumerator<BaseData> enumerator, EnqueueableEnumerator<BaseData> enqueueable,
    int lowerThreshold, int upperThreshold, int firstLoopCount = 5)
{
    var isFirstPass = true;

    // the work item references itself so each pass can put itself back on the controller
    FuncParallelRunnerWorkItem work = null;
    work = new FuncParallelRunnerWorkItem(
        // gate: only run while the consumer has drained the queue below the lower threshold
        () => enqueueable.Count < lowerThreshold,
        () =>
        {
            var emitted = 0;
            while (enumerator.MoveNext())
            {
                // push the current point onto the back of the queue
                enqueueable.Enqueue(enumerator.Current);
                emitted++;

                // the very first pass uses a much smaller batch so consumers spool up quickly
                if (isFirstPass && emitted > firstLoopCount)
                {
                    isFirstPass = false;
                    // more data remains; come back for another pass
                    _controller.Schedule(work);
                    return;
                }

                // bound both the per-pass emission count and the total queue depth
                if (emitted > lowerThreshold || enqueueable.Count > upperThreshold)
                {
                    // more data remains; come back for another pass
                    _controller.Schedule(work);
                    return;
                }
            }

            // source exhausted: signal end-of-stream and do not reschedule
            enqueueable.Stop();
        });

    _controller.Schedule(work);
}
/// <summary>
/// Sets up a new <see cref="Subscription"/> which will consume a blocking <see cref="EnqueueableEnumerator{T}"/>
/// that will be fed by a worker task
/// </summary>
/// <param name="request">The subscription data request</param>
/// <param name="enumerator">The data enumerator stack</param>
/// <param name="factorFileProvider">The factor file provider</param>
/// <param name="enablePriceScale">Enables price factoring</param>
/// <returns>A new subscription instance ready to consume</returns>
public static Subscription CreateAndScheduleWorker(
    SubscriptionRequest request,
    IEnumerator<BaseData> enumerator,
    IFactorFileProvider factorFileProvider,
    bool enablePriceScale)
{
    var factorFile = GetFactorFileToUse(request.Configuration, factorFileProvider);
    var exchangeHours = request.Security.Exchange.Hours;
    // 'true' => blocking enumerator: the consumer's MoveNext waits until data arrives or Stop is called
    var enqueueable = new EnqueueableEnumerator<SubscriptionData>(true);
    var timeZoneOffsetProvider = new TimeZoneOffsetProvider(request.Security.Exchange.TimeZone, request.StartTimeUtc, request.EndTimeUtc);
    var subscription = new Subscription(request, enqueueable, timeZoneOffsetProvider);
    var config = subscription.Configuration;

    // price-scale cache: only recomputed when the data's date advances
    var lastTradableDate = DateTime.MinValue;
    decimal? currentScale = null;

    // Producer: returns true while there is (potentially) more work pending (batch limit hit),
    // false when the source is exhausted, errored, or the subscription was removed.
    Func<int, bool> produce = (workBatchSize) =>
    {
        try
        {
            var count = 0;
            while (enumerator.MoveNext())
            {
                // subscription has been removed, no need to continue enumerating
                if (enqueueable.HasFinished)
                {
                    enumerator.DisposeSafely();
                    return (false);
                }

                var data = enumerator.Current;
                var requestMode = config.DataNormalizationMode;
                // Raw requests still precalculate with Adjusted so the mode can be switched later
                var mode = requestMode != DataNormalizationMode.Raw ? requestMode : DataNormalizationMode.Adjusted;

                // refresh the cached scale factor when the data's date advances
                if (enablePriceScale && data?.Time.Date > lastTradableDate)
                {
                    lastTradableDate = data.Time.Date;
                    currentScale = GetScaleFactor(factorFile, mode, data.Time.Date);
                }

                SubscriptionData subscriptionData = SubscriptionData.Create(
                    config,
                    exchangeHours,
                    subscription.OffsetProvider,
                    data,
                    mode,
                    enablePriceScale ? currentScale : null);

                // drop the data into the back of the enqueueable
                enqueueable.Enqueue(subscriptionData);

                count++;
                // stop executing if added more data than the work batch size, we don't want to fill the ram
                if (count > workBatchSize)
                {
                    return (true);
                }
            }
        }
        catch (Exception exception)
        {
            Log.Error(exception, $"Subscription worker task exception {request.Configuration}.");
        }

        // we made it here because MoveNext returned false or we exploded, stop the enqueueable
        enqueueable.Stop();
        // we have to dispose of the enumerator
        enumerator.DisposeSafely();
        return (false);
    };

    WeightedWorkScheduler.Instance.QueueWork(produce,
        // weight callback: if the subscription finished we return 0, so the work is prioritized and gets removed;
        // otherwise the queue depth, capped at the scheduler's maximum work weight
        () =>
        {
            if (enqueueable.HasFinished)
            {
                return (0);
            }
            var count = enqueueable.Count;
            return (count > WeightedWorkScheduler.MaxWorkWeight ? WeightedWorkScheduler.MaxWorkWeight : count);
        }
    );

    return (subscription);
}
/// <summary>
/// Sets up a new <see cref="Subscription"/> which will consume a blocking <see cref="EnqueueableEnumerator{T}"/>
/// that will be fed by a worker task
/// </summary>
/// <param name="request">The subscription data request</param>
/// <param name="enumerator">The data enumerator stack</param>
/// <param name="factorFileProvider">The factor file provider</param>
/// <param name="enablePriceScale">Enables price factoring</param>
/// <returns>A new subscription instance ready to consume</returns>
public static Subscription CreateAndScheduleWorker(
    SubscriptionRequest request,
    IEnumerator<BaseData> enumerator,
    IFactorFileProvider factorFileProvider,
    bool enablePriceScale)
{
    var factorFile = GetFactorFileToUse(request.Configuration, factorFileProvider);
    var exchangeHours = request.Security.Exchange.Hours;
    // 'true' => blocking enumerator: the consumer's MoveNext waits until data arrives or Stop is called
    var enqueueable = new EnqueueableEnumerator<SubscriptionData>(true);
    var timeZoneOffsetProvider = new TimeZoneOffsetProvider(request.Security.Exchange.TimeZone, request.StartTimeUtc, request.EndTimeUtc);
    var subscription = new Subscription(request, enqueueable, timeZoneOffsetProvider);
    var config = subscription.Configuration;

    // price-scale cache: only recomputed when the data's date advances
    var lastTradableDate = DateTime.MinValue;
    decimal? currentScale = null;

    // Producer: returns true while there is (potentially) more work pending (batch limit hit),
    // false when the source is exhausted, errored, or the subscription was removed.
    Func<int, bool> produce = (workBatchSize) =>
    {
        try
        {
            var count = 0;
            while (enumerator.MoveNext())
            {
                // subscription has been removed, no need to continue enumerating
                if (enqueueable.HasFinished)
                {
                    enumerator.DisposeSafely();
                    return (false);
                }

                var data = enumerator.Current;

                // Use our config filter to see if we should emit this
                // This currently catches Auxiliary data that we don't want to emit
                if (data != null && !config.ShouldEmitData(data))
                {
                    continue;
                }

                // In the event we have "Raw" configuration, we will force our subscription data
                // to precalculate adjusted data. The data will still be emitted as raw, but
                // if the config is changed at any point it can emit adjusted data as well
                // See SubscriptionData.Create() and PrecalculatedSubscriptionData for more
                var requestMode = config.DataNormalizationMode;
                var mode = requestMode != DataNormalizationMode.Raw ? requestMode : DataNormalizationMode.Adjusted;

                // We update our price scale factor when the date changes for non fill forward bars or if we haven't initialized yet.
                // We don't take into account auxiliary data because we don't scale it and because the underlying price data could be fill forwarded
                if (enablePriceScale && data?.Time.Date > lastTradableDate && data.DataType != MarketDataType.Auxiliary
                    && (!data.IsFillForward || lastTradableDate == DateTime.MinValue))
                {
                    lastTradableDate = data.Time.Date;
                    currentScale = GetScaleFactor(factorFile, mode, data.Time.Date);
                }

                SubscriptionData subscriptionData = SubscriptionData.Create(
                    config,
                    exchangeHours,
                    subscription.OffsetProvider,
                    data,
                    mode,
                    enablePriceScale ? currentScale : null);

                // drop the data into the back of the enqueueable
                enqueueable.Enqueue(subscriptionData);

                count++;
                // stop executing if added more data than the work batch size, we don't want to fill the ram
                if (count > workBatchSize)
                {
                    return (true);
                }
            }
        }
        catch (Exception exception)
        {
            Log.Error(exception, $"Subscription worker task exception {request.Configuration}.");
        }

        // we made it here because MoveNext returned false or we exploded, stop the enqueueable
        enqueueable.Stop();
        // we have to dispose of the enumerator
        enumerator.DisposeSafely();
        return (false);
    };

    WeightedWorkScheduler.Instance.QueueWork(config.Symbol, produce,
        // weight callback: if the subscription finished we return 0, so the work is prioritized and gets removed;
        // otherwise the queue depth, capped at the scheduler's maximum work weight
        () =>
        {
            if (enqueueable.HasFinished)
            {
                return (0);
            }
            var count = enqueueable.Count;
            return (count > WeightedWorkScheduler.MaxWorkWeight ? WeightedWorkScheduler.MaxWorkWeight : count);
        }
    );

    return (subscription);
}
/// <summary>
/// Verifies that a blocking <see cref="EnqueueableEnumerator{T}"/>'s MoveNext waits for data:
/// the consumer never observes a null Current and dequeues exactly what the producer enqueued.
/// </summary>
public void MoveNextBlocks()
{
    var finished = new ManualResetEvent(false);
    var enumerator = new EnqueueableEnumerator<Tick>(true);

    // producer: enqueue a tick every 50ms until six have been produced, then stop the stream
    int count = 0;
    Task.Run(() =>
    {
        while (!finished.WaitOne(TimeSpan.FromMilliseconds(50)))
        {
            enumerator.Enqueue(new Tick(DateTime.Now, Symbols.SPY, 100, 101));
            count++;

            // 5 data points is plenty
            if (count > 5)
            {
                finished.Set();
                enumerator.Stop();
            }
        }
    });

    // consumer: because MoveNext blocks until data is available, Current must never be null
    int dequeuedCount = 0;
    bool encounteredError = false;
    var consumerTaskFinished = new ManualResetEvent(false);
    Task.Run(() =>
    {
        while (enumerator.MoveNext())
        {
            dequeuedCount++;
            if (enumerator.Current == null)
            {
                encounteredError = true;
            }
        }
        consumerTaskFinished.Set();
    });

    finished.WaitOne(Timeout.Infinite);
    consumerTaskFinished.WaitOne(Timeout.Infinite);

    Assert.IsFalse(enumerator.MoveNext());
    Assert.IsFalse(encounteredError);
    Assert.AreEqual(count, dequeuedCount);

    // fix: dispose the enumerator so its synchronization resources are released
    // (the sibling version of this test disposes it; this one leaked it)
    enumerator.Dispose();
}