/// <summary>
/// Setups a new <see cref="Subscription"/> which will consume a blocking <see cref="EnqueueableEnumerator{T}"/>
/// that will be feed by a worker task
/// </summary>
/// <param name="request">The subscription data request</param>
/// <param name="enumerator">The data enumerator stack</param>
/// <returns>A new subscription instance ready to consume</returns>
public static Subscription CreateAndScheduleWorker(
    SubscriptionRequest request,
    IEnumerator<BaseData> enumerator)
{
    var exchangeHours = request.Security.Exchange.Hours;
    // blocking enumerator: the consumer side of the subscription reads from this queue
    var enqueueable = new EnqueueableEnumerator<SubscriptionData>(true);
    var timeZoneOffsetProvider = new TimeZoneOffsetProvider(request.Security.Exchange.TimeZone, request.StartTimeUtc, request.EndTimeUtc);
    var subscription = new Subscription(request, enqueueable, timeZoneOffsetProvider);

    // Producer work item: drains up to 'workBatchSize' data points from the enumerator stack
    // into the enqueueable. Returns true when more work remains, false once the subscription
    // has finished (or the source enumerator is exhausted / threw).
    Func<int, bool> produce = (workBatchSize) =>
    {
        try
        {
            var count = 0;
            while (enumerator.MoveNext())
            {
                // subscription has been removed, no need to continue enumerating
                if (enqueueable.HasFinished)
                {
                    enumerator.DisposeSafely();
                    return (false);
                }

                var subscriptionData = SubscriptionData.Create(subscription.Configuration, exchangeHours, subscription.OffsetProvider, enumerator.Current);

                // drop the data into the back of the enqueueable
                enqueueable.Enqueue(subscriptionData);

                count++;
                // stop executing if added more data than the work batch size, we don't want to fill the ram
                if (count > workBatchSize)
                {
                    return (true);
                }
            }
        }
        catch (Exception exception)
        {
            Log.Error(exception, $"Subscription worker task exception {request.Configuration}.");
        }

        // we made it here because MoveNext returned false or we exploded, stop the enqueueable
        enqueueable.Stop();
        // we have to dispose of the enumerator
        enumerator.DisposeSafely();
        return (false);
    };

    WeightedWorkScheduler.Instance.QueueWork(produce,
        // if the subscription finished we return 0, so the work is prioritized and gets removed
        () => enqueueable.HasFinished ? 0 : enqueueable.Count);

    return (subscription);
}
/// <summary>
/// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources.
/// </summary>
public void Dispose()
{
    // tear down every auxiliary data enumerator this instance created
    foreach (var disposable in _auxDataEnumerators)
    {
        disposable.DisposeSafely();
    }

    // and finally the trade bar aggregator itself
    _tradeBarAggregator.DisposeSafely();
}
/// <summary>
/// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources.
/// </summary>
public void Dispose()
{
    // release each wrapped enumerator first
    foreach (var disposable in _enumerators)
    {
        disposable.DisposeSafely();
    }

    // then the concatenating enumerator that consumed them
    _concatEnumerator.DisposeSafely();
}
/// <summary>
/// Advances the enumerator to the next element.
/// </summary>
public bool MoveNext()
{
    if (_previousEnumerator != null)
    {
        // if previous is set we dispose of it here since we are the consumers of it
        _previousEnumerator.DisposeSafely();
        _previousEnumerator = null;
    }

    var result = _underlyingEnumerator.MoveNext();
    if (Current != null)
    {
        // stamp the requested symbol onto the emitted data point.
        // NOTE(review): this mutates Current in place — if the underlying enumerator shares
        // data instances across subscriptions/configurations the symbol change would leak to
        // other consumers; a sibling implementation clones before re-symboling. Confirm the
        // underlying source guarantees unique instances here.
        Current.Symbol = _requestedSymbol;
    }

    return (result);
}
/// <summary>
/// Advances the enumerator to the next element.
/// </summary>
public bool MoveNext()
{
    if (_previousEnumerator != null)
    {
        // we own the previous enumerator, so the first advance disposes of it
        _previousEnumerator.DisposeSafely();
        _previousEnumerator = null;
    }

    var advanced = _underlyingEnumerator.MoveNext();
    _current = _underlyingEnumerator.Current;

    if (_current != null && _current.Symbol != _requestedSymbol)
    {
        // mapping happened at this layer: clone before stamping the requested symbol so we
        // never mutate an instance the IDQH implementation might share, since the
        // configuration could be shared across consumers
        _current = _current.Clone();
        _current.Symbol = _requestedSymbol;
    }

    return advanced;
}
/// <summary>
/// Advances the enumerator to the next element of the collection.
/// </summary>
/// <returns>
/// true if the enumerator was successfully advanced to the next element; false if the enumerator has passed the end of the collection.
/// </returns>
/// <exception cref="T:System.InvalidOperationException">The collection was modified after the enumerator was created. </exception><filterpriority>2</filterpriority>
public bool MoveNext()
{
    // lazily create the underlying enumerator on first use, and again after each time the
    // previous one was exhausted and cleared below
    if (_enumerator == null)
    {
        _enumerator = _enumeratorFactory.Invoke();
    }

    var moveNext = _enumerator.MoveNext();
    if (moveNext)
    {
        _current = _enumerator.Current;
    }
    else
    {
        // underlying enumerator ended: dispose of it and reset state so the next call
        // rebuilds a fresh enumerator from the factory
        _enumerator.DisposeSafely();
        _enumerator = null;
        _current = default(T);
    }

    // NOTE(review): always returns true, even when the underlying enumerator ended —
    // presumably intentional so consumers keep polling across refreshes (Current is null at
    // a refresh boundary rather than the sequence terminating); confirm this is by design
    // and not a dropped 'return moveNext'.
    return (true);
}
// Test teardown: release the enumerator if one was created during the test run
public void TearDown()
{
    if (_enumerator != null)
    {
        _enumerator.DisposeSafely();
    }
}
/// <summary>
/// Disposes of the used enumerators
/// </summary>
public void Dispose()
{
    // dispose the previous enumerator first (it may already have been cleared by MoveNext),
    // then the underlying one we are still reading from
    _previousEnumerator.DisposeSafely();
    _underlyingEnumerator.DisposeSafely();
}
/// <summary>
/// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources.
/// </summary>
public void Dispose()
{
    // release the auxiliary data enumerator followed by the trade bar aggregator
    _auxDataEnumerator.DisposeSafely();
    _tradeBarAggregator.DisposeSafely();
}
/// <summary>
/// Setups a new <see cref="Subscription"/> which will consume a blocking <see cref="EnqueueableEnumerator{T}"/>
/// that will be feed by a worker task
/// </summary>
/// <param name="request">The subscription data request</param>
/// <param name="enumerator">The data enumerator stack</param>
/// <param name="firstLoopLimit">The first loop data point count for which the worker will stop</param>
/// <returns>A new subscription instance ready to consume</returns>
public static Subscription CreateAndScheduleWorker(
    SubscriptionRequest request,
    IEnumerator<BaseData> enumerator,
    int firstLoopLimit = 50)
{
    // buffer thresholds are resolution dependent: finer resolutions buffer more points
    var upperThreshold = GetUpperThreshold(request.Configuration.Resolution);
    var lowerThreshold = GetLowerThreshold(request.Configuration.Resolution);
    if (request.Configuration.Type == typeof(CoarseFundamental))
    {
        // the lower threshold will be when we start the worker again, if he is stopped
        lowerThreshold = 200;
        // the upper threshold will stop the worker from loading more data. This is roughly 1 GB
        upperThreshold = 500;
    }

    var exchangeHours = request.Security.Exchange.Hours;
    // blocking enumerator: the consumer side of the subscription reads from this queue
    var enqueueable = new EnqueueableEnumerator<SubscriptionData>(true);
    var timeZoneOffsetProvider = new TimeZoneOffsetProvider(request.Security.Exchange.TimeZone, request.StartTimeUtc, request.EndTimeUtc);
    var subscription = new Subscription(request, enqueueable, timeZoneOffsetProvider);

    // The first loop of a backtest can load hundreds of subscription feeds, resulting in long delays while the thresholds
    // for the buffer are reached. For the first loop start up with just 50 items in the buffer.
    var firstLoop = Ref.Create(true);

    // Producer: fills the enqueueable until the upper threshold (or first-loop limit) is hit,
    // then yields; the consumer reschedules it when the buffer drains below 'lowerThreshold'.
    Action produce = () =>
    {
        try
        {
            var count = 0;
            while (enumerator.MoveNext())
            {
                // subscription has been removed, no need to continue enumerating
                if (enqueueable.HasFinished)
                {
                    enumerator.DisposeSafely();
                    return;
                }

                var subscriptionData = SubscriptionData.Create(subscription.Configuration, exchangeHours, subscription.OffsetProvider, enumerator.Current);

                // drop the data into the back of the enqueueable
                enqueueable.Enqueue(subscriptionData);

                count++;
                // stop executing if we have more data than the upper threshold in the enqueueable, we don't want to fill the ram
                if (count > upperThreshold || count > firstLoopLimit && firstLoop.Value)
                {
                    // we use local count for the outside if, for performance, and adjust here
                    count = enqueueable.Count;
                    if (count > upperThreshold || firstLoop.Value)
                    {
                        firstLoop.Value = false;
                        // we will be re scheduled to run by the consumer, see EnqueueableEnumerator
                        // if the consumer is already waiting for us wake him up, he will rescheduled us if required
                        enqueueable.CancellationTokenSource.Cancel();
                        return;
                    }
                }
            }
        }
        catch (Exception exception)
        {
            Log.Error(exception, $"Subscription worker task exception {request.Configuration}.");
        }

        // we made it here because MoveNext returned false or we exploded, stop the enqueueable
        enqueueable.Stop();
        // we have to dispose of the enumerator
        enumerator.DisposeSafely();
    };

    // hand the producer to the enqueueable so the consumer can restart it below the lower threshold
    enqueueable.SetProducer(produce, lowerThreshold);

    return (subscription);
}
/// <summary>
/// Clean up
/// </summary>
public void Dispose()
{
    // stop receiving live data for this configuration, then release the delisting enumerator
    _dataQueueHandler.Unsubscribe(_dataConfig);
    _delistingEnumerator.DisposeSafely();
}
/// <summary>
/// Setups a new <see cref="Subscription"/> which will consume a blocking <see cref="EnqueueableEnumerator{T}"/>
/// that will be feed by a worker task
/// </summary>
/// <param name="request">The subscription data request</param>
/// <param name="enumerator">The data enumerator stack</param>
/// <param name="factorFileProvider">The factor file provider</param>
/// <param name="enablePriceScale">Enables price factoring</param>
/// <returns>A new subscription instance ready to consume</returns>
public static Subscription CreateAndScheduleWorker(
    SubscriptionRequest request,
    IEnumerator<BaseData> enumerator,
    IFactorFileProvider factorFileProvider,
    bool enablePriceScale)
{
    var factorFile = GetFactorFileToUse(request.Configuration, factorFileProvider);
    var exchangeHours = request.Security.Exchange.Hours;
    // blocking enumerator: the consumer side of the subscription reads from this queue
    var enqueueable = new EnqueueableEnumerator<SubscriptionData>(true);
    var timeZoneOffsetProvider = new TimeZoneOffsetProvider(request.Security.Exchange.TimeZone, request.StartTimeUtc, request.EndTimeUtc);
    var subscription = new Subscription(request, enqueueable, timeZoneOffsetProvider);
    var config = subscription.Configuration;

    // price scale factor cache: recomputed only when the data's date changes
    var lastTradableDate = DateTime.MinValue;
    decimal? currentScale = null;

    // Producer work item: drains up to 'workBatchSize' data points from the enumerator stack
    // into the enqueueable, applying price scaling when enabled. Returns true when more work
    // remains, false once the subscription has finished.
    Func<int, bool> produce = (workBatchSize) =>
    {
        try
        {
            var count = 0;
            while (enumerator.MoveNext())
            {
                // subscription has been removed, no need to continue enumerating
                if (enqueueable.HasFinished)
                {
                    enumerator.DisposeSafely();
                    return (false);
                }

                var data = enumerator.Current;

                // Raw requests still precalculate with Adjusted so the mode can be switched later
                var requestMode = config.DataNormalizationMode;
                var mode = requestMode != DataNormalizationMode.Raw ? requestMode : DataNormalizationMode.Adjusted;

                // refresh the cached scale factor when the data's date advances
                if (enablePriceScale && data?.Time.Date > lastTradableDate)
                {
                    lastTradableDate = data.Time.Date;
                    currentScale = GetScaleFactor(factorFile, mode, data.Time.Date);
                }

                SubscriptionData subscriptionData = SubscriptionData.Create(
                    config,
                    exchangeHours,
                    subscription.OffsetProvider,
                    data,
                    mode,
                    enablePriceScale ? currentScale : null);

                // drop the data into the back of the enqueueable
                enqueueable.Enqueue(subscriptionData);

                count++;
                // stop executing if added more data than the work batch size, we don't want to fill the ram
                if (count > workBatchSize)
                {
                    return (true);
                }
            }
        }
        catch (Exception exception)
        {
            Log.Error(exception, $"Subscription worker task exception {request.Configuration}.");
        }

        // we made it here because MoveNext returned false or we exploded, stop the enqueueable
        enqueueable.Stop();
        // we have to dispose of the enumerator
        enumerator.DisposeSafely();
        return (false);
    };

    WeightedWorkScheduler.Instance.QueueWork(produce,
        // if the subscription finished we return 0, so the work is prioritized and gets removed
        () =>
        {
            if (enqueueable.HasFinished)
            {
                return (0);
            }
            // cap the reported weight so one busy subscription cannot dominate scheduling
            var count = enqueueable.Count;
            return (count > WeightedWorkScheduler.MaxWorkWeight ? WeightedWorkScheduler.MaxWorkWeight : count);
        }
    );

    return (subscription);
}
/// <summary>
/// Setups a new <see cref="Subscription"/> which will consume a blocking <see cref="EnqueueableEnumerator{T}"/>
/// that will be feed by a worker task
/// </summary>
/// <param name="request">The subscription data request</param>
/// <param name="enumerator">The data enumerator stack</param>
/// <param name="factorFileProvider">The factor file provider</param>
/// <param name="enablePriceScale">Enables price factoring</param>
/// <returns>A new subscription instance ready to consume</returns>
public static Subscription CreateAndScheduleWorker(
    SubscriptionRequest request,
    IEnumerator<BaseData> enumerator,
    IFactorFileProvider factorFileProvider,
    bool enablePriceScale)
{
    var factorFile = GetFactorFileToUse(request.Configuration, factorFileProvider);
    var exchangeHours = request.Security.Exchange.Hours;
    // blocking enumerator: the consumer side of the subscription reads from this queue
    var enqueueable = new EnqueueableEnumerator<SubscriptionData>(true);
    var timeZoneOffsetProvider = new TimeZoneOffsetProvider(request.Security.Exchange.TimeZone, request.StartTimeUtc, request.EndTimeUtc);
    var subscription = new Subscription(request, enqueueable, timeZoneOffsetProvider);
    var config = subscription.Configuration;

    // price scale factor cache: recomputed only when the data's date changes
    var lastTradableDate = DateTime.MinValue;
    decimal? currentScale = null;

    // Producer work item: drains up to 'workBatchSize' emittable data points from the
    // enumerator stack into the enqueueable, applying price scaling when enabled. Returns
    // true when more work remains, false once the subscription has finished.
    Func<int, bool> produce = (workBatchSize) =>
    {
        try
        {
            var count = 0;
            while (enumerator.MoveNext())
            {
                // subscription has been removed, no need to continue enumerating
                if (enqueueable.HasFinished)
                {
                    enumerator.DisposeSafely();
                    return (false);
                }

                var data = enumerator.Current;

                // Use our config filter to see if we should emit this
                // This currently catches Auxiliary data that we don't want to emit
                if (data != null && !config.ShouldEmitData(data))
                {
                    continue;
                }

                // In the event we have "Raw" configuration, we will force our subscription data
                // to precalculate adjusted data. The data will still be emitted as raw, but
                // if the config is changed at any point it can emit adjusted data as well
                // See SubscriptionData.Create() and PrecalculatedSubscriptionData for more
                var requestMode = config.DataNormalizationMode;
                var mode = requestMode != DataNormalizationMode.Raw ? requestMode : DataNormalizationMode.Adjusted;

                // We update our price scale factor when the date changes for non fill forward bars or if we haven't initialized yet.
                // We don't take into account auxiliary data because we don't scale it and because the underlying price data could be fill forwarded
                if (enablePriceScale && data?.Time.Date > lastTradableDate && data.DataType != MarketDataType.Auxiliary && (!data.IsFillForward || lastTradableDate == DateTime.MinValue))
                {
                    lastTradableDate = data.Time.Date;
                    currentScale = GetScaleFactor(factorFile, mode, data.Time.Date);
                }

                SubscriptionData subscriptionData = SubscriptionData.Create(
                    config,
                    exchangeHours,
                    subscription.OffsetProvider,
                    data,
                    mode,
                    enablePriceScale ? currentScale : null);

                // drop the data into the back of the enqueueable
                enqueueable.Enqueue(subscriptionData);

                count++;
                // stop executing if added more data than the work batch size, we don't want to fill the ram
                if (count > workBatchSize)
                {
                    return (true);
                }
            }
        }
        catch (Exception exception)
        {
            Log.Error(exception, $"Subscription worker task exception {request.Configuration}.");
        }

        // we made it here because MoveNext returned false or we exploded, stop the enqueueable
        enqueueable.Stop();
        // we have to dispose of the enumerator
        enumerator.DisposeSafely();
        return (false);
    };

    WeightedWorkScheduler.Instance.QueueWork(config.Symbol, produce,
        // if the subscription finished we return 0, so the work is prioritized and gets removed
        () =>
        {
            if (enqueueable.HasFinished)
            {
                return (0);
            }
            // cap the reported weight so one busy subscription cannot dominate scheduling
            var count = enqueueable.Count;
            return (count > WeightedWorkScheduler.MaxWorkWeight ? WeightedWorkScheduler.MaxWorkWeight : count);
        }
    );

    return (subscription);
}