/// <summary>
/// Assumes that all candles have the same AssetPair, PriceType, and TimeInterval
/// </summary>
public async Task InsertOrMergeAsync(IEnumerable<ICandle> candles, CandlePriceType priceType)
{
    var partitionKey = CandleHistoryEntity.GeneratePartitionKey(priceType);

    // Although AzureTableStorage already splits requests into chunks,
    // split into chunks here as well to reduce the cost of an operation timeout
    var candleByRowsChunks = candles
        .GroupBy(candle => CandleHistoryEntity.GenerateRowKey(candle.Timestamp, _timeInterval))
        .Batch(100);

    foreach (var candleByRowsChunk in candleByRowsChunks)
    {
        // If we can't store the candles, we can't do anything else, so just retry until success
        await Policy
            .Handle<Exception>()
            .WaitAndRetryForeverAsync(
                retryAttempt => TimeSpan.FromSeconds(Math.Pow(2, retryAttempt)),
                (exception, timeSpan) =>
                {
                    var context = $"{_assetPairId}-{priceType}-{_timeInterval}";
                    return _log.WriteErrorAsync("Persist candle rows chunk with retries", context, exception);
                })
            .ExecuteAsync(() => SaveCandlesBatchAsync(candleByRowsChunk, partitionKey));
    }
}
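
/// <summary>
/// Deletes the given candles from the stored rows; rows that end up with no candles are removed entirely.
/// Returns the number of candles actually deleted.
/// </summary>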
public async Task<int> DeleteCandlesAsync(IReadOnlyList<ICandle> candlesToDelete, CandlePriceType priceType)
{
    if (candlesToDelete == null || !candlesToDelete.Any())
    {
        throw new ArgumentException("Candles set should not be empty.");
    }

    var partitionKey = CandleHistoryEntity.GeneratePartitionKey(priceType);

    // Split into chunks, just like in InsertOrMergeAsync
    var candleByRowsChunks = candlesToDelete
        .GroupBy(candle => CandleHistoryEntity.GenerateRowKey(candle.Timestamp, _timeInterval))
        .Batch(100);

    int deletedCandlesCount = 0;

    foreach (var candleByRowsChunk in candleByRowsChunks)
    {
        var candleByRows = candleByRowsChunk.ToDictionary(g => g.Key, g => g.AsEnumerable());

        var existingEntities = (await _tableStorage.GetDataAsync(partitionKey, candleByRows.Keys))
            .ToList();

        if (!existingEntities.Any()) // Safety check
        {
            continue;
        }

        var emptyEntities = new List<CandleHistoryEntity>();

        foreach (var entity in existingEntities)
        {
            deletedCandlesCount += entity.DeleteCandles(candleByRows[entity.RowKey]);

            // All of an entity's candles may have been deleted. Such an entity itself needs to be deleted as well.
            if (!entity.Candles.Any())
            {
                emptyEntities.Add(entity);
            }
        }

        foreach (var entity in emptyEntities)
        {
            existingEntities.Remove(entity);
        }

        // No _healthService tracking here. Monitoring of candles deletion is performed on the upper layers of logic.

        if (emptyEntities.Any())
        {
            await _tableStorage.DeleteAsync(emptyEntities);
        }

        await _tableStorage.InsertOrReplaceBatchAsync(existingEntities); // Because we do not have a ReplaceBatchAsync method in AzureTableStorage yet.
    }

    return deletedCandlesCount;
}
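
/// <summary>
/// Returns the earliest stored candle of the given price type, or null if there is no data for the asset pair.
/// </summary>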
public async Task<ICandle> TryGetFirstCandleAsync(CandlePriceType priceType, CandleTimeInterval timeInterval)
{
    var candleEntity = await _tableStorage.GetTopRecordAsync(CandleHistoryEntity.GeneratePartitionKey(priceType));

    return candleEntity
        ?.Candles
        .First()
        .ToCandle(_assetPairId, priceType, candleEntity.DateTime, timeInterval);
}
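
/// <summary>
/// Replaces already stored candles with the given ones; candles whose rows are not present in the storage are skipped.
/// Returns the number of candles actually replaced.
/// </summary>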
public async Task<int> ReplaceCandlesAsync(IEnumerable<ICandle> candlesToReplace, CandlePriceType priceType)
{
    // ReSharper disable once PossibleMultipleEnumeration
    if (candlesToReplace == null || !candlesToReplace.Any())
    {
        throw new ArgumentException("Candles set should not be empty.");
    }

    var partitionKey = CandleHistoryEntity.GeneratePartitionKey(priceType);

    // Split into chunks, just like in InsertOrMergeAsync
    // ReSharper disable once PossibleMultipleEnumeration
    var candleByRowsChunks = candlesToReplace
        .GroupBy(candle => CandleHistoryEntity.GenerateRowKey(candle.Timestamp, _timeInterval))
        .Batch(100);

    int replacedCandlesCount = 0;

    foreach (var candleByRowsChunk in candleByRowsChunks)
    {
        var candleByRows = candleByRowsChunk.ToDictionary(g => g.Key, g => g.AsEnumerable());

        var existingEntities = (await _tableStorage.GetDataAsync(partitionKey, candleByRows.Keys))
            .ToList();

        if (existingEntities.Count == 0) // Safety check
        {
            continue;
        }

        foreach (var entity in existingEntities)
        {
            replacedCandlesCount += entity.ReplaceCandles(candleByRows[entity.RowKey]);
        }

        // No _healthService tracking here. Monitoring of candles replacement is performed on the upper layers of logic.

        await _tableStorage.InsertOrReplaceBatchAsync(existingEntities); // Because we do not have a ReplaceBatchAsync method in AzureTableStorage yet.
    }

    return replacedCandlesCount;
}
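
/// <summary>
/// Builds a table query that selects candle rows of the given price type whose row keys fall within the [from, to] range.
/// </summary>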
private static TableQuery<CandleHistoryEntity> GetTableQuery(
    CandlePriceType priceType,
    CandleTimeInterval interval,
    DateTime from,
    DateTime to)
{
    var partitionKey = CandleHistoryEntity.GeneratePartitionKey(priceType);
    var rowKeyFrom = CandleHistoryEntity.GenerateRowKey(from, interval);
    var rowKeyTo = CandleHistoryEntity.GenerateRowKey(to, interval);

    var pkeyFilter = TableQuery.GenerateFilterCondition("PartitionKey", QueryComparisons.Equal, partitionKey);

    var rowkeyFromFilter = TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, rowKeyFrom);
    var rowkeyToFilter = TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThanOrEqual, rowKeyTo);
    var rowkeyFilter = TableQuery.CombineFilters(rowkeyFromFilter, TableOperators.And, rowkeyToFilter);

    return new TableQuery<CandleHistoryEntity>
    {
        FilterString = TableQuery.CombineFilters(pkeyFilter, TableOperators.And, rowkeyFilter)
    };
}
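
// Usage sketch (not part of the original code): a hypothetical read method showing how GetTableQuery
// could feed a range read. It assumes _tableStorage is an INoSQLTableStorage<CandleHistoryEntity>
// exposing a WhereAsync(TableQuery<T>) range-query method, and reuses ToCandle the same way
// TryGetFirstCandleAsync does above; the exact storage API and the boundary semantics may differ.
public async Task<IEnumerable<ICandle>> GetCandlesAsync(
    CandlePriceType priceType,
    CandleTimeInterval interval,
    DateTime from,
    DateTime to)
{
    var query = GetTableQuery(priceType, interval, from, to);
    var entities = await _tableStorage.WhereAsync(query);

    // Rows are coarser than single candles, so trim to the requested range
    // (an exclusive upper bound is assumed here).
    return entities
        .SelectMany(e => e.Candles.Select(ci => ci.ToCandle(_assetPairId, priceType, e.DateTime, interval)))
        .Where(c => c.Timestamp >= from && c.Timestamp < to);
}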