/// <summary>
/// Splits the point list of a query into several smaller lists and performs
/// a bulk call on each list, in parallel.
/// </summary>
private void GetRecordedValuesBulkParrallel(DataQuery query, AFTimeRange timeRange, int bulkPageSize, int maxDegOfParallel, int bulkParallelChunkSize, CancellationToken cancelToken)
{
    _logger.WarnFormat("QUERY (BULK-P) # {5} - TAGS: {6} - PERIOD: {3} to {4} - MAX DEG. PAR. {0}, TAG_CHUNK_SIZE {1}, TAG_PAGE_SIZE {2}",
        maxDegOfParallel, bulkParallelChunkSize, bulkPageSize, timeRange.StartTime, timeRange.EndTime, query.QueryId, query.PiPoints.Count);

    // Parallel bulk calls: one PIPointList per chunk of tags.
    var pointListList = query.PiPoints.ToList().ChunkBy(bulkParallelChunkSize);

    Parallel.ForEach(
        pointListList,
        new ParallelOptions { MaxDegreeOfParallelism = maxDegOfParallel, CancellationToken = cancelToken },
        (pts, state, index) =>
        {
            var stats = new StatisticsInfo();
            stats.Stopwatch.Start();

            var pagingConfiguration = new PIPagingConfiguration(PIPageType.TagCount, bulkPageSize);
            var pointList = new PIPointList(pts);

            try
            {
                IEnumerable<AFValues> bulkData = pointList
                    .RecordedValues(timeRange, AFBoundaryType.Inside, String.Empty, false, pagingConfiguration)
                    .ToList();

                if (_enableWrite)
                {
                    var writeInfo = new WriteInfo()
                    {
                        Data = bulkData,
                        StartTime = timeRange.StartTime,
                        EndTime = timeRange.EndTime,
                        ChunkId = query.ChunkId,
                        SubChunkId = index
                    };
                    _dataWriter.DataQueue.Add(writeInfo, cancelToken);
                }

                stats.EventsCount = bulkData.Sum(s => s.Count);
                stats.Stopwatch.Stop();
                stats.EventsInWritingQueue = _dataWriter.DataQueue.Count;
                Statistics.StatisticsQueue.Add(stats, cancelToken);
            }
            catch (OperationCanceledException)
            {
                // With paged bulk calls, the root cause of the cancellation is
                // reported on the paging configuration, not on the exception itself.
                _logger.Error(pagingConfiguration.Error);
            }
            catch (Exception ex)
            {
                _logger.Error(ex);
            }
        });
}
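// --------------------------------------------------------------------------
// The ChunkBy extension used above is not part of the BCL or the AF SDK; it
// is assumed to be a project-local helper. A minimal sketch of what such a
// helper could look like (the actual implementation may differ):
using System.Collections.Generic;
using System.Linq;

public static class ListExtensions
{
    // Splits a list into consecutive sublists of at most chunkSize elements.
    public static List<List<T>> ChunkBy<T>(this List<T> source, int chunkSize)
    {
        return source
            .Select((item, index) => new { item, index })
            .GroupBy(x => x.index / chunkSize)
            .Select(g => g.Select(x => x.item).ToList())
            .ToList();
    }
}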
protected override void DoTask(CancellationToken cancelToken)
{
    _logger.Info("Orchestrator started and ready to receive tags to send data queries to the DataReader");

    // Process the first interval: queries are created as the tag chunks arrive.
    // GetConsumingEnumerable() blocks until tags are available and releases the
    // wait in the loop as they are added; once all tags are loaded, we can
    // continue with the remaining time periods.
    foreach (var dataQuery in IncomingPiPoints.GetConsumingEnumerable(cancelToken))
    {
        dataQuery.StartTime = _datesIntervals[0];
        dataQuery.EndTime = _datesIntervals[1];
        dataQuery.QueryId = _queryId++;
        dataQuery.ChunkId = 1;

        // Keep the tag list for the queries of the next time periods.
        PointsToRead.Enqueue(dataQuery);

        _dataReader.GetQueriesQueue().Add(dataQuery, cancelToken);
    }

    _logger.Info("Orchestrator completed initial queries for all tags. Will continue for all remaining intervals.");

    // For each remaining time period, trigger the read for all the tags.
    // The loop starts at 1 because the first interval was already queued above.
    for (var i = 1; i < _datesIntervals.Count - 1; i++)
    {
        if (cancelToken.IsCancellationRequested)
            break;

        foreach (var dataQuery in PointsToRead)
        {
            var newQuery = new DataQuery()
            {
                StartTime = _datesIntervals[i],
                // One second is removed from the end time to avoid reading
                // duplicate values at the boundary of two consecutive intervals.
                EndTime = _datesIntervals[i + 1].AddSeconds(-1),
                QueryId = _queryId++,
                PiPoints = dataQuery.PiPoints,
                ChunkId = i + 1
            };

            _dataReader.GetQueriesQueue().Add(newQuery, cancelToken);

            if (cancelToken.IsCancellationRequested)
                break;
        }
    }

    // We are done; no more data queries will be added.
    _dataReader.GetQueriesQueue().CompleteAdding();
    _logger.Info("Orchestrator has completed its task. All queries were sent.");
}
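// --------------------------------------------------------------------------
// Hypothetical shape of the DataQuery DTO, inferred from how DoTask,
// GetRecordedValuesBulkParrallel and SendPointsForProcessing use it; the
// real class may carry additional members:
using System;
using System.Collections.Generic;
using OSIsoft.AF.PI;

public class DataQuery
{
    public DateTime StartTime { get; set; }  // interval start, assigned by the orchestrator
    public DateTime EndTime { get; set; }    // interval end (minus one second for later chunks)
    public int QueryId { get; set; }         // monotonically increasing query counter
    public int ChunkId { get; set; }         // index of the time interval being read
    public List<PIPoint> PiPoints { get; set; } = new List<PIPoint>();  // tags to read
}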
private void SendPointsForProcessing(CancellationToken cancelToken, List<PIPoint> pipoints)
{
    // Create the query object with the tag chunk to start processing the data.
    var dataQuery = new DataQuery();
    dataQuery.PiPoints.AddRange(pipoints);
    pipoints.Clear();

    // Enqueue the query so the orchestrator can process it.
    _orchestrator.IncomingPiPoints.Add(dataQuery, cancelToken);

    _logger.InfoFormat("TagsLoader loaded {0} tags for data collection. Total {1} tags loaded.",
        dataQuery.PiPoints.Count, _tagCount);
}
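// --------------------------------------------------------------------------
// A hedged sketch of how a TagsLoader could drive SendPointsForProcessing,
// flushing a chunk whenever _tagChunkSize points have been accumulated.
// allPoints is assumed to be a previously resolved list of PIPoint objects;
// _tagChunkSize and _tagCount mirror the fields referenced above, but the
// actual loader logic is assumed:
var pipoints = new List<PIPoint>();
foreach (var point in allPoints)
{
    pipoints.Add(point);
    _tagCount++;
    if (pipoints.Count >= _tagChunkSize)
    {
        // SendPointsForProcessing copies the chunk into a DataQuery and clears the list.
        SendPointsForProcessing(cancelToken, pipoints);
    }
}
if (pipoints.Count > 0)
{
    SendPointsForProcessing(cancelToken, pipoints); // flush the final partial chunk
}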