/// <summary>
/// Merges another combiner into this one: folds in its internal sample, keeps the
/// larger of the two machine counts, and unions the covered time ranges.
/// </summary>
/// <param name="combiner">The combiner to absorb; must hold the same sample type.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="combiner"/> is null.</exception>
/// <exception cref="InvalidDataException">Thrown when the sample types do not match.</exception>
public void Merge(SampleCombiner combiner)
{
    if (combiner == null)
    {
        throw new ArgumentNullException(nameof(combiner));
    }

    if (combiner.internalSample.SampleType != this.internalSample.SampleType)
    {
        throw new InvalidDataException(
            $"Sample Type: {combiner.internalSample.SampleType} does not match combined sample type: {this.internalSample.SampleType}");
    }

    this.AddSample(combiner.internalSample);

    // Machine counts are not additive here: the same machines may have contributed to
    // both combiners, so take the max rather than the sum.
    this.internalSample.MachineCount =
        Math.Max(this.internalSample.MachineCount, combiner.internalSample.MachineCount);

    // merge the time ranges as well
    this.hasUpdatedTimeRange = true;
    this.timeRange = TimeRange.Merge(this.timeRange, combiner.timeRange);
}
/// <summary>
/// Creates a combiner seeded with the first sample in the sequence and adds the rest.
/// </summary>
/// <param name="samples">Samples to combine; must not be null.</param>
/// <returns>A combiner over all samples, or null when the sequence is empty.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="samples"/> is null.</exception>
public static SampleCombiner Create(IEnumerable<DataSample> samples)
{
    if (samples == null)
    {
        throw new ArgumentNullException(nameof(samples));
    }

    SampleCombiner combiner = null;
    foreach (var sample in samples)
    {
        if (combiner == null)
        {
            combiner = new SampleCombiner(sample);
        }
        else
        {
            combiner.AddSample(sample);
        }
    }

    return combiner;
}
/// <summary>
/// Add all samples from this query response. Combine overlapping time buckets as they are encountered.
/// This method IS threadsafe against itself.
/// </summary>
/// <param name="response">Response whose request details and samples are folded into this aggregator.</param>
public void AddMachineResponse(CounterQueryResponse response)
{
    if (response == null)
    {
        throw new ArgumentNullException("response");
    }

    if (response.RequestDetails != null)
    {
        // requestDetails is shared across concurrent calls; guard the append.
        lock (this.requestDetails)
        {
            this.requestDetails.AddRange(response.RequestDetails);
        }
    }

    if (response.Samples == null)
    {
        return;
    }

    foreach (var sample in response.Samples)
    {
        var baseSample = sample;
        var hashKey = this.dimensionSet.CreateKey(sample.Dimensions);
        var sampleTimeRange = CreateTimeRange(baseSample);
        // Keys of buckets that get merged into this sample's range; removed after the walk
        // so we never mutate the list while enumerating it.
        var rangesToRemove = new List<TimeRange>();
        SampleCombiner combiner = null;
        SortedList<TimeRange, SampleCombiner> aggregatedBuckets;

        // grab the appropriate bucket list; the dictionary lock is held only for the
        // lookup/insert, then released before the per-bucket-list work below.
        lock (this.dataDictionary)
        {
            if (!this.dataDictionary.TryGetValue(hashKey, out aggregatedBuckets))
            {
                aggregatedBuckets = new SortedList<TimeRange, SampleCombiner>();
                this.dataDictionary.Add(hashKey, aggregatedBuckets);
            }
        }

        lock (aggregatedBuckets)
        {
            // The buckets are ordered by start time - thus it is safe to merge and continue to
            // walk forward as we cannot ever merge which requires a backwards reprocess
            foreach (var bucket in aggregatedBuckets)
            {
                var existingRange = bucket.Key;

                // did we get past the end of the range we are interested in?
                if (existingRange.Start > sampleTimeRange.End)
                {
                    break;
                }

                if (existingRange.IntersectsWith(sampleTimeRange))
                {
                    // Grow the working range to cover the overlapping bucket, and mark
                    // that bucket's key for removal after the walk.
                    sampleTimeRange = TimeRange.Merge(sampleTimeRange, existingRange);
                    rangesToRemove.Add(bucket.Key);

                    // if this is the first merge, just add this sample
                    if (combiner == null)
                    {
                        combiner = bucket.Value;
                        combiner.AddSample(sample);
                        combiner.MachineCount += SampleCombiner.ExtractMachineCount(sample);
                    }
                    else
                    {
                        // this is a N-merge (N > 1), thus sample is already accounted for in the
                        // combiner. Merge the values
                        combiner.Merge(bucket.Value);
                    }
                }
            }

            // if there was no merge, then create a new bucket with this sample
            if (combiner == null)
            {
                combiner = new SampleCombiner(sample)
                {
                    MachineCount = SampleCombiner.ExtractMachineCount(sample)
                };
            }

            // remove the merged items and add the new item
            foreach (var range in rangesToRemove)
            {
                aggregatedBuckets.Remove(range);
            }

            aggregatedBuckets.Add(sampleTimeRange, combiner);
        }
    }
}
/// <summary>
/// Builds a combiner from a sequence of samples: the first sample seeds the combiner
/// and every subsequent sample is added to it.
/// </summary>
/// <param name="samples">The samples to fold together; must not be null.</param>
/// <returns>The resulting combiner, or null if the sequence yields no samples.</returns>
public static SampleCombiner Create(IEnumerable<DataSample> samples)
{
    if (samples == null)
    {
        throw new ArgumentNullException("samples");
    }

    SampleCombiner result = null;
    foreach (var current in samples)
    {
        if (result != null)
        {
            result.AddSample(current);
        }
        else
        {
            result = new SampleCombiner(current);
        }
    }

    return result;
}
/// <summary>
/// Add all samples from this query response. Combine overlapping time buckets as they are encountered.
/// This method IS threadsafe against itself.
/// </summary>
/// <param name="response">Response whose request details and samples are folded into this aggregator.</param>
public void AddMachineResponse(CounterQueryResponse response)
{
    if (response == null)
    {
        throw new ArgumentNullException("response");
    }

    if (response.RequestDetails != null)
    {
        // requestDetails is shared across concurrent calls; guard the append.
        lock (this.requestDetails)
        {
            this.requestDetails.AddRange(response.RequestDetails);
        }
    }

    if (response.Samples == null)
    {
        return;
    }

    foreach (var sample in response.Samples)
    {
        var baseSample = sample;
        var hashKey = this.dimensionSet.CreateKey(sample.Dimensions);
        var sampleTimeRange = CreateTimeRange(baseSample);
        // Keys of buckets that get merged into this sample's range; removed after the walk
        // so we never mutate the list while enumerating it.
        var rangesToRemove = new List<TimeRange>();
        SampleCombiner combiner = null;
        SortedList<TimeRange, SampleCombiner> aggregatedBuckets;

        // grab the appropriate bucket list; the dictionary lock is held only for the
        // lookup/insert, then released before the per-bucket-list work below.
        lock (this.dataDictionary)
        {
            if (!this.dataDictionary.TryGetValue(hashKey, out aggregatedBuckets))
            {
                aggregatedBuckets = new SortedList<TimeRange, SampleCombiner>();
                this.dataDictionary.Add(hashKey, aggregatedBuckets);
            }
        }

        lock (aggregatedBuckets)
        {
            // The buckets are ordered by start time - thus it is safe to merge and continue to
            // walk forward as we cannot ever merge which requires a backwards reprocess
            foreach (var bucket in aggregatedBuckets)
            {
                var existingRange = bucket.Key;

                // did we get past the end of the range we are interested in?
                if (existingRange.Start > sampleTimeRange.End)
                {
                    break;
                }

                if (existingRange.IntersectsWith(sampleTimeRange))
                {
                    // Grow the working range to cover the overlapping bucket, and mark
                    // that bucket's key for removal after the walk.
                    sampleTimeRange = TimeRange.Merge(sampleTimeRange, existingRange);
                    rangesToRemove.Add(bucket.Key);

                    // if this is the first merge, just add this sample
                    if (combiner == null)
                    {
                        combiner = bucket.Value;
                        combiner.AddSample(sample);
                        combiner.MachineCount += SampleCombiner.ExtractMachineCount(sample);
                    }
                    else
                    {
                        // this is a N-merge (N > 1), thus sample is already accounted for in the
                        // combiner. Merge the values
                        combiner.Merge(bucket.Value);
                    }
                }
            }

            // if there was no merge, then create a new bucket with this sample
            if (combiner == null)
            {
                combiner = new SampleCombiner(sample)
                {
                    MachineCount = SampleCombiner.ExtractMachineCount(sample)
                };
            }

            // remove the merged items and add the new item
            foreach (var range in rangesToRemove)
            {
                aggregatedBuckets.Remove(range);
            }

            aggregatedBuckets.Add(sampleTimeRange, combiner);
        }
    }
}