/// <summary>
/// Attempts to release the data of the oldest loaded bucket in order to relieve
/// memory pressure.
/// </summary>
/// <param name="releaseLatest">If true the newest bucket (index 0) is also a candidate for release.</param>
/// <returns>True if a loaded bucket was found and its data released.</returns>
public bool ReleaseOldestData(bool releaseLatest)
{
    DataBucket<TInternal> candidate = null;

    // Under heavy stress we want to unblock even if we can't find data to release.
    using (SharedLock.OpenShared(this.dataLock))
    {
        // Walk from the oldest entry backwards; stop before index 0 unless the
        // caller explicitly allowed releasing the latest bucket.
        var lowerBound = releaseLatest ? 0 : 1;
        var i = this.data.Count - 1;
        while (i >= lowerBound)
        {
            var bucket = this.data.Values[i];
            if (bucket.Loaded)
            {
                candidate = bucket;
                break;
            }

            --i;
        }
    }

    if (candidate == null)
    {
        return false;
    }

    // Release happens outside the shared lock.
    candidate.ReleaseData();
    return true;
}
/// <summary>
/// Applies the result of a completed aggregation run to the bucket covering
/// exactly the [start, end) window. Unknown or sealed buckets are logged and
/// skipped; sources are marked unavailable or collected for the data merge.
/// </summary>
/// <param name="aggregator">The aggregator whose results should be applied. Must be a
/// <see cref="PersistedDataAggregator{TInternal}"/> for this data set's internal type.</param>
/// <param name="start">Start time of the bucket to update.</param>
/// <param name="end">End time of the bucket to update.</param>
/// <exception cref="ArgumentException">Thrown if the aggregator is not of the expected
/// type, or if a source reports an unexpected status.</exception>
public void UpdateFromAggregator(IPersistedDataAggregator aggregator, DateTimeOffset start, DateTimeOffset end)
{
    // FIX: the original unchecked 'as' cast was dereferenced below (agg.Sources)
    // and would surface a wrong aggregator type as a NullReferenceException.
    // Fail fast with a descriptive exception instead.
    var agg = aggregator as PersistedDataAggregator<TInternal>;
    if (agg == null)
    {
        throw new ArgumentException("Aggregator is not of the expected type for this data set", "aggregator");
    }

    // Locate the bucket matching the aggregated window under a shared lock.
    DataBucket<TInternal> updateBucket = null;
    using (SharedLock.OpenShared(this.dataLock))
    {
        foreach (var bucket in this.data.Values)
        {
            if (bucket.StartTime == start && bucket.EndTime == end)
            {
                updateBucket = bucket;
                break;
            }
        }
    }

    if (updateBucket == null)
    {
        Events.Write.UnknownBucketCannotBeUpdated(this.Name, start, end);
        return;
    }

    if (updateBucket.Sealed)
    {
        Events.Write.SealedBucketCannotBeUpdated(this.Name, start, end);
        return;
    }

    // Partition sources: unavailable ones are recorded on the bucket,
    // available ones feed the data merge below. Unknown status is a no-op.
    var availableSources = new List<string>();
    foreach (var source in agg.Sources)
    {
        switch (source.Status)
        {
        case PersistedDataSourceStatus.Unavailable:
            updateBucket.SetSourceUnavailable(source.Name);
            break;

        case PersistedDataSourceStatus.Available:
            availableSources.Add(source.Name);
            break;

        case PersistedDataSourceStatus.Unknown:
            break;

        default:
            throw new ArgumentException("Unexpected source status " + source.Status, "aggregator");
        }
    }

    if (availableSources.Count > 0)
    {
        var aggregateData = agg.AcquireData();
        updateBucket.UpdateDataFromSources(availableSources, agg.DimensionSet, aggregateData);
    }

    // XXX: Dump data back to disk for now (eases memory pressure)
    updateBucket.ReleaseData();
}
/// <summary>
/// Merges a set of buckets into a single rolled-up bucket spanning
/// <paramref name="rolledUpTimeSpanInTicks"/>, swaps it into the collection,
/// and permanently deletes the originals.
/// </summary>
/// <param name="buckets">The buckets to compact.</param>
/// <param name="newBucketTimeStamp">Start timestamp for the rolled-up bucket (converted to local time).</param>
/// <param name="rolledUpTimeSpanInTicks">Duration in ticks covered by the rolled-up bucket.</param>
/// <exception cref="InvalidOperationException">Thrown if any source bucket is no longer
/// present in the collection (i.e. it was already compacted).</exception>
private void CompactBuckets(IList<DataBucket<TInternal>> buckets, DateTime newBucketTimeStamp, long rolledUpTimeSpanInTicks)
{
    // Release the merged bucket afterwards only if none of its sources were loaded.
    var releaseAfterMerge = true;

    // Build and seal the merged bucket before taking the exclusive lock.
    var mergedBucket = new DataBucket<TInternal>(
        buckets,
        this.CreateOptimizedDimensionSet(),
        newBucketTimeStamp.ToLocalTime(),
        rolledUpTimeSpanInTicks,
        this.storagePath,
        this.properties.MemoryStreamManager);
    mergedBucket.Seal();

    // Swap: remove every source bucket, then insert the merged one.
    using (SharedLock.OpenExclusive(this.dataLock))
    {
        foreach (var source in buckets)
        {
            if (source.Loaded)
            {
                releaseAfterMerge = false;
            }

            if (!this.data.Remove(source.StartTime))
            {
                throw new InvalidOperationException(
                    $"Double compaction attempted on same bucket: {this.Name} {source.StartTime}");
            }
        }

        this.data.Add(mergedBucket.StartTime, mergedBucket);
    }

    // Destroy the source buckets outside the lock.
    foreach (var source in buckets)
    {
        source.PermanentDelete();
        source.Dispose();
    }

    if (releaseAfterMerge)
    {
        mergedBucket.ReleaseData();
    }
}