/// <summary>
/// Releases the given value to the pool.
/// The value is 'freed' instead of being released to the pool if any of the
/// following hold:
/// - The pool currently exceeds its max size, OR
/// - The value does not map to a bucket that's currently maintained by the pool, OR
/// - The bucket for the value exceeds its maxLength, OR
/// - The value is not recognized by the pool.
/// </summary>
/// <param name="value">
/// The value to release to the pool.
/// </param>
public void Release(T value)
{
    Preconditions.CheckNotNull(value);

    int bucketedSize = GetBucketedSizeForValue(value);
    int sizeInBytes = GetSizeInBytes(bucketedSize);

    lock (_poolGate)
    {
        Bucket<T> bucket = GetBucket(bucketedSize);
        if (!InUseValues.Remove(value))
        {
            // This value was not 'known' to the pool (i.e. it was not allocated
            // via the pool). Something is going wrong, so free the value and
            // report a soft error.
#if DEBUG_MEMORY_POOL
            Debug.WriteLine($"release (free, value unrecognized) (object, size) = ({ value.GetHashCode() }, { bucketedSize })");
#endif // DEBUG_MEMORY_POOL

            Free(value);
            _poolStatsTracker.OnFree(sizeInBytes);
        }
        else
        {
            // Free the value if:
            // - The pool exceeds its max size (soft cap), OR
            // - There is no bucket for this value, OR
            // - There is a bucket for this value, but it has exceeded its maxLength, OR
            // - The value is not reusable.
            // Note that when the pool's max size soft cap is exceeded it may not
            // always be best to free *this* particular value, but we accept that.
            if (bucket == null ||
                bucket.IsMaxLengthExceeded() ||
                IsMaxSizeSoftCapExceeded() ||
                !IsReusable(value))
            {
                if (bucket != null)
                {
                    bucket.DecrementInUseCount();
                }

#if DEBUG_MEMORY_POOL
                Debug.WriteLine($"release (free) (object, size) = ({ value.GetHashCode() }, { bucketedSize })");
#endif // DEBUG_MEMORY_POOL

                Free(value);
                _usedCounter.Decrement(sizeInBytes);
                _poolStatsTracker.OnFree(sizeInBytes);
            }
            else
            {
                bucket.Release(value);
                _freeCounter.Increment(sizeInBytes);
                _usedCounter.Decrement(sizeInBytes);
                _poolStatsTracker.OnValueRelease(sizeInBytes);

#if DEBUG_MEMORY_POOL
                Debug.WriteLine($"release (reuse) (object, size) = ({ value.GetHashCode() }, { bucketedSize })");
#endif // DEBUG_MEMORY_POOL
            }
        }

        LogStats();
    }
}
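// Illustrative usage sketch (not part of the original source; CreateBytePool and
// the byte[] pool type are hypothetical). A caller is expected to hand back every
// value it obtained from Get via Release; values the pool never handed out are
// freed and reported as a soft error rather than being pooled.
//
//     var pool = CreateBytePool();          // some pool of byte[] buffers
//     byte[] buffer = pool.Get(4096);
//     try
//     {
//         // ... use buffer ...
//     }
//     finally
//     {
//         pool.Release(buffer);             // reused via its bucket, or freed
//     }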
/// <summary>
/// Gets a new 'value' from the pool, if one is available.
/// Allocates a new value otherwise.
/// If we need to perform an allocation:
/// - If the pool size exceeds the max-size soft cap, then we attempt to trim
///   the free portion of the pool.
/// - If the pool size still exceeds the max-size hard cap (after trimming),
///   then we throw a <see cref="PoolSizeViolationException"/>.
/// Bucket length constraints are not considered in this function.
/// </summary>
/// <param name="size">
/// The logical size to allocate.
/// </param>
/// <returns>A new value.</returns>
/// <exception cref="InvalidSizeException">
/// If the size of the value doesn't match the pool's constraints.
/// </exception>
public T Get(int size)
{
    EnsurePoolSizeInvariant();

    int bucketedSize = GetBucketedSize(size);
    int sizeInBytes = -1;

    lock (_poolGate)
    {
        Bucket<T> bucket = GetBucket(bucketedSize);
        if (bucket != null)
        {
            // Find an existing value that we can reuse
            T val = bucket.Get();
            if (val != null)
            {
                Preconditions.CheckState(InUseValues.Add(val));

                // It is possible that we got a 'larger' value than we asked for,
                // so recompute the size in bytes here.
                bucketedSize = GetBucketedSizeForValue(val);
                sizeInBytes = GetSizeInBytes(bucketedSize);
                _usedCounter.Increment(sizeInBytes);
                _freeCounter.Decrement(sizeInBytes);
                _poolStatsTracker.OnValueReuse(sizeInBytes);
                LogStats();

#if DEBUG_MEMORY_POOL
                Debug.WriteLine($"get (reuse) (object, size) = ({ val.GetHashCode() }, { bucketedSize })");
#endif // DEBUG_MEMORY_POOL

                return val;
            }

            // Fall through
        }

        // Check to see if we can allocate a value of the given size without
        // exceeding the hard cap
        sizeInBytes = GetSizeInBytes(bucketedSize);
        if (!CanAllocate(sizeInBytes))
        {
            throw new PoolSizeViolationException(
                _poolParams.MaxSizeHardCap,
                _usedCounter.NumBytes,
                _freeCounter.NumBytes,
                sizeInBytes);
        }

        // Optimistically assume that allocation succeeds - if it fails, we
        // need to undo those changes
        _usedCounter.Increment(sizeInBytes);
        if (bucket != null)
        {
            bucket.IncrementInUseCount();
        }
    }

    T value = default(T);

    try
    {
        // Allocate the value outside the synchronized block, because it can be
        // pretty expensive. We could have done the allocation inside the
        // synchronized block, but that would have blocked other operations on
        // the pool.
        value = Alloc(bucketedSize);
    }
    catch (Exception)
    {
        // The assumption we made above is not valid - allocation failed.
        // We need to fix the internal counters.
        lock (_poolGate)
        {
            _usedCounter.Decrement(sizeInBytes);
            Bucket<T> bucket = GetBucket(bucketedSize);
            if (bucket != null)
            {
                bucket.DecrementInUseCount();
            }
        }

        throw;
    }

    // NOTE: We checked for hard caps earlier, and then did the alloc above.
    // Now we need to update state - but it is possible that a concurrent
    // thread did a similar operation - with the result being that we're now
    // over the hard cap. We are willing to live with that situation -
    // especially since the trim call below should be able to trim back
    // memory usage.
    lock (_poolGate)
    {
        Preconditions.CheckState(InUseValues.Add(value));

        // If we're over the pool's max size, try to trim the pool appropriately
        TrimToSoftCap();
        _poolStatsTracker.OnAlloc(sizeInBytes);
        LogStats();

#if DEBUG_MEMORY_POOL
        Debug.WriteLine($"get (alloc) (object, size) = ({ value.GetHashCode() }, { bucketedSize })");
#endif // DEBUG_MEMORY_POOL
    }

    return value;
}
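// Illustrative usage sketch (not part of the original source; 'pool' and
// 'requestedSize' are hypothetical). Get(size) throws PoolSizeViolationException
// when the request cannot be satisfied under the hard cap even after trimming,
// so callers that can degrade gracefully may want to catch it and retry with a
// smaller size or fail the current operation.
//
//     try
//     {
//         byte[] buffer = pool.Get(requestedSize);
//         // ... use buffer, then pool.Release(buffer) ...
//     }
//     catch (PoolSizeViolationException)
//     {
//         // Fall back: request a smaller buffer or abort this operation.
//     }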