Example #1
            public void Resize()
            {
                Debug.Assert(Monitor.IsEntered(_owner._lock));

                int newSize = HashHelpers.GetPrime(_buckets.Length * 2);

#if DEBUG
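                // Debug-only override: grow by just a few slots so the resize path runs far more often under test (assumed intent).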
                newSize = _buckets.Length + 3;
#endif
                if (newSize <= _nextFreeEntry)
                {
                    throw new OutOfMemoryException();
                }

                Entry[] newEntries = new Entry[newSize];
                int[]   newBuckets = new int[newSize];
                for (int i = 0; i < newSize; i++)
                {
                    newBuckets[i] = -1;
                }

                // Note that we walk the bucket chains rather than iterating over _entries. This is because we allow for the possibility
                // of abandoned entries (with undefined contents) if a thread is killed between allocating an entry and linking it onto the
                // bucket chain.
                int newNextFreeEntry = 0;
                for (int bucket = 0; bucket < _buckets.Length; bucket++)
                {
                    for (int entry = _buckets[bucket]; entry != -1; entry = _entries[entry]._next)
                    {
                        newEntries[newNextFreeEntry]._key      = _entries[entry]._key;
                        newEntries[newNextFreeEntry]._value    = _entries[entry]._value;
                        newEntries[newNextFreeEntry]._hashCode = _entries[entry]._hashCode;
                        int newBucket = ComputeBucket(newEntries[newNextFreeEntry]._hashCode, newSize);
                        newEntries[newNextFreeEntry]._next = newBuckets[newBucket];
                        newBuckets[newBucket] = newNextFreeEntry;
                        newNextFreeEntry++;
                    }
                }

                // The assertion is "<=" rather than "==" because we allow an entry to "leak" until the next resize if
                // a thread died between the time we allocated the entry and the time we linked it onto the bucket chain.
                Debug.Assert(newNextFreeEntry <= _nextFreeEntry);

                // The next line atomically installs the resize. If this thread is killed before this point,
                // the table remains full and the next thread that attempts an add will have to redo the resize.
                _owner._container = new Container(_owner, newBuckets, newEntries, newNextFreeEntry);

                _owner._container.VerifyUnifierConsistency();
            }
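
The single assignment to _owner._container above is what makes the resize atomic for concurrent readers: the new buckets and entries are fully built first, then published with one reference write. The following is a minimal sketch of that publish pattern, assuming simplified stand-in types (ContainerSketch, OwnerSketch, and their members are illustrative names, not the API of the class shown above):

internal sealed class ContainerSketch
{
    // Immutable-after-construction snapshot of the table's arrays.
    public readonly int[] Buckets;
    public readonly int   Count;

    public ContainerSketch(int[] buckets, int count)
    {
        Buckets = buckets;
        Count   = count;
    }
}

internal sealed class OwnerSketch
{
    // Readers never lock; they only ever read this single reference.
    private volatile ContainerSketch _container = new ContainerSketch(new int[] { -1 }, 0);

    // Writer side (called under the owner's lock): build the replacement
    // completely, then publish it with one reference write.
    public void InstallResize(int[] newBuckets, int newCount)
    {
        _container = new ContainerSketch(newBuckets, newCount);
    }

    // Reader side: capture the reference once and use only that snapshot,
    // so a concurrent resize can never hand back a half-built table.
    public int SnapshotCount()
    {
        ContainerSketch snapshot = _container;
        return snapshot.Count;
    }
}
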
Example #2
            public void Resize()
            {
                Debug.Assert(_owner._lock.IsAcquired);

                // Before we actually grow the size of the table, figure out how much we can recover just by dropping entries with
                // expired weak references.
                int estimatedNumLiveEntries = 0;

                for (int bucket = 0; bucket < _buckets.Length; bucket++)
                {
                    for (int entry = _buckets[bucket]; entry != -1; entry = _entries[entry]._next)
                    {
                        // Check if the weak reference has expired.
                        V value;
                        if (_entries[entry]._weakValue.TryGetTarget(out value))
                        {
                            estimatedNumLiveEntries++;
                        }
                    }
                }
                double estimatedLivePercentage = ((double)estimatedNumLiveEntries) / ((double)(_entries.Length));
                int    newSize;

                if (estimatedLivePercentage < _growThreshold && (_entries.Length - estimatedNumLiveEntries) > _initialCapacity)
                {
                    newSize = _buckets.Length;
                }
                else
                {
                    newSize = HashHelpers.GetPrime(_buckets.Length * 2);
#if DEBUG
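                    // Debug-only override: grow by just a few slots so the resize path runs far more often under test (assumed intent).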
                    newSize = _buckets.Length + 3;
#endif
                    if (newSize <= _nextFreeEntry)
                    {
                        throw new OutOfMemoryException();
                    }
                }
                Entry[] newEntries = new Entry[newSize];
                int[]   newBuckets = new int[newSize];
                for (int i = 0; i < newSize; i++)
                {
                    newBuckets[i] = -1;
                }

                // Note that we walk the bucket chains rather than iterating over _entries. This is because we allow for the possibility
                // of abandoned entries (with undefined contents) if a thread is killed between allocating an entry and linking it onto the
                // bucket chain.
                int newNextFreeEntry = 0;
                for (int bucket = 0; bucket < _buckets.Length; bucket++)
                {
                    for (int entry = _buckets[bucket]; entry != -1; entry = _entries[entry]._next)
                    {
                        // Check if the weak reference has expired. If it has, this is where we drop the entry altogether.
                        V value;
                        if (_entries[entry]._weakValue.TryGetTarget(out value))
                        {
                            newEntries[newNextFreeEntry]._weakValue = _entries[entry]._weakValue;
                            newEntries[newNextFreeEntry]._hashCode  = _entries[entry]._hashCode;
                            int newBucket = ComputeBucket(newEntries[newNextFreeEntry]._hashCode, newSize);
                            newEntries[newNextFreeEntry]._next = newBuckets[newBucket];
                            newBuckets[newBucket] = newNextFreeEntry;
                            newNextFreeEntry++;
                        }
                    }
                }

                // The assertion is "<=" rather than "==" because we allow an entry to "leak" until the next resize if
                // a thread died between the time we allocated the entry and the time we linked it onto the bucket chain.
                // In addition, we don't bother copying entries whose weak reference has expired.
                Debug.Assert(newNextFreeEntry <= _nextFreeEntry);

                // The next line atomically installs the resize. If this thread is killed before this point,
                // the table remains full and the next thread that attempts an add will have to redo the resize.
                _owner._container = new Container(_owner, newBuckets, newEntries, newNextFreeEntry);

                _owner._container.VerifyUnifierConsistency();
            }
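
The second variant first estimates how many entries are still alive; if enough weak references have expired, it keeps the current size and lets the rehash loop drop the dead entries instead of growing. Below is a minimal, self-contained sketch of that decision, using WeakReference<T>.TryGetTarget the same way; CountLive, ChooseNewSize, and the growThreshold/initialCapacity parameters are illustrative stand-ins for the example's _growThreshold and _initialCapacity fields, not the real implementation:

using System;

internal static class WeakPruneSketch
{
    // Counts entries whose weak reference still resolves to a live object.
    internal static int CountLive<V>(WeakReference<V>[] weakValues) where V : class
    {
        int live = 0;
        foreach (WeakReference<V> weak in weakValues)
        {
            V target;
            if (weak != null && weak.TryGetTarget(out target))
                live++;
        }
        return live;
    }

    // Mirrors the decision above: if the live fraction is low and enough dead
    // entries can be reclaimed, keep the current size (compact only); otherwise
    // double it (the example picks the next prime instead).
    internal static int ChooseNewSize(int currentSize, int liveCount, int totalEntries,
                                      double growThreshold, int initialCapacity)
    {
        double livePercentage = (double)liveCount / totalEntries;
        bool enoughDeadToReclaim = (totalEntries - liveCount) > initialCapacity;

        if (livePercentage < growThreshold && enoughDeadToReclaim)
            return currentSize;
        return checked(currentSize * 2);
    }
}
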