public IHttpActionResult Putbucket(int id, bucket bucket)
{
    if (!ModelState.IsValid)
    {
        return BadRequest(ModelState);
    }

    if (id != bucket.id)
    {
        return BadRequest();
    }

    db.Entry(bucket).State = EntityState.Modified;

    try
    {
        db.SaveChanges();
    }
    catch (DbUpdateConcurrencyException)
    {
        if (!bucketExists(id))
        {
            return NotFound();
        }
        else
        {
            throw;
        }
    }

    return StatusCode(HttpStatusCode.NoContent);
}
public static void spill(bucket bucket)
{
    int spill = bucket.Content - bucket.Capacety; // "Capacety" matches the property name as declared on the bucket class
    bucket.Content = bucket.Capacety;
    Console.WriteLine("bucket has overflown, " + spill + " liters have been spilled");
}
public void Put(Datum datum)
{
    var tp = datum.GetType();
    m_LastTraffic = App.TimeSource.Now;

    lock (m_Data)
    {
        sources src;
        if (!m_Data.TryGetValue(tp, out src))
        {
            src = new sources();
            m_Data[tp] = src;
        }

        bucket b;
        if (!src.TryGetValue(datum.Source, out b))
        {
            b = new bucket();
            b.m_Data = new Datum[BUFFER_SIZE];
            src[datum.Source] = b;
        }

        b.m_Data[b.m_Index] = datum;
        b.m_Index++;
        if (b.m_Index >= b.m_Data.Length)
        {
            b.m_Index = 0; // wrap around: each bucket is a fixed-size ring buffer
        }
    }
}
int AddBucketSet()
{
    bucket[] newbuckets = new bucket[GetCap() + primes[size]];
    Array.Copy(buckets, newbuckets, buckets.Length);
    buckets = newbuckets;
    return ++size;
}
private void rehash(int newsize, bool forceNewHashCode)
{
    occupancy = 0;
    bucket[] newBuckets = new bucket[newsize];

    int nb;
    for (nb = 0; nb < buckets.Length; nb++)
    {
        bucket oldb = buckets[nb];
        if ((oldb.key != null) && (oldb.key != buckets))
        {
            int hashcode = ((forceNewHashCode ? GetHash(oldb.key) : oldb.hash_coll) & 0x7FFFFFFF);
            putEntry(newBuckets, oldb.key, oldb.val, hashcode);
        }
    }

    isWriterInProgress = true;
    buckets = newBuckets;
    loadsize = (int)(loadFactor * newsize);
    UpdateVersion();
    isWriterInProgress = false;
}
private void rehash(int newsize)
{
    // reset occupancy
    occupancy = 0;

    // Don't replace any internal state until we've finished adding to the new
    // bucket[]. This serves two purposes:
    //   1) Allow concurrent readers to see valid hashtable contents at all times
    //   2) Protect against an OutOfMemoryException while allocating this new
    //      bucket[].
    bucket[] newBuckets = new bucket[newsize];

    // rehash table into new buckets
    int nb;
    for (nb = 0; nb < buckets.Length; nb++)
    {
        bucket oldb = buckets[nb];
        if (oldb.val != null)
        {
            putEntry(newBuckets, oldb.key, oldb.val, oldb.hash_coll & 0x7FFFFFFF);
        }
    }

    // New bucket[] is good to go - replace buckets and other internal state.
    version++;
    buckets = newBuckets;
    loadsize = this.loadFactorPerc * newsize / 100;
    if (loadsize >= newsize)
    {
        loadsize = newsize - 1;
    }
}
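// A minimal standalone sketch (not part of the original source) of the hash_coll
// convention the rehash/putEntry methods in this listing rely on: the low 31 bits
// of hash_coll store the entry's hash code, and the sign bit doubles as a
// "collision" flag marking slots that a probe chain has passed through.
using System;

class HashCollSketch
{
    static void Main()
    {
        int hashColl = 123456;                    // stored hash, collision bit clear
        hashColl |= unchecked((int)0x80000000);   // a later probe collided here: set the sign bit
        Console.WriteLine(hashColl < 0);          // True - the flag is just the sign
        Console.WriteLine(hashColl & 0x7FFFFFFF); // 123456 - masking recovers the original hash
    }
}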
public IHttpActionResult Getbucket(int id)
{
    bucket bucket = db.buckets.Find(id);
    if (bucket == null)
    {
        return NotFound();
    }

    return Ok(bucket);
}
private void ctor(IEqualityComparer<T> comparer = null)
{
    m_Comparer = comparer ?? EqualityComparer<T>.Default;
    m_Data = new bucket[BUCKET_COUNT];
    for (var i = 0; i < m_Data.Length; i++)
    {
        m_Data[i] = new bucket(m_Comparer);
    }

    Task.Delay(VISIT_GRANULARITY_MS).ContinueWith(_ => visit());
}
public CappedSet(IEqualityComparer<T> comparer = null)
{
    m_Comparer = comparer ?? EqualityComparer<T>.Default;
    m_Data = new bucket[BUCKET_COUNT];
    for (var i = 0; i < m_Data.Length; i++)
    {
        m_Data[i] = new bucket(m_Comparer);
    }

    Task.Delay(THREAD_GRANULARITY_MS).ContinueWith(_ => visit());
}
public IHttpActionResult Postbucket(bucket bucket)
{
    if (!ModelState.IsValid)
    {
        return BadRequest(ModelState);
    }

    db.buckets.Add(bucket);
    db.SaveChanges();

    return CreatedAtRoute("DefaultApi", new { id = bucket.id }, bucket);
}
public CompactSetOfLong(IEnumerable<long> items)
{
    var list = items.ToList();
    if (list.Count == 0)
    {
        return;
    }

    list.Sort();
    var perBucket = (int)Math.Ceiling(Math.Sqrt(list.Count));
    if (perBucket < 1)
    {
        perBucket = 1;
    }

    bucket curBucket = null;
    long prevItem = long.MaxValue;
    foreach (var item in list)
    {
        if (curBucket != null && item == prevItem)
        {
            continue; // already added
        }
        prevItem = item;

        // Do we need a new bucket?
        if (curBucket == null || curBucket.Count == perBucket || item > curBucket.To)
        {
            // Finalise previous bucket
            if (_buckets.Count > 0)
            {
                _buckets[_buckets.Count - 1].To = Math.Min(_buckets[_buckets.Count - 1].To, item - 1);
            }

            // Start a new bucket
            curBucket = new bucket
            {
                From = item,
                To = item + uint.MaxValue,
                Count = 0,
                Values = new uint[perBucket],
            };
            _buckets.Add(curBucket);
        }

        // Add item
        curBucket.Values[curBucket.Count] = (uint)(item - curBucket.From);
        curBucket.Count++;
        Count++;
    }

#if DEBUG
    checkConsistency();
#endif
}
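// A minimal sketch (hypothetical values, not from the original source) of the
// storage scheme CompactSetOfLong uses above: each bucket covers a range no wider
// than uint.MaxValue and stores items as 32-bit offsets from its From bound, so a
// 64-bit value costs only 4 bytes inside a bucket.
using System;

class DeltaEncodingSketch
{
    static void Main()
    {
        long from = 5_000_000_000L;           // bucket.From
        long item = 5_000_001_234L;           // must satisfy from <= item <= from + uint.MaxValue
        uint stored = (uint)(item - from);    // what goes into bucket.Values
        long recovered = from + stored;       // what Add/lookup reconstructs
        Console.WriteLine(recovered == item); // True
    }
}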
public static void Main()
{
    int pos;
    bucket b;
    b = new bucket();
    b.sum(100);
    b.add(200);
    b.add(300);
    b.add(400);
    b.add(500);
    pos = b.find(700);
    //System.Console.WriteLine("{0}",pos);
    b.show(pos);
}
public IHttpActionResult Deletebucket(int id)
{
    bucket bucket = db.buckets.Find(id);
    var products = db.products.Where(x => x.bucketId == id);
    if (bucket == null)
    {
        return NotFound();
    }

    db.products.RemoveRange(products); // delete dependent products first so the foreign key is satisfied
    db.buckets.Remove(bucket);
    db.SaveChanges();

    return Ok(bucket);
}
public static symbol make_symbol(byte[] name)
{
    lock (table)
    {
        int hash_number = foreign.get_hash_power_number(name, 12);
        bucket _bucket = table[hash_number];

        if (_bucket == null)
        {
            symbol _symbol = new symbol(nil._nil, name);
            table[hash_number] = new bucket(_symbol, null);
            return _symbol;
        }

        for (bucket run = _bucket; run != null; run = run.next)
            if (foreign.bigloo_strcmp(run.symb.pname, name))
                return run.symb;

        symbol result = new symbol(nil._nil, name);
        table[hash_number] = new bucket(result, _bucket);
        return result;
    }
}
public bucket(symbol symb, bucket next)
{
    this.symb = symb;
    this.next = next;
}
private void putEntry(bucket[][] newBuckets, int newBucketCount, Object key, Object nvalue, int hashcode)
{
#if DEBUG
    Contract.Assert(hashcode >= 0, "hashcode >= 0"); // make sure collision bit (sign bit) wasn't set.
#endif
    uint seed = (uint)hashcode;
    uint incr = (uint)(1 + ((seed * HashPrime) % ((uint)newBucketCount - 1)));
    int bucketNumber = (int)(seed % (uint)newBucketCount);

    do
    {
        int superIndex = bucketNumber / lengthThreshold;
        int subIndex = bucketNumber % lengthThreshold;

        if ((newBuckets[superIndex][subIndex].key == null) || (newBuckets[superIndex][subIndex].key == buckets))
        {
            newBuckets[superIndex][subIndex].val = nvalue;
            newBuckets[superIndex][subIndex].key = key;
            newBuckets[superIndex][subIndex].hash_coll |= hashcode;
            return;
        }

        if (newBuckets[superIndex][subIndex].hash_coll >= 0)
        {
            newBuckets[superIndex][subIndex].hash_coll |= unchecked((int)0x80000000);
            occupancy++;
        }

        bucketNumber = (int)(((long)bucketNumber + incr) % (uint)newBucketCount);
    } while (true);
}
//
// DeserializationEvent Listener
//
public virtual void OnDeserialization(Object sender)
{
    if (buckets != null)
    {
        // Somebody had a dependency on this hashtable and fixed us up before the ObjectManager got to it.
        return;
    }

    SerializationInfo siInfo;
    HashHelpers.SerializationInfoTable.TryGetValue(this, out siInfo);
    if (siInfo == null)
    {
        throw new SerializationException(ResourceHelper.GetResourceString("Serialization_InvalidOnDeser"));
    }

    int hashsize = 0;
    IComparer c = null;
#pragma warning disable 618
    IHashCodeProvider hcp = null;
#pragma warning restore 618
    Object[] serKeys = null;
    Object[] serValues = null;

    SerializationInfoEnumerator enumerator = siInfo.GetEnumerator();
    while (enumerator.MoveNext())
    {
        switch (enumerator.Name)
        {
        case LoadFactorName:
            loadFactor = siInfo.GetSingle(LoadFactorName);
            break;

        case HashSizeName:
            hashsize = siInfo.GetInt32(HashSizeName);
            break;

        case KeyComparerName:
            _keycomparer = (IEqualityComparer)siInfo.GetValue(KeyComparerName, typeof(IEqualityComparer));
            break;

        case ComparerName:
            c = (IComparer)siInfo.GetValue(ComparerName, typeof(IComparer));
            break;

        case HashCodeProviderName:
#pragma warning disable 618
            hcp = (IHashCodeProvider)siInfo.GetValue(HashCodeProviderName, typeof(IHashCodeProvider));
#pragma warning restore 618
            break;

        case KeysName:
            serKeys = (Object[])siInfo.GetValue(KeysName, typeof(Object[]));
            break;

        case ValuesName:
            serValues = (Object[])siInfo.GetValue(ValuesName, typeof(Object[]));
            break;
        }
    }

    loadsize = (int)(loadFactor * hashsize);

    // V1 object doesn't have the _keycomparer field.
    if ((_keycomparer == null) && ((c != null) || (hcp != null)))
    {
        _keycomparer = new CompatibleComparer(c, hcp);
    }

    int superSize = hashsize / lengthThreshold + 1;
    buckets = new bucket[superSize][];
    bucketCount = hashsize;
    int counter = hashsize;
    for (int i = 0; i < buckets.Length; i++)
    {
        buckets[i] = new bucket[counter < lengthThreshold ? counter : lengthThreshold];
        counter -= lengthThreshold;
    }

    if (serKeys == null)
    {
        throw new SerializationException(ResourceHelper.GetResourceString("Serialization_MissingKeys"));
    }
    if (serValues == null)
    {
        throw new SerializationException(ResourceHelper.GetResourceString("Serialization_MissingValues"));
    }
    if (serKeys.Length != serValues.Length)
    {
        throw new SerializationException(ResourceHelper.GetResourceString("Serialization_KeyValueDifferentSizes"));
    }

    for (int i = 0; i < serKeys.Length; i++)
    {
        if (serKeys[i] == null)
        {
            throw new SerializationException(ResourceHelper.GetResourceString("Serialization_NullKey"));
        }
        Insert(serKeys[i], serValues[i], true);
    }

    version = siInfo.GetInt32(VersionName);
    HashHelpers.SerializationInfoTable.Remove(this);
}
private void putEntry(bucket[] newBuckets, Object key, Object nvalue, int hashcode)
{
    Contract.Assert(hashcode >= 0, "hashcode >= 0"); // make sure collision bit (sign bit) wasn't set.
#if FEATURE_SERIALIZATION
    Contract.Assert(m_siInfo == null, "You are accessing a Hashtable before it is fully deserialized! Don't do this. It might break some consistency guarantee in the application.");
#endif

    uint seed = (uint)hashcode;
    uint incr = (uint)(1 + (((seed >> 5) + 1) % ((uint)newBuckets.Length - 1)));
    int bucketNumber = (int)(seed % (uint)newBuckets.Length);

    do
    {
        if ((newBuckets[bucketNumber].key == null) || (newBuckets[bucketNumber].key == buckets))
        {
            newBuckets[bucketNumber].val = nvalue;
            newBuckets[bucketNumber].key = key;
            newBuckets[bucketNumber].hash_coll |= hashcode;
            return;
        }

        if (newBuckets[bucketNumber].hash_coll >= 0)
        {
            newBuckets[bucketNumber].hash_coll |= unchecked((int)0x80000000);
            occupancy++;
        }

        bucketNumber = (int)(((long)bucketNumber + incr) % (uint)newBuckets.Length);
    } while (true);
}
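// A standalone sketch (made-up hash value, not from the original source) of the
// double-hashing probe sequence putEntry computes above: start at seed % length and
// step by incr = 1 + (((seed >> 5) + 1) % (length - 1)). Because the table length is
// prime and 1 <= incr < length, the sequence visits every slot exactly once.
using System;

class ProbeSequenceSketch
{
    static void Main()
    {
        int length = 11;     // table sizes are kept prime
        uint seed = 1234567; // a sample hash code with the sign bit cleared
        uint incr = (uint)(1 + (((seed >> 5) + 1) % ((uint)length - 1)));
        int bucketNumber = (int)(seed % (uint)length);

        for (int probe = 0; probe < length; probe++)
        {
            Console.Write(bucketNumber + " "); // prints all 11 slots, each exactly once
            bucketNumber = (int)(((long)bucketNumber + incr) % (uint)length);
        }
    }
}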
// Constructs a new hashtable with the given initial capacity and load
// factor. The capacity argument serves as an indication of the
// number of entries the hashtable will contain. When this number (or an
// approximation) is known, specifying it in the constructor can eliminate
// a number of resizing operations that would otherwise be performed when
// elements are added to the hashtable. The loadFactor argument
// indicates the maximum ratio of hashtable entries to hashtable buckets.
// Smaller load factors cause faster average lookup times at the cost of
// increased memory consumption. A load factor of 1.0 generally provides
// the best balance between speed and size.
//
public HashVector(int capacity, float loadFactor)
{
    if (capacity < 0)
        throw new ArgumentOutOfRangeException("capacity", ResourceHelper.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
    if (!(loadFactor >= 0.1f && loadFactor <= 1.0f))
        throw new ArgumentOutOfRangeException("loadFactor", ResourceHelper.GetResourceString("ArgumentOutOfRange_HashtableLoadFactor"));
#if DEBUG
    Contract.EndContractBlock();
#endif

    // Based on perf work, .72 is the optimal load factor for this table.
    this.loadFactor = 0.72f * loadFactor;

    double rawsize = capacity / this.loadFactor;
    if (rawsize > Int32.MaxValue)
        throw new ArgumentException(ResourceHelper.GetResourceString("Arg_HTCapacityOverflow"));

    // Avoid awfully small sizes
    int hashsize = (rawsize > InitialSize) ? HashHelpers.GetPrime((int)rawsize) : InitialSize;

    int superSize = hashsize / lengthThreshold + 1;
    buckets = new bucket[superSize][];
    bucketCount = hashsize;
    for (int i = 0; i < superSize; i++)
    {
        buckets[i] = new bucket[hashsize < lengthThreshold ? hashsize : lengthThreshold];
        hashsize -= lengthThreshold;
    }

    loadsize = (int)(this.loadFactor * bucketCount);
    isWriterInProgress = false;
    // Based on the current algorithm, loadsize must be less than hashsize.
    // Contract.Assert(loadsize < hashsize, "Invalid hashtable loadsize!");
}
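// A quick standalone check (not from the original source) of the sizing arithmetic in
// the constructor above: the requested load factor is scaled by 0.72, and the raw
// bucket count is capacity divided by the effective load factor before being rounded
// up to a prime. The value 1597 below is an assumption standing in for whatever prime
// GetPrime would actually pick from its precomputed list.
using System;

class SizingSketch
{
    static void Main()
    {
        float loadFactor = 0.72f * 1.0f; // caller passed loadFactor = 1.0
        int capacity = 1000;
        double rawsize = capacity / loadFactor;
        Console.WriteLine(rawsize);      // ~1388.9 raw slots before prime rounding

        int hashsize = 1597;             // a prime at least as large as rawsize (assumed)
        Console.WriteLine((int)(loadFactor * hashsize)); // loadsize: resizing is deferred until ~1149 entries
    }
}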
void Start()
{
    cellQ = new List<Vector2>(400);
    qDist = new List<float>(400);
    cellSize = .2f;
    hCellNum = 25;
    vCellNum = 25;
    origin = new Vector3(-3, 3, 0);
    newPaint = false;
    newPaint2 = false;
    maxTimer = 60;
    Vector3 pos;
    //Quaternion angle = new Quaternion(0, 0, 0, 0);
    //Debug.Log(cellPrefab);

    GameObject backdrop = GameObject.CreatePrimitive(PrimitiveType.Cube);
    backdrop.transform.position = origin + 2.5f * Vector3.right + 2.5f * Vector3.down;
    backdrop.transform.localScale = new Vector3(5, 5, .01f);
    backdrop.GetComponent<Renderer>().material = new Material(Shader.Find("Sprites/Default"));
    backdrop.GetComponent<Renderer>().material.color = Color.white;
    backdrop.GetComponent<Renderer>().sortingLayerName = "Default";
    backdrop.GetComponent<Renderer>().sortingOrder = 2;

    cells = new GameObject[hCellNum, vCellNum];
    gotCell = new cell[hCellNum, vCellNum];
    for (int i = 0; i < hCellNum; i++)
    {
        for (int j = 0; j < vCellNum; j++)
        {
            pos = origin + Vector3.right * cellSize * i + Vector3.down * cellSize * j;
            //newCell = Instantiate(cellPrefab, pos, angle) as UnityEngine.Object;
            //Debug.Log(newCell.GetComponent<cell>());
            //GameObject c = GameObject.FindGameObjectWithTag("newCell");
            //Debug.Log(newCell.GetType());
            //cells[i][j] = newCell;
            newCell = GameObject.CreatePrimitive(PrimitiveType.Cube);
            newCell.transform.position = pos;
            newCell.transform.localScale = new Vector3(cellSize, cellSize, cellSize);
            newCell.GetComponent<Renderer>().material = new Material(Shader.Find("Sprites/Default"));
            //Debug.Log(newCell.GetType());
            newCell.AddComponent<cell>();
            //newCell.AddComponent<Collider2D>(); CHECK THIS LATER
            //moving to nontrigger paradigm
            //newCell.GetComponent<BoxCollider>().isTrigger = true;
            newCell.tag = "cell";
            cells[i, j] = newCell;
        }
    }

    celLox = new Vector2[hCellNum, vCellNum];
    for (int i = 0; i < hCellNum; i++)
    {
        for (int j = 0; j < vCellNum; j++)
        {
            celLox[i, j] = cells[i, j].transform.position;
            gotCell[i, j] = cells[i, j].GetComponent<cell>();
        }
    }

    //p1
    p1Brush = Instantiate(brushPrefab).GetComponent<brush>();
    //Debug.Log(p1Brush);
    p1Brush.playerNum = 1;
    p1Input = Instantiate(inputHandlerPrefab).GetComponent<inputHandler>();
    var initPos = new Vector3(-3, -3, 0);
    p1bucket = Instantiate(bucketPrefab, initPos, Quaternion.identity).GetComponent<bucket>();
    p1bucket.playerNum = 1;
    //p1oom = false;
    p1color = Color.blue;
    p1Brush.myColor = Color.blue;

    //p2
    p2Brush = Instantiate(brushPrefab).GetComponent<brush>();
    //Debug.Log(p1Brush);
    p2Brush.playerNum = 2;
    p2Input = Instantiate(p2testInputPrefab).GetComponent<p2testInputHandler>();
    initPos = new Vector3(3, -3, 0);
    p2bucket = Instantiate(bucketPrefab, initPos, Quaternion.identity).GetComponent<bucket>();
    p2bucket.playerNum = 2;
    //p2oom = false;
    p2color = Color.magenta;
    p2Brush.myColor = Color.magenta;

    frame1 = true;
    cellQ.Clear();
    qDist.Clear();
    //bfsSort=
}
// Increases the bucket count of this hashtable. This method is called from
// the Insert method when the actual load factor of the hashtable reaches
// the upper limit specified when the hashtable was constructed. The number
// of buckets in the hashtable is increased to the smallest prime number
// that is larger than twice the current number of buckets, and the entries
// in the hashtable are redistributed into the new buckets using the cached
// hashcodes.
private void expand()
{
    // Allocate new Array
    int oldhashsize = buckets.Length;
    int rawsize = 1 + oldhashsize * 2;
    if (rawsize < 0)
        throw new ArgumentException(Environment.GetResourceString("Arg_HTCapacityOverflow"));
    int hashsize = GetPrime(rawsize);

    // Don't replace any internal state until we've finished adding to the
    // new bucket[]. This serves two purposes: 1) Allow concurrent readers
    // to see valid hashtable contents at all times and 2) Protect against
    // an OutOfMemoryException while allocating this new bucket[].
    bucket[] newBuckets = new bucket[hashsize];

    // rehash table into new buckets
    int nb;
    for (nb = 0; nb < oldhashsize; nb++)
    {
        bucket oldb = buckets[nb];
        if ((oldb.key != null) && (oldb.key != buckets))
        {
            putEntry(newBuckets, oldb.key, oldb.val, oldb.hash_coll & 0x7FFFFFFF);
        }
    }

    // New bucket[] is good to go - replace buckets and other internal state.
    version++;
    buckets = newBuckets;
    loadsize = (int)(loadFactor * hashsize);
    if (loadsize >= hashsize)
        loadsize = hashsize - 1;
}
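// A rough standalone illustration (naive prime test, not the real GetPrime, which
// consults a precomputed list) of the growth policy expand() implements above: each
// expansion picks the smallest prime no smaller than one more than twice the current
// bucket count.
using System;

class GrowthSketch
{
    // Naive stand-in: smallest prime >= n.
    static int NextPrime(int n)
    {
        for (int candidate = n; ; candidate++)
        {
            bool prime = candidate > 1;
            for (int d = 2; d * d <= candidate && prime; d++)
                if (candidate % d == 0) prime = false;
            if (prime) return candidate;
        }
    }

    static void Main()
    {
        int size = 3; // the minimum table size
        for (int i = 0; i < 5; i++)
        {
            Console.Write(size + " ");      // 3 7 17 37 79
            size = NextPrime(1 + size * 2); // double, then round up to a prime
        }
    }
}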
private void rehash(int newsize, bool forceNewHashCode)
{
    // reset occupancy
    occupancy = 0;

    // Don't replace any internal state until we've finished adding to the
    // new bucket[]. This serves two purposes:
    //   1) Allow concurrent readers to see valid hashtable contents
    //      at all times
    //   2) Protect against an OutOfMemoryException while allocating this
    //      new bucket[].
    bucket[] newBuckets = new bucket[newsize];

    // rehash table into new buckets
    int nb;
    for (nb = 0; nb < buckets.Length; nb++)
    {
        bucket oldb = buckets[nb];
        if ((oldb.key != null) && (oldb.key != buckets))
        {
            int hashcode = ((forceNewHashCode ? GetHash(oldb.key) : oldb.hash_coll) & 0x7FFFFFFF);
            putEntry(newBuckets, oldb.key, oldb.val, hashcode);
        }
    }

    // New bucket[] is good to go - replace buckets and other internal state.
#if !FEATURE_CORECLR
    Thread.BeginCriticalRegion();
#endif
    isWriterInProgress = true;
    buckets = newBuckets;
    loadsize = (int)(loadFactor * newsize);
    UpdateVersion();
    isWriterInProgress = false;
#if !FEATURE_CORECLR
    Thread.EndCriticalRegion();
#endif

    // minimum size of hashtable is 3 now and maximum loadFactor is 0.72 now.
    Contract.Assert(loadsize < newsize, "Our current implementation means this is not possible.");
}
private void putEntry(bucket[] newBuckets, Object key, Object nvalue, int hashcode)
{
    BCLDebug.Assert(hashcode >= 0, "hashcode >= 0"); // make sure collision bit (sign bit) wasn't set.

    uint seed = (uint)hashcode;
    uint incr = (uint)(1 + (((seed >> 5) + 1) % ((uint)newBuckets.Length - 1)));

    do
    {
        int bucketNumber = (int)(seed % (uint)newBuckets.Length);
        if ((newBuckets[bucketNumber].key == null) || (newBuckets[bucketNumber].key == buckets))
        {
            newBuckets[bucketNumber].val = nvalue;
            newBuckets[bucketNumber].key = key;
            newBuckets[bucketNumber].hash_coll |= hashcode;
            return;
        }

        newBuckets[bucketNumber].hash_coll |= unchecked((int)0x80000000);
        seed += incr;
    } while (true);
}
private void rehash(int newsize)
{
    // reset occupancy
    occupancy = 0;

    // Don't replace any internal state until we've finished adding to the
    // new bucket[]. This serves two purposes:
    //   1) Allow concurrent readers to see valid hashtable contents
    //      at all times
    //   2) Protect against an OutOfMemoryException while allocating this
    //      new bucket[].
    bucket[] newBuckets = new bucket[newsize];

    // rehash table into new buckets
    int nb;
    for (nb = 0; nb < buckets.Length; nb++)
    {
        bucket oldb = buckets[nb];
        if (oldb.val != null)
        {
            putEntry(newBuckets, oldb.key, oldb.val, oldb.hash_coll & 0x7FFFFFFF);
        }
    }

    // New bucket[] is good to go - replace buckets and other internal state.
    version++;
    buckets = newBuckets;
    loadsize = (int)(loadFactorPerc * newsize) / 100;
    if (loadsize >= newsize)
    {
        loadsize = newsize - 1;
    }
}
private void splitBucket(int index)
{
    var bucket1 = _buckets[index];
    var bucket2 = new bucket();
    _buckets.Insert(index + 1, bucket2);

    var arr = bucket1.Values;
    var c = bucket1.Count;

    // Find the approximate median of this bucket
    uint min = arr[0];
    uint max = arr[0];
    for (int i = 1; i < c; i++)
    {
        if (min > arr[i])
        {
            min = arr[i];
        }
        if (max < arr[i])
        {
            max = arr[i];
        }
    }

    uint median = (min / 2) + (max / 2);
    uint eta = (max - min) / (uint)c;
    if (eta < 1)
    {
        eta = 1;
    }
    for (int i = 1; i < c; i++)
    {
        if (arr[i] > median)
        {
            median += eta;
        }
        else // disregard the == case for speed
        {
            median -= eta;
        }
    }
    if (median < min || median > max)
    {
        median = (min / 2) + (max / 2);
    }

    // Split up the values
    var vals1 = new List<long>();
    var vals2 = new List<long>();
    for (int i = 0; i < c; i++)
    {
        (arr[i] <= median ? vals1 : vals2).Add(arr[i] + bucket1.From);
    }

    // Update the bucket limits
    bucket2.To = bucket1.To;
    bucket1.To = median + bucket1.From;
    bucket2.From = bucket1.To + 1;

    // Expand lower From if possible
    bucket1.From = bucket1.To - uint.MaxValue;
    if (index > 0)
    {
        bucket1.From = Math.Max(bucket1.From, _buckets[index - 1].To + 1);
    }

    // Expand upper To if possible
    bucket2.To = bucket2.From + uint.MaxValue;
    if (index + 1 < _buckets.Count - 1)
    {
        bucket2.To = Math.Min(bucket2.To, _buckets[index + 2].From - 1);
    }

#if DEBUG
    Ut.Assert(bucket1.From <= bucket1.To);
    Ut.Assert(bucket2.From <= bucket2.To);
#endif

    // Populate the values
    bucket1.Count = vals1.Count;
    bucket2.Count = vals2.Count;
    bucket1.Values = new uint[Math.Max(vals1.Count * 11 / 10, 16)];
    bucket2.Values = new uint[Math.Max(vals2.Count * 11 / 10, 16)];
    for (int i = 0; i < vals1.Count; i++)
    {
        bucket1.Values[i] = (uint)(vals1[i] - bucket1.From);
    }
    for (int i = 0; i < vals2.Count; i++)
    {
        bucket2.Values[i] = (uint)(vals2[i] - bucket2.From);
    }
}
private void putEntry(bucket[] newBuckets, int key, Object nvalue, int hashcode)
{
    uint seed = (uint)hashcode;
    uint incr = (uint)(1 + (((seed >> 5) + 1) % ((uint)newBuckets.Length - 1)));

    do
    {
        int bucketNumber = (int)(seed % (uint)newBuckets.Length);
        if (newBuckets[bucketNumber].val == null)
        {
            newBuckets[bucketNumber].val = nvalue;
            newBuckets[bucketNumber].key = key;
            newBuckets[bucketNumber].hash_coll |= hashcode;
            return;
        }

        if (newBuckets[bucketNumber].hash_coll >= 0)
        {
            newBuckets[bucketNumber].hash_coll |= unchecked((int)0x80000000);
            occupancy++;
        }

        seed += incr;
    } while (true);
}
public bool Add(long item)
{
    lock (_buckets)
    {
        if (_buckets.Count == 0)
        {
            return addFirst(item);
        }

        int min = 0;
        int max = _buckets.Count - 1;
        while (min <= max)
        {
            int cur = (min + max) / 2;
            if (item < _buckets[cur].From)
            {
                max = cur - 1;
            }
            else if (item > _buckets[cur].To)
            {
                min = cur + 1;
            }
            else
            {
                var arr = _buckets[cur].Values;
                var c = _buckets[cur].Count;
                var tgt = (uint)(item - _buckets[cur].From);
                for (int i = 0; i < c; i++)
                {
                    if (arr[i] == tgt)
                    {
                        return false;
                    }
                }

                if (arr.Length == c)
                {
                    Array.Resize(ref arr, arr.Length * 4 / 3 + 1);
                    _buckets[cur].Values = arr;
                }
                _buckets[cur].Values[_buckets[cur].Count] = tgt;
                _buckets[cur].Count++;
                if (_buckets[cur].Count > _buckets.Count)
                {
                    splitBucket(cur);
                }
                Count++;
                return true;
            }
        }

        // No suitable bucket found. We have to insert one between existing buckets, or at one of the two ends.
        Ut.Assert(min == max + 1);
        var bucket = new bucket();
        if (min == 0)
        {
            // Append one at the start
            bucket.To = _buckets[0].From - 1;
            bucket.From = bucket.To - uint.MaxValue;
            if (item < bucket.From)
            {
                bucket.From = item - int.MaxValue;
                bucket.To = item + int.MaxValue;
            }
            _buckets.Insert(0, bucket);
        }
        else if (max == _buckets.Count - 1)
        {
            // Append one at the end
            bucket.From = _buckets[_buckets.Count - 1].To + 1;
            bucket.To = bucket.From + uint.MaxValue;
            if (item > bucket.To)
            {
                bucket.From = item - int.MaxValue;
                bucket.To = item + int.MaxValue;
            }
            _buckets.Add(bucket);
        }
        else
        {
            // Insert one between buckets max and max+1
            bucket.From = Math.Max(item - int.MaxValue, _buckets[max].To + 1);
            bucket.To = Math.Min(item + int.MaxValue, _buckets[max + 1].From - 1);
            _buckets.Insert(max + 1, bucket);
        }

        bucket.Count = 1;
        bucket.Values = new uint[16];
        bucket.Values[0] = (uint)(item - bucket.From);
        Count++;
#if DEBUG
        Ut.Assert(bucket.From <= bucket.To);
#endif
        return true;
    }
}
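// A self-contained sketch (hypothetical FindBucket helper, not from the original
// source) of the interval binary search Add() performs above: the buckets hold
// disjoint, sorted [From, To] ranges, so bisecting on the bounds locates the covering
// bucket in O(log n).
using System;

class RangeSearchSketch
{
    static int FindBucket(long[,] ranges, long item)
    {
        int min = 0, max = ranges.GetLength(0) - 1;
        while (min <= max)
        {
            int cur = (min + max) / 2;
            if (item < ranges[cur, 0]) max = cur - 1;      // below this bucket's From
            else if (item > ranges[cur, 1]) min = cur + 1; // above this bucket's To
            else return cur;                               // item falls inside the range
        }
        return -1; // no bucket covers the item; Add() inserts a new one at this point
    }

    static void Main()
    {
        long[,] ranges = { { 0, 99 }, { 100, 199 }, { 500, 999 } };
        Console.WriteLine(FindBucket(ranges, 150)); // 1
        Console.WriteLine(FindBucket(ranges, 300)); // -1
    }
}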
private void putEntry(bucket[] newBuckets, Object key, Object nvalue, int hashcode)
{
    Contract.Assert(hashcode >= 0, "hashcode >= 0"); // make sure collision bit (sign bit) wasn't set.

    uint seed = (uint)hashcode;
    uint incr = (uint)(1 + ((seed * HashPrime) % ((uint)newBuckets.Length - 1)));
    int bucketNumber = (int)(seed % (uint)newBuckets.Length);

    do
    {
        if ((newBuckets[bucketNumber].key == null) || (newBuckets[bucketNumber].key == buckets))
        {
            newBuckets[bucketNumber].val = nvalue;
            newBuckets[bucketNumber].key = key;
            newBuckets[bucketNumber].hash_coll |= hashcode;
            return;
        }

        if (newBuckets[bucketNumber].hash_coll >= 0)
        {
            newBuckets[bucketNumber].hash_coll |= unchecked((int)0x80000000);
            occupancy++;
        }

        bucketNumber = (int)(((long)bucketNumber + incr) % (uint)newBuckets.Length);
    } while (true);
}
// Adds one element from the old array into the new array
private void putEntry(bucket[] newBuckets, Object key, Object nvalue, int hashcode)
{
    uint seed = (uint)hashcode;                                                  // h1
    uint incr = (uint)(1 + (((seed >> 5) + 1) % ((uint)newBuckets.Length - 1))); // h2
    int bn = (int)(seed % (uint)newBuckets.Length);                              // the initial hash address

    do
    {
        // The new element can be stored here if the slot is free, whether or not a collision passed through it
        if ((newBuckets[bn].key == null) || (newBuckets[bn].key == buckets))
        {
            // store the entry
            newBuckets[bn].val = nvalue;
            newBuckets[bn].key = key;
            newBuckets[bn].hash_coll |= hashcode;
            return;
        }

        // another element already occupies this slot
        if (newBuckets[bn].hash_coll >= 0)
        {
            // set the high (collision) bit of hash_coll
            newBuckets[bn].hash_coll |= unchecked((int)0x80000000);
        }

        // double hashing: h1(key) + h2(key)
        bn = (int)(((long)bn + incr) % (uint)newBuckets.Length);
    } while (true);
}
private void putEntry(bucket[] newBuckets, object key, object nvalue, int hashcode)
{
    uint seed = (uint)hashcode;
    uint incr = (uint)(1 + (((seed >> 5) + 1) % (newBuckets.Length - 1)));
    int index = (int)(seed % newBuckets.Length);

    // Decompiler output restructured: the original "goto Label_0017" is a plain retry loop.
    while (true)
    {
        if ((newBuckets[index].key == null) || (newBuckets[index].key == this.buckets))
        {
            newBuckets[index].val = nvalue;
            newBuckets[index].key = key;
            newBuckets[index].hash_coll |= hashcode;
            return;
        }

        if (newBuckets[index].hash_coll >= 0)
        {
            newBuckets[index].hash_coll |= unchecked((int)0x80000000);
            this.occupancy++;
        }
        index = (int)(((ulong)index + (ulong)incr) % ((ulong)newBuckets.Length));
    }
}
// Rehash into the new capacity
private void rehash(int newsize)
{
    bucket[] newBuckets = new bucket[newsize];
    for (int nb = 0; nb < buckets.Length; nb++)
    {
        bucket oldb = buckets[nb];
        if ((oldb.key != null) && (oldb.key != buckets))
        {
            putEntry(newBuckets, oldb.key, oldb.val, oldb.hash_coll & 0x7FFFFFFF);
        }
    }
    buckets = newBuckets;
    loadsize = (int)(loadFactor * newsize);
}