public override long Add(object id)
{
    // Claim the next free slot and cache the numeric value of the id there.
    long collisionIndex = _nextOffset++;
    _cache.set(collisionIndex, ((Number) id).longValue());
    return collisionIndex;
}
public override void Run()
{
    // For each lap, write every index's own value and read it straight back to verify.
    for (int o = 0; o < LAPS; o++)
    {
        for (long i = 0; i < COUNT; i++)
        {
            Array.set(i, i);
            assertEquals(i, Array.get(i));
        }
    }
}
public override void Run()
{
    // Each contestant writes random values to its own interleaved stripe of indexes
    // (starting at its contestant number, stepping by the number of contestants)
    // and immediately verifies the read-back.
    for (int o = 0; o < LAPS; o++)
    {
        for (long i = Contestant; i < COUNT; i += CONTESTANTS)
        {
            long value = outerInstance.Random.nextLong();
            Array.set(i, value);
            assertEquals(value, Array.get(i));
        }
    }
}
/// <summary>
/// Looks at the max amount of configured memory (given in the constructor) and figures out for how many nodes
/// their groups can be cached. Before the first call to this method all <seealso cref="incrementGroupCount(long)"/>
/// calls must have been made. After a call to this there should be a sequence of <seealso cref="put(RelationshipGroupRecord)"/>
/// calls to cache the groups. If this call returns a node id which is lower than the highest node id in the
/// store, then more rounds of caching should be performed after completing this round.
/// </summary>
/// <param name="fromNodeId"> inclusive </param>
/// <returns> toNodeId exclusive </returns>
public virtual long Prepare(long fromNodeId)
{
    _cache.clear(); // this will have all the "first" bytes set to 0, which means !inUse
    this._fromNodeId = fromNodeId; // keep for use in put later on

    _highCacheId = 0;
    for (long nodeId = fromNodeId; nodeId < _highNodeId; nodeId++)
    {
        int count = GroupCount(nodeId);
        if (_highCacheId + count > _maxCacheLength)
        {
            // Cannot include this one, so up until the previous node is good
            return this._toNodeId = nodeId;
        }
        _offsets.set(Rebase(nodeId), _highCacheId);
        _highCacheId += count;
    }
    return this._toNodeId = _highNodeId;
}
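// A minimal sketch (not part of the original class) of the multi-round protocol described in the
// Prepare doc comment: register all group counts first, then alternate Prepare/put rounds until the
// whole node id range is covered. The cache class name, the OwningNode accessor and the readGroups
// delegate are assumptions for illustration; only incrementGroupCount, Prepare and put come from above.
internal static void CacheAllGroupsSketch(RelationshipGroupCache cache, long highNodeId,
        System.Func<System.Collections.Generic.IEnumerable<RelationshipGroupRecord>> readGroups)
{
    // Prerequisite: all incrementGroupCount calls must happen before the first Prepare call.
    foreach (RelationshipGroupRecord group in readGroups())
    {
        cache.incrementGroupCount(group.OwningNode); // OwningNode is a hypothetical accessor name
    }

    long fromNodeId = 0;
    while (fromNodeId < highNodeId)
    {
        // Prepare returns the exclusive upper bound of node ids whose groups fit in memory this round.
        long toNodeId = cache.Prepare(fromNodeId);
        foreach (RelationshipGroupRecord group in readGroups())
        {
            if (group.OwningNode >= fromNodeId && group.OwningNode < toNodeId)
            {
                cache.put(group); // cache only groups owned by this round's node window
            }
        }
        // ...consume the cached groups here before starting the next round...
        fromNodeId = toNodeId; // a toNodeId below highNodeId means more rounds are needed
    }
}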