/// <summary>
/// Converts the set items to another type with equivalent hash codes.
/// NOTE: This method is not threadsafe and the set cannot be safely used after conversion.
/// </summary>
/// <param name="convert">the function used to convert the items</param>
/// <returns>The set with converted item set</returns>
public ConcurrentBigSet<TNewItem> ConvertUnsafe<TNewItem>(Func<TItem, TNewItem> convert)
{
    Contract.Requires(convert != null);

    var convertedItemsBuffer = new BigBuffer<TNewItem>(m_items.EntriesPerBufferBitWidth);
    var totalItems = m_count;
    convertedItemsBuffer.Initialize(totalItems, (startIndex, entryCount) =>
    {
        var page = new TNewItem[entryCount];
        var itemAccessor = m_accessors.Items;

        // globalIndex walks the big buffer while localIndex walks this page array.
        // The extra globalIndex < totalItems bound keeps the loop from converting
        // slots past the last valid item in the final page.
        int globalIndex = startIndex;
        for (int localIndex = 0; localIndex < entryCount && globalIndex < totalItems; localIndex++, globalIndex++)
        {
            page[localIndex] = convert(itemAccessor[globalIndex]);
        }

        return page;
    });

    // The converted set shares the nodes/buckets of this set (same hash codes),
    // only the items storage is replaced.
    return new ConcurrentBigSet<TNewItem>(
        concurrencyLevel: m_locks.Length,
        backingItemsBuffer: convertedItemsBuffer,
        backingNodesBuffer: m_nodes,
        backingBuckets: m_buckets,
        nodeLength: m_nodeLength)
    {
        m_count = totalItems,
    };
}
/// <summary>
/// Creates and returns set by deserialization
/// </summary>
/// <param name="reader">general reader</param>
/// <param name="itemReader">item reader</param>
/// <param name="concurrencyLevel">the concurrency level (all values less than 1024 will be assumed to be 1024)</param>
public static ConcurrentBigSet<TItem> Deserialize(
    BinaryReader reader,
    Func<TItem> itemReader,
    int concurrencyLevel = DefaultConcurrencyLevel)
{
    int count = reader.ReadInt32();
    int nodeLength = reader.ReadInt32();
    int itemsPerBufferBitWidth = reader.ReadInt32();

    // Capacity must cover both the serialized nodes and the requested concurrency level.
    int capacity = Math.Max(MinConcurrencyLevel, Math.Max(nodeLength, concurrencyLevel));

    var itemBuffer = new BigBuffer<TItem>(itemsPerBufferBitWidth);
    itemBuffer.Initialize(capacity);
    var nodeBuffer = new BigBuffer<Node>(NodesPerEntryBufferBitWidth);
    nodeBuffer.Initialize(capacity);

    var itemWriter = itemBuffer.GetAccessor();
    var nodeWriter = nodeBuffer.GetAccessor();

    // Slots whose stored hash code is the unused sentinel carry no node/item
    // payload in the stream; remember them so they can be recycled below.
    var unusedSlots = new List<int>();
    for (int slot = 0; slot < nodeLength; slot++)
    {
        int hashCode = reader.ReadInt32();
        if (hashCode == Node.UnusedHashCode)
        {
            unusedSlots.Add(slot);
            continue;
        }

        // Stream order for a used slot is: hash code, next pointer, item.
        int next = reader.ReadInt32();
        var item = itemReader();
        nodeWriter[slot] = new Node(hashCode, next);
        itemWriter[slot] = item;
    }

    var buckets = Buckets.Deserialize(reader);

    var result = new ConcurrentBigSet<TItem>(
        concurrencyLevel,
        itemBuffer,
        nodeBuffer,
        buckets,
        nodeLength);
    result.m_count = count;

    // Re-register the unused slots as free nodes, partitioned by owning lock.
    var accessors = result.m_accessors;
    foreach (var slot in unusedSlots)
    {
        var lockNo = result.GetLockNo(slot);
        result.AddFreeNode(lockNo, slot, ref accessors);
    }

    return result;
}
/// <summary>
/// Constructs a <see cref="Buckets"/> instance that starts out in the splitting
/// state: every pre-split bucket is still pending and the split cursor is reset.
/// </summary>
private Buckets(BigBuffer<int> buckets, int lastBucketIndex, int preSplitBucketsLength, int splitThreshold)
{
    m_buckets = buckets;
    m_lastBucketIndex = lastBucketIndex;
    SplitThreshold = splitThreshold;

    // All pre-split buckets remain to be split; cursor of -1 means no bucket
    // has been claimed for splitting yet.
    m_preSplitBucketsLength = preSplitBucketsLength;
    m_pendingSplitBucketCount = preSplitBucketsLength;
    IsSplitting = SPLITTING_TRUE;
    m_splitBucketCursor = -1;
}
/// <summary>
/// Creates an instance.
/// </summary>
/// <param name="concurrencyLevel">the concurrency level (all values less than 1024 will be assumed to be 1024)</param>
/// <param name="capacity">the initial capacity (ie number of buckets)</param>
/// <param name="ratio">the desired ratio of items to buckets (must be greater than 0)</param>
/// <param name="backingItemsBuffer">the backing storage for items</param>
/// <param name="itemsPerEntryBufferBitWidth">the bit width of number of entries in a buffer (buffer size is 2^<paramref name="itemsPerEntryBufferBitWidth"/>)</param>
public ConcurrentBigSet(
    int concurrencyLevel = DefaultConcurrencyLevel,
    int capacity = DefaultCapacity,
    int ratio = DefaultBucketToItemsRatio,
    BigBuffer<TItem> backingItemsBuffer = null,
    int itemsPerEntryBufferBitWidth = 12)
    : this(
        concurrencyLevel,
        backingItemsBuffer,
        backingNodesBuffer: null,
        backingBuckets: null,
        nodeLength: 0,
        capacity: capacity,
        ratio: ratio,
        itemsPerEntryBufferBitWidth: itemsPerEntryBufferBitWidth)
{
    Contract.Requires(concurrencyLevel >= 1);
    Contract.Requires(ratio >= 1);
}
/// <summary>
/// Class constructor
/// </summary>
public Buckets(int capacity, int ratio, BigBuffer<int>.BufferInitializer bucketsBufferInitializer = null, bool initializeSequentially = false)
{
    Contract.Requires(capacity > 0);

    var bucketBuffer = new BigBuffer<int>(ItemsPerEntryBufferBitWidth, 1);

    // Initialize may round the requested capacity; use the value it reports.
    // Without a custom initializer, buckets start out pointing at no head node.
    capacity = bucketBuffer.Initialize(
        capacity,
        bucketsBufferInitializer ?? InitializeBucketBufferToInvalidHeadNodeIndices,
        initializeSequentially);
    m_buckets = bucketBuffer;

    Contract.Assume((capacity & (capacity - 1)) == 0, "capacity must be a power of two");
    m_lastBucketIndex = capacity - 1;
    m_preSplitBucketsLength = capacity;
    SplitThreshold = checked(capacity * ratio);
    m_splitBucketCursor = int.MinValue;
}
/// <summary>
/// Constructs a <see cref="Buckets"/> instance whose buffer contents and split
/// state are all supplied explicitly (buffer is filled sequentially by the
/// given initializer).
/// </summary>
private Buckets(BigBuffer<int>.BufferInitializer bucketsBufferInitializer, int capacity, int lastBucketIndex, int pendingSplitBucketCount, int preSplitBucketsLength, int isSplitting, int splitThreshold, int splitBucketCursor)
{
    Contract.Requires(bucketsBufferInitializer != null);

    var bucketBuffer = new BigBuffer<int>(ItemsPerEntryBufferBitWidth, 1);
    bucketBuffer.Initialize(capacity, bucketsBufferInitializer, initializeSequentially: true);
    m_buckets = bucketBuffer;

    // Restore the split-related state exactly as provided.
    m_lastBucketIndex = lastBucketIndex;
    m_preSplitBucketsLength = preSplitBucketsLength;
    m_pendingSplitBucketCount = pendingSplitBucketCount;
    IsSplitting = isSplitting;
    SplitThreshold = splitThreshold;
    m_splitBucketCursor = splitBucketCursor;
}
/// <summary>
/// Core constructor: wires up backing storage (creating any buffers not
/// supplied) after normalizing concurrency level and capacity to powers of two.
/// </summary>
private ConcurrentBigSet(
    int concurrencyLevel,
    BigBuffer<TItem> backingItemsBuffer,
    BigBuffer<Node> backingNodesBuffer,
    Buckets backingBuckets,
    int nodeLength,
    int capacity = DefaultCapacity,
    int ratio = DefaultBucketToItemsRatio,
    int itemsPerEntryBufferBitWidth = 12)
{
    Contract.Requires(concurrencyLevel >= 1);
    Contract.Requires(ratio >= 1);

    concurrencyLevel = Math.Max(concurrencyLevel, MinConcurrencyLevel);
    capacity = Math.Max(capacity, concurrencyLevel);

    // Round both values up to a power of two. HighestBitSet yields the highest
    // power of two <= the value; shift left once when the value was not already
    // an exact power of two.
    var concurrencyFloor = (int)Bits.HighestBitSet((uint)concurrencyLevel);
    var capacityFloor = (int)Bits.HighestBitSet((uint)capacity);
    capacity = capacity > capacityFloor ? capacityFloor << 1 : capacityFloor;
    concurrencyLevel = concurrencyLevel > concurrencyFloor ? concurrencyFloor << 1 : concurrencyFloor;

    m_locks = new Locks(concurrencyLevel);
    m_lockBitMask = concurrencyLevel - 1;

    // One free-node pointer per lock, initialized to the sentinel -1.
    m_freeNodes = new int[m_locks.Length];
    for (int lockNo = 0; lockNo < m_freeNodes.Length; lockNo++)
    {
        m_freeNodes[lockNo] = -1;
    }

    m_items = backingItemsBuffer ?? new BigBuffer<TItem>(itemsPerEntryBufferBitWidth);
    m_nodes = backingNodesBuffer ?? new BigBuffer<Node>(NodesPerEntryBufferBitWidth);
    m_buckets = backingBuckets ?? new Buckets(capacity, ratio);
    m_nodeLength = nodeLength;
    m_accessors = new Accessors(this);
}
/// <summary>
/// Constructor
/// </summary>
public Accessor(BigBuffer<TEntry> buffer)
{
    Buffer = buffer;

    // Nothing cached yet; -1 matches no real buffer number.
    m_lastBuffer = null;
    m_lastBufferNumber = -1;
}
/// <summary>
/// Captures accessors for the set's items and nodes buffers.
/// </summary>
public Accessors(ConcurrentBigSet<TItem> set)
{
    Items = set.m_items.GetAccessor();
    Nodes = set.m_nodes.GetAccessor();
}
/// <summary>
/// Reserves an index in the backing buffer
/// </summary>
/// <param name="backingBuffer">the backing buffer used to ensure caller has access</param>
/// <returns>the reserved index</returns>
public int ReservedNextIndex(BigBuffer<TItem> backingBuffer)
{
    // The buffer argument acts as a capability token: only a caller holding
    // this set's own items buffer is allowed to reserve indices.
    Contract.Assert(
        backingBuffer == m_items,
        "ReservedNextIndex can only be called by owner of backing buffer for set");

    return GetTrailingFreeNodes();
}