// Queues a UDP datagram send of the given buffer to the remote endpoint.
// The completion callback (if any) is invoked with this Udp handle and the
// error (or null) once libuv finishes the send.
void QueueSend(BufferRef bufferRef, IPEndPoint remoteEndPoint, Action<Udp, Exception> completion)
{
    Contract.Requires(remoteEndPoint != null);
    Contract.Requires(bufferRef != null);
    try
    {
        // Reuse a pooled request object to avoid per-send allocations.
        SendRequest request = Recycler.Take();
        Debug.Assert(request != null);

        // Surface the completion with this handle instead of the internal
        // request object, so callers never see the pooled request.
        request.Prepare(bufferRef,
            (sendRequest, exception) => completion?.Invoke(this, exception));

        uv_buf_t[] bufs = request.Bufs;
        NativeMethods.UdpSend(
            request.InternalHandle,
            this.InternalHandle,
            remoteEndPoint,
            ref bufs);
    }
    catch (Exception exception)
    {
        // Log with the handle type for context, then let the caller decide.
        Log.Error($"{this.HandleType} faulted.", exception);
        throw;
    }
}
// Verifies that every bucket index yields a buffer whose length matches the
// pool's bucket-index -> buffer-size mapping, and that a large buffer index
// in bucket 0 still maps to the smallest (4096-byte) buffer size.
public void CouldOpenBucketsAndGetBuffers()
{
    var path = TestUtils.GetPath();
    var buckets = new SharedMemoryBuckets(path);

    var available = buckets.GetAvailableFreeSpace();
    Console.WriteLine("AVAILABLE: " + available);

    for (int i = 0; i <= BufferRef.MaxBucketIdx; i++)
    {
        var br = BufferRef.Create(i, 1);
        var buffer = buckets[br];
        var bufferSize = SharedMemoryPool.BucketIndexToBufferSize(i, 4096);
        // Touch every byte to force the shared-memory mapping to materialize.
        buffer.Span.Fill(0);
        Assert.AreEqual(bufferSize, buffer.Length);
    }

    // High buffer index within bucket 0: still the minimum buffer size.
    var sndSmall = BufferRef.Create(0, 40000);
    var buffer1 = buckets[sndSmall];
    buffer1.Span.Fill(0);
    Assert.AreEqual(4096, buffer1.Length);
    buckets.Dispose();
}
// Detaches and disposes the pending buffer and clears the completion,
// returning this request to a reusable state.
protected virtual void Release()
{
    // Snapshot then clear state before disposing, so a re-entrant observer
    // sees an already-cleared request.
    BufferRef detached = this.buffer;
    this.buffer = null;
    this.completion = null;
    detached?.Dispose();
}
// Creates a write/send request wrapper around a native libuv request context.
// Only UV_WRITE (stream writes) and UV_UDP_SEND (datagram sends) carry a
// payload buffer, hence the precondition.
protected WriteBufferRequest(uv_req_type requestType)
    : base(requestType)
{
    Contract.Requires(
        requestType == uv_req_type.UV_WRITE
        || requestType == uv_req_type.UV_UDP_SEND);

    this.handle = new RequestContext(requestType, 0, this);
    this.buffer = null; // set later by Prepare, cleared by Release
}
// Benchmarked bulk-insert test: adds 50k block records and checks, after each
// add, that the block count and the series' Last entry reflect the new record.
public void CouldWriteManyValuesToSeries()
{
    var path = TestUtils.GetPath();
    // 512: map-size parameter — units not visible here, TODO confirm.
    // WriteMap + NoSync trade durability for write speed in tests.
    var table = new StreamBlockIndex(path, 512, LMDBEnvironmentFlags.WriteMap | LMDBEnvironmentFlags.NoSync, null);
    var streamId = (StreamLogId)10000;
    var series = table.GetBlockRecordSeries(streamId);

    var count = 50_000;
    using (Benchmark.Run("Writes", count))
    {
        for (int i = 0; i < count; i++)
        {
            // Versions are 1-based and match insertion order.
            var lr1 = new StreamBlockRecord(BufferRef.Create(0, i + 1)) { Version = (ulong)i + 1 };
            if (!table.TryAddBlockRecord(streamId, lr1)) { Assert.Fail(); }
            if (i + 1 != table.GetBlockCount(streamId)) { Assert.Fail(); }

            // Last must immediately reflect the record just written.
            var last = series.Last;
            if (!last.IsPresent) { Assert.Fail(); }
            if (last.Present.Key != (ulong)i + 1) { Assert.Fail(); }
            if (last.Present.Value.BufferRef != lr1.BufferRef) { Assert.Fail(); }
        }
    }
    Benchmark.Dump();
    table.Dispose();
}
// libuv allocation callback: hands libuv the first native segment of a
// freshly allocated read buffer.
void OnAllocateCallback(out uv_buf_t buf)
{
    BufferRef allocated = this.pipeline.AllocateReadBuffer();
    uv_buf_t[] nativeBufs = allocated.GetBuffer();
#if DEBUG
    Contract.Assert(nativeBufs != null && nativeBufs.Length > 0);
#endif
    // ReSharper disable once PossibleNullReferenceException
    buf = nativeBufs[0];
}
// Wraps the given array slice in an unpooled buffer and queues it for a
// stream write through the pipeline.
protected internal void QueueWriteStream(byte[] array, int offset, int count, Action<StreamHandle, Exception> completion)
{
    Contract.Requires(array != null && array.Length > 0);
    Contract.Requires(offset >= 0 && count > 0);
    Contract.Requires((offset + count) <= array.Length);

    ByteBuffer wrapped = UnpooledByteBuffer.From(array, offset, count);
    this.pipeline.QueueWrite(new BufferRef(wrapped, false), completion);
}
// Allocates a receive buffer sized by the running estimate, tracks it in the
// pending queue, and returns its reference.
internal BufferRef AllocateReadBuffer()
{
    ByteBuffer allocated = this.receiveBufferSizeEstimate.Allocate(this.allocator);
    Log.TraceFormat("{0} receive buffer allocated size = {1}", nameof(Pipeline), allocated.Count);

    var reference = new BufferRef(allocated);
    this.bufferQueue.Enqueue(reference);
    return reference;
}
// Queues a UDP send of the given array slice.
// Fix: the original only validated `array != null`; the sibling overloads
// (see the byte[] QueueWriteStream/QueueSend variants) validate the slice
// bounds as well, so an out-of-range offset/count slipped past the contract
// here and only failed later inside the buffer wrapper.
public void QueueSend(byte[] array, int offset, int count, IPEndPoint remoteEndPoint, Action<Udp, Exception> completion = null)
{
    Contract.Requires(array != null && array.Length > 0);
    Contract.Requires(offset >= 0 && count > 0);
    Contract.Requires((offset + count) <= array.Length);
    Contract.Requires(remoteEndPoint != null);

    IArrayBuffer<byte> buffer = Unpooled.WrappedBuffer(array, offset, count);
    var bufferRef = new BufferRef(buffer, buffer.ReaderIndex, count);
    this.QueueSend(bufferRef, remoteEndPoint, completion);
}
// Wraps the given array slice and queues it for a stream write.
public void QueueWriteStream(byte[] array, int offset, int count, Action<StreamHandle, Exception> completion)
{
    Contract.Requires(array != null && array.Length > 0);
    Contract.Requires(offset >= 0 && count > 0);
    Contract.Requires((offset + count) <= array.Length);

    IArrayBuffer<byte> wrapped = Unpooled.WrappedBuffer(array, offset, count);
    this.pipeline.QueueWrite(new BufferRef(wrapped, wrapped.ReaderIndex, count), completion);
}
// Verifies that SetFlag/ClearFlag toggle only the flag bit and preserve the
// bucket and buffer indices.
// Fix: the original called `br.ClearFlag()` — clearing the flag on a value
// that never had it set — so the clear path was never actually exercised.
// Clearing must be applied to the flagged value.
public void CouldSetUnsetFlag()
{
    var br = BufferRef.Create(2, 1);

    var brFlag = br.SetFlag();
    Assert.AreEqual(2, brFlag.BucketIndex);
    Assert.AreEqual(1, brFlag.BufferIndex);
    Assert.IsTrue(brFlag.Flag);

    brFlag = brFlag.ClearFlag();
    Assert.AreEqual(2, brFlag.BucketIndex);
    Assert.AreEqual(1, brFlag.BufferIndex);
    Assert.IsFalse(brFlag.Flag);
}
// Arms this request with a buffer and completion callback.
// Throws if the request is already armed or its native handle is gone.
internal void Prepare(BufferRef bufferRef, Action<WriteBufferRequest, Exception> callback)
{
    Contract.Requires(bufferRef != null);
    Contract.Requires(callback != null);

    bool alreadyArmed = this.buffer != null;
    if (alreadyArmed || !this.handle.IsValid)
    {
        throw new InvalidOperationException($"{nameof(WriteRequest)} status is invalid.");
    }

    this.completion = callback;
    this.buffer = bufferRef;
}
// Queues the contents of a WritableBuffer as a UDP send; a buffer with
// nothing written (Index == 0) is a no-op.
public void QueueSend(WritableBuffer writableBuffer, IPEndPoint remoteEndPoint, Action<Udp, Exception> completion = null)
{
    Contract.Requires(remoteEndPoint != null);

    if (writableBuffer.Index == 0)
    {
        return; // nothing written, nothing to send
    }

    this.QueueSend(new BufferRef(ref writableBuffer), remoteEndPoint, completion);
}
// Queues a UDP send of the given array slice after wrapping it in an
// unpooled ByteBuffer.
public void QueueSend(byte[] array, int offset, int count, IPEndPoint remoteEndPoint, Action<Udp, Exception> completion = null)
{
    Contract.Requires(array != null && array.Length > 0);
    Contract.Requires(offset >= 0 && count > 0);
    Contract.Requires((offset + count) <= array.Length);
    Contract.Requires(remoteEndPoint != null);

    ByteBuffer wrapped = UnpooledByteBuffer.From(array, offset, count);
    this.QueueSend(new BufferRef(wrapped, false), remoteEndPoint, completion);
}
// Allocates a receive buffer sized by the running estimate, enqueues its
// reference for the pending-read queue, and returns it.
internal BufferRef AllocateReadBuffer()
{
    IArrayBuffer<byte> allocated = this.receiveBufferSizeEstimate.Allocate(this.allocator);

    // Guarded to avoid formatting cost when tracing is off.
    if (Log.IsTraceEnabled)
    {
        Log.TraceFormat("{0} receive buffer allocated size = {1}", nameof(Pipeline), allocated.Capacity);
    }

    var reference = new BufferRef(allocated, allocated.WriterIndex, allocated.WritableCount);
    this.bufferQueue.Enqueue(reference);
    return reference;
}
// Queues the contents of a WritableBuffer for a stream write; a buffer with
// nothing written (Index == 0) is a no-op.
public void QueueWriteStream(WritableBuffer writableBuffer, Action<StreamHandle, Exception> completion)
{
    Contract.Requires(completion != null);

    if (writableBuffer.Index == 0)
    {
        return; // nothing written, nothing to write out
    }

    this.pipeline.QueueWrite(new BufferRef(ref writableBuffer), completion);
}
// libuv allocation callback: allocates a fixed-size receive buffer, tracks
// it in the pending queue, and hands libuv its first native segment.
void OnAllocateCallback(out uv_buf_t buf)
{
    ByteBuffer allocated = this.allocator.Buffer(FixedBufferSize);
    Log.TraceFormat("{0} {1} receive buffer allocated size = {2}", this.HandleType, this.InternalHandle, allocated.Count);

    var reference = new BufferRef(allocated);
    this.bufferQueue.Enqueue(reference);

    uv_buf_t[] nativeBufs = reference.GetBuffer();
#if DEBUG
    Contract.Assert(nativeBufs != null && nativeBufs.Length > 0);
#endif
    buf = nativeBufs[0];
}
// libuv allocation callback: allocates a fixed-size receive buffer, tracks
// it in the pending queue, and hands libuv its first native segment.
void OnAllocateCallback(out uv_buf_t buf)
{
    IArrayBuffer<byte> allocated = this.allocator.Buffer(FixedBufferSize);

    // Guarded to avoid formatting cost when tracing is off.
    if (Log.IsTraceEnabled)
    {
        Log.TraceFormat("{0} {1} receive buffer allocated size = {2}", this.HandleType, this.InternalHandle, allocated.Capacity);
    }

    var reference = new BufferRef(allocated, allocated.WriterIndex, allocated.WritableCount);
    this.bufferQueue.Enqueue(reference);

    uv_buf_t[] nativeBufs = reference.GetBuffer();
    Debug.Assert(nativeBufs != null && nativeBufs.Length > 0);
    buf = nativeBufs[0];
}
// Queues the readable portion of a WritableBuffer's backing array buffer for
// a stream write; an absent or unreadable buffer is a no-op.
public void QueueWriteStream(WritableBuffer writableBuffer, Action<StreamHandle, Exception> completion)
{
    Contract.Requires(completion != null);

    IArrayBuffer<byte> source = writableBuffer.ArrayBuffer;
    if (source == null || !source.IsReadable())
    {
        return;
    }

    this.pipeline.QueueWrite(new BufferRef(source, source.ReaderIndex, source.ReadableCount), completion);
}
// Queues the readable portion of a WritableBuffer's backing array buffer as
// a UDP send; an absent or unreadable buffer is a no-op.
public void QueueSend(WritableBuffer writableBuffer, IPEndPoint remoteEndPoint, Action<Udp, Exception> completion = null)
{
    Contract.Requires(remoteEndPoint != null);

    IArrayBuffer<byte> source = writableBuffer.ArrayBuffer;
    if (source == null || !source.IsReadable())
    {
        return;
    }

    this.QueueSend(new BufferRef(source, source.ReaderIndex, source.ReadableCount), remoteEndPoint, completion);
}
// Queues a buffered write on the owning stream handle, routing the request's
// completion back to the caller with the stream handle as context.
internal void QueueWrite(BufferRef bufferRef, Action<StreamHandle, Exception> completion)
{
    Contract.Requires(bufferRef != null);
    try
    {
        // Pooled request; completion surfaces the stream handle, not the request.
        WriteRequest request = Recycler.Take();
        request.Prepare(
            bufferRef,
            (writeRequest, exception) => completion?.Invoke(this.streamHandle, exception));
        this.streamHandle.WriteStream(request);
    }
    catch (Exception exception)
    {
        Log.Error($"{nameof(Pipeline)} {this.streamHandle.HandleType} faulted.", exception);
        throw;
    }
}
// With a 32-byte page size only bucket 0 fits: indexing bucket 1 must throw,
// and any buffer in bucket 0 has length 32.
public void CouldUseSmallPages()
{
    var path = TestUtils.GetPath();
    var buckets = new SharedMemoryBuckets(path, pageSize: 32, 0);

    var tooBigBucket = BufferRef.Create(1, 1);
    Assert.Throws<ArgumentException>(() =>
    {
        var _ = buckets[tooBigBucket];
    });

    var smallRef = BufferRef.Create(0, 40000);
    var smallBuffer = buckets[smallRef];
    smallBuffer.Span.Fill(0);
    Assert.AreEqual(32, smallBuffer.Length);
    buckets.Dispose();
}
// Adds a single block record and reads it back via TryGetLast.
// Fix: NUnit's Assert.AreEqual signature is (expected, actual); the original
// passed the actual value first, which produces inverted, misleading failure
// messages. Arguments are reordered; assertions themselves are unchanged.
public void CouldAddGetRecord()
{
    var path = TestUtils.GetPath();
    var table = new StreamBlockIndex(path, 16, LMDBEnvironmentFlags.WriteMap | LMDBEnvironmentFlags.NoSync, null);
    var streamId = (StreamLogId)1;

    var lr1 = new StreamBlockRecord(BufferRef.Create(0, 1)) { Version = 12 };
    Assert.IsTrue(table.TryAddBlockRecord(streamId, lr1));
    Assert.AreEqual(1, table.GetBlockCount((StreamLogId)1));

    Assert.IsTrue(table.TryGetLast(streamId, false, out var last));
    Assert.AreEqual(12, last.Version);
    Assert.AreEqual(lr1.BufferRef, last.BufferRef);
    table.Dispose();
}
// Accumulates `size` samples of this node's output into `data` starting at
// `start`. The input is pulled exactly once into an append-only chain of
// cached buffers; each voice (an entry in rbuffRef) then replays that chain
// at its own offset and attenuation, which is how the multi-voice effect is
// produced without re-rendering the input per voice.
unsafe public override void AccumulateImpl(float *data, int start, int size, int prefBufSz, FPCMFactoryGenLimit pcmFactory)
{
    // BYPASS IF NO POINT NOT TO
    ////////////////////////////////////////////////////////////////////////////////
    if (this.voices <= 1)
    {
        // This will probably rarely ever happen because it defeats the purpose of using
        // this node, but I'll take the optimization where I can get it.
        this.input.Accumulate(data, start, size, prefBufSz, pcmFactory);
        return;
    }

    // READ IN NEW INFORMATION BY APPENDING BUFFERS
    ////////////////////////////////////////////////////////////////////////////////
    int readAmt = size;
    while (readAmt > 0)
    {
        // Lazily create the very first cache buffer on the initial call.
        if (buffers.Count == 0)
        {
            FPCM firstEntry = pcmFactory.GetZeroedGlobalFPCM(0, prefBufSz);
            BufferedFPCM bfpmFirst = new BufferedFPCM(firstEntry, this.nextOffset);
            this.nextOffset += firstEntry.buffer.Length;
            this.buffers.Add(bfpmFirst);
        }

        int lastIdx = this.buffers.Count - 1;
        BufferedFPCM last = this.buffers[lastIdx];

        // Fill only as much of the newest buffer as it has capacity left for.
        int readBufAmt = Min(readAmt, last.readLeft);
        if (readBufAmt != 0)
        {
            int readHead = last.cachedLen - last.readLeft;

            // Read the new stuff requested
            float [] wbuf = last.buffer.buffer;
            fixed(float *pwbuf = wbuf)
            {
                this.input.Accumulate(
                    &pwbuf[readHead],
                    0,
                    readBufAmt,
                    prefBufSz,
                    pcmFactory);
            }

            // Update and save it back
            // (write-back suggests BufferedFPCM is a value type — TODO confirm)
            last.readLeft -= readBufAmt;
            this.buffers[lastIdx] = last;

            readAmt -= readBufAmt;
        }
        if (readAmt <= 0)
        {
            break;
        }

        // If it's not enough, create another buffer and continue. We
        // only create it here, because it will be filled the next
        // round-about in this loop.
        //
        // Exact same as above.
        FPCM newEntry = pcmFactory.GetZeroedGlobalFPCM(0, prefBufSz);
        BufferedFPCM bfpm = new BufferedFPCM(newEntry, this.nextOffset);
        this.nextOffset += newEntry.buffer.Length;
        this.buffers.Add(bfpm);
    }

    // WRITE INTO OUTPUT BUFFER
    ////////////////////////////////////////////////////////////////////////////////
    // Smallest buffer index any voice still needs; buffers below it are
    // retired in the maintenance pass at the end.
    int lowestIndexVal = int.MaxValue;
    for (int buffIt = 0; buffIt < this.rbuffRef.Length; ++buffIt)
    {
        int bsize = size;
        int bstart = start;
        BufferRef br = this.rbuffRef[buffIt];
        while (bsize > 0)
        {
            // A negative totalOffset means this voice hasn't started playing
            // yet: consume (skip) output samples until it catches up.
            if (br.totalOffset < 0)
            {
                int toSkip = Min(-br.totalOffset, bsize);
                bsize -= toSkip;
                bstart += toSkip;
                br.totalOffset += toSkip;
                if (bsize <= 0)
                {
                    break;
                }
            }

            int endOffset = br.totalOffset + bsize;
            BufferedFPCM buffer = this.buffers[br.idx];
            float [] a = buffer.buffer.buffer; // Hmm, tic tac toe
            int endBuffer = buffer.cachedEnd;

            // Either going to read to the end of the buffer, or the
            // end of the requested stream read, whichever comes first
            int canReadLeft = Min(endOffset, endBuffer) - br.totalOffset;
            int startingOffset = br.totalOffset - buffer.offset;

            // Mix this voice's attenuated samples into the output.
            for (int i = 0; i < canReadLeft; ++i)
            {
                data[bstart + i] += br.atten * a[startingOffset + i];
            }
            bstart += canReadLeft;
            bsize -= canReadLeft;
            br.totalOffset += canReadLeft;

            // If there's still more to read, that needs to be done
            // through the next buffer.
            if (bsize > 0)
            {
                ++br.idx;
            }
        }
        lowestIndexVal = Min(lowestIndexVal, br.idx);
        this.rbuffRef[buffIt] = br;
    }

    // MAINTENENCE, GET RID OF OLD GRODY STUFF
    ////////////////////////////////////////////////////////////////////////////////
    // Release buffers no voice will ever read again and rebase every voice's
    // buffer index to keep it valid after the removal.
    if (lowestIndexVal != 0)
    {
        for (int i = 0; i < lowestIndexVal; ++i)
        {
            this.buffers[i].buffer.Release();
        }
        this.buffers.RemoveRange(0, lowestIndexVal);
        for (int i = 0; i < this.rbuffRef.Length; ++i)
        {
            this.rbuffRef[i].idx -= lowestIndexVal;
        }
    }
}
// Creates a record pointing at the given buffer; Version and Timestamp start
// at their defaults and are assigned by the caller.
public StreamBlockRecord(BufferRef bufferRef)
{
    BufferRef = bufferRef;
    Version = default;
    Timestamp = default;
}
// End-to-end series test: sequential adds with per-add invariants, full
// forward enumeration, cursor navigation (MoveFirst/Last/Next/Previous and
// directed MoveAt lookups), an in-place record update, and the "ready block"
// sentinel behavior.
public void CouldWriteAndReadFromStreamLogAsSeries()
{
    var path = TestUtils.GetPath();
    var table = new StreamBlockIndex(path, 16, LMDBEnvironmentFlags.WriteMap | LMDBEnvironmentFlags.NoSync, null);
    var streamId = (StreamLogId)10000;
    var series = table.GetBlockRecordSeries(streamId);

    var count = 1000;

    // Add records one by one; count, First and Last must track each add.
    for (int i = 0; i < count; i++)
    {
        var lr1 = new StreamBlockRecord(BufferRef.Create(0, i + 1)) { Version = (ulong)i + 1, Timestamp = new Timestamp(i + 1) };
        Assert.IsTrue(table.TryAddBlockRecord(streamId, lr1));
        Assert.AreEqual(i + 1, table.GetBlockCount(streamId));
        Assert.AreEqual(i + 1, series.Count());
        var first = series.First;
        Assert.IsTrue(first.IsPresent);
        Assert.IsTrue(table.TryGetLast(streamId, true, out var lst));
        Assert.AreEqual((ulong)i + 1, lst.Version);
        var last = series.Last;
        Assert.IsTrue(last.IsPresent);
        Assert.AreEqual(last.Present.Key, i + 1);
        Assert.AreEqual(last.Present.Value.BufferRef, lr1.BufferRef);
    }

    // Forward enumeration yields records in version (insertion) order.
    var j = 1;
    foreach (var kvp in series)
    {
        Assert.AreEqual(kvp.Key, j);
        Assert.AreEqual(kvp.Value.Version, j);
        Assert.AreEqual(kvp.Value.BufferRef, BufferRef.Create(0, j));
        j++;
    }

    Assert.IsTrue(series.First.IsPresent);
    Assert.IsTrue(series.First.Present.Key == 1);
    Assert.IsTrue(series.Last.IsPresent);
    Assert.IsTrue(series.Last.Present.Key == (ulong)count);

    // Cursor navigation, including out-of-range MoveAt with LE/GE direction.
    var c = series.GetEnumerator();
    Assert.IsTrue(c.MoveFirst());
    Assert.IsTrue(c.CurrentKey == 1);
    Assert.IsFalse(c.MovePrevious());
    Assert.IsTrue(c.MoveNext());
    Assert.IsTrue(c.CurrentKey == 2);
    Assert.IsTrue(c.MoveLast());
    Assert.IsTrue(c.CurrentKey == (ulong)count);
    Assert.IsFalse(c.MoveNext());
    Assert.IsTrue(c.MovePrevious());
    Assert.IsTrue(c.CurrentKey == (ulong)count - 1);
    Assert.IsTrue(c.MoveAt(50, Lookup.EQ));
    Assert.IsTrue(c.CurrentKey == 50);
    Assert.IsTrue(c.MoveAt((ulong)count + 10, Lookup.LE));
    Assert.IsTrue(c.CurrentKey == (ulong)count);
    Assert.IsTrue(c.MoveAt(0, Lookup.GE));
    Assert.IsTrue(c.CurrentKey == 1);
    Assert.IsTrue(c.MoveAt(50, Lookup.LE));
    Assert.IsTrue(c.CurrentKey == 50);
    Assert.IsTrue(c.MoveAt(50, Lookup.LT));
    Assert.IsTrue(c.CurrentKey == 49);
    Assert.IsTrue(c.MoveAt(50, Lookup.GE));
    Assert.IsTrue(c.CurrentKey == 50);
    Assert.IsTrue(c.MoveAt(50, Lookup.GT));
    Assert.IsTrue(c.CurrentKey == 51);

    // Clear the current record's BufferRef and persist the update; the
    // cleared value must be visible through the cursor afterwards.
    var chunkRecord = c.CurrentValue;
    chunkRecord.BufferRef = default;
    var _ = table.UpdateBlockRecord(streamId, chunkRecord);

    // add more after update
    for (int i = count; i < count + 100; i++)
    {
        var lr1 = new StreamBlockRecord(BufferRef.Create(0, i + 1)) { Version = (ulong)i + 1 };
        Assert.IsTrue(table.TryAddBlockRecord(streamId, lr1));
        Assert.AreEqual(i + 1, table.GetBlockCount(streamId));
        Assert.AreEqual(i + 1, series.Count());
        var last = series.Last;
        Assert.IsTrue(last.IsPresent);
        Assert.AreEqual(last.Present.Key, i + 1);
        Assert.AreEqual(last.Present.Value.BufferRef, lr1.BufferRef);
    }

    Assert.IsTrue(c.MoveAt(chunkRecord.Version, Lookup.GE));
    Assert.IsTrue(c.CurrentValue.BufferRef == default);

    // ready chunk test
    // A record added with the ReadyBlockVersion sentinel is returned by
    // TryGetLast(..., false, ...) but is not visible via the series until
    // its version is updated.
    var lastVersion = series.Last.Present.Key;
    var readyChunk = new StreamBlockRecord() { Version = StreamBlockIndex.ReadyBlockVersion };
    table.TryAddBlockRecord(streamId, readyChunk);

    // not visible before update
    Assert.AreEqual(lastVersion, series.Last.Present.Value.Version);
    table.TryGetLast(streamId, false, out var lastIsReady);
    Assert.AreEqual(StreamBlockIndex.ReadyBlockVersion, lastIsReady.Version);

    //var newVersion = lastVersion + 1;
    //Assert.IsTrue(table.UpdateReadyChunkVersion(streamId, newVersion));
    //Assert.AreEqual(newVersion, series.Last.Present.Value.Version);

    c.Dispose();
    table.Dispose();
}
// Adds each block with the ReadyBlockVersion sentinel, then promotes it to
// its real version via UpdateReadyBlockVersion, checking count and Last
// after every promotion.
// Fix: removed a leftover debugging hook — `if (i == 254) Console.WriteLine("stop")`
// — which was a breakpoint anchor with no test value.
public void CouldUpdateNextBlock()
{
    var path = TestUtils.GetPath();
    var table = new StreamBlockIndex(path, 512, LMDBEnvironmentFlags.WriteMap | LMDBEnvironmentFlags.NoSync, null);
    var streamId = (StreamLogId)10000;
    var series = table.GetBlockRecordSeries(streamId);

    var count = 1000;
    using (Benchmark.Run("Writes", count))
    {
        for (int i = 0; i < count; i++)
        {
            var version = (ulong)i + 1;

            // Added as "ready": TryGetLast must report the sentinel version.
            var chunk = new StreamBlockRecord(BufferRef.Create(0, i + 1)) { Version = StreamBlockIndex.ReadyBlockVersion };
            if (!table.TryAddBlockRecord(streamId, chunk)) { Assert.Fail(); }
            if (!table.TryGetLast(streamId, false, out var lastReady)) { Assert.Fail(); }
            if (StreamBlockIndex.ReadyBlockVersion != lastReady.Version) { Assert.Fail(); }

            // Promote the sentinel to the real version.
            if (!table.UpdateReadyBlockVersion(streamId, version)) { Assert.Fail(); }
            if (i + 1 != table.GetBlockCount(streamId)) { Assert.Fail(); }

            var last = series.Last;
            if (!last.IsPresent) { Assert.Fail(); }
            if (last.Present.Key != (ulong)i + 1) { Assert.Fail(); }
            if (last.Present.Value.BufferRef != chunk.BufferRef) { Assert.Fail(); }
        }
    }
    Benchmark.Dump();
    Console.WriteLine("Finished");
    table.Dispose();
}
// Adds each block with its real version, then clears its BufferRef via
// UpdateBlockRecord and verifies the cleared value is what the series
// reports for that version.
public void CouldUpdateChunk()
{
    var path = TestUtils.GetPath();
    var table = new StreamBlockIndex(path, 512, LMDBEnvironmentFlags.WriteMap | LMDBEnvironmentFlags.NoSync, null);
    var streamId = (StreamLogId)10000;
    var series = table.GetBlockRecordSeries(streamId);

    var count = 1000;
    using (Benchmark.Run("Writes", count))
    {
        for (int i = 0; i < count; i++)
        {
            var version = (ulong)i + 1;
            var chunk = new StreamBlockRecord(BufferRef.Create(0, i + 1)) { Version = version };
            if (!table.TryAddBlockRecord(streamId, chunk)) { Assert.Fail("!table.TryAddChunkRecord(streamId, chunk)"); }
            if (!table.TryGetLast(streamId, false, out var lastReady)) { Assert.Fail("!table.TryGetLast(streamId, false, out var lastReady)"); }
            if (version != lastReady.Version) { Assert.Fail("version != lastReady.Version"); }

            // Clear the BufferRef and persist; the series must then report
            // the default BufferRef for the same version.
            chunk.BufferRef = default;
            if (!table.UpdateBlockRecord(streamId, chunk)) { Assert.Fail("!table.UpdateChunk(streamId, chunk)"); }

            if (i + 1 != table.GetBlockCount(streamId)) { Assert.Fail("i + 1 != table.GetChunkCount(streamId)"); }
            var last = series.Last;
            if (!last.IsPresent) { Assert.Fail("!last.IsPresent"); }
            if (last.Present.Key != (ulong)i + 1) { Assert.Fail("last.Present.Key != (ulong)i + 1"); }
            if (last.Present.Value.BufferRef != chunk.BufferRef) { Assert.Fail("last.Present.Value.BufferRef != chunk.BufferRef"); }
            if (last.Present.Value.BufferRef != default) { Assert.Fail("last.Present.Value.BufferRef != default"); }
        }
    }
    Benchmark.Dump();
    table.Dispose();
}
// Parses buffer-reference records out of a Tag and appends them to the
// parent's buffer_refs list. Layout (inferred from the indexing — TODO
// confirm against the writer): tag[0] holds the record count; records start
// at tag[2] with nine fields each: seven ints, one double (add time), and a
// user string.
public void parse(Tag tag)
{
    int size = get_int(tag[0]);
    int[] ivals = new int[7];
    for (int i = 0; i < size; i++)
    {
        int recordBase = 2 + i * 9;
        for (int k = 0; k < 7; k++)
        {
            ivals[k] = int.Parse(tag[recordBase + k].ToString());
        }
        double add_time = get_double(tag[recordBase + 7]);
        string user = tag[recordBase + 8].ToString();

        BufferRef bref = new BufferRef(ivals[0], ivals[1], ivals[2], ivals[3], ivals[4], ivals[5], ivals[6], add_time, user);
        parent_.buffer_refs.Add(bref);
    }
}
// Benchmark comparing checked indexer access against DangerousGet for the
// shared-memory buckets. Each buffer is first tagged with its own index so
// the access loops can (optionally) verify they read the right buffer.
public unsafe void BufferAccessBench()
{
    Settings.DoAdditionalCorrectnessChecks = false;
    var path = TestUtils.GetPath();
    var buckets = new SharedMemoryBuckets(path);

    var bufferCount = 100_000;
    // c.2800 = 12 MB (L3 cache on i7-8700)
    using (Benchmark.Run("Init (K)", bufferCount * 1000))
    {
        for (int i = 0; i < bufferCount; i++)
        {
            var bi = i + 1;
            var br = BufferRef.Create(0, bi);
            var buffer = buckets[br];
            buffer.WriteInt32(0, bi); // tag each buffer with its own index
        }
    }

    var count = 10_000_000;
    for (int r = 0; r < 10; r++)
    {
        // Unchecked path: DangerousGet skips the safety checks of the indexer.
        using (Benchmark.Run("Access Unsafe", count))
        {
            for (int i = 0; i < count; i++)
            {
                var bi = 1 + (i % bufferCount);
                var br = BufferRef.Create(0, bi);
                var buffer = buckets.DangerousGet(br);
                if (buffer.Length != 4096) { Assert.Fail(); }
                if (bi != *(int *)buffer.Data)
                {
                    // Assert.Fail($"bi [{bi}] != buffer.ReadInt32(0) [{buffer.ReadInt32(0)}]");
                }
            }
        }

        // Checked path: the indexer performs the full lookup with validation.
        using (Benchmark.Run("Access Safe", count))
        {
            for (int i = 0; i < count; i++)
            {
                var bi = 1 + (i % bufferCount);
                var br = BufferRef.Create(0, bi);
                var buffer = buckets[br];
                if (buffer.Length != 4096) { Assert.Fail(); }
                if (bi != *(int *)buffer.Data)
                {
                    // Assert.Fail($"bi [{bi}] != buffer.ReadInt32(0) [{buffer.ReadInt32(0)}]");
                }
            }
        }
    }
    Benchmark.Dump();
    buckets.Dispose();
}