/// <summary>
/// Async operations on FasterKV: each task opens its own session and
/// upserts random key/value pairs forever, counting completed ops in numOps.
/// </summary>
/// <param name="id">Task id; used as the session name and the RNG seed.</param>
static async Task AsyncOperator(int id)
{
    using var session = faster.For(new CacheFunctions()).NewSession<CacheFunctions>(id.ToString());
    Random rand = new Random(id);

    bool batched = true;       // whether we batch upserts before waiting for commit
    const int batchSize = 100; // operations per commit wait in batched mode

    await Task.Yield();

    var context = new CacheContext();

    if (!batched)
    {
        // Single commit version - upsert each item and wait for commit
        // Needs high parallelism (NumParallelTasks) for perf
        // Needs separate commit thread to perform regular checkpoints
        while (true)
        {
            try
            {
                var key = new CacheKey(rand.Next());
                var value = new CacheValue(rand.Next());
                session.Upsert(ref key, ref value, context);
                await session.WaitForCommitAsync();
                Interlocked.Increment(ref numOps);
            }
            catch (Exception ex)
            {
                Console.WriteLine($"{nameof(AsyncOperator)}({id}): {ex}");
            }
        }
    }
    else
    {
        // Batched version - we enqueue many entries to memory,
        // then wait for commit periodically
        int count = 0;
        while (true)
        {
            var key = new CacheKey(rand.Next());
            var value = new CacheValue(rand.Next());
            session.Upsert(ref key, ref value, context);

            // BUGFIX: pre-increment so we wait for commit after every batchSize
            // operations. The original (count++ % batchSize == 0) fired on the very
            // first iteration (count == 0), adding batchSize to numOps after a
            // single upsert and committing a 1-op "batch".
            if (++count % batchSize == 0)
            {
                await session.WaitForCommitAsync();
                Interlocked.Add(ref numOps, batchSize);
            }
        }
    }
}
/// <summary>
/// Async operations on FasterKV: each task opens its own session and
/// upserts random key/value pairs forever, counting completed ops in numOps.
/// Behavior is controlled by the local flags: batching, sync vs. async upsert,
/// and whether to wait for commit after each operation or batch.
/// </summary>
/// <param name="id">Task id; used as the session name and the RNG seed.</param>
static async Task AsyncOperator(int id)
{
    using var session = faster.For(new CacheFunctions()).NewSession<CacheFunctions>(id.ToString());
    Random rand = new(id);

    bool batched = true;        // whether we batch upserts on session
    bool asyncUpsert = false;   // whether we use sync or async upsert calls
    bool waitForCommit = false; // whether we wait for commit after each operation (or batch) on this session
    int batchSize = 100;        // batch size

    await Task.Yield();

    var context = new CacheContext();
    var taskBatch = new ValueTask<FasterKV<CacheKey, CacheValue>.UpsertAsyncResult<CacheInput, CacheOutput, CacheContext>>[batchSize];
    long seqNo = 0;

    if (!batched)
    {
        // Single upsert at a time, optionally waiting for commit
        // Needs high parallelism (NumParallelTasks) for perf
        // Separate commit thread performs regular checkpoints
        while (true)
        {
            try
            {
                var key = new CacheKey(rand.Next());
                var value = new CacheValue(rand.Next());
                if (asyncUpsert)
                {
                    var r = await session.UpsertAsync(ref key, ref value, context, seqNo++);
                    // UpsertAsync may return a pending result; loop until it settles.
                    while (r.Status.IsPending)
                        r = await r.CompleteAsync();
                }
                else
                {
                    session.Upsert(ref key, ref value, context, seqNo++);
                }
                if (waitForCommit)
                    await session.WaitForCommitAsync();
                Interlocked.Increment(ref numOps);
            }
            catch (Exception ex)
            {
                Console.WriteLine($"{nameof(AsyncOperator)}({id}): {ex}");
            }
        }
    }
    else
    {
        // Batched version - we enqueue many entries to memory,
        // then wait for commit periodically
        int count = 0;
        while (true)
        {
            var key = new CacheKey(rand.Next());
            var value = new CacheValue(rand.Next());
            if (asyncUpsert)
                taskBatch[count % batchSize] = session.UpsertAsync(ref key, ref value, context, seqNo++);
            else
                session.Upsert(ref key, ref value, context, seqNo++);

            // BUGFIX: pre-increment so the batch is drained after exactly batchSize
            // operations. The original (count++ % batchSize == 0) fired on the very
            // first iteration (count == 0): in asyncUpsert mode it then awaited
            // batchSize-1 unassigned (default) ValueTask slots in taskBatch, and in
            // all modes it over-reported numOps by batchSize after a single upsert.
            if (++count % batchSize == 0)
            {
                if (asyncUpsert)
                {
                    // Drain all pending upserts issued in this batch.
                    for (int i = 0; i < batchSize; i++)
                    {
                        var r = await taskBatch[i];
                        while (r.Status.IsPending)
                            r = await r.CompleteAsync();
                    }
                }
                if (waitForCommit)
                    await session.WaitForCommitAsync();
                Interlocked.Add(ref numOps, batchSize);
            }
        }
    }
}