/// <summary>
/// Pre-loads the store with <paramref name="count"/> upserts over random key ids in
/// [0, DbSize). Key and value payload lengths are drawn randomly from
/// [1, MaxKeySize) and [1, MaxValueSize) respectively; the value's payload byte is
/// derived from the key so reads can later validate what they get back.
/// </summary>
/// <param name="count">Number of upsert operations to issue.</param>
private static void PopulateStore(int count)
{
    using var session = h.For(new CacheFunctions(sizeTracker)).NewSession<CacheFunctions>();

    // Fixed seed: the initial population is reproducible across runs.
    var rng = new Random(0);

    Console.WriteLine("Writing random keys to fill cache");
    for (int n = 0; n < count; n++)
    {
        int keyId = rng.Next(DbSize);
        var key = new CacheKey(keyId, 1 + rng.Next(MaxKeySize - 1));
        var value = new CacheValue(1 + rng.Next(MaxValueSize - 1), (byte)key.key);
        session.Upsert(ref key, ref value);
    }
}
/// <summary>
/// Runs an endless per-thread random workload against the store: mostly reads
/// (uniform or zipf key distribution), with a WritePercent chance of an upsert.
/// Per-thread hit/miss counts are accumulated locally and flushed into the shared
/// statistics fields every 256 operations to keep Interlocked traffic low.
/// Read results are validated against the key; never returns normally.
/// </summary>
/// <param name="threadid">Thread index; also seeds this thread's RNG.</param>
private static void RandomWorkload(int threadid)
{
    Console.WriteLine("Issuing {0} random read workload of {1} reads from thread {2}", UseUniform ? "uniform" : "zipf", DbSize, threadid);

    using var session = h.For(new CacheFunctions(sizeTracker)).NewSession<CacheFunctions>();

    var rng = new Random(threadid);
    var zipf = new ZipfGenerator(rng, DbSize, Theta);
    CacheValue output = default;

    int hits = 0;
    int misses = 0;
    int ops = 0;
    while (true)
    {
        // Flush this thread's counters into the shared totals once per 256 ops.
        if (ops > 0 && ops % 256 == 0)
        {
            Interlocked.Add(ref statusFound, hits);
            Interlocked.Add(ref statusNotFound, misses);
            Interlocked.Add(ref totalReads, 256);
            hits = 0;
            misses = 0;
        }

        // Skip the RNG draw entirely in the read-only configuration.
        int op = WritePercent == 0 ? 0 : rng.Next(100);
        long k = UseUniform ? rng.Next(DbSize) : zipf.Next();
        var key = new CacheKey(k, 1 + rng.Next(MaxKeySize - 1));

        if (op < WritePercent)
        {
            var value = new CacheValue(1 + rng.Next(MaxValueSize - 1), (byte)key.key);
            session.Upsert(ref key, ref value);
        }
        else
        {
            var status = session.Read(ref key, ref output);
            if (status == Status.NOTFOUND)
            {
                misses++;
                if (UpsertOnCacheMiss)
                {
                    var value = new CacheValue(1 + rng.Next(MaxValueSize - 1), (byte)key.key);
                    session.Upsert(ref key, ref value);
                }
            }
            else if (status == Status.OK)
            {
                hits++;
                // The value's first payload byte encodes the key; mismatch means corruption.
                if (output.value[0] != (byte)key.key)
                    throw new Exception("Read error!");
            }
            else
            {
                throw new Exception("Error!");
            }
        }

        ops++;
    }
}
/// <summary>
/// Runs an endless per-thread random workload against the store: mostly reads
/// (uniform or zipf key distribution), with a WritePercent chance of an upsert.
/// The shared read counter is bumped every 256 operations, and a hit-rate /
/// eviction-count line is printed every 16M operations. Read results are
/// validated against the key; never returns normally.
/// </summary>
/// <param name="threadid">Thread index; also seeds this thread's RNG.</param>
private static void RandomWorkload(int threadid)
{
    Console.WriteLine("Issuing {0} random read workload of {1} reads from thread {2}", UseUniform ? "uniform" : "zipf", DbSize, threadid);

    using var session = h.For(new CacheFunctions()).NewSession<CacheFunctions>();

    var rng = new Random(threadid);
    var zipf = new ZipfGenerator(rng, DbSize, Theta);
    CacheValue output = default;

    // Per-thread counters; only consulted for the periodic hit-rate report.
    int misses = 0;
    int hits = 0;
    int ops = 0;
    while (true)
    {
        if (ops > 0 && ops % 256 == 0)
        {
            Interlocked.Add(ref totalReads, 256);

            // Report after every 16M ops (16M is a multiple of 256, so this nests cleanly).
            if (ops % (1024 * 1024 * 16) == 0)
                Console.WriteLine("Hit rate: {0:N2}; Evict count: {1}", hits / (double)(hits + misses), LogObserver.EvictCount);
        }

        // Skip the RNG draw entirely in the read-only configuration.
        int op = WritePercent == 0 ? 0 : rng.Next(100);
        long k = UseUniform ? rng.Next(DbSize) : zipf.Next();
        var key = new CacheKey(k);

        if (op < WritePercent)
        {
            var value = new CacheValue(k);
            session.Upsert(ref key, ref value);
        }
        else
        {
            var status = session.Read(ref key, ref output);
            if (status == Status.NOTFOUND)
            {
                misses++;
                if (UpsertOnCacheMiss)
                {
                    var value = new CacheValue(k);
                    session.Upsert(ref key, ref value);
                }
            }
            else if (status == Status.OK)
            {
                hits++;
                // Value payload encodes the key; mismatch means corruption.
                if (output.value != key.key)
                    throw new Exception("Read error!");
            }
            else
            {
                throw new Exception("Error!");
            }
        }

        ops++;
    }
}
/// <summary>
/// Runs an endless per-thread random workload against the store: mostly reads
/// (uniform or zipf key distribution), with a WritePercent chance of an upsert.
/// The shared read counter is bumped every 256 operations, and a hit-rate /
/// tracked-memory-footprint line is printed every 16M operations. Read results
/// are validated against the key; never returns normally.
/// </summary>
/// <param name="threadid">Thread index; also seeds this thread's RNG.</param>
private static void RandomWorkload(int threadid)
{
    Console.WriteLine("Issuing {0} random read workload of {1} reads from thread {2}", UseUniform ? "uniform" : "zipf", DbSize, threadid);

    using var session = h.For(new CacheFunctions(sizeTracker)).NewSession<CacheFunctions>();

    var rng = new Random(threadid);
    var zipf = new ZipfGenerator(rng, DbSize, Theta);
    CacheValue output = default;

    // Per-thread counters; only consulted for the periodic hit-rate report.
    int misses = 0;
    int hits = 0;
    int ops = 0;
    while (true)
    {
        if (ops > 0 && ops % 256 == 0)
        {
            Interlocked.Add(ref totalReads, 256);

            // Report after every 16M ops (16M is a multiple of 256, so this nests cleanly).
            if (ops % (1024 * 1024 * 16) == 0)
            {
                Console.WriteLine("Hit rate: {0:N2}; Memory footprint: {1:N2}KB", hits / (double)(hits + misses), sizeTracker.TotalSize / (double)1024);

                // To double-check that the tracked size is accurate, force a full GC,
                // re-report, then pause:
                /*
                 * GC.Collect();
                 * GC.WaitForFullGCComplete();
                 * Console.WriteLine("Hit rate: {0:N2}; Memory footprint: {1:N2}KB", hits / (double)(hits + misses), sizeTracker.TotalSize / (double)1024);
                 * Thread.Sleep(1000000);
                 */
            }
        }

        // Skip the RNG draw entirely in the read-only configuration.
        int op = WritePercent == 0 ? 0 : rng.Next(100);
        long k = UseUniform ? rng.Next(DbSize) : zipf.Next();
        var key = new CacheKey(k, 1 + rng.Next(MaxKeySize - 1));

        if (op < WritePercent)
        {
            var value = new CacheValue(1 + rng.Next(MaxValueSize - 1), (byte)key.key);
            session.Upsert(ref key, ref value);
        }
        else
        {
            var status = session.Read(ref key, ref output);
            if (status == Status.NOTFOUND)
            {
                misses++;
                if (UpsertOnCacheMiss)
                {
                    var value = new CacheValue(1 + rng.Next(MaxValueSize - 1), (byte)key.key);
                    session.Upsert(ref key, ref value);
                }
            }
            else if (status == Status.OK)
            {
                hits++;
                // The value's first payload byte encodes the key; mismatch means corruption.
                if (output.value[0] != (byte)key.key)
                    throw new Exception("Read error!");
            }
            else
            {
                throw new Exception("Error!");
            }
        }

        ops++;
    }
}