// args[0]: dataFile
// args[1]: opsFile
// args[2]: partitionCount
// args[3]: txCountPerExecutor
static void YCSBAsyncTestWithMemoryVersionDb(string[] args)
{
    int partitionCount = 4;
    int executorCount = partitionCount;
    int txCountPerExecutor = 200000;    // 200K

    string dataFile = "ycsb_data_r.in";
    const int recordCount = 1;          // 1M
    //string dataFile = "ycsb_data_m_r.in";
    //const int recordCount = 1000000;  // 5M
    //string dataFile = "ycsb_data_lg_r.in";
    //const int recordCount = 5000000;  // 10M
    //string dataFile = "ycsb_data_hg_r.in";
    //const int recordCount = 10000000;

    string operationFile = "ycsb_ops_r.in";

    if (args.Length > 1)
    {
        dataFile = args[0];
        operationFile = args[1];
        partitionCount = Int32.Parse(args[2]);
        executorCount = partitionCount;
        txCountPerExecutor = args.Length > 3 ? Int32.Parse(args[3]) : txCountPerExecutor;
    }

    // These settings have no effect in the SingletonVersionDb environment.
    const bool daemonMode = false;

    string[] tables =
    {
        YCSBAsyncBenchmarkTest.TABLE_ID,
        VersionDb.TX_TABLE
    };

    int currentExecutorCount = 1;
    RedisVersionDb versionDb = RedisVersionDb.Instance();
    // SingletonVersionDb versionDb = SingletonVersionDb.Instance(1);
    // SingletonPartitionedVersionDb versionDb = SingletonPartitionedVersionDb.Instance(1, true);

    YCSBAsyncBenchmarkTest test = new YCSBAsyncBenchmarkTest(
        recordCount, currentExecutorCount, txCountPerExecutor, versionDb, tables);
    test.Setup(dataFile, operationFile);

    for (; currentExecutorCount <= partitionCount; currentExecutorCount++)
    {
        if (currentExecutorCount > 1)
        {
            versionDb.AddPartition(currentExecutorCount);
        }
        test.ResetAndFillWorkerQueue(operationFile, currentExecutorCount);
        test.Run();
        test.Stats();
    }
}
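// The sketch below shows how the method above might be wired into a console entry
// point. The Program/Main context is an assumption for illustration; only the
// YCSBAsyncTestWithMemoryVersionDb signature is taken from the code above.
// Example invocation: Program.exe ycsb_data_r.in ycsb_ops_r.in 4 200000
static void Main(string[] args)
{
    // With fewer than two arguments the method falls back to its built-in defaults.
    YCSBAsyncTestWithMemoryVersionDb(args);
}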
public TPCCAsyncBenchmark(
    int workerCount,
    int workloadCountPerWorker,
    List<List<Tuple<string, int>>> instances = null)
{
    this.workerCount = workerCount;
    this.workloadCountPerWorker = workloadCountPerWorker;
    this.redisVersionDb = RedisVersionDb.Instance();
    this.executorList = new List<TransactionExecutor>();

    if (instances == null || instances.Count > workerCount)
    {
        throw new ArgumentException(
            "instances must not be null and its size must be smaller than or equal to workerCount");
    }
    this.partitionedInstances = instances;
}
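// A minimal sketch of building the partitioned-instance argument expected by the
// constructor above. Interpreting each Tuple<string, int> as a Redis connection
// string plus a database index is an assumption for illustration; the constructor
// itself only requires at most one inner list per worker (instances.Count <= workerCount).
List<List<Tuple<string, int>>> instances = new List<List<Tuple<string, int>>>
{
    // worker 0: one Redis instance, database 0
    new List<Tuple<string, int>> { Tuple.Create("127.0.0.1:6379", 0) },
    // worker 1: one Redis instance, database 1
    new List<Tuple<string, int>> { Tuple.Create("127.0.0.1:6379", 1) },
};
TPCCAsyncBenchmark benchmark = new TPCCAsyncBenchmark(
    workerCount: 2, workloadCountPerWorker: 1000, instances: instances);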
internal void RunTxOnly()
{
    VersionDb vdb = RedisVersionDb.Instance();
    for (int i = 0; i < this.txTaskQueue.Length; i++)
    {
        long txId = vdb.InsertNewTx();
        //vdb.UpdateCommitLowerBound(txId, 50);
        //vdb.SetAndGetCommitTime(txId, 90);
        //vdb.UpdateTxStatus(txId, TxStatus.Committed);
        this.FinishedTxs++;
    }
}
static void LocalRedisBenchmarkTest()
{
    int count = 100000, pipelineSize = 100;
    Random rand = new Random();
    const string chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
    Func<int, string> RandomString = (int length) =>
    {
        return new string(Enumerable.Repeat(chars, length)
            .Select(s => s[rand.Next(s.Length)]).ToArray());
    };
    Func<int, byte[]> RandomBytes = (int length) =>
    {
        byte[] value = new byte[length];
        rand.NextBytes(value);
        return value;
    };

    RedisVersionDb redisVersionDb = RedisVersionDb.Instance();

    // Non-Pipeline Mode
    using (RedisClient client = redisVersionDb.RedisManager.GetClient(3, 0))
    {
        long now = DateTime.Now.Ticks;
        for (int i = 0; i < count; i++)
        {
            string hashId = RandomString(3);
            byte[] key = BitConverter.GetBytes(3);
            byte[] value = RandomBytes(50);

            // Next(0, 2) picks HSet or HGet with equal probability; the exclusive
            // upper bound of Next(0, 1) would always return 0 and skip HGet entirely.
            int type = rand.Next(0, 2);
            if (type == 0)
            {
                client.HSet(hashId, key, value);
            }
            else
            {
                client.HGet(hashId, key);
            }
        }
        long time = DateTime.Now.Ticks - now;
        int throughput = (int)((count * 1.0) / (time * 1.0 / 10000000));
        Console.WriteLine("Redis Local Non Pipeline Throughput: {0} ops/s", throughput);
    }

    // Pipeline Mode
    using (RedisClient client = redisVersionDb.RedisManager.GetClient(3, 0))
    {
        long now = DateTime.Now.Ticks;
        int i = 0;
        while (i < count)
        {
            using (IRedisPipeline pipeline = client.CreatePipeline())
            {
                for (int j = 0; j < pipelineSize; j++)
                {
                    string hashId = RandomString(3);
                    byte[] key = BitConverter.GetBytes(3);
                    byte[] value = RandomBytes(50);

                    int type = rand.Next(0, 2);
                    if (type == 0)
                    {
                        pipeline.QueueCommand(
                            r => ((RedisNativeClient)r).HSet(hashId, key, value));
                    }
                    else
                    {
                        pipeline.QueueCommand(
                            r => ((RedisNativeClient)r).HGet(hashId, key));
                    }
                }
                pipeline.Flush();
            }
            i += pipelineSize;
        }
        long time = DateTime.Now.Ticks - now;
        int throughput = (int)((count * 1.0) / (time * 1.0 / 10000000));
        Console.WriteLine("Redis Local Pipeline({0}) Throughput: {1} ops/s", pipelineSize, throughput);
    }
}
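// The throughput formula above divides by 10,000,000 because DateTime ticks are
// 100 ns units, i.e. TimeSpan.TicksPerSecond == 10,000,000. A small helper that
// makes the conversion explicit; the helper name is ours, not part of the original code.
static int ComputeThroughput(int operationCount, long elapsedTicks)
{
    double elapsedSeconds = (double)elapsedTicks / TimeSpan.TicksPerSecond;  // 10,000,000 ticks per second
    return (int)(operationCount / elapsedSeconds);
}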
static void YCSBAsyncTestWithRedisVersionDb(string[] args)
{
    BenchmarkTestConfig config = args.Length > 1 ? new BenchmarkTestConfig(args) : new BenchmarkTestConfig();

    int partitionCount = config.WorkerCount;
    int executorCount = partitionCount;
    int txCountPerExecutor = config.WorkloadCount;
    int recordCount = config.RecordCount;

    string[] tables =
    {
        YCSBAsyncBenchmarkTest.TABLE_ID,
        VersionDb.TX_TABLE
    };

    string[] readWriteHosts = new string[]
    {
        // config.RedisHost,
        // "8r285aybUZ7+rQ3QgpoorfFodT6+NMDQsxkdfOHAL9w=@txservice.redis.cache.windows.net:6379",
        // "xnke5SdHz5xcsBF+OlZPL7PdzI7Vz3De7ntGI2fIye0=@elastas.redis.cache.windows.net:6379",
        "127.0.0.1:6379",
        //"127.0.0.1:6380",
        //"127.0.0.1:6381",
        //"127.0.0.1:6382",
        //"127.0.0.1:6383",
        //"127.0.0.1:6384",
        //"127.0.0.1:6385",
        //"127.0.0.1:6386",
        //"127.0.0.1:6387",
        //"127.0.0.1:6388",
        //"127.0.0.1:6389",
        //"127.0.0.1:6390",
        //"127.0.0.1:6391",
        //"10.1.9.8:6380",
        //"10.1.9.9:6380",
        //"10.1.9.10:6380",
        //"10.1.9.7:6380",
        //"10.1.9.8:6381",
        //"10.1.9.9:6381",
        //"10.1.9.10:6381",
        //"10.1.9.7:6381",
    };

    RedisVersionDb.PARTITIONS_PER_INSTANCE = config.WorkerPerRedisInstance;
    RedisVersionDb versionDb = RedisVersionDb.Instance(
        partitionCount, readWriteHosts, RedisVersionDbMode.Partition);

    if (config.MultiProcessMode)
    {
        versionDb.PhysicalTxPartitionByKey = key =>
        {
            int range = TxRange.GetRange(key);
            return range - range / YCSBAsyncBenchmarkTest.RANGE_OFFSET_PER_PROCESS *
                YCSBAsyncBenchmarkTest.RANGE_OFFSET_PER_PROCESS;
        };
    }

    YCSBAsyncBenchmarkTest test = new YCSBAsyncBenchmarkTest(
        recordCount, executorCount, txCountPerExecutor, versionDb, tables, config);

    test.Setup(null, null);
    test.Run();
    test.Stats();
}
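// In multi-process mode the PhysicalTxPartitionByKey delegate above maps a key's range
// into a per-process window: for integers, range - (range / N) * N equals range % N,
// so the lambda is simply an integer modulo by RANGE_OFFSET_PER_PROCESS. The helper
// below is only an illustration of that identity; the real constant lives in
// YCSBAsyncBenchmarkTest.RANGE_OFFSET_PER_PROCESS and is passed explicitly here.
static int PhysicalPartition(int range, int rangeOffsetPerProcess)
{
    // Equivalent to range % rangeOffsetPerProcess.
    return range - range / rangeOffsetPerProcess * rangeOffsetPerProcess;
}
// PhysicalPartition(9, 4) == 1, PhysicalPartition(4, 4) == 0, PhysicalPartition(3, 4) == 3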