// Builds one sample record from the test options, logs its JSON form with
// deep-array whitespace collapsed onto single lines, then logs the record's
// size in KB when serialized as BSON.
private static void printTestBsonDocument(POCTestOptions testOpts)
{
    // Sets up sample data - don't remove
    int[] arr = { testOpts.arrays[0], testOpts.arrays[1] };
    var tr = new TestRecord(testOpts.numFields, testOpts.depth, testOpts.textFieldLen,
        1, 12345678, POCTestOptions.NUMBER_SIZE, arr, testOpts.blobSize);

    String json = tr.internalDoc.ToJson();
    var compact = new StringBuilder();
    int arrayDepth = 0;           // current [ ... ] nesting level
    Boolean insideString = false; // toggled on each double quote

    // Collapse newlines - and, outside quoted strings, spaces - that occur
    // more than one array level deep, so nested arrays print compactly.
    foreach (char ch in json)
    {
        if (ch == '[')
        {
            arrayDepth++;
        }
        if (ch == ']')
        {
            arrayDepth--;
        }
        if (ch == '"')
        {
            insideString = !insideString;
        }
        if (arrayDepth > 1 && (ch == '\n' || (!insideString && ch == ' ')))
        {
            continue;
        }
        compact.Append(ch);
    }
    logger.Info(compact.ToString());

    byte[] bsonBytes = tr.internalDoc.ToBson();
    long length = bsonBytes.LongLength;
    logger.Info(String.Format("Records are {0:0.##} KB each as BSON", (float)length / 1024));
}
// Queues (or, for findAndModify mode, immediately executes) an update of a
// single document. When key is null a random _id within the most recent
// working-set percentage of inserts is targeted; otherwise the supplied key
// is used. Records one "updates" operation in the test results.
private void updateSingleRecord(List<WriteModel<BsonDocument>> bulkWriter, BsonDocument key)
{
    rotateCollection();

    // Build the key query.
    BsonDocument query = new BsonDocument();
    if (key == null)
    {
        // Pick a random record from the last <workingset>% of the sequence.
        int range = sequence * testOpts.workingset / 100;
        int rest = sequence - range;
        int recordno = rest + getNextVal(range);
        query.Add("_id", new BsonDocument("w", workerID).Add("i", recordno));
    }
    else
    {
        query.Add("_id", key);
    }

    // Never try to update more fields than the record actually has.
    int updateFields = (testOpts.updateFields <= testOpts.numFields)
        ? testOpts.updateFields
        : testOpts.numFields;

    BsonDocument change;
    if (updateFields == 1)
    {
        // Single in-place field update: set fld0 to a random number.
        long changedfield = getNextVal((int)POCTestOptions.NUMBER_SIZE);
        change = new BsonDocument("$set", new BsonDocument("fld0", changedfield));
    }
    else
    {
        // Replace the document's fields with freshly generated values,
        // keeping the original _id.
        TestRecord tr = createNewRecord();
        tr.internalDoc.Remove("_id");
        change = new BsonDocument("$set", tr.internalDoc);
    }

    if (!testOpts.findandmodify)
    {
        bulkWriter.Add(new UpdateManyModel<BsonDocument>(query, change));
    }
    else
    {
        coll.FindOneAndUpdate(query, change); // These are immediate not batches
    }
    testResults.RecordOpsDone("updates", 1);
}
// One-time setup before the workers run: optionally drops the collection,
// creates the requested secondary indexes and an optional weighted
// full-text index, records the starting document count, and configures
// sharding when not running against a single server.
private void PrepareSystem(POCTestOptions testOpts, POCTestResults results)
{
    // Create indexes and suchlike
    IMongoDatabase db = mongoClient.GetDatabase(testOpts.namespaces[0]);
    IMongoCollection<BsonDocument> coll = db.GetCollection<BsonDocument>(testOpts.namespaces[1]);

    if (testOpts.emptyFirst)
    {
        db.DropCollection(testOpts.namespaces[1]);
    }

    // Secondary indexes over the first N fields of a sample record.
    TestRecord testRecord = new TestRecord(testOpts);
    List<String> fields = testRecord.listFields();
    for (int x = 0; x < testOpts.secondaryidx; x++)
    {
        coll.Indexes.CreateOne(new BsonDocument(fields[x], 1));
    }

    if (testOpts.fulltext)
    {
        // Wildcard text index, weighted towards the lorem field.
        var weights = new BsonDocument
        {
            { "lorem", 15 },
            { "_fulltext.text", 5 }
        };
        var options = new CreateIndexOptions { Background = true, Weights = weights };
        var index = new BsonDocument { { "$**", "text" } };
        coll.Indexes.CreateOne(index, options);
    }

    results.initialCount = coll.Count(new BsonDocument());

    // Now have a look and see if we are sharded, and how many shards there
    // are, and make sure that the collection is sharded.
    if (!testOpts.singleserver)
    {
        ConfigureSharding(testOpts);
    }
}
// Main worker loop: executes either randomly mixed operations or a preset
// workflow string (one character per op: i=insert, u=update, p=pop key,
// k=key query) until the test duration elapses or the worker is cancelled.
// Batched writes accumulate in a bulk writer and are flushed every
// testOpts.batchSize iterations. All exceptions are caught and logged so a
// worker failure does not take down the process.
public void run(Object arg)
{
    // Use a bulk inserter - even if only for one
    List<WriteModel<BsonDocument>> bulkWriter;
    try
    {
        bulkWriter = new List<WriteModel<BsonDocument>>();
        int bulkops = 0;
        int c = 0;
        logger.Info("Worker thread " + workerID + " Started.");
        while (testResults.GetSecondsElapsed() < testOpts.duration)
        {
            if (isCancelled)
            {
                break;
            }
            c++;
            // Timer isn't granular enough to sleep precisely per op, so
            // approximate the requested ops/sec with a fixed per-op sleep.
            if (testOpts.opsPerSecond > 0)
            {
                double threads = testOpts.numThreads;
                double opsperthreadsecond = testOpts.opsPerSecond / threads;
                double sleeptimems = 1000 / opsperthreadsecond;
                if (c == 1)
                {
                    // First time, randomise the sleep so threads de-align.
                    Random r = new Random();
                    sleeptimems = r.Next((int)Math.Floor(sleeptimems));
                }
                Thread.Sleep((int)Math.Floor(sleeptimems));
            }
            if (!workflowed)
            {
                // Choose the type of op proportionally to the configured mix.
                int allops = testOpts.insertops + testOpts.keyqueries
                    + testOpts.updates + testOpts.rangequeries + testOpts.arrayupdates;
                int randop = getNextVal(allops);

                if (randop < testOpts.insertops)
                {
                    insertNewRecord(bulkWriter);
                    bulkops++;
                }
                else if (randop < testOpts.insertops + testOpts.keyqueries)
                {
                    simpleKeyQuery();
                }
                else if (randop < testOpts.insertops + testOpts.keyqueries
                    + testOpts.rangequeries)
                {
                    rangeQuery();
                }
                else
                {
                    // An in place single field update
                    // fld 0 - set to random number
                    updateSingleRecord(bulkWriter);
                    if (!testOpts.findandmodify)
                    {
                        bulkops++;
                    }
                }
            }
            else
            {
                // Following a preset workflow - one character per step.
                // BUGFIX: C# Substring takes (start, length), not (start, end)
                // as Java's substring does. The old (workflowStep, workflowStep + 1)
                // returned multi-character strings for any step > 0 (matching no
                // op) and could throw ArgumentOutOfRangeException near the end
                // of the workflow string.
                String wfop = workflow.Substring(workflowStep, 1);

                if (wfop.Equals("i"))
                {
                    // Insert a new record, push its key onto our stack
                    TestRecord r = insertNewRecord(bulkWriter);
                    keyStack.Add((BsonDocument)r.internalDoc.GetValue("_id"));
                    bulkops++;
                }
                else if (wfop.Equals("u"))
                {
                    // Update the record whose key is on top of the stack
                    if (keyStack.Count > 0)
                    {
                        updateSingleRecord(bulkWriter, keyStack[keyStack.Count - 1]);
                        if (!testOpts.findandmodify)
                        {
                            bulkops++;
                        }
                    }
                }
                else if (wfop.Equals("p"))
                {
                    // Pop the top thing off the stack
                    if (keyStack.Count > 0)
                    {
                        keyStack.RemoveAt(keyStack.Count - 1);
                    }
                }
                else if (wfop.Equals("k"))
                {
                    // Find a new record and put it on the stack
                    BsonDocument r = simpleKeyQuery();
                    if (r != null)
                    {
                        keyStack.Add((BsonDocument)r.GetValue("_id"));
                    }
                }

                // If we have reached the end of the wfops then reset
                workflowStep++;
                if (workflowStep >= workflow.Length)
                {
                    workflowStep = 0;
                    keyStack = new List<BsonDocument>();
                }
            }

            if (c % testOpts.batchSize == 0)
            {
                if (bulkops > 0)
                {
                    flushBulkOps(bulkWriter);
                    bulkWriter.Clear();
                    bulkops = 0;
                    // Check and see if we need to rejig sharding
                    if (numShards != testOpts.numShards)
                    {
                        ReviewShards();
                    }
                }
            }
        }
    }
    catch (Exception e)
    {
        logger.Error("Error: " + e.Message);
        if (testOpts.debug)
        {
            logger.Debug(e.StackTrace);
        }
    }
}