// Smoke-test for the document DB layer: builds two secondary indices,
// inserts three documents, scans them, then applies an $inc update to
// every document and scans again to verify the change took effect.
static void document_db_test() {
    Console.WriteLine("======================= Document DB Test ==============================");

    LayerManager raw_db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\main");
    StepsDatabase db_broker = new StepsDatabase(raw_db);
    IStepsDocumentDB doc_db = db_broker.getDocumentDatabase();

    // secondary indices on "name" and "age"
    doc_db.ensureIndex(new string[] { "name" });
    doc_db.ensureIndex(new string[] { "age" });

    doc_db.Insert(new BsonDocument { { "_id", "user1" }, { "name", "David" }, { "age", 60 } });
    doc_db.Insert(new BsonDocument { { "_id", "user2" }, { "name", "Tom" }, { "age", 32 } });
    doc_db.Insert(new BsonDocument { { "_id", "user3" }, { "name", "Tom" }, { "age", 32 } });

    raw_db.debugDump();

    // readback: an empty query document matches every record
    int count = 0;
    foreach (var doc in doc_db.Find(new BsonDocument())) {
        Console.WriteLine(" [{0}] = {1}", count++, doc.ToJson());
    }

    // bump every age by one
    var change_spec = new BsonDocument { { "$inc", new BsonDocument { { "age", 1 } } } };
    Console.WriteLine("change spec = " + change_spec.ToJson());
    doc_db.Update(new BsonDocument(), change_spec);

    raw_db.debugDump();

    // BUGFIX: restart the record index for the post-update readback
    // (previously the counter carried over from the first scan, so the
    // second dump's indices started where the first left off)
    count = 0;
    foreach (var doc in doc_db.Find(new BsonDocument())) {
        Console.WriteLine(" [{0}] = {1}", count++, doc.ToJson());
    }
}
/// <summary>
/// Blocks until <paramref name="srvr"/> reaches the requested replication
/// state, polling once per second. If the state is not reached within
/// <paramref name="max_wait_seconds"/> attempts, the database is dumped for
/// diagnostics and the test process exits with code 1.
/// </summary>
/// <param name="db">database handle, dumped for diagnostics on failure</param>
/// <param name="srvr">replication handler being watched</param>
/// <param name="state">target state to wait for</param>
/// <param name="max_wait_seconds">number of 1-second polls; defaults to the previously hard-coded 20</param>
public static void waitUntilState(LayerManager db, ReplHandler srvr, ReplState state, int max_wait_seconds = 20) {
    for (int x = 0; x < max_wait_seconds; x++) {
        if (srvr.State == state) {
            break;
        }
        Console.WriteLine("waiting for ({0}) to become {1}.. (currently: {2})", srvr.ToString(), state, srvr.State);
        Thread.Sleep(1000);
    }

    // re-check after the loop: we may have fallen out by exhausting the polls
    if (srvr.State != state) {
        db.debugDump();
        Console.WriteLine("server({0}) failed to become {1}, aborting test", srvr.ToString(), state);
        Environment.Exit(1);
    }
    Console.WriteLine("Server ({0}) is now {1}!", srvr, state);
}
// Writing a deletion tombstone after taking a snapshot must hide the
// record from scans through the live (post-snapshot) view: with both
// records deleted, a full forward scan should yield nothing.
public void T000_TestBasic_SnapshotTombstones() {
    LayerManager base_db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\snapts");
    StepsStageSnapshot stage =
        new StepsStageSnapshot(
            new StepsStageSubset(new RecordKeyType_String("snapdb"), base_db));

    // seed one tombstone and one live record
    stage.setValue(new RecordKey().appendParsedKey("b/1"), RecordUpdate.DeletionTombstone());
    stage.setValue(new RecordKey().appendParsedKey("a/1"), RecordUpdate.WithPayload("data1"));

    // capture a snapshot, then delete the live record afterward
    var frozen_view = stage.getSnapshot();
    stage.setValue(new RecordKey().appendParsedKey("a/1"), RecordUpdate.DeletionTombstone());

    base_db.debugDump();

    int visible = 0;
    foreach (var record in stage.scanForward(ScanRange<RecordKey>.All())) {
        Console.WriteLine("found record: " + record);
        visible++;
    }
    Assert.AreEqual(0, visible, "deletion tombstones didn't work in snapshot");
}
// TEST: test multiple segments flushed, and "log resumed" (walk .ROOT range map)
// Resumes the database produced by T03, verifies its records survived the
// RESUME (and were checkpoint-dropped out of the working segment), then
// flushes several more single-record segments, resumes again, and checks
// that both the original and the new records are still visible.
public void T04_SingleSegmentRootMetadataLogRecovery() {
    // perform the previous test
    T03_SegmentLayerGetRecordApplicationOrder();

    // ... and then perform a resume
    LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

    String[] keys = { "test-1", "test-2", "test-3" };
    String[] values = { "a-second", "b-second", "c-second" };

    // verify that it has the same data as before the RESUME
    {
        // working segment should be empty
        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(keys[i]);

            // look directly in the working segment, they should be MISSING
            // This is testing the checkpoint as well. If log resume didn't
            // CHECKPOINT_DROP, then the values will be duplicated in the working segment.
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
            }

            // assure the global query interface finds the NEW VALUES
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
            }
        }

        // now generate a BUNCH of new segments...
        {
            String[] secondkeys = { "second-test-1", "second-test-2", "second-test-3" };
            String[] secondvalues = { "a-second", "b-second", "c-second" };

            // put each new record in its OWN segment (one flush per write group)
            for (int i = 0; i < secondkeys.Length; i++) {
                LayerWriteGroup txn = db.newWriteGroup();
                txn.setValueParsed(secondkeys[i], secondvalues[i]);
                txn.finish();
                db.flushWorkingSegment();
            }

            db.Dispose();
            // NOTE(review): debugDump() is invoked on the just-disposed handle;
            // confirm this is safe / intended (dump-before-dispose seems more likely).
            db.debugDump();

            // RESUME
            db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

            // first test records should still be visible
            for (int i = 0; i < keys.Length; i++) {
                RecordKey key = new RecordKey();
                key.appendKeyPart(keys[i]);

                // look directly in the working segment, they should be MISSING
                // This is testing the checkpoint as well. If log resume didn't
                // CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                {
                    RecordUpdate update;
                    GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                    Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING {0}", key);
                }

                // assure the global query interface finds the NEW VALUES
                {
                    RecordData data;
                    GetStatus status = db.getRecord(key, out data);
                    Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES : {0}", key);
                    Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES : {0}", key);
                }
            }

            db.debugDump();

            // verify that the secondkeys/values are still in there
            for (int i = 0; i < secondkeys.Length; i++) {
                RecordKey key = new RecordKey();
                key.appendKeyPart(secondkeys[i]);

                // look directly in the working segment, they should be MISSING
                // This is testing the checkpoint as well. If log resume didn't
                // CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                {
                    RecordUpdate update;
                    GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                    Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
                }

                // assure the global query interface finds the NEW VALUES
                {
                    RecordData data;
                    GetStatus status = db.getRecord(key, out data);
                    Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES, where is: " + key);
                    Assert.AreEqual(secondvalues[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                }
            }
        }
        db.Dispose();
    }
}
// Flushes the working segment to disk and verifies: the checkpoint leaves a
// single segment layer, the .ROOT generation-count metadata record was
// written (both via getRecord and directly in the working segment), and the
// flushed records are gone from the working segment while remaining visible
// through the layered getRecord() interface.
public void T02_LayerSegmentFlushAndFreespaceModification() {
    String[] keys = { "test-1", "test-2", "test-3" };
    String[] values = { "a", "b", "c" };
    System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\5");
    db.debugDump();

    // write the test records in one write group, then force a flush
    LayerWriteGroup txn = db.newWriteGroup();
    for (int i = 0; i < keys.Length; i++) {
        txn.setValueParsed(keys[i], values[i]);
    }
    txn.finish();
    db.flushWorkingSegment();
    db.debugDump();

    // assure that we checkpointed down to a single working segment
    Assert.AreEqual(1, db.segmentlayers.Count, "segment layer count");

    // assure we allocated a new generation and gen0 range record (walk .ROOT range map)
    // TODO: move this test to RangemapManager, to remove this cross-dependency
    {
        RecordData data;
        RecordKey key = new RecordKey().appendParsedKey(".ROOT/VARS/NUMGENERATIONS");
        Assert.AreEqual(GetStatus.PRESENT, db.getRecord(key, out data), "missing numgenerations record");
        Assert.AreEqual("1", data.ReadDataAsString(), "generation count 1");

        RecordUpdate update;
        Assert.AreEqual(GetStatus.PRESENT, db.workingSegment.getRecordUpdate(key, out update),
            "missing workingsegment numgenerations record");
        Assert.AreEqual("1", enc.GetString(update.data), "generation count 2");
    }

    // (removed a dead "#if false" + "if (false)" double-disabled check of the
    //  .ROOT/GEN/0/</> key -- it was never compiled or executed)

    // TODO: assure we subtracted the new range record from the freespace

    // assure the records we wrote are NOT in the working segment, but ARE in the next layer
    for (int i = 0; i < keys.Length; i++) {
        RecordKey key = new RecordKey();
        key.appendKeyPart(keys[i]);

        // look directly in the working segment, they should be **MISSING*
        {
            RecordUpdate update;
            GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
            Assert.AreEqual(GetStatus.MISSING, status, "working segment should be clear");
        }

        // assure the global query interface finds it
        {
            RecordData data;
            GetStatus status = db.getRecord(key, out data);
            Assert.AreEqual(GetStatus.PRESENT, status, "records should be found in layers, {0} missing", key);
            Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord()");
        }
    }

    db.Dispose();
}
// Records written before and after DEBUG_addNewWorkingSegmentWithoutFlush()
// must both remain reachable, via FindNext and via scanForward.
public void T001_MultiWorkingSegmentReadWrite() {
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\3");

    RecordKey key_a = new RecordKey().appendParsedKey(".data/a");
    db.setValueParsed(".data/a", "1");

    // point lookup of the first record
    KeyValuePair<RecordKey, RecordData> hit;
    try {
        hit = db.FindNext(key_a, true);
        Assert.AreEqual(key_a, hit.Key, "fetched key does not match");
    } catch (KeyNotFoundException) {
        Assert.Fail("couldn't find 'a' record");
    }

    // range scan over exactly the first key's prefix
    int matched = 0;
    foreach (var entry in db.scanForward(
                 new ScanRange<RecordKey>(key_a, RecordKey.AfterPrefix(key_a), null))) {
        matched++;
    }
    Assert.AreEqual(1, matched, "found the wrong number of records in working segment scan!");

    // swap in a fresh working segment (no flush) and add a second record
    db.DEBUG_addNewWorkingSegmentWithoutFlush();
    db.setValueParsed(".data/b", "2");

    Console.WriteLine("");
    Console.WriteLine("--- contents --");
    db.debugDump();
    Console.WriteLine("");

    // ------------------------------
    try {
        RecordKey key_b = new RecordKey().appendParsedKey(".data/b");
        hit = db.FindNext(key_b, true);
        Assert.AreEqual(key_b, hit.Key, "fetched key does not match (after flush)");
    } catch (KeyNotFoundException) {
        Assert.Fail("couldn't find 'b' record");
    }

    // both records should now show up under the .data prefix
    matched = 0;
    RecordKey prefix = new RecordKey().appendParsedKey(".data");
    foreach (var entry in db.scanForward(
                 new ScanRange<RecordKey>(prefix, RecordKey.AfterPrefix(prefix), null))) {
        matched++;
    }
    Assert.AreEqual(2, matched, "found the wrong number of records after working segment addition !");
}
// Long-running bring-up/integration exercise driven from the debug GUI:
// writes a few records, flushes segments, performs single and bulk merges,
// closes and RESUMEs the store, then hammers a second store with ~1M writes
// while periodically committing, flushing, merging, and reporting throughput.
// NOTE(review): never returns -- ends with Environment.Exit(0).
public static void do_bringup_test(DbgGUI win) {
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\main");

    db.setValueParsed("test/3", "a");
    db.setValueParsed("test/2", "b");
    db.setValueParsed("test/1", "c");
    db.debugDump();

    db.flushWorkingSegment();       // this will flush and read the current segment
    Console.WriteLine("--- after flush");
    db.debugDump();
    dumpMergeCandidates(db);

    Console.WriteLine("--- check record read");
    RecordData data;
    GetStatus status = db.getRecord(new RecordKey().appendParsedKey("test/3"), out data);
    // NOTE(review): status is never checked; data.ToString() will throw if the
    // record came back MISSING -- confirm getRecord guarantees PRESENT here.
    System.Console.WriteLine("getRecord({0}) returned {1}", "test/3", data.ToString());

    Console.WriteLine("--- make lots of segments");
    // one flush per write => three more on-disk segments
    db.setValueParsed("test/4", "d");
    db.flushWorkingSegment();
    db.setValueParsed("test/5", "e");
    db.flushWorkingSegment();
    db.setValueParsed("test/6", "f");
    db.flushWorkingSegment();
    db.debugDump();

    System.Console.WriteLine("-------- dump keys ---------------------");
    dumpAllDbRecords(db);
    dumpMergeCandidates(db);

    System.Console.WriteLine("-------- draw graphical debug ---------------------");
    win.debugDump(db);

    System.Console.WriteLine("-------- PERFORMING A SINGLE MERGE ---------------------");
    MergeCandidate mc;
    mc = db.rangemapmgr.mergeManager.getBestCandidate();
    System.Console.WriteLine("MERGE :" + mc);
    db.performMerge(mc);
    dumpMergeCandidates(db);
    db.flushWorkingSegment();
    db.debugDump();
    dumpSegmentList(db);
    win.debugDump(db);

    System.Console.WriteLine("-------- SINGLE MERGE DONE, close/dispose ---------------------");
    dumpSegmentList(db);
    dumpMergeCandidates(db);
    db.debugDump();
    db.Dispose();

    System.Console.WriteLine("-------- NOW RESUME ---------------------------------");
    db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\main");
    dumpSegmentList(db);
    dumpMergeCandidates(db);
    win.debugDump(db);
    db.debugDump();

    System.Console.WriteLine("-------- NOW FINDNEXT ---------------------------------");
    dumpAllDbRecords(db);
    win.debugDump(db);

    System.Console.WriteLine("-------- NOW MERGE ALL SEGMENTS ---------------------------------");
    dumpSegmentList(db);
    db.mergeAllSegments();
    db.debugDump();
    win.debugDump(db);

    // stop(); // ------------------------- (( S T O P )) ---------------------------------

    System.Console.WriteLine("-------- NOW FINDNEXT (after merge) ---------------------------------");
    dumpAllDbRecords(db);

    //System.Console.WriteLine("-------- Now run Readthreads Test ---------------------------------");
    //A03_LayerManagerTests test = new A03_LayerManagerTests();
    //test.T10_LayerManager_ReadThreads();

    dumpMergeCandidates(db);
    win.debugDump(db);
    db.Dispose();

    System.Console.WriteLine("-------- Write ALOT of data ---------------------------------");
    // bulk-write parameters
    int keysize = 20;                  // approximate payload size in chars (built from keybase repeats)
    int keycount = 1000000;            // total records to write
    int flush_period = 40000;          // writes between flush + merge cycles
    int commit_period = 1000;          // writes per write-group commit
    bool random_order = true;          // random vs. sequential key order
    double elapsed;
    DateTime start = DateTime.Now;
    int record_count = 0;

    db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\bigtest");

    // build the payload by repeating keybase until keysize is (approximately) reached
    String value = "";
    String keybase = "TestValueDataABC";
    for (int x = 0; x < keysize / keybase.Length; x++) { value = value + keybase; }

    // log the seed so a failing run can be reproduced
    int seed = (int)DateTime.Now.Ticks;
    Random rnd = new Random(seed);
    System.Console.WriteLine("*** RANDOM SEED: " + seed);

    var write_group = db.newWriteGroup();
    for (int x = 10000001; x < 10000001 + keycount; x++) {
        if (random_order) {
            write_group.setValueParsed("test/rnd/" + rnd.Next(), value);
        } else {
            write_group.setValueParsed("test/ordered/" + x, value);
        }
        record_count++;

        // periodic commit: finish the group and start a fresh one
        if (x % commit_period == 0) {
            write_group.finish();
            write_group = db.newWriteGroup();
        }

        // periodic flush plus a bounded merge pass
        if (x % flush_period == 0) {
            System.Console.WriteLine("*** RANDOM SEED: " + seed);
            write_group.finish();
            write_group = db.newWriteGroup();
            // NOTE(review): message says "% 1000" but this branch runs every
            // flush_period (40000) writes -- stale text, left as-is.
            System.Console.WriteLine("start % 1000 cycle..");
            db.flushWorkingSegment();
            db.freespacemgr.debugDumbCurrentFreespace();
            win.debugDump(db);
            dumpMergeCandidates(db);

            // merge until no candidate remains, the score threshold is
            // exceeded, or 30 merges have been performed
            for (int mx = 0; mx < 30; mx++) {
                mc = db.rangemapmgr.mergeManager.getBestCandidate();
                if (mc == null) { break; }
                if (mc.score() > (1.6 + (float)db.rangemapmgr.mergeManager.getMaxGeneration() / 12.0f)) {
                    System.Console.WriteLine("** best merge score too high: " + mc);
                    break;
                }
                System.Console.WriteLine("merge " + mx + " : " + mc);
                win.debugDump(db, mc);
                db.performMerge(mc);
                System.Console.WriteLine("mergedone " + mx + " : " + mc);
                dumpSegmentList(db);
                dumpMergeCandidates(db);
                win.debugDump(db);
            }

            // progress / throughput report
            elapsed = (DateTime.Now - start).TotalMilliseconds / 1000.0;
            System.GC.Collect();
            Console.WriteLine("*************************************************");
            Console.WriteLine("*************************************************");
            Console.WriteLine("*** merge cycle done {0} records so far, in {1} total time, {2} records/second", record_count, elapsed, (double)record_count / elapsed);
            Console.WriteLine("*************************************************");
            Console.WriteLine("*************************************************");
        }
    }

    System.Console.WriteLine("-------- Merge a bunch more ------------------");
    // final merge pass: up to 30 merges, no score cutoff this time
    for (int x = 0; x < 30; x++) {
        mc = db.rangemapmgr.mergeManager.getBestCandidate();
        System.Console.WriteLine("merge : " + mc);
        if (mc == null) break;
        win.debugDump(db, mc);
        db.performMerge(mc);
        dumpSegmentList(db);
        dumpMergeCandidates(db);
        win.debugDump(db, null);
    }
    dumpSegmentList(db);

    // final throughput report
    elapsed = (DateTime.Now - start).TotalMilliseconds / 1000.0;
    Console.WriteLine("*************************************************");
    Console.WriteLine("*************************************************");
    Console.WriteLine("*** merge cycle done {0} records so far, in {1} total time, {2} records/second", record_count, elapsed, (double)record_count / elapsed);
    Console.WriteLine("*************************************************");
    Console.WriteLine("*************************************************");
    System.Console.WriteLine("** done.");
    Environment.Exit(0);
}
// End-to-end replication exercise: spins up two replicas, writes through
// both, shuts one down, resumes it, truncates logs on both, then joins a
// third replica. Relies on real sleeps for propagation, so it is
// timing-sensitive by design.
static void do_test() {
    Console.WriteLine("ReplTest1 startup...");
    LayerManager raw_db = new LayerManager(InitMode.NEW_REGION, @"C:\BENDtst\repl");
    StepsDatabase db_factory = new StepsDatabase(raw_db);

    Console.WriteLine("----------------[ init two servers together, write some records ]-----------------");
    ReplHandler repl_1 = db_factory.getReplicatedDatabase_Fresh("guid1");
    waitUntilActive(raw_db, repl_1);
    repl_1.setValueParsed("a/1", "1");

    // second replica joins via the first server's guid
    ReplHandler repl_2 = db_factory.getReplicatedDatabase_Join("guid2", repl_1.getServerGuid());
    waitUntilActive(raw_db, repl_2);
    repl_2.setValueParsed("a/2", "5");

    Console.WriteLine("-----------------");
    raw_db.debugDump();

    repl_2.setValueParsed("a/2", "6");
    // give replication time to propagate before dumping
    Thread.Sleep(7000);
    raw_db.debugDump();

    Console.WriteLine("-----------------[ remove one server, write some records ]----------------");
    repl_2.Shutdown();
    // wait until repl2 is really shutdown
    waitUntilState(raw_db, repl_2, ReplState.shutdown);

    // make sure our log does not continue from repl_2 logs
    repl_1.setValueParsed("c/1", "10");
    repl_1.truncateLogs_Hack();
    raw_db.debugDump();

    Console.WriteLine("----------------[ reinit server 2 ]-----------------------------");
    repl_2 = db_factory.getReplicatedDatabase_Resume("guid2");
    waitUntilActive(raw_db, repl_2);
    Thread.Sleep(7000);
    raw_db.debugDump();
    Thread.Sleep(7000);

    // Environment.Exit(1); // exit

    // write through both replicas, then truncate each log in turn
    repl_2.setValueParsed("d/1", "20");
    repl_1.setValueParsed("c/1", "10");
    Thread.Sleep(1000);
    repl_2.truncateLogs_Hack();
    Thread.Sleep(1000);
    repl_1.truncateLogs_Hack();

    Console.WriteLine("----------------[ both logs should be truncated ]-----------------------------");
    raw_db.debugDump();

    Console.WriteLine("----------------[ create server 3 ]-----------------------------");
    // third replica joins via server 2, even though both logs were truncated
    ReplHandler repl_3 = db_factory.getReplicatedDatabase_Join("guid3", "guid2");
    Thread.Sleep(7000);
    raw_db.debugDump();
    repl_3.setValueParsed("q/1", "10");
    Thread.Sleep(7000);
    raw_db.debugDump();

    Console.WriteLine("quitting..");
    Environment.Exit(0);
}
// Verifies snapshot isolation at the KV level: after overwriting a key,
// a scan of the live view shows the new payload while a previously-taken
// snapshot still serves the old one.
static void snapshot_test() {
    Console.WriteLine("======================= Snapshot DB Test ==============================");

    LayerManager backing = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\main");
    StepsDatabase broker = new StepsDatabase(backing);
    StepsStageSnapshot live_db = (StepsStageSnapshot)broker.getSnapshotDatabase();

    // write v0, take a snapshot, then overwrite with v1
    live_db.setValue(new RecordKey().appendParsedKey("test/1"), RecordUpdate.WithPayload("blah-t0"));
    IStepsKVDB frozen = live_db.getSnapshot();
    live_db.setValue(new RecordKey().appendParsedKey("test/1"), RecordUpdate.WithPayload("blah-t1"));

    RecordKey start_key = new RecordKey().appendParsedKey("test/1");
    backing.debugDump();

    Console.WriteLine("-------------------[ top level readback ] -------------------");
    foreach (var row in live_db.scanForward(
                 new ScanRange<RecordKey>(start_key, new ScanRange<RecordKey>.maxKey(), null))) {
        Console.WriteLine(row);
    }

    Console.WriteLine("-------------------[ snapshot readback ] -------------------");
    foreach (var row in frozen.scanForward(
                 new ScanRange<RecordKey>(start_key, new ScanRange<RecordKey>.maxKey(), null))) {
        Console.WriteLine(row);
    }
}