public void T05_TestBackgroundFlushAndMergeConsistency() {
    // (1) one key to each of NUM_SEGMENTS separate segments
    // (2) setup a separate thread that just repeatedly checks each key
    // (3) perform a merge
    // (4) shutdown the threads and see if any detected a readback failure
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\7");
    List<ValueCheckerThread> checkers = new List<ValueCheckerThread>();
    int NUM_SEGMENTS = 7;
    int performed_iterations = 0;
    int TARGET_ITERATIONS = 10;
    try {
        for (int iter = 0; iter < TARGET_ITERATIONS; iter++) {
            // setup the initial keys and checker threads
            for (int x = 0; x < NUM_SEGMENTS; x++) {
                string key = "test-" + x;
                string value = "test-value-" + x;
                db.setValueParsed(key, value);
                // flush immediately so each key lands in its own segment
                db.flushWorkingSegment();

                // start a checking thread that repeatedly reads this key back
                ValueCheckerThread checker = new ValueCheckerThread(db, key, value);
                Thread newthread = new Thread(checker.doValidate);
                newthread.Start();
                checkers.Add(checker);
            }
            Thread.Sleep(5);

            // verify there are no errors before the merge
            foreach (ValueCheckerThread checker in checkers) {
                Console.WriteLine("Thread key:{0} checks:{1} errors:{2}",
                    checker.key_to_check, checker.num_checks, checker.num_errors);
            }
            foreach (ValueCheckerThread checker in checkers) {
                Assert.AreEqual(0, checker.num_errors,
                    "checker thread error, key(" + checker.key_to_check + ") error count != 0");
            }

            // trigger a merge while the checker threads are still reading
            for (int x = 0; x < 20; x++) {
                db.mergeIfNeeded();
            }
            Thread.Sleep(5);

            // verify there are still no errors after the merge
            foreach (ValueCheckerThread checker in checkers) {
                Console.WriteLine("Thread key:{0} checks:{1} errors:{2}",
                    checker.key_to_check, checker.num_checks, checker.num_errors);
            }
            foreach (ValueCheckerThread checker in checkers) {
                Assert.AreEqual(0, checker.num_errors,
                    "checker thread error, key:" + checker.key_to_check);
            }

            // end the threads..
            foreach (ValueCheckerThread checker in checkers) {
                checker.end();
            }
            Thread.Sleep(10);
            foreach (ValueCheckerThread checker in checkers) {
                checker.waitForEnd();
            }
            checkers.Clear();
            Thread.Sleep(5);

            // delete the keys so the next iteration starts from an empty keyspace
            System.Console.WriteLine("======= Clearing for next run...");
            for (int x = 0; x < NUM_SEGMENTS; x++) {
                string key = "test-" + x;
                db.setValue(new RecordKey().appendParsedKey(key), RecordUpdate.DeletionTombstone());
            }
            db.flushWorkingSegment();
            Thread.Sleep(5);
            db.mergeIfNeeded();
            Thread.Sleep(5);
            System.Console.WriteLine("======= Cleared for next run...");

            // BUGFIX: record this iteration as completed. The original assigned
            // "= iter", which under-reported by one (after all 10 iterations the
            // finally-block printed "performed iterations = 9").
            performed_iterations = iter + 1;
        }
        // BUGFIX: release the db region on the success path; every sibling test
        // disposes its LayerManager, this one leaked it. (Not placed in the
        // finally block so a Dispose failure cannot mask an assertion failure.)
        db.Dispose();
    } finally {
        System.Console.WriteLine("performed iterations = " + performed_iterations
            + " target iterations = " + TARGET_ITERATIONS);
    }
}
internal ReadThreadsTest(int rec_count, int rec_per_segment) {
    this.TEST_RECORD_COUNT = rec_count;
    this.RECORDS_PER_SEGMENT = rec_per_segment;
    System.GC.Collect();
    db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\10");
    testdata = new SortedDictionary<string, string>();
    testrows = new SortedDictionary<RecordKey, RecordUpdate>();

    // build the expected dataset in memory first: both the plain
    // string map and the parsed RecordKey/RecordUpdate rows
    for (int recnum = 0; recnum < TEST_RECORD_COUNT; recnum++) {
        string key = "test/" + recnum.ToString();
        string value = "data: " + key;
        testdata[key] = value;
        testrows[new RecordKey().appendParsedKey(key)] = RecordUpdate.WithPayload(value);
    }

    // then load it into the db, one write group per row, rolling to a
    // fresh on-disk segment every RECORDS_PER_SEGMENT rows
    int rows_written = 0;
    foreach (KeyValuePair<RecordKey, RecordUpdate> row in testrows) {
        LayerWriteGroup txn = db.newWriteGroup();
        txn.setValue(row.Key, row.Value);
        txn.finish();
        rows_written++;
        if ((rows_written % RECORDS_PER_SEGMENT) == 0) {
            db.flushWorkingSegment();
        }
    }
    // flush whatever partial segment remains
    db.flushWorkingSegment();
}
public void T04_SingleSegmentRootMetadataLogRecovery() {
    // TEST: test multiple segments flushed, and "log resumed" (walk .ROOT range map)

    // perform the previous test
    T03_SegmentLayerGetRecordApplicationOrder();

    // ... and then perform a resume
    LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

    String[] keys = { "test-1", "test-2", "test-3" };
    String[] values = { "a-second", "b-second", "c-second" };

    // verify that it has the same data as before the RESUME
    {
        // working segment should be empty
        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(keys[i]);

            // look directly in the working segment, they should be MISSING
            // This is testing the checkpoint as well. If log resume didn't
            // CHECKPOINT_DROP, then the values will be duplicated in the working segment.
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
            }
            // assure the global query interface finds the NEW VALUES
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                Assert.AreEqual(values[i], data.ReadDataAsString(),
                    "LayerManager.getRecord() should see NEW VALUES");
            }
        }

        // now generate a BUNCH of new segments...
        {
            String[] secondkeys = { "second-test-1", "second-test-2", "second-test-3" };
            String[] secondvalues = { "a-second", "b-second", "c-second" };

            // put each new record in its OWN segment
            for (int i = 0; i < secondkeys.Length; i++) {
                LayerWriteGroup txn = db.newWriteGroup();
                txn.setValueParsed(secondkeys[i], secondvalues[i]);
                txn.finish();
                db.flushWorkingSegment();
            }

            // BUGFIX: dump BEFORE disposing. The original called db.debugDump()
            // after db.Dispose(), i.e. use-after-dispose of the LayerManager.
            db.debugDump();
            db.Dispose();

            // RESUME
            db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

            // first test records should still be visible
            for (int i = 0; i < keys.Length; i++) {
                RecordKey key = new RecordKey();
                key.appendKeyPart(keys[i]);

                // look directly in the working segment, they should be MISSING
                // This is testing the checkpoint as well. If log resume didn't
                // CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                {
                    RecordUpdate update;
                    GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                    Assert.AreEqual(GetStatus.MISSING, status,
                        "working segment should be MISSING {0}", key);
                }
                // assure the global query interface finds the NEW VALUES
                {
                    RecordData data;
                    GetStatus status = db.getRecord(key, out data);
                    Assert.AreEqual(GetStatus.PRESENT, status,
                        "LayerManager should see NEW VALUES : {0}", key);
                    Assert.AreEqual(values[i], data.ReadDataAsString(),
                        "LayerManager.getRecord() should see NEW VALUES : {0}", key);
                }
            }
            db.debugDump();

            // verify that the secondkeys/values are still in there
            for (int i = 0; i < secondkeys.Length; i++) {
                RecordKey key = new RecordKey();
                key.appendKeyPart(secondkeys[i]);

                // look directly in the working segment, they should be MISSING
                // This is testing the checkpoint as well. If log resume didn't
                // CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                {
                    RecordUpdate update;
                    GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                    Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
                }
                // assure the global query interface finds the NEW VALUES
                {
                    RecordData data;
                    GetStatus status = db.getRecord(key, out data);
                    Assert.AreEqual(GetStatus.PRESENT, status,
                        "LayerManager should see NEW VALUES, where is: " + key);
                    Assert.AreEqual(secondvalues[i], data.ReadDataAsString(),
                        "LayerManager.getRecord() should see NEW VALUES");
                }
            }
        }
        db.Dispose();
    }
}
public void T03_SegmentLayerGetRecordApplicationOrder() {
    // Assure that when records are written more than once, the updates are applied in the correct
    // order so we see the proper current data value.
    // Sequence: write keys -> flush -> verify; rewrite same keys with new values -> verify both
    // the working segment and the global interface; flush again -> verify only the layers.
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\6");
    {
        String[] keys = { "test-1", "test-2", "test-3" };
        String[] values = { "a-first", "b-first", "c-first" };

        // write the first generation of values in a single write group, then
        // flush so they move from the working segment into an on-disk layer
        LayerWriteGroup txn = db.newWriteGroup();
        for (int i = 0; i < keys.Length; i++) {
            txn.setValueParsed(keys[i], values[i]);
        }
        txn.finish();
        db.flushWorkingSegment();

        // assure the records we wrote are NOT in the working segment, but ARE in the next layer
        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(keys[i]);

            // look directly in the working segment, they should be **MISSING*
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.MISSING, status, "working segment should be clear");
            }
            // assure the global query interface finds it
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "records should be found in layers");
                Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord()");
            }
        }
    }

    // now write the same keys again with different values into the working segment
    {
        String[] keys = { "test-1", "test-2", "test-3" };
        String[] values = { "a-second", "b-second", "c-second" };
        System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

        LayerWriteGroup txn = db.newWriteGroup();
        for (int i = 0; i < keys.Length; i++) {
            txn.setValueParsed(keys[i], values[i]);
        }
        txn.finish();

        // assure that both the workingsegment and layermanager see the NEW VALUES
        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(keys[i]);

            // look directly in the working segment, they should be the NEW VALUES
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.PRESENT, status, "working segment should have NEW VALUES");
                Assert.AreEqual(values[i], enc.GetString(update.data), "SegmentBuilder.getRecordUpdate should see NEW VALUES");
            }
            // assure the global query interface finds the NEW VALUES
            // (i.e. the working segment must shadow the older on-disk layer)
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
            }
        }

        // now flush the working segment (so we have two on-disk layers)
        db.flushWorkingSegment();

        // assure we still see the NEW VALUES, but that the working segment is empty
        // (the newer layer must win over the older one)
        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(keys[i]);

            // look directly in the working segment, they should MISSING
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.MISSING, status, "working segment should have NO values");
            }
            // assure the global query interface finds the NEW VALUES
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
            }
        }
    }
    db.Dispose();
}
public void T02_LayerSegmentFlushAndFreespaceModification() {
    // TEST: flushing the working segment produces exactly one segment layer,
    // allocates a new generation (visible via .ROOT/VARS/NUMGENERATIONS), and
    // makes the flushed records readable through the layers but not the working segment.
    String[] keys = { "test-1", "test-2", "test-3" };
    String[] values = { "a", "b", "c" };
    System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\5");
    db.debugDump();

    LayerWriteGroup txn = db.newWriteGroup();
    for (int i = 0; i < keys.Length; i++) {
        txn.setValueParsed(keys[i], values[i]);
    }
    txn.finish();
    db.flushWorkingSegment();
    db.debugDump();

    // assure that we checkpointed down to a single working segment
    Assert.AreEqual(1, db.segmentlayers.Count, "segment layer count");

    // assure we allocated a new generation and gen0 range record (walk .ROOT range map)
    // TODO: move this test to RangemapManager, to remove this cross-dependency
    {
        RecordData data;
        RecordKey key = new RecordKey().appendParsedKey(".ROOT/VARS/NUMGENERATIONS");
        Assert.AreEqual(GetStatus.PRESENT, db.getRecord(key, out data), "missing numgenerations record");
        Assert.AreEqual("1", data.ReadDataAsString(), "generation count 1");

        RecordUpdate update;
        Assert.AreEqual(GetStatus.PRESENT, db.workingSegment.getRecordUpdate(key, out update),
            "missing workingsegment numgenerations record");
        Assert.AreEqual("1", enc.GetString(update.data), "generation count 2");
    }
    // CLEANUP: removed a doubly-dead block (guarded by both "#if false" and
    // "if (false)") that checked for the ".ROOT/GEN/0/</>" range record.
    // TODO: check for the .ROOT/GEN/0/</> range record once that check is enabled
    // TODO: assure we subtracted the new range record from the freespace

    // assure the records we wrote are NOT in the working segment, but ARE in the next layer
    for (int i = 0; i < keys.Length; i++) {
        RecordKey key = new RecordKey();
        key.appendKeyPart(keys[i]);

        // look directly in the working segment, they should be **MISSING*
        {
            RecordUpdate update;
            GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
            Assert.AreEqual(GetStatus.MISSING, status, "working segment should be clear");
        }
        // assure the global query interface finds it
        {
            RecordData data;
            GetStatus status = db.getRecord(key, out data);
            Assert.AreEqual(GetStatus.PRESENT, status, "records should be found in layers, {0} missing", key);
            Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord()");
        }
    }
    db.Dispose();
}
public void T001_WorkingSegmentReadWrite() {
    // TEST: a single record written to the working segment is visible to
    // FindNext and scanForward, both before and after a flush to disk.
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\3");

    var rk = new RecordKey().appendParsedKey(".a");
    db.setValueParsed(".a", "1");

    // point lookup against the working segment
    KeyValuePair<RecordKey, RecordData> record;
    try {
        record = db.FindNext(rk, true);
        Assert.AreEqual(rk, record.Key, "fetched key does not match");
    } catch (KeyNotFoundException) {
        Assert.Fail("couldn't find 'a' record");
    }

    // range scan against the working segment should see exactly one record
    int found_recs = 0;
    var scan_range = new ScanRange<RecordKey>(rk, RecordKey.AfterPrefix(rk), null);
    foreach (var row in db.scanForward(scan_range)) {
        found_recs++;
    }
    Assert.AreEqual(1, found_recs, "found the wrong number of records in working segment scan!");

    // flush, then repeat both checks against the on-disk segment
    db.flushWorkingSegment();

    try {
        record = db.FindNext(rk, true);
        Assert.AreEqual(rk, record.Key, "fetched key does not match (after flush)");
    } catch (KeyNotFoundException) {
        Assert.Fail("couldn't find 'a' record");
    }

    found_recs = 0;
    foreach (var row in db.scanForward(
        new ScanRange<RecordKey>(rk, RecordKey.AfterPrefix(rk), null))) {
        found_recs++;
    }
    Assert.AreEqual(1, found_recs, "found the wrong number of records after flush !");

    // BUGFIX: release the db region; the original leaked the LayerManager
    // while every other test in this file disposes its instance.
    db.Dispose();
}
public static void do_bringup_test(DbgGUI win) {
    // Manual bring-up / smoke-test driver: exercises write, flush, read,
    // merge, dispose/resume, and finally a large randomized write+merge load,
    // dumping debug state to the console and the DbgGUI window throughout.
    // NOTE: this is a console harness, not a unit test — it never returns
    // (Environment.Exit at the end).
    LayerManager db = new LayerManager(InitMode.NEW_REGION,"c:\\BENDtst\\main");

    db.setValueParsed("test/3","a");
    db.setValueParsed("test/2","b");
    db.setValueParsed("test/1","c");
    db.debugDump();

    db.flushWorkingSegment();  // this will flush and read the current segment
    Console.WriteLine("--- after flush");
    db.debugDump();
    dumpMergeCandidates(db);

    Console.WriteLine("--- check record read");
    RecordData data;
    // NOTE(review): status is not checked before data.ToString() — if the
    // record were missing this would likely fault; acceptable for a bringup harness.
    GetStatus status = db.getRecord(new RecordKey().appendParsedKey("test/3"), out data);
    System.Console.WriteLine("getRecord({0}) returned {1}", "test/3", data.ToString());

    Console.WriteLine("--- make lots of segments");
    // one flush per record, so each key lands in its own segment
    db.setValueParsed("test/4", "d");
    db.flushWorkingSegment();
    db.setValueParsed("test/5", "e");
    db.flushWorkingSegment();
    db.setValueParsed("test/6", "f");
    db.flushWorkingSegment();

    db.debugDump();

    System.Console.WriteLine("-------- dump keys ---------------------");
    dumpAllDbRecords(db);
    dumpMergeCandidates(db);

    System.Console.WriteLine("-------- draw graphical debug ---------------------");
    win.debugDump(db);

    System.Console.WriteLine("-------- PERFORMING A SINGLE MERGE ---------------------");
    MergeCandidate mc;
    mc = db.rangemapmgr.mergeManager.getBestCandidate();
    System.Console.WriteLine("MERGE :" + mc);
    db.performMerge(mc);
    dumpMergeCandidates(db);

    db.flushWorkingSegment();
    db.debugDump();
    dumpSegmentList(db);
    win.debugDump(db);

    System.Console.WriteLine("-------- SINGLE MERGE DONE, close/dispose ---------------------");
    dumpSegmentList(db);
    dumpMergeCandidates(db);
    db.debugDump();
    db.Dispose();

    // reopen the same region from disk and confirm state survived
    System.Console.WriteLine("-------- NOW RESUME ---------------------------------");
    db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\main");
    dumpSegmentList(db);
    dumpMergeCandidates(db);
    win.debugDump(db);
    db.debugDump();

    System.Console.WriteLine("-------- NOW FINDNEXT ---------------------------------");
    dumpAllDbRecords(db);
    win.debugDump(db);

    System.Console.WriteLine("-------- NOW MERGE ALL SEGMENTS ---------------------------------");
    dumpSegmentList(db);
    db.mergeAllSegments();
    db.debugDump();
    win.debugDump(db);

    // stop(); // ------------------------- (( S T O P )) ---------------------------------

    System.Console.WriteLine("-------- NOW FINDNEXT (after merge) ---------------------------------");
    dumpAllDbRecords(db);

    //System.Console.WriteLine("-------- Now run Readthreads Test ---------------------------------");
    //A03_LayerManagerTests test = new A03_LayerManagerTests();
    //test.T10_LayerManager_ReadThreads();

    dumpMergeCandidates(db);
    win.debugDump(db);
    db.Dispose();

    System.Console.WriteLine("-------- Write ALOT of data ---------------------------------");
    // load-test parameters: 1M records, commit every 1000, flush+merge every 40000
    int keysize = 20;
    int keycount = 1000000;
    int flush_period = 40000;
    int commit_period = 1000;
    bool random_order = true;

    double elapsed;
    DateTime start = DateTime.Now;
    int record_count = 0;

    db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\bigtest");

    // build a payload of ~keysize chars from repeated copies of keybase
    String value = "";
    String keybase = "TestValueDataABC";
    for (int x = 0; x < keysize / keybase.Length; x++) {
        value = value + keybase;
    }

    // seed is printed so a failing run can be reproduced
    int seed = (int)DateTime.Now.Ticks;
    Random rnd = new Random(seed);
    System.Console.WriteLine("*** RANDOM SEED: " + seed);

    var write_group = db.newWriteGroup();
    for (int x = 10000001; x < 10000001 + keycount; x++) {
        if (random_order) {
            write_group.setValueParsed("test/rnd/" + rnd.Next(), value);
        } else {
            write_group.setValueParsed("test/ordered/" + x, value);
        }
        record_count++;

        // commit the pending group periodically
        if (x % commit_period == 0) {
            write_group.finish();
            write_group = db.newWriteGroup();
        }

        // periodically flush and run incremental merges
        if (x % flush_period == 0) {
            System.Console.WriteLine("*** RANDOM SEED: " + seed);
            write_group.finish();
            write_group = db.newWriteGroup();

            System.Console.WriteLine("start % 1000 cycle..");
            db.flushWorkingSegment();
            db.freespacemgr.debugDumbCurrentFreespace();
            win.debugDump(db);
            dumpMergeCandidates(db);

            // run up to 30 merges, stopping when no candidate remains or the
            // best candidate's score exceeds a generation-scaled threshold
            for (int mx = 0; mx < 30; mx++) {
                mc = db.rangemapmgr.mergeManager.getBestCandidate();
                if (mc == null) {
                    break;
                }
                if (mc.score() > (1.6 + (float)db.rangemapmgr.mergeManager.getMaxGeneration()/12.0f)) {
                    System.Console.WriteLine("** best merge score too high: " + mc);
                    break;
                }
                System.Console.WriteLine("merge " + mx + " : " + mc);
                win.debugDump(db, mc);
                db.performMerge(mc);
                System.Console.WriteLine("mergedone " + mx + " : " + mc);
                dumpSegmentList(db);
                dumpMergeCandidates(db);
                win.debugDump(db);
            }

            // progress report for this merge cycle
            elapsed = (DateTime.Now - start).TotalMilliseconds / 1000.0;
            System.GC.Collect();
            Console.WriteLine("*************************************************");
            Console.WriteLine("*************************************************");
            Console.WriteLine("*** merge cycle done {0} records so far, in {1} total time, {2} records/second",
                record_count,elapsed, (double)record_count/elapsed);
            Console.WriteLine("*************************************************");
            Console.WriteLine("*************************************************");
        }
    }

    // final merge pass: up to 30 merges with no score cutoff
    System.Console.WriteLine("-------- Merge a bunch more ------------------");
    for (int x = 0; x < 30; x++) {
        mc = db.rangemapmgr.mergeManager.getBestCandidate();
        System.Console.WriteLine("merge : " + mc);
        if (mc == null) break;
        win.debugDump(db, mc);
        db.performMerge(mc);
        dumpSegmentList(db);
        dumpMergeCandidates(db);
        win.debugDump(db,null);
    }
    dumpSegmentList(db);

    // final throughput report
    elapsed = (DateTime.Now - start).TotalMilliseconds / 1000.0;
    Console.WriteLine("*************************************************");
    Console.WriteLine("*************************************************");
    Console.WriteLine("*** merge cycle done {0} records so far, in {1} total time, {2} records/second",
        record_count, elapsed, (double)record_count / elapsed);
    Console.WriteLine("*************************************************");
    Console.WriteLine("*************************************************");

    System.Console.WriteLine("** done.");
    Environment.Exit(0);
}