public FreespaceManager(LayerManager store, int init_freelist = 0) {
    this.store = store;
    if (init_freelist != 0) {
        // TODO: handle an explicitly supplied init_freelist (currently unused in this branch)
    } else {
        RecordData data;
        if (store.getRecord(new RecordKey().appendParsedKey(".ROOT/FREELIST/HEAD"), out data) == GetStatus.MISSING) {
            throw new Exception("no freelist head!");
            // TODO: fix this init hack
            // next_allocation = (int)(RootBlockHeader.ROOTBLOCK_SIZE + LogWriter.DEFAULT_LOG_SIZE);
        } else {
            next_allocation = Lsd.lsdToNumber(data.data);
        }
    }
}
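
// --- Illustrative sketch (hypothetical, not part of the class above) ---
// A minimal view of what the freelist head represents, assuming next_allocation acts as a
// simple bump pointer: the constructor reads it back from ".ROOT/FREELIST/HEAD", and an
// allocation would presumably hand out [next_allocation, next_allocation + length) and then
// advance the pointer. The helper name below is an assumption for illustration, and the
// write-back/persistence step is deliberately omitted; this is not the project's real
// allocation path.
private long sketchGrowHeap(int length) {
    long region_start = next_allocation;    // hand out the current head of free space
    next_allocation += length;              // bump the head past the newly handed-out region
    // a real implementation would also write the new head back under
    // ".ROOT/FREELIST/HEAD" so a later RESUME sees it (encoding omitted here)
    return region_start;
}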
public void T04_SingleSegmentRootMetadataLogRecovery() {
    // TEST: test multiple segments flushed, and "log resumed" (walk .ROOT range map)

    // perform the previous test
    T03_SegmentLayerGetRecordApplicationOrder();

    // ... and then perform a resume
    LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

    String[] keys = { "test-1", "test-2", "test-3" };
    String[] values = { "a-second", "b-second", "c-second" };

    // verify that it has the same data as before the RESUME
    { // working segment should be empty
        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(keys[i]);

            // look directly in the working segment, they should be MISSING
            // This is testing the checkpoint as well. If log resume didn't
            // CHECKPOINT_DROP, then the values will be duplicated in the working segment.
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
            }

            // assure the global query interface finds the NEW VALUES
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
            }
        }

        // now generate a BUNCH of new segments...
        {
            String[] secondkeys = { "second-test-1", "second-test-2", "second-test-3" };
            String[] secondvalues = { "a-second", "b-second", "c-second" };

            // put each new record in its OWN segment
            for (int i = 0; i < secondkeys.Length; i++) {
                LayerWriteGroup txn = db.newWriteGroup();
                txn.setValueParsed(secondkeys[i], secondvalues[i]);
                txn.finish();
                db.flushWorkingSegment();
            }

            db.Dispose();
            db.debugDump();

            // RESUME
            db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

            // first test records should still be visible
            for (int i = 0; i < keys.Length; i++) {
                RecordKey key = new RecordKey();
                key.appendKeyPart(keys[i]);

                // look directly in the working segment, they should be MISSING
                // This is testing the checkpoint as well. If log resume didn't
                // CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                {
                    RecordUpdate update;
                    GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                    Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING {0}", key);
                }

                // assure the global query interface finds the NEW VALUES
                {
                    RecordData data;
                    GetStatus status = db.getRecord(key, out data);
                    Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES : {0}", key);
                    Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES : {0}", key);
                }
            }

            db.debugDump();

            // verify that the secondkeys/values are still in there
            for (int i = 0; i < secondkeys.Length; i++) {
                RecordKey key = new RecordKey();
                key.appendKeyPart(secondkeys[i]);

                // look directly in the working segment, they should be MISSING
                // This is testing the checkpoint as well. If log resume didn't
                // CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                {
                    RecordUpdate update;
                    GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                    Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
                }

                // assure the global query interface finds the NEW VALUES
                {
                    RecordData data;
                    GetStatus status = db.getRecord(key, out data);
                    Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES, where is: " + key);
                    Assert.AreEqual(secondvalues[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                }
            }
        }

        db.Dispose();
    }
}
public void T03_SegmentLayerGetRecordApplicationOrder() {
    // Assure that when records are written more than once, the updates are applied in the correct
    // order so we see the proper current data value
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\6");

    {
        String[] keys = { "test-1", "test-2", "test-3" };
        String[] values = { "a-first", "b-first", "c-first" };

        LayerWriteGroup txn = db.newWriteGroup();
        for (int i = 0; i < keys.Length; i++) {
            txn.setValueParsed(keys[i], values[i]);
        }
        txn.finish();
        db.flushWorkingSegment();

        // assure the records we wrote are NOT in the working segment, but ARE in the next layer
        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(keys[i]);

            // look directly in the working segment, they should be **MISSING**
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.MISSING, status, "working segment should be clear");
            }

            // assure the global query interface finds it
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "records should be found in layers");
                Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord()");
            }
        }
    }

    // now write the same keys again with different values into the working segment
    {
        String[] keys = { "test-1", "test-2", "test-3" };
        String[] values = { "a-second", "b-second", "c-second" };
        System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

        LayerWriteGroup txn = db.newWriteGroup();
        for (int i = 0; i < keys.Length; i++) {
            txn.setValueParsed(keys[i], values[i]);
        }
        txn.finish();

        // assure that both the workingsegment and layermanager see the NEW VALUES
        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(keys[i]);

            // look directly in the working segment, they should be the NEW VALUES
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.PRESENT, status, "working segment should have NEW VALUES");
                Assert.AreEqual(values[i], enc.GetString(update.data), "SegmentBuilder.getRecordUpdate should see NEW VALUES");
            }

            // assure the global query interface finds the NEW VALUES
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
            }
        }

        // now flush the working segment (so we have two on-disk layers)
        db.flushWorkingSegment();

        // assure we still see the NEW VALUES, but that the working segment is empty
        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(keys[i]);

            // look directly in the working segment, they should be MISSING
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.MISSING, status, "working segment should have NO values");
            }

            // assure the global query interface finds the NEW VALUES
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
            }
        }
    }

    db.Dispose();
}
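
// --- Illustrative sketch (hypothetical helper, not part of the test suite) ---
// T02-T04 above repeat the same pair of checks after a flush: each key must be MISSING from
// the working segment, and LayerManager.getRecord() must still return the expected value from
// the on-disk layers. A shared helper along these lines could collapse that repetition; it
// only uses calls already exercised by the tests above, and the helper name is an assumption.
private static void assertFlushedValue(LayerManager db, String keystr, String expected_value) {
    RecordKey key = new RecordKey();
    key.appendKeyPart(keystr);

    // flushed records must no longer live in the working segment
    RecordUpdate update;
    Assert.AreEqual(GetStatus.MISSING, db.workingSegment.getRecordUpdate(key, out update),
        "working segment should be MISSING: {0}", key);

    // ...but must still be visible through the layered read path
    RecordData data;
    Assert.AreEqual(GetStatus.PRESENT, db.getRecord(key, out data),
        "LayerManager should see a value for: {0}", key);
    Assert.AreEqual(expected_value, data.ReadDataAsString(),
        "LayerManager.getRecord() value mismatch for: {0}", key);
}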
public void T02_LayerSegmentFlushAndFreespaceModification() {
    String[] keys = { "test-1", "test-2", "test-3" };
    String[] values = { "a", "b", "c" };
    System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\5");
    db.debugDump();

    LayerWriteGroup txn = db.newWriteGroup();
    for (int i = 0; i < keys.Length; i++) {
        txn.setValueParsed(keys[i], values[i]);
    }
    txn.finish();
    db.flushWorkingSegment();

    db.debugDump();

    // assure that we checkpointed down to a single working segment
    Assert.AreEqual(1, db.segmentlayers.Count, "segment layer count");

    // assure we allocated a new generation and gen0 range record (walk .ROOT range map)
    // TODO: move this test to RangemapManager, to remove this cross-dependency
    {
        RecordData data;
        RecordKey key = new RecordKey().appendParsedKey(".ROOT/VARS/NUMGENERATIONS");
        Assert.AreEqual(GetStatus.PRESENT, db.getRecord(key, out data), "missing numgenerations record");
        Assert.AreEqual("1", data.ReadDataAsString(), "generation count 1");

        RecordUpdate update;
        Assert.AreEqual(GetStatus.PRESENT, db.workingSegment.getRecordUpdate(key, out update),
            "missing workingsegment numgenerations record");
        Assert.AreEqual("1", enc.GetString(update.data), "generation count 2");
    }

#if false
    if (false) {
        RecordData data;
        Assert.AreEqual(
            GetStatus.PRESENT,
            db.getRecord(new RecordKey()
                .appendParsedKey(".ROOT/GEN")
                .appendKeyPart(new RecordKeyType_Long(0))
                .appendKeyPart("</>"), out data),
            ".ROOT/GEN/0/</> key is missing");
    }
#endif

    // TODO: assure we subtracted the new range record from the freespace

    // assure the records we wrote are NOT in the working segment, but ARE in the next layer
    for (int i = 0; i < keys.Length; i++) {
        RecordKey key = new RecordKey();
        key.appendKeyPart(keys[i]);

        // look directly in the working segment, they should be **MISSING**
        {
            RecordUpdate update;
            GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
            Assert.AreEqual(GetStatus.MISSING, status, "working segment should be clear");
        }

        // assure the global query interface finds it
        {
            RecordData data;
            GetStatus status = db.getRecord(key, out data);
            Assert.AreEqual(GetStatus.PRESENT, status, "records should be found in layers, {0} missing", key);
            Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord()");
        }
    }

    db.Dispose();
}
public void T01_LayerTxnLogResume() {
    String[] keys = { "test-1", "test-2", "test-3" };
    String[] values = { "a", "b", "c" };

    {
        LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\4");

        LayerWriteGroup txn = db.newWriteGroup();
        for (int i = 0; i < keys.Length; i++) {
            txn.setValueParsed(keys[i], values[i]);
        }
        txn.finish();

        // TODO: assure the freespace hasn't been affected

        // assure we have not committed any segments
        Assert.AreEqual(1, db.segmentlayers.Count, "should be one memory segment");
        Assert.AreEqual(db.segmentlayers[0], db.workingSegment, "memory segment should be the working segment");

        // assure the working segment contains the right data
        // 3 test records, the NUMGENERATIONS record, and FREELIST/HEAD
        // TODO: make a more robust way to do this test (i.e. count non .ROOT records)
        Assert.AreEqual(5, db.workingSegment.RowCount, "record count match 1");

        db.Dispose();
    }

    {
        LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\4");
        System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

        // assure we still have not committed any segments
        Assert.AreEqual(1, db.segmentlayers.Count, "after resume: one memory segment");
        Assert.AreEqual(db.segmentlayers[0], db.workingSegment, "after resume: working segment setup");

        // assure the working segment contains the right data
        Assert.AreEqual(5, db.workingSegment.RowCount, "after resume: record count == 5");

        for (int i = 0; i < keys.Length; i++) {
            RecordKey key = new RecordKey();
            key.appendKeyPart(new RecordKeyType_String(keys[i]));

            // look directly in the working segment
            {
                RecordUpdate update;
                GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                Assert.AreEqual(GetStatus.PRESENT, status, "SegmentBuilder.getRecordUpdate({0})", key);
                Assert.AreEqual(values[i], enc.GetString(update.data), "SegmentBuilder.getRecordUpdate({0})", key);
            }

            // assure the global query interface finds it
            {
                RecordData data;
                GetStatus status = db.getRecord(key, out data);
                Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager.getRecord({0})", key);
                Assert.AreEqual(values[i], enc.GetString(data.data), "LayerManager.getRecord({0})", key);
            }
        }

        // cleanup
        db.Dispose();
    }
}
public static void do_bringup_test(DbgGUI win) {
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\main");

    db.setValueParsed("test/3", "a");
    db.setValueParsed("test/2", "b");
    db.setValueParsed("test/1", "c");
    db.debugDump();

    db.flushWorkingSegment();   // this will flush and read the current segment
    Console.WriteLine("--- after flush");
    db.debugDump();
    dumpMergeCandidates(db);

    Console.WriteLine("--- check record read");
    RecordData data;
    GetStatus status = db.getRecord(new RecordKey().appendParsedKey("test/3"), out data);
    System.Console.WriteLine("getRecord({0}) returned {1}", "test/3", data.ToString());

    Console.WriteLine("--- make lots of segments");
    db.setValueParsed("test/4", "d");
    db.flushWorkingSegment();
    db.setValueParsed("test/5", "e");
    db.flushWorkingSegment();
    db.setValueParsed("test/6", "f");
    db.flushWorkingSegment();

    db.debugDump();

    System.Console.WriteLine("-------- dump keys ---------------------");
    dumpAllDbRecords(db);
    dumpMergeCandidates(db);

    System.Console.WriteLine("-------- draw graphical debug ---------------------");
    win.debugDump(db);

    System.Console.WriteLine("-------- PERFORMING A SINGLE MERGE ---------------------");
    MergeCandidate mc;
    mc = db.rangemapmgr.mergeManager.getBestCandidate();
    System.Console.WriteLine("MERGE :" + mc);
    db.performMerge(mc);
    dumpMergeCandidates(db);

    db.flushWorkingSegment();
    db.debugDump();
    dumpSegmentList(db);
    win.debugDump(db);

    System.Console.WriteLine("-------- SINGLE MERGE DONE, close/dispose ---------------------");
    dumpSegmentList(db);
    dumpMergeCandidates(db);
    db.debugDump();
    db.Dispose();

    System.Console.WriteLine("-------- NOW RESUME ---------------------------------");
    db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\main");
    dumpSegmentList(db);
    dumpMergeCandidates(db);
    win.debugDump(db);
    db.debugDump();

    System.Console.WriteLine("-------- NOW FINDNEXT ---------------------------------");
    dumpAllDbRecords(db);
    win.debugDump(db);

    System.Console.WriteLine("-------- NOW MERGE ALL SEGMENTS ---------------------------------");
    dumpSegmentList(db);
    db.mergeAllSegments();
    db.debugDump();
    win.debugDump(db);

    // stop();  // ------------------------- (( S T O P )) ---------------------------------

    System.Console.WriteLine("-------- NOW FINDNEXT (after merge) ---------------------------------");
    dumpAllDbRecords(db);

    //System.Console.WriteLine("-------- Now run Readthreads Test ---------------------------------");
    //A03_LayerManagerTests test = new A03_LayerManagerTests();
    //test.T10_LayerManager_ReadThreads();

    dumpMergeCandidates(db);
    win.debugDump(db);
    db.Dispose();

    System.Console.WriteLine("-------- Write ALOT of data ---------------------------------");
    int keysize = 20;
    int keycount = 1000000;
    int flush_period = 40000;
    int commit_period = 1000;
    bool random_order = true;

    double elapsed;
    DateTime start = DateTime.Now;
    int record_count = 0;

    db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\bigtest");

    String value = "";
    String keybase = "TestValueDataABC";
    for (int x = 0; x < keysize / keybase.Length; x++) { value = value + keybase; }

    int seed = (int)DateTime.Now.Ticks;
    Random rnd = new Random(seed);
    System.Console.WriteLine("*** RANDOM SEED: " + seed);

    var write_group = db.newWriteGroup();
    for (int x = 10000001; x < 10000001 + keycount; x++) {
        if (random_order) {
            write_group.setValueParsed("test/rnd/" + rnd.Next(), value);
        } else {
            write_group.setValueParsed("test/ordered/" + x, value);
        }
        record_count++;

        if (x % commit_period == 0) {
            write_group.finish();
            write_group = db.newWriteGroup();
        }

        if (x % flush_period == 0) {
            System.Console.WriteLine("*** RANDOM SEED: " + seed);
            write_group.finish();
            write_group = db.newWriteGroup();

            System.Console.WriteLine("start % 1000 cycle..");

            db.flushWorkingSegment();
            db.freespacemgr.debugDumbCurrentFreespace();
            win.debugDump(db);
            dumpMergeCandidates(db);

            for (int mx = 0; mx < 30; mx++) {
                mc = db.rangemapmgr.mergeManager.getBestCandidate();
                if (mc == null) { break; }
                if (mc.score() > (1.6 + (float)db.rangemapmgr.mergeManager.getMaxGeneration() / 12.0f)) {
                    System.Console.WriteLine("** best merge score too high: " + mc);
                    break;
                }
                System.Console.WriteLine("merge " + mx + " : " + mc);
                win.debugDump(db, mc);
                db.performMerge(mc);
                System.Console.WriteLine("mergedone " + mx + " : " + mc);
                dumpSegmentList(db);
                dumpMergeCandidates(db);
                win.debugDump(db);
            }

            elapsed = (DateTime.Now - start).TotalMilliseconds / 1000.0;
            System.GC.Collect();

            Console.WriteLine("*************************************************");
            Console.WriteLine("*************************************************");
            Console.WriteLine("*** merge cycle done {0} records so far, in {1} total time, {2} records/second",
                record_count, elapsed, (double)record_count / elapsed);
            Console.WriteLine("*************************************************");
            Console.WriteLine("*************************************************");
        }
    }

    System.Console.WriteLine("-------- Merge a bunch more ------------------");
    for (int x = 0; x < 30; x++) {
        mc = db.rangemapmgr.mergeManager.getBestCandidate();
        System.Console.WriteLine("merge : " + mc);
        if (mc == null) break;
        win.debugDump(db, mc);
        db.performMerge(mc);
        dumpSegmentList(db);
        dumpMergeCandidates(db);
        win.debugDump(db, null);
    }
    dumpSegmentList(db);

    elapsed = (DateTime.Now - start).TotalMilliseconds / 1000.0;
    Console.WriteLine("*************************************************");
    Console.WriteLine("*************************************************");
    Console.WriteLine("*** merge cycle done {0} records so far, in {1} total time, {2} records/second",
        record_count, elapsed, (double)record_count / elapsed);
    Console.WriteLine("*************************************************");
    Console.WriteLine("*************************************************");

    System.Console.WriteLine("** done.");
    Environment.Exit(0);
}