Example #1
        static void fetchHitsTest()
        {
            LayerManager db = new LayerManager(InitMode.RESUME, @"c:\EmailTest\DB");

            Console.WriteLine("====================== FETCH HITS TEST =======================");
            Console.WriteLine("====================== FETCH HITS TEST =======================");
            Console.WriteLine("====================== FETCH HITS TEST =======================");
            Console.WriteLine("====================== FETCH HITS TEST =======================");
            Console.WriteLine("====================== FETCH HITS TEST =======================");
            Console.WriteLine("====================== FETCH HITS TEST =======================");
            Console.WriteLine("====================== FETCH HITS TEST =======================");

            var kprefix = new RecordKey().appendParsedKey(".zdata/index/jeske");

            var first_row = db.FindNext(kprefix, true);

            Console.WriteLine("First foudn key: {0}", first_row);

            #if false
            int count = 0;
            foreach (var hit in db.scanForward(new ScanRange<RecordKey>(kprefix, RecordKey.AfterPrefix(kprefix), null))) {
                Console.WriteLine(hit);
                count++;
            }
            Console.WriteLine("scanned {0} hits", count);
            #endif
        }
Example #2
 public static LayerMaintenanceThread startMaintThread(LayerManager db)
 {
     LayerMaintenanceThread worker = new LayerMaintenanceThread(db);
     Thread workerThread = new Thread(worker.doWork);
     workerThread.Name = "LayerMaintenanceThread";
     workerThread.Start();
     return worker;
 }
Example #3
 static void dumpAllRows()
 {
     int count = 0;
     LayerManager db = new LayerManager(InitMode.RESUME, @"c:\EmailTest\DB");
     foreach (var row in db.scanForward(null)) {
         count++;
         Console.WriteLine(row);
     }
     Console.WriteLine("{0} rows", count);
 }
Example #4
 static void countAllSegments()
 {
     int count = 0;
     LayerManager db = new LayerManager(InitMode.RESUME, @"c:\EmailTest\DB");
     foreach (var seg in db.listAllSegments()) {
         count++;
         Console.WriteLine(seg);
     }
     Console.WriteLine("{0} segments", count);
 }
Example #5
        public LayerWriteGroup(LayerManager _layer, WriteGroupType type = DEFAULT_WG_TYPE)
        {
            this.mylayer = _layer;
            this.tsn = _layer.tsnidgen.nextTimestamp();
            this.type = type;

            mylayer.pending_txns.Add(tsn, new WeakReference<LayerWriteGroup>(this));  // track pending transactions without preventing collection

            // TODO: store the stack backtrace of who created this if we're in debug mode
        }
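The TODO above hints at recording who created each write group. A minimal, hypothetical sketch of how that could be done in debug builds follows; the helper class and its created_at_backtrace field are illustrative names and not part of the original LayerWriteGroup.

using System.Diagnostics;

// Hypothetical sketch for the TODO above: capture the creator's stack trace
// in debug builds so an unfinished/leaked write group can be traced back to
// its origin. Names here are assumptions, not original project API.
class WriteGroupDebugInfo
{
    public readonly string created_at_backtrace;

    public WriteGroupDebugInfo()
    {
#if DEBUG
        // skip this constructor's frame; 'true' requests file/line info when PDBs are available
        created_at_backtrace = new StackTrace(1, true).ToString();
#else
        created_at_backtrace = "(not captured in release builds)";
#endif
    }
}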
Example #6
        static void document_db_test()
        {
            Console.WriteLine("======================= Document DB Test ==============================");

            LayerManager raw_db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\main");
            StepsDatabase db_broker = new StepsDatabase(raw_db);

            IStepsDocumentDB doc_db = db_broker.getDocumentDatabase();

            doc_db.ensureIndex( new string[] { "name" } );
            doc_db.ensureIndex(new string[] { "age"});

            doc_db.Insert(new BsonDocument {
                { "_id" , "user1" },
                { "name" , "David" },
                { "age", 60 }

            });

            doc_db.Insert(new BsonDocument {
                { "_id" , "user2" },
                { "name" , "Tom" },
                { "age", 32 }
            });

            doc_db.Insert(new BsonDocument {
                { "_id" , "user3" },
                { "name" , "Tom" },
                { "age", 32 }
            });

            raw_db.debugDump();

            int count=0;
            foreach (var doc in doc_db.Find(new BsonDocument() )) {
                    Console.WriteLine(" [{0}] = {1}", count++, doc.ToJson());
            }

            var change_spec = new BsonDocument{
                { "$inc" , new BsonDocument { { "age", 1 } } }
                };

            Console.WriteLine("change spec = " + change_spec.ToJson());

            doc_db.Update(new BsonDocument(), change_spec);

            raw_db.debugDump();

            foreach (var doc in doc_db.Find(new BsonDocument () )) {
                Console.WriteLine(" [{0}] = {1}", count++, doc.ToJson());
            }
        }
Example #7
            internal WriteThreadsTest(int num_values=10, int checkpoint_interval_rowcount=50, bool withMerge=false)
            {
                System.GC.Collect();
                db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\11");
                // db.startMaintThread();
                this.checkpoint_interval = checkpoint_interval_rowcount;
                this.withMerge = withMerge;

                // init sequential data-values...
                datavalues = new int[num_values];
                for (int i = 0; i < num_values; i++) {
                    datavalues[i] = i;
                }

                // then shuffle them..
                Random rnd = new Random();
                rnd.Shuffle(datavalues);
            }
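Note that Random.Shuffle only ships as a built-in with .NET 8 and later; on older runtimes the rnd.Shuffle(datavalues) call above presumably resolves to a project extension method. A minimal sketch of such an extension, as an in-place Fisher-Yates shuffle, might look like the following (the class name is an assumption):

using System;

// Assumed stand-in for the Shuffle extension used above:
// an in-place Fisher-Yates shuffle of an int array.
static class RandomShuffleExtensions
{
    public static void Shuffle(this Random rnd, int[] values)
    {
        for (int i = values.Length - 1; i > 0; i--)
        {
            int j = rnd.Next(i + 1);   // pick a random index in [0, i]
            int tmp = values[i];
            values[i] = values[j];
            values[j] = tmp;
        }
    }
}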
Example #8
        static void Main(string[] args)
        {
            bool isInitialStartup = false;

            // (1) read the config file

            string DBPATH = @"c:\BENDtst\main";

            // ... if we are doing an initial startup...

            if (isInitialStartup) {
                // DO initial database setup and then end...
                LayerManager new_db = new LayerManager(InitMode.NEW_REGION, DBPATH);

                // ...

                return;
            }

            // (2) startup a snapshot/replica/document database

            LayerManager raw_db = new LayerManager(InitMode.RESUME, DBPATH);
            StepsDatabase db_broker = new StepsDatabase(raw_db);

            // how do we address subsetting / databases / collections???
            IStepsDocumentDB doc_db = db_broker.getDocumentDatabase();

            // (3) startup the web-status interface

            StepsStatusServer myStatusServer = new StepsStatusServer(81, raw_db);
            Thread thread = new Thread(new ThreadStart(myStatusServer.listen));
            thread.Start();

            // (4) start the REST api handler (listening for client connections)

            StepsRestAPIServer myServer = new StepsRestAPIServer(5985, doc_db);
            myServer.listen(); // (main runloop)

            // !! SHUTDOWN !!

            thread.Abort();
        }
Example #9
        public FreespaceManager(LayerManager store, int init_freelist = 0)
        {
            this.store = store;

            if (init_freelist != 0) {
            } else {

                RecordData data;
                if (store.getRecord(new RecordKey().appendParsedKey(".ROOT/FREELIST/HEAD"), out data) == GetStatus.MISSING) {

                    throw new Exception("no freelist head!");

                    // TODO: fix this init hack
                    // next_allocation = (int)(RootBlockHeader.ROOTBLOCK_SIZE + LogWriter.DEFAULT_LOG_SIZE);

                } else {
                    next_allocation = Lsd.lsdToNumber(data.data);
                }
            }
        }
Example #10
        public static void waitUntilState(LayerManager db, ReplHandler srvr, ReplState state)
        {
            for (int x = 0; x < 20; x++) {
                if (srvr.State == state) {
                    break;
                }
                Console.WriteLine("waiting for ({0}) to become {1}.. (currently: {2})",
                    srvr.ToString(),state, srvr.State);

                Thread.Sleep(1000);
            }
            if (srvr.State != state) {
                db.debugDump();
                Console.WriteLine("server({0}) failed to become {1}, aborting test",
                    srvr.ToString(), state);

                Environment.Exit(1);
            }

            Console.WriteLine("Server ({0}) is now {1}!", srvr, state);
        }
Example #11
        public void refreshFromDb(LayerManager db, MergeCandidate mc = null)
        {
            var seg = new List<SegmentDescriptor>();
            // this is much faster than using listAllSegments
            foreach(var kvp in db.rangemapmgr.mergeManager.segmentInfo) {
                seg.Add(kvp.Key);
            }

            segments = seg;
            this.lastmerge = mc;

            // we should be doing this, but .Keys is not implemented in BDSkipList
            // segments.AddRange(db.rangemapmgr.mergeManager.segmentInfo.Keys);
            // segments.AddRange(db.listAllSegments());
            this.Invoke((MethodInvoker) delegate() {
                try {
                    this.Refresh();
                } catch (Exception e) {
                    System.Console.WriteLine("######" + e.ToString());
                    throw e;
                }
                });
        }
Example #12
        public void T01_LayerTxnLogResume()
        {
            String[] keys = { "test-1", "test-2", "test-3" };
            String[] values = {"a","b","c" };

            {
                LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\4");

                LayerWriteGroup txn = db.newWriteGroup();
                for (int i=0;i<keys.Length;i++) {
                    txn.setValueParsed(keys[i],values[i]);
                }
                txn.finish();

                // TODO: assure the freespace hasn't been affected

                // assure we have not committed any segments
                Assert.AreEqual(1, db.segmentlayers.Count, "shold be one memory segment");
                Assert.AreEqual(db.segmentlayers[0], db.workingSegment, "memory segment should be the working segment");

                // assure the working segment contains the right data
                // 3 test records,  the NUMGENERATIONS record, and FREELIST/HEAD
                // TODO: make a more robust way to do this test (i.e. count non .ROOT records)
                Assert.AreEqual(5, db.workingSegment.RowCount, "record count match 1");
                db.Dispose();
            }

            {
                LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\4");
                System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

                // assure we still have not committed any segments
                Assert.AreEqual(1, db.segmentlayers.Count, "after resume: one memory segment");
                Assert.AreEqual(db.segmentlayers[0], db.workingSegment, "after resume: working segment setup");

                // assure the working segment contains the right data
                Assert.AreEqual(5, db.workingSegment.RowCount, "after resume: record count == 5");
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(new RecordKeyType_String(keys[i]));

                    // look directly in the working segment
                    {

                        RecordUpdate update;
                        GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.PRESENT, status, "SegmentBuilder.getRecordUpdate({0})", key);
                        Assert.AreEqual(values[i], enc.GetString(update.data), "SegmentBuilder.getRecordUpdate({0})",key);
                    }

                    // assure the global query interface finds it
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager.getRecord({0})", key);
                        Assert.AreEqual(values[i], enc.GetString(data.data), "LayerManager.getRecord({0})",key);
                    }
                }

                // cleanup
                db.Dispose();
            }
        }
Example #13
        public void T002_ScanDirections()
        {
            LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\3");

            var rk_a = new RecordKey().appendParsedKey(".a");
            var rk_b = new RecordKey().appendParsedKey(".b");
            string[] keys = { ".a", ".b" };
            foreach (var key in keys) {
                db.setValueParsed(key, "valueof:" + key);
            }

            {
                var rec = db.FindNext(rk_a, false);
                Assert.AreEqual(rk_b, rec.Key, "simple FindNext");
            }

            {
                var rec = db.FindPrev(rk_b, false);
                Assert.AreEqual(rk_a, rec.Key, "simple FindPrev");
            }

            var scan_range = new ScanRange<RecordKey>(rk_a, rk_b, null);

            // scan forward
            int count = 0;
            foreach (var row in db.scanForward(scan_range)) {
                Console.WriteLine("forward scan: " + row);
                if (count == keys.Length) {
                    Assert.Fail("too many keys returned in scan");
                }
                Assert.AreEqual(new RecordKeyType_String(keys[count]), row.Key.key_parts[0], "forward scan mistake");
                count++;
            }
            if (count != keys.Length) {
                Assert.Fail("not enough keys returned in scan");
            }

            // scan backward

            count = keys.Length;
            foreach (var row in db.scanBackward(scan_range)) {
                Console.WriteLine("backward scan: " + row);
                if (count == 0) {
                    Assert.Fail("too many keys returned in scan backward");
                }
                count--;
                Assert.AreEqual(new RecordKeyType_String(keys[count]), row.Key.key_parts[0], "backward scan mistake");
            }
            if (count != 0) {
                Assert.Fail("not enough keys returned in scan");
            }
        }
Example #14
        public void T001_WorkingSegmentReadWrite()
        {
            LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\3");

            var rk = new RecordKey().appendParsedKey(".a");
            db.setValueParsed(".a", "1");
            KeyValuePair<RecordKey, RecordData> record;

            try {
                record = db.FindNext(rk, true);
                Assert.AreEqual(rk, record.Key, "fetched key does not match");
            } catch (KeyNotFoundException) {
                Assert.Fail("couldn't find 'a' record");
            }

            int found_recs = 0;
            var scan_range = new ScanRange<RecordKey>(rk, RecordKey.AfterPrefix(rk), null);

            foreach (var row in db.scanForward(scan_range)) {
                found_recs++;
            }
            Assert.AreEqual(1, found_recs, "found the wrong number of records in working segment scan!");

            db.flushWorkingSegment();

            try {
                record = db.FindNext(rk, true);
                Assert.AreEqual(rk, record.Key, "fetched key does not match (after flush)");
            } catch (KeyNotFoundException) {
                Assert.Fail("couldn't find 'a' record");
            }

            found_recs = 0;
            foreach (var row in db.scanForward(
                new ScanRange<RecordKey>(rk, RecordKey.AfterPrefix(rk), null))) {
                found_recs++;
            }
            Assert.AreEqual(1, found_recs, "found the wrong number of records after flush !");
        }
Example #15
        public void T001_MultiWorkingSegmentReadWrite()
        {
            LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\3");

            var rk = new RecordKey().appendParsedKey(".data/a");
            db.setValueParsed(".data/a", "1");
            KeyValuePair<RecordKey, RecordData> record;

            try {
                record = db.FindNext(rk, true);
                Assert.AreEqual(rk, record.Key, "fetched key does not match");
            } catch (KeyNotFoundException) {
                Assert.Fail("couldn't find 'a' record");
            }

            int found_recs = 0;
            var scan_range = new ScanRange<RecordKey>(rk, RecordKey.AfterPrefix(rk), null);

            foreach (var row in db.scanForward(scan_range)) {
                found_recs++;
            }
            Assert.AreEqual(1, found_recs, "found the wrong number of records in working segment scan!");

            db.DEBUG_addNewWorkingSegmentWithoutFlush();

            db.setValueParsed(".data/b", "2");

            Console.WriteLine("");
            Console.WriteLine("--- contents --");
            db.debugDump();
            Console.WriteLine("");

            // ------------------------------

            try {
                var rkb = new RecordKey().appendParsedKey(".data/b");
                record = db.FindNext(rkb, true);
                Assert.AreEqual(rkb, record.Key, "fetched key does not match (after flush)");
            } catch (KeyNotFoundException) {
                Assert.Fail("couldn't find 'b' record");
            }

            found_recs = 0;
            var rk_prefix = new RecordKey().appendParsedKey(".data");
            foreach (var row in db.scanForward(
                new ScanRange<RecordKey>(rk_prefix, RecordKey.AfterPrefix(rk_prefix), null))) {
                found_recs++;
            }
            Assert.AreEqual(2, found_recs, "found the wrong number of records after working segment addition !");
        }
Example #16
 public void T001_FullScanWithOnlyWorkingSegment()
 {
     LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\31");
     db.setValueParsed("test/1", "a");
     Assert.Fail("test not implemented");
 }
Example #17
 public ValueCheckerThread(LayerManager db, string key_to_check, string value_to_expect)
 {
     this.db = db;
     this.key_to_check = key_to_check;
     this.value_to_expect = value_to_expect;
 }
Example #18
            internal ReadThreadsTest(int rec_count, int rec_per_segment)
            {
                this.TEST_RECORD_COUNT = rec_count;
                this.RECORDS_PER_SEGMENT = rec_per_segment;
                System.GC.Collect();
                db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\10");
                testdata = new SortedDictionary<string, string>();
                testrows = new SortedDictionary<RecordKey, RecordUpdate>();

                // generate some data
                for (int i=0;i<TEST_RECORD_COUNT;i++) {
                    string key = "test/" + i.ToString();
                    string value = "data: " + key;
                    testdata[key] = value;

                    RecordKey rkey = new RecordKey().appendParsedKey(key);
                    RecordUpdate rupdate = RecordUpdate.WithPayload(value);

                    testrows[rkey] = rupdate;
                }

                // fill the db with some data.
                int pos = 0;
                foreach (KeyValuePair<RecordKey,RecordUpdate> kvp in testrows) {
                    LayerWriteGroup txn = db.newWriteGroup();
                    txn.setValue(kvp.Key, kvp.Value);
                    txn.finish();
                    pos++;

                    if ((pos % RECORDS_PER_SEGMENT) == 0) {
                       db.flushWorkingSegment();
                    }
                }
                db.flushWorkingSegment();
            }
Example #19
        public void T04_SingleSegmentRootMetadataLogRecovery()
        {
            // TEST: test multiple segments flushed, and "log resumed"  (walk .ROOT range map)

            // perform the previous test
            T03_SegmentLayerGetRecordApplicationOrder();

            // ... and then perform a resume
            LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

            String[] keys = { "test-1", "test-2", "test-3" };
            String[] values = { "a-second", "b-second", "c-second" };

            // verify that it has the same data as before the RESUME
            {
                // working segment should be empty
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(keys[i]);

                    // look directly in the working segment, they should be MISSING
                    //   This is testing the checkpoint as well. If log resume didn't
                    //   CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                    {
                        RecordUpdate update;
                        GetStatus status =
                            db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
                    }

                    // assure the global query interface finds the NEW VALUES
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                        Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                    }
                }

                // now generate a BUNCH of new segments...
                {
                    String[] secondkeys = { "second-test-1", "second-test-2", "second-test-3" };
                    String[] secondvalues = { "a-second", "b-second", "c-second" };

                    // put each new record in its OWN segment
                    for (int i = 0; i < secondkeys.Length; i++) {
                        LayerWriteGroup txn = db.newWriteGroup();
                        txn.setValueParsed(secondkeys[i], secondvalues[i]);
                        txn.finish();
                        db.flushWorkingSegment();
                    }

                    db.Dispose();

                    db.debugDump();

                    // RESUME
                    db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

                    // first test records should still be visible
                    for (int i = 0; i < keys.Length; i++) {
                        RecordKey key = new RecordKey();
                        key.appendKeyPart(keys[i]);

                        // look directly in the working segment, they should be MISSING
                        //   This is testing the checkpoint as well. If log resume didn't
                        //   CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                        {
                            RecordUpdate update;
                            GetStatus status =
                                db.workingSegment.getRecordUpdate(key, out update);
                            Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING {0}", key);
                        }

                        // assure the global query interface finds the NEW VALUES
                        {
                            RecordData data;
                            GetStatus status = db.getRecord(key, out data);
                            Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES : {0}", key);
                            Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES : {0}", key);
                        }
                    }
                    db.debugDump();
                    // verify that the secondkeys/values are still in there
                    for (int i = 0; i < secondkeys.Length; i++) {
                        RecordKey key = new RecordKey();
                        key.appendKeyPart(secondkeys[i]);

                        // look directly in the working segment, they should be MISSING
                        //   This is testing the checkpoint as well. If log resume didn't
                        //   CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                        {
                            RecordUpdate update;
                            GetStatus status =
                                db.workingSegment.getRecordUpdate(key, out update);
                            Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
                        }

                        // assure the global query interface finds the NEW VALUES
                        {
                            RecordData data;
                            GetStatus status = db.getRecord(key, out data);
                            Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES, where is: " + key);
                            Assert.AreEqual(secondvalues[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                        }
                    }

                }
                db.Dispose();
            }
        }
Example #20
        public void T02_LayerSegmentFlushAndFreespaceModification()
        {
            String[] keys = { "test-1", "test-2", "test-3" };
            String[] values = { "a", "b", "c" };
            System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

            LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\5");

            db.debugDump();

            LayerWriteGroup txn = db.newWriteGroup();
            for (int i = 0; i < keys.Length; i++) {
                txn.setValueParsed(keys[i], values[i]);
            }
            txn.finish();
            db.flushWorkingSegment();
            db.debugDump();

            // assure that we checkpointed down to a single working segment
            Assert.AreEqual(1, db.segmentlayers.Count, "segment layer count");

            // assure we allocated a new generation and gen0 range record (walk .ROOT range map)
            // TODO: move this test to RangemapManager, to remove this cross-dependency
            {
                RecordData data;
                RecordKey key = new RecordKey().appendParsedKey(".ROOT/VARS/NUMGENERATIONS");
                Assert.AreEqual(GetStatus.PRESENT,db.getRecord(key, out data),"missing numgenerations record");
                Assert.AreEqual("1", data.ReadDataAsString(),"generation count 1");

                RecordUpdate update;
                Assert.AreEqual(GetStatus.PRESENT,
                    db.workingSegment.getRecordUpdate(key, out update), "missing workingsegment numgenerations record");
                    Assert.AreEqual("1", enc.GetString(update.data), "generation count 2");
            }

            #if false
            if (false) {
                RecordData data;
                Assert.AreEqual(
                    GetStatus.PRESENT,
                    db.getRecord(new RecordKey()
                    .appendParsedKey(".ROOT/GEN")
                    .appendKeyPart(new RecordKeyType_Long(0))
                    .appendKeyPart("</>"), out data),
                    ".ROOT/GEN/0/</>  key is missing");
            }
            #endif

            // TODO: assure we subtracted the new range record from the freespace

            // assure the records we wrote are NOT in the working segment, but ARE in the next layer
            for (int i = 0; i < keys.Length; i++) {
                RecordKey key = new RecordKey();
                key.appendKeyPart(keys[i]);

                // look directly in the working segment, they should be **MISSING**
                {
                    RecordUpdate update;
                    GetStatus status =
                        db.workingSegment.getRecordUpdate(key, out update);
                    Assert.AreEqual(GetStatus.MISSING, status, "working segment should be clear");
                }

                // assure the global query interface finds it
                {
                    RecordData data;
                    GetStatus status = db.getRecord(key, out data);
                    Assert.AreEqual(GetStatus.PRESENT, status, "records should be found in layers, {0} missing", key);
                    Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord()");
                }
            }

            db.Dispose();
        }
Example #21
        public void T03_SegmentLayerGetRecordApplicationOrder()
        {
            // Assure that when records are written more than once, the updates are applied in the correct
            // order so we see the proper current data value

            LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\6");

            {
                String[] keys = { "test-1", "test-2", "test-3" };
                String[] values = { "a-first", "b-first", "c-first" };

                LayerWriteGroup txn = db.newWriteGroup();
                for (int i = 0; i < keys.Length; i++) {
                    txn.setValueParsed(keys[i], values[i]);
                }
                txn.finish();
                db.flushWorkingSegment();

                // assure the records we wrote are NOT in the working segment, but ARE in the next layer
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(keys[i]);

                    // look directly in the working segment, they should be **MISSING**
                    {
                        RecordUpdate update;
                        GetStatus status =
                            db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.MISSING, status, "working segment should be clear");
                    }

                    // assure the global query interface finds it
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "records should be found in layers");
                        Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord()");
                    }
                }
            }

            // now write the same keys again with different values into the working segment
            {
                String[] keys = { "test-1", "test-2", "test-3" };
                String[] values = { "a-second", "b-second", "c-second" };
                System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

                LayerWriteGroup txn = db.newWriteGroup();
                for (int i = 0; i < keys.Length; i++) {
                    txn.setValueParsed(keys[i], values[i]);
                }
                txn.finish();

                // assure that both the workingsegment and layermanager see the NEW VALUES
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(keys[i]);

                    // look directly in the working segment, they should be the NEW VALUES
                    {
                        RecordUpdate update;
                        GetStatus status =
                            db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.PRESENT, status, "working segment should have NEW VALUES");
                        Assert.AreEqual(values[i], enc.GetString(update.data), "SegmentBuilder.getRecordUpdate should see NEW VALUES");
                    }

                    // assure the global query interface finds the NEW VALUES
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                        Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                    }
                }

                // now flush the working segment (so we have two on-disk layers)
                db.flushWorkingSegment();

                // assure we still see the NEW VALUES, but that the working segment is empty
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(keys[i]);

                    // look directly in the working segment, they should be MISSING
                    {
                        RecordUpdate update;
                        GetStatus status =
                            db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.MISSING, status, "working segment should have NO values");
                    }

                    // assure the global query interface finds the NEW VALUES
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                        Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                    }
                }
            }
            db.Dispose();
        }
Example #22
 public static void dumpAllDbRecords(LayerManager db)
 {
     foreach (var kv in db.scanForward(null)) {
         System.Console.WriteLine("  nfound: {0} -> {1}",kv.Key,kv.Value);
     }
 }
Example #23
        public void T05_TestBackgroundFlushAndMergeConsistency()
        {
            // (1) one key to each of NUM_SEGMENTS separate segments
            // (2) setup a separate thread that just repeatedly checks each key
            // (3) perform a merge
            // (4) shutdown the threads and see if any detected a readback failure

            LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\7");
            List<ValueCheckerThread> checkers = new List<ValueCheckerThread>();
            int NUM_SEGMENTS = 7;
            int performed_iterations = 0;
            int TARGET_ITERATIONS = 10;
            try {
                for (int iter = 0; iter < TARGET_ITERATIONS; iter++) {

                    // setup the initial keys and checker threads
                    for (int x = 0; x < NUM_SEGMENTS; x++) {
                        string key = "test-" + x;
                        string value = "test-value-" + x;
                        db.setValueParsed(key, value);

                        // put it in its own segment
                        db.flushWorkingSegment();

                        // start a checking thread
                        ValueCheckerThread checker = new ValueCheckerThread(db, key, value);
                        Thread newthread = new Thread(checker.doValidate);
                        newthread.Start();
                        checkers.Add(checker);
                    }
                    Thread.Sleep(5);

                    // verify there are no errors
                    foreach (ValueCheckerThread checker in checkers) {
                        Console.WriteLine("Thread  key:{0}  checks:{1}  errors:{2}", checker.key_to_check, checker.num_checks, checker.num_errors);
                    }
                    foreach (ValueCheckerThread checker in checkers) {
                        Assert.AreEqual(0, checker.num_errors, "checker thread error, key(" + checker.key_to_check + ") error count != 0");
                    }

                    // trigger a merge

                    for (int x = 0; x < 20; x++) {
                        db.mergeIfNeeded();
                    }
                    Thread.Sleep(5);

                    // verify there are no errors
                    foreach (ValueCheckerThread checker in checkers) {
                        Console.WriteLine("Thread  key:{0}  checks:{1}  errors:{2}", checker.key_to_check, checker.num_checks, checker.num_errors);
                    }
                    foreach (ValueCheckerThread checker in checkers) {
                        Assert.AreEqual(0, checker.num_errors, "checker thread error, key:" + checker.key_to_check);
                    }

                    // end the threads..
                    foreach (ValueCheckerThread checker in checkers) {
                        checker.end();
                    }
                    Thread.Sleep(10);
                    foreach (ValueCheckerThread checker in checkers) {
                        checker.waitForEnd();
                    }
                    checkers.Clear();

                    Thread.Sleep(5);

                    // delete the keys
                    System.Console.WriteLine("======= Clearing for next run...");

                    for (int x = 0; x < NUM_SEGMENTS; x++) {
                        string key = "test-" + x;
                        string value = "test-value-" + x;
                        db.setValue(new RecordKey().appendParsedKey(key), RecordUpdate.DeletionTombstone());
                    }
                    db.flushWorkingSegment();
                    Thread.Sleep(5);
                    db.mergeIfNeeded();
                    Thread.Sleep(5);
                    System.Console.WriteLine("======= Cleared for next run...");
                    performed_iterations = iter;
                }
            } finally {
                System.Console.WriteLine("performed iterations = " + performed_iterations + " target iterations = " + TARGET_ITERATIONS);
            }
        }
Example #24
        public static void dumpAllDbRecords_old(LayerManager db)
        {
            RecordKey next_key = new RecordKey();
            RecordKey fkey = null;
            RecordData fdata = null;
            while (db.getNextRecord(next_key, ref fkey, ref fdata) == GetStatus.PRESENT) {
                next_key = fkey;

                System.Console.WriteLine("  found: {0} -> {1}", fkey.ToString(), fdata.ToString());

            }
        }
Example #25
 public void Dispose()
 {
     if (db != null) { db.Dispose(); db = null; }
 }
Example #26
 public static void dumpMergeCandidates(LayerManager db)
 {
     MergeManager_Incremental mm = db.rangemapmgr.mergeManager;
     System.Console.WriteLine("-- dumpMergeCandidates");
     foreach (var mc in mm.prioritizedMergeCandidates) {
         System.Console.WriteLine("  " + mc.ToString());
     }
 }
Example #27
        public void T000_EmptyLayerInitAndResume()
        {
            LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\3");

            Assert.AreEqual(1, db.segmentlayers.Count, "should be one memory segment layer");
            Assert.AreEqual(db.segmentlayers[0], db.workingSegment, "the working segment should be the same as the memory segment layer");
            Assert.AreEqual(2, db.workingSegment.RowCount, "should be two rows"); // expecting only the bootstrap NUMGENERATIONS and FREELIST/HEAD records

            // TEST: log is empty
            // TEST: freespace record established!
        }
Example #28
        public static void dumpSegmentList(LayerManager db)
        {
            #if true
                // this is the slow method

                var walk = db.rangemapmgr.mergeManager.segmentInfo.GetEnumerator();

                bool discrepancy = false;

                foreach (var seg in db.listAllSegments()) {

                    // Assert.AreEqual(true, walk.MoveNext(), "mergemanager missing record!");
                    // Assert.AreEqual(0, walk.Current.Key.CompareTo(seg), "mergemanager and db.listAllSegments have different data!");
                    if (walk.MoveNext()) {
                        if (walk.Current.Key.CompareTo(seg) != 0) {
                            discrepancy = true;
                            Console.WriteLine("  mismatch: db{0} mm{1}", seg, walk.Current.Key);
                        }
                    } else { discrepancy = true; }

                    System.Console.WriteLine("db gen{0} start({1}) end({2})", seg.generation, seg.start_key, seg.end_key);
                }

                if (discrepancy) {
                    foreach (var seginfo in db.rangemapmgr.mergeManager.segmentInfo) {
                        var seg = seginfo.Key;
                        System.Console.WriteLine("mm gen{0} start({1}) end({2})", seg.generation, seg.start_key, seg.end_key);
                    }
                    throw new Exception("mergemanager and db.listAllSegments have different data!");
                }
            #else
                // this is the fast method
                foreach (var seginfo in db.rangemapmgr.mergeManager.segmentInfo) {
                    var seg = seginfo.Key;
                    System.Console.WriteLine("fgen{0} start({1}) end({2})", seg.generation, seg.start_key, seg.end_key);
                }
            #endif
        }
Example #29
        public void T000_TestBasic_SnapshotTombstones()
        {
            var raw_db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\snapts");
            var snap_db = new StepsStageSnapshot(
               new StepsStageSubset(
                   new RecordKeyType_String("snapdb"),
                   raw_db));

            snap_db.setValue(new RecordKey().appendParsedKey("b/1"), RecordUpdate.DeletionTombstone());
            snap_db.setValue(new RecordKey().appendParsedKey("a/1"), RecordUpdate.WithPayload("data1"));

            var snapshot = snap_db.getSnapshot();

            snap_db.setValue(new RecordKey().appendParsedKey("a/1"), RecordUpdate.DeletionTombstone());

            raw_db.debugDump();

            int count = 0;
            foreach (var row in snap_db.scanForward(ScanRange<RecordKey>.All())) {
                Console.WriteLine("found record: " + row);
                count++;
            }
            Assert.AreEqual(0, count, "deletion tombstones didn't work in snapshot");
        }
Example #30
        public static void do_bringup_test(DbgGUI win)
        {
            LayerManager db = new LayerManager(InitMode.NEW_REGION,"c:\\BENDtst\\main");

            db.setValueParsed("test/3","a");
            db.setValueParsed("test/2","b");
            db.setValueParsed("test/1","c");
            db.debugDump();

            db.flushWorkingSegment();    // this will flush and read the current segment
            Console.WriteLine("--- after flush");
            db.debugDump();
            dumpMergeCandidates(db);

            Console.WriteLine("--- check record read");
            RecordData data;
            GetStatus status = db.getRecord(new RecordKey().appendParsedKey("test/3"), out data);
            System.Console.WriteLine("getRecord({0}) returned {1}", "test/3", data.ToString());

            Console.WriteLine("--- make lots of segments");
            db.setValueParsed("test/4", "d");
            db.flushWorkingSegment();
            db.setValueParsed("test/5", "e");
            db.flushWorkingSegment();
            db.setValueParsed("test/6", "f");
            db.flushWorkingSegment();
            db.debugDump();

            System.Console.WriteLine("-------- dump keys ---------------------");
            dumpAllDbRecords(db);
            dumpMergeCandidates(db);

            System.Console.WriteLine("-------- draw graphical debug ---------------------");
            win.debugDump(db);

            System.Console.WriteLine("-------- PERFORMING A SINGLE MERGE ---------------------");

            MergeCandidate mc;
            mc = db.rangemapmgr.mergeManager.getBestCandidate();
            System.Console.WriteLine("MERGE :" + mc);
            db.performMerge(mc);
            dumpMergeCandidates(db);

            db.flushWorkingSegment();
            db.debugDump();
            dumpSegmentList(db);
            win.debugDump(db);

            System.Console.WriteLine("-------- SINGLE MERGE DONE, close/dispose ---------------------");

            dumpSegmentList(db);
            dumpMergeCandidates(db);
            db.debugDump();
            db.Dispose();

            System.Console.WriteLine("-------- NOW RESUME ---------------------------------");
            db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\main");
            dumpSegmentList(db);
            dumpMergeCandidates(db);
            win.debugDump(db);
            db.debugDump();

            System.Console.WriteLine("-------- NOW FINDNEXT ---------------------------------");
            dumpAllDbRecords(db);
            win.debugDump(db);

            System.Console.WriteLine("-------- NOW MERGE ALL SEGMENTS ---------------------------------");
            dumpSegmentList(db);
            db.mergeAllSegments();
            db.debugDump();
            win.debugDump(db);

            // stop(); // ------------------------- ((  S   T   O   P  )) ---------------------------------

            System.Console.WriteLine("-------- NOW FINDNEXT (after merge) ---------------------------------");
            dumpAllDbRecords(db);

            //System.Console.WriteLine("-------- Now run Readthreads Test ---------------------------------");
            //A03_LayerManagerTests test = new A03_LayerManagerTests();
            //test.T10_LayerManager_ReadThreads();

            dumpMergeCandidates(db);
            win.debugDump(db);
            db.Dispose();

            System.Console.WriteLine("-------- Write ALOT of data ---------------------------------");

            int keysize = 20;
            int keycount = 1000000;
            int flush_period = 40000;
            int commit_period = 1000;
            bool random_order = true;

            double elapsed;

            DateTime start = DateTime.Now;
            int record_count = 0;

            db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\bigtest");
            String value = "";
            String keybase = "TestValueDataABC";
            for (int x = 0; x < keysize / keybase.Length; x++) { value = value + keybase; }
            int seed = (int)DateTime.Now.Ticks;
            Random rnd = new Random(seed);

            System.Console.WriteLine("*** RANDOM SEED: " + seed);
            var write_group = db.newWriteGroup();

            for (int x = 10000001; x < 10000001 + keycount; x++) {
                if (random_order) {
                    write_group.setValueParsed("test/rnd/" + rnd.Next(), value);
                } else {
                    write_group.setValueParsed("test/ordered/" + x, value);
                }
                record_count++;

                if (x % commit_period == 0) { write_group.finish(); write_group = db.newWriteGroup(); }

                if (x % flush_period == 0) {
                    System.Console.WriteLine("*** RANDOM SEED: " + seed);
                    write_group.finish(); write_group = db.newWriteGroup();
                    System.Console.WriteLine("start % 1000 cycle..");
                    db.flushWorkingSegment();

                    db.freespacemgr.debugDumbCurrentFreespace();
                    win.debugDump(db);
                    dumpMergeCandidates(db);

                    for (int mx = 0; mx < 30; mx++) {

                        mc = db.rangemapmgr.mergeManager.getBestCandidate();
                        if (mc == null) { break; }
                        if (mc.score() > (1.6 + (float)db.rangemapmgr.mergeManager.getMaxGeneration()/12.0f)) {
                            System.Console.WriteLine("** best merge score too high: " + mc);
                            break;
                        }
                        System.Console.WriteLine("merge " + mx + " : " + mc);

                        win.debugDump(db, mc);
                        db.performMerge(mc);
                        System.Console.WriteLine("mergedone " + mx + " : " + mc);

                        dumpSegmentList(db);
                        dumpMergeCandidates(db);
                        win.debugDump(db);
                    }

                    elapsed = (DateTime.Now - start).TotalMilliseconds / 1000.0;
                    System.GC.Collect();
                    Console.WriteLine("*************************************************");
                    Console.WriteLine("*************************************************");
                    Console.WriteLine("*** merge cycle done  {0} records so far, in {1} total time, {2} records/second",
                             record_count,elapsed, (double)record_count/elapsed);
                    Console.WriteLine("*************************************************");
                    Console.WriteLine("*************************************************");

                }
            }

            System.Console.WriteLine("-------- Merge a bunch more ------------------");

            for (int x = 0; x < 30; x++) {
                mc = db.rangemapmgr.mergeManager.getBestCandidate();
                System.Console.WriteLine("merge : " + mc);
                if (mc == null) break;
                win.debugDump(db, mc);
                db.performMerge(mc);

                dumpSegmentList(db);
                dumpMergeCandidates(db);
                win.debugDump(db,null);
            }

            dumpSegmentList(db);
            elapsed = (DateTime.Now - start).TotalMilliseconds / 1000.0;

            Console.WriteLine("*************************************************");
            Console.WriteLine("*************************************************");
            Console.WriteLine("*** merge cycle done  {0} records so far, in {1} total time, {2} records/second",
                     record_count, elapsed, (double)record_count / elapsed);
            Console.WriteLine("*************************************************");
            Console.WriteLine("*************************************************");

            System.Console.WriteLine("** done.");
            Environment.Exit(0);
        }