Example 1
        public void setValue(RecordKey skey, RecordUpdate supdate)
        {
            checkActive();

            // (1) write our repl log entry

            long logstamp = id_gen.nextTimestamp();

            RecordKey logkey = new RecordKey()
                .appendKeyPart("_logs")
                .appendKeyPart(ctx.server_guid)
                .appendKeyPart(new RecordKeyType_Long(logstamp));

            // (1.1) pack the key/value together into the log entry
            byte[] packed_update;
            {
                MemoryStream writer = new MemoryStream();
                // TODO: this seems like a really inefficient way to write out a key
                ISegmentBlockEncoder encoder = new SegmentBlockBasicEncoder();
                encoder.setStream(writer);
                encoder.add(skey, supdate);
                encoder.flush();
                packed_update = writer.ToArray();
            }
            RecordUpdate logupdate = RecordUpdate.WithPayload(packed_update);

            Console.WriteLine("writing log entry: {0} -> [ {1} = {2} ]",
                logkey, skey, supdate);
            next_stage.setValue(logkey, logupdate);

            // (2) trigger the repl notifier that there is a new entry to push
            pusher.wakeUpLogSleepers();

            // (3) write the record key
            Console.WriteLine("writing data entry: {0} = {1}",
                skey, supdate);
            RecordKey private_record_key = new RecordKey()
                .appendKeyPart("_data");
            foreach (var part in skey.key_parts) {
                private_record_key.appendKeyPart(part);
            }
            next_stage.setValue(private_record_key, supdate);
        }
Example 2
        internal void applyLogEntry(string from_server_guid, long logstamp, RecordUpdate logdata)
        {
            // (0) unpack the data
            BlockAccessor ba = new BlockAccessor(logdata.data);
            ISegmentBlockDecoder decoder = new SegmentBlockBasicDecoder(ba);

            // (1) add it to our copy of that server's log

            this._recordLogEntry(from_server_guid, logstamp, logdata);
            // (2) add it to the database

            foreach (var kvp in decoder.sortedWalk()) {
                RecordKey local_data_key = new RecordKey()
                    .appendKeyPart("_data");
                foreach (var part in kvp.Key.key_parts) {
                    local_data_key.appendKeyPart(part);
                }
                next_stage.setValue(local_data_key, kvp.Value);
            }
        }
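The decoder here reverses the packing done in Example 1's setValue. Below is a minimal round-trip sketch, assuming the same BEND types shown above (RecordKey, RecordUpdate, SegmentBlockBasicEncoder, SegmentBlockBasicDecoder, BlockAccessor) are in scope; error handling omitted:

    // pack one key/value pair, exactly as Example 1 does
    RecordKey skey = new RecordKey().appendKeyPart("example-key");
    RecordUpdate supdate = RecordUpdate.WithPayload(new byte[] { 1, 2, 3 });

    MemoryStream writer = new MemoryStream();
    ISegmentBlockEncoder encoder = new SegmentBlockBasicEncoder();
    encoder.setStream(writer);
    encoder.add(skey, supdate);
    encoder.flush();
    byte[] packed = writer.ToArray();

    // unpack it again, exactly as applyLogEntry does
    BlockAccessor ba = new BlockAccessor(packed);
    ISegmentBlockDecoder decoder = new SegmentBlockBasicDecoder(ba);
    foreach (var kvp in decoder.sortedWalk()) {
        Console.WriteLine("{0} = {1}", kvp.Key, kvp.Value);
    }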
Example 3
        public void T03_SegmentLayerGetRecordApplicationOrder()
        {
            // Assure that when records are written more than once, the updates are applied in the correct
            // order so we see the proper current data value

            LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\6");

            {
                String[] keys = { "test-1", "test-2", "test-3" };
                String[] values = { "a-first", "b-first", "c-first" };

                LayerWriteGroup txn = db.newWriteGroup();
                for (int i = 0; i < keys.Length; i++) {
                    txn.setValueParsed(keys[i], values[i]);
                }
                txn.finish();
                db.flushWorkingSegment();

                // assure the records we wrote are NOT in the working segment, but ARE in the next layer
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(keys[i]);

                    // look directly in the working segment, they should be **MISSING**
                    {
                        RecordUpdate update;
                        GetStatus status =
                            db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.MISSING, status, "working segment should be clear");
                    }

                    // assure the global query interface finds it
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "records should be found in layers");
                        Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord()");
                    }
                }
            }

            // now write the same keys again with different values into the working segment
            {
                String[] keys = { "test-1", "test-2", "test-3" };
                String[] values = { "a-second", "b-second", "c-second" };
                System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

                LayerWriteGroup txn = db.newWriteGroup();
                for (int i = 0; i < keys.Length; i++) {
                    txn.setValueParsed(keys[i], values[i]);
                }
                txn.finish();

                // assure that both the working segment and LayerManager see the NEW VALUES
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(keys[i]);

                    // look directly in the working segment, they should be the NEW VALUES
                    {
                        RecordUpdate update;
                        GetStatus status =
                            db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.PRESENT, status, "working segment should have NEW VALUES");
                        Assert.AreEqual(values[i], enc.GetString(update.data), "SegmentBuilder.getRecordUpdate should see NEW VALUES");
                    }

                    // assure the global query interface finds the NEW VALUES
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                        Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                    }
                }

                // now flush the working segment (so we have two on-disk layers)
                db.flushWorkingSegment();

                // assure we still see the NEW VALUES, but that the working segment is empty
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(keys[i]);

                    // look directly in the working segment, they should be MISSING
                    {
                        RecordUpdate update;
                        GetStatus status =
                            db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.MISSING, status, "working segment should have NO values");
                    }

                    // assure the global query interface finds the NEW VALUES
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                        Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                    }
                }
            }
            db.Dispose();
        }
Example 4
        public void T04_SingleSegmentRootMetadataLogRecovery()
        {
            // TEST: test multiple segments flushed, and "log resumed" (walk .ROOT range map)

            // perform the previous test
            T03_SegmentLayerGetRecordApplicationOrder();

            // ... and then perform a resume
            LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

            String[] keys = { "test-1", "test-2", "test-3" };
            String[] values = { "a-second", "b-second", "c-second" };

            // verify that it has the same data as before the RESUME
            {
                // working segment should be empty
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(keys[i]);

                    // look directly in the working segment, they should be MISSING
                    //   This is testing the checkpoint as well. If log resume didn't
                    //   CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                    {
                        RecordUpdate update;
                        GetStatus status =
                            db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
                    }

                    // assure the global query interface finds the NEW VALUES
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES");
                        Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                    }
                }

                // now generate a BUNCH of new segments...
                {
                    String[] secondkeys = { "second-test-1", "second-test-2", "second-test-3" };
                    String[] secondvalues = { "a-second", "b-second", "c-second" };

                    // put each new record in its OWN segment
                    for (int i = 0; i < secondkeys.Length; i++) {
                        LayerWriteGroup txn = db.newWriteGroup();
                        txn.setValueParsed(secondkeys[i], secondvalues[i]);
                        txn.finish();
                        db.flushWorkingSegment();
                    }

                    db.debugDump();
                    db.Dispose();

                    // RESUME
                    db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\6");

                    // first test records should still be visible
                    for (int i = 0; i < keys.Length; i++) {
                        RecordKey key = new RecordKey();
                        key.appendKeyPart(keys[i]);

                        // look directly in the working segment, they should be MISSING
                        //   This is testing the checkpoint as well. If log resume didn't
                        //   CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                        {
                            RecordUpdate update;
                            GetStatus status =
                                db.workingSegment.getRecordUpdate(key, out update);
                            Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING {0}", key);
                        }

                        // assure the global query interface finds the NEW VALUES
                        {
                            RecordData data;
                            GetStatus status = db.getRecord(key, out data);
                            Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES : {0}", key);
                            Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES : {0}", key);
                        }
                    }
                    db.debugDump();
                    // verify that the secondkeys/values are still in there
                    for (int i = 0; i < secondkeys.Length; i++) {
                        RecordKey key = new RecordKey();
                        key.appendKeyPart(secondkeys[i]);

                        // look directly in the working segment, they should be MISSING
                        //   This is testing the checkpoint as well. If log resume didn't
                        //   CHECKPOINT_DROP, then the values will be duplicated in the working segment.
                        {
                            RecordUpdate update;
                            GetStatus status =
                                db.workingSegment.getRecordUpdate(key, out update);
                            Assert.AreEqual(GetStatus.MISSING, status, "working segment should be MISSING");
                        }

                        // assure the global query interface finds the NEW VALUES
                        {
                            RecordData data;
                            GetStatus status = db.getRecord(key, out data);
                            Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager should see NEW VALUES, where is: " + key);
                            Assert.AreEqual(secondvalues[i], data.ReadDataAsString(), "LayerManager.getRecord() should see NEW VALUES");
                        }
                    }

                }
                db.Dispose();
            }
        }
Example 5
        public void T01_LayerTxnLogResume()
        {
            String[] keys = { "test-1", "test-2", "test-3" };
            String[] values = {"a","b","c" };

            {
                LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\4");

                LayerWriteGroup txn = db.newWriteGroup();
                for (int i = 0; i < keys.Length; i++) {
                    txn.setValueParsed(keys[i], values[i]);
                }
                txn.finish();

                // TODO: assure the freespace hasn't been affected

                // assure we have not committed any segments
                Assert.AreEqual(1, db.segmentlayers.Count, "shold be one memory segment");
                Assert.AreEqual(db.segmentlayers[0], db.workingSegment, "memory segment should be the working segment");

                // assure the working segment contains the right data
                // 3 test records, the NUMGENERATIONS record, and FREELIST/HEAD
                // TODO: make a more robust way to do this test (i.e. count non .ROOT records)
                Assert.AreEqual(5, db.workingSegment.RowCount, "record count match 1");
                db.Dispose();
            }

            {
                LayerManager db = new LayerManager(InitMode.RESUME, "c:\\BENDtst\\4");
                System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

                // assure we still have not committed any segments
                Assert.AreEqual(1, db.segmentlayers.Count, "after resume: one memory segment");
                Assert.AreEqual(db.segmentlayers[0], db.workingSegment, "after resume: working segment setup");

                // assure the working segment contains the right data
                Assert.AreEqual(5, db.workingSegment.RowCount, "after resume: record count == 5");
                for (int i = 0; i < keys.Length; i++) {
                    RecordKey key = new RecordKey();
                    key.appendKeyPart(new RecordKeyType_String(keys[i]));

                    // look directly in the working segment
                    {

                        RecordUpdate update;
                        GetStatus status = db.workingSegment.getRecordUpdate(key, out update);
                        Assert.AreEqual(GetStatus.PRESENT, status, "SegmentBuilder.getRecordUpdate({0})", key);
                        Assert.AreEqual(values[i], enc.GetString(update.data), "SegmentBuilder.getRecordUpdate({0})",key);
                    }

                    // assure the global query interface finds it
                    {
                        RecordData data;
                        GetStatus status = db.getRecord(key, out data);
                        Assert.AreEqual(GetStatus.PRESENT, status, "LayerManager.getRecord({0})", key);
                        Assert.AreEqual(values[i], enc.GetString(data.data), "LayerManager.getRecord({0})",key);
                    }
                }

                // cleanup
                db.Dispose();
            }
        }
Example 6
        public void T02_LayerSegmentFlushAndFreespaceModification()
        {
            String[] keys = { "test-1", "test-2", "test-3" };
            String[] values = { "a", "b", "c" };
            System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();

            LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\5");

            db.debugDump();

            LayerWriteGroup txn = db.newWriteGroup();
            for (int i = 0; i < keys.Length; i++) {
                txn.setValueParsed(keys[i], values[i]);
            }
            txn.finish();
            db.flushWorkingSegment();
            db.debugDump();

            // assure that we checkpointed down to a single working segment
            Assert.AreEqual(1, db.segmentlayers.Count, "segment layer count");

            // assure we allocated a new generation and gen0 range record (walk .ROOT range map)
            // TODO: move this test to RangemapManager, to remove this cross-dependency
            {
                RecordData data;
                RecordKey key = new RecordKey().appendParsedKey(".ROOT/VARS/NUMGENERATIONS");
                Assert.AreEqual(GetStatus.PRESENT, db.getRecord(key, out data), "missing numgenerations record");
                Assert.AreEqual("1", data.ReadDataAsString(), "generation count 1");

                RecordUpdate update;
                Assert.AreEqual(GetStatus.PRESENT,
                    db.workingSegment.getRecordUpdate(key, out update), "missing workingsegment numgenerations record");
                Assert.AreEqual("1", enc.GetString(update.data), "generation count 2");
            }

            #if false
            {
                RecordData data;
                Assert.AreEqual(
                    GetStatus.PRESENT,
                    db.getRecord(new RecordKey()
                        .appendParsedKey(".ROOT/GEN")
                        .appendKeyPart(new RecordKeyType_Long(0))
                        .appendKeyPart("</>"), out data),
                    ".ROOT/GEN/0/</>  key is missing");
            }
            #endif

            // TODO: assure we subtracted the new range record from the freespace

            // assure the records we wrote are NOT in the working segment, but ARE in the next layer
            for (int i = 0; i < keys.Length; i++) {
                RecordKey key = new RecordKey();
                key.appendKeyPart(keys[i]);

                // look directly in the working segment, they should be **MISSING**
                {
                    RecordUpdate update;
                    GetStatus status =
                        db.workingSegment.getRecordUpdate(key, out update);
                    Assert.AreEqual(GetStatus.MISSING, status, "working segment should be clear");
                }

                // assure the global query interface finds it
                {
                    RecordData data;
                    GetStatus status = db.getRecord(key, out data);
                    Assert.AreEqual(GetStatus.PRESENT, status, "records should be found in layers, {0} missing", key);
                    Assert.AreEqual(values[i], data.ReadDataAsString(), "LayerManager.getRecord()");
                }
            }

            db.Dispose();
        }
Example 7
        public void mapSegment(LayerWriteGroup tx, int use_gen, 
            RecordKey start_key, RecordKey end_key, IRegion reader)
        {
            if (!(tx.type == LayerWriteGroup.WriteGroupType.DISK_ATOMIC_NOFLUSH ||
                  tx.type == LayerWriteGroup.WriteGroupType.DISK_ATOMIC_FLUSH)) {
                throw new Exception("NewUnusedSegment.mapSegment() must be provided an ATOMIC write group");
            }

            // remove the pending entry
            RecordKey key = new RecordKey().appendParsedKey(".ROOT/FREELIST/PENDING");
            key.appendKeyPart(new RecordKeyType_Long(reader.getStartAddress()));
            tx.setValue(key, RecordUpdate.DeletionTombstone());

            // add the new map
            tx.mylayer.rangemapmgr.mapGenerationToRegion(tx, use_gen, start_key, end_key, reader);
        }
Example 8
        public void T10_RecordKeyEncodeDecodeBugTest()
        {
            // test encode/decode with byte[] parts

            // 92 43 0
            byte[] chars = { 92, 43, 0 };
            {
                System.Text.ASCIIEncoding enc = new System.Text.ASCIIEncoding();
                String keystring = enc.GetString(chars);

                Assert.AreEqual(chars, enc.GetBytes(keystring), "string encoding not reversible");

                RecordKey key = new RecordKey();
                key.appendKeyPart(chars);
                byte[] data = key.encode();
                Assert.AreEqual(key, new RecordKey(data), "check encode/decode with binary data");

                // check nested key

                var wrap_key = new RecordKey().appendKeyPart(key.encode());
                byte[] wrap_encoded = wrap_key.encode();

                RecordKey wrap_decoded = new RecordKey(wrap_encoded);
                Assert.AreEqual(wrap_key, wrap_decoded, "nested key encode/decode");
            }
        }
Example 9
        // move the pending address into the freelist
        private void handleRegionSafeToFree(long start_addr, FreespaceExtent extent, LayerWriteGroup wg)
        {
            System.Console.WriteLine("*\n*\n*\n* handleRegionSafeToFree {0} \n*\n*\n*", start_addr);
            // (1) remove pending entry
            wg.setValue(pendingKeyForAddr(start_addr), RecordUpdate.DeletionTombstone());

            // (2) write real freelist entry (TODO: merge with neighboring entries)
            {
                RecordKey key = new RecordKey().appendParsedKey(".ROOT/FREELIST/EXTENTS");
                key.appendKeyPart(new RecordKeyType_Long(extent.end_addr));
                wg.setValue(key, RecordUpdate.WithPayload(extent.pack()));
            }
            wg.finish();
        }
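For orientation, the freelist key layout that this method and the allocator in Example 15 maintain (printed forms; the trailing parts are RecordKeyType_Long):

    // .ROOT/FREELIST/PENDING/<start_addr>  -- extent carved out, not yet mapped or freed
    // .ROOT/FREELIST/EXTENTS/<end_addr>    -- extent available for reuse (payload: packed FreespaceExtent)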
Example 10
        private RecordKey pendingKeyForAddr(long start_addr)
        {
            RecordKey key = new RecordKey().appendParsedKey(".ROOT/FREELIST/PENDING");
            key.appendKeyPart(new RecordKeyType_Long(start_addr));
            return key;
        }
Example 11
        public void setValue(RecordKey key, RecordUpdate update)
        {
            // RecordKey key = key.clone();
            if (this.is_frozen) {
                throw new Exception("snapshot not writable! " + this.frozen_at_timestamp);
            }

            // (1) get our timestamp
            long timestamp = id_gen.nextTimestamp();
            // (2) add our timestamp attribute to the end of the keyspace
            key.appendKeyPart(new RecordKeyType_AttributeTimestamp(timestamp));
            next_stage.setValue(key, update);
        }
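Note that this setValue appends the timestamp part to the caller's key object in place (the commented-out clone line suggests copying was considered). A sketch of the visible side effect; stage is an assumed instance of the enclosing class:

    // the caller's RecordKey is mutated by setValue
    RecordKey k = new RecordKey().appendKeyPart("accounts");
    int before = k.numParts();   // 1
    stage.setValue(k, RecordUpdate.WithPayload(new byte[] { 1 }));
    int after = k.numParts();    // 2: the timestamp part was appended in place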
Example 12
        public RecordKey genLowestKey()
        {
            // build the lowest key matching this qualifier list, part by part
            RecordKey key = new RecordKey();
            foreach (QualifierBase part in key_part_qualifiers) {
                key.appendKeyPart(part.genLowestKeyTest().ToString());
            }
            return key;
        }
Example 13
        public void T06_RecordKeyDelimiterEscape()
        {
            string DELIM = new String(RecordKey.PRINTED_DELIMITER, 1);

            RecordKey key1 = new RecordKey();
            key1.appendKeyParts("1", "2", "3");
            Assert.AreEqual(3, key1.numParts());
            RecordKey dkey1 = new RecordKey(key1.encode());
            Assert.AreEqual(3, dkey1.numParts(), "dkey1 delimiter decode");

            RecordKey key2 = new RecordKey();
            key2.appendKeyPart("1" + DELIM + "2" + DELIM + "3");
            Assert.AreEqual(1, key2.numParts());
            RecordKey dkey2 = new RecordKey(key2.encode());
            Assert.AreEqual(1, dkey2.numParts(), "dkey2 delimiter decode");

            // key2 > key1
            Assert.AreEqual(1, key2.CompareTo(key1));
            Assert.AreEqual(-1, key1.CompareTo(key2));
        }
Example 14
        internal IEnumerable<KeyValuePair<RecordKey, RecordData>> fetchLogEntries(
                        string log_server_guid,
                        RecordKeyType log_start_key,
                        int limit = -1,
                        bool block = false)
        {
            var rk_start = new RecordKey()
                .appendKeyPart("_logs")
                .appendKeyPart(log_server_guid);

            if (!log_start_key.Equals("")) {
                rk_start.appendKeyPart(log_start_key);
            }

            var rk_end = new RecordKey()
                .appendKeyPart("_logs")
                .appendKeyPart(log_server_guid);

            var scanrange = new ScanRange<RecordKey>(rk_start, RecordKey.AfterPrefix(rk_end), null);

            Console.WriteLine(" fetchLogEntries (block:{3}) for ({0}): start {1}  end {2}",
                log_server_guid, rk_start, rk_end, block);

            bool matched_first = false;
            int count = 0;

            retry_log_fetch:

            foreach (var logrow in next_stage.scanForward(scanrange)) {
                if (!matched_first) {
                    // the first logrow needs to match the log_start_key, or there was a gap in the log!!
                    var logstamp = logrow.Key.key_parts[2];
                    if (logstamp.CompareTo(log_start_key) != 0) {
                        throw new LogException(
                            String.Format("log start gap! guid:{0} log_start_key:{1} logstamp:{2}",
                               log_server_guid,log_start_key,logstamp));
                    }
                    matched_first = true;
                    continue;
                }
                yield return logrow;
                count++;

                // if we're limiting the number of returned rows...
                if ((limit != -1) && (count > limit)) {
                    yield break;
                }
            }

            if (!matched_first) {
                throw new LogException("no log entries!");
            }
            // if we only matched one log row, then it should be the matching first row.

            if ((count == 0) && block) {
                Console.WriteLine("++++++++ block on log tail");
                lock (this.logWaiters) {
                    Monitor.Wait(this.logWaiters);
                }
                Console.WriteLine("++++++++ wakeup from log tail");
                goto retry_log_fetch;
            }
        }
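A hypothetical caller sketch; repl stands in for an instance of the enclosing class (the method is internal, so same-assembly access is assumed), and the starting logstamp is assumed known:

    // Pull up to `limit` entries from a peer's log. The first scanned row must
    // equal the start key (it is skipped, not yielded); otherwise a LogException
    // signals a gap in the log.
    var start_stamp = new RecordKeyType_Long(0);   // assumed starting logstamp
    foreach (var entry in repl.fetchLogEntries("peer-server-guid", start_stamp, limit: 10)) {
        Console.WriteLine("log entry: {0} = {1}", entry.Key, entry.Value);
    }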
Example 15
        public NewUnusedSegment allocateNewSegment(LayerWriteGroup tx, int length)
        {
            // use one big nasty lock to prevent race conditions
            lock (this) {

                // try to find an extent with enough space to carve off a chunk
                foreach (var rec in store.scanForward(new ScanRange<RecordKey>(freelist_prefix,
                                RecordKey.AfterPrefix(freelist_prefix), null))) {
                    FreespaceExtent extent = FreespaceExtent.unpack(rec.Value.data);

                    if (extent.length() == length) {
                        // the extent is exactly the right size... make it pending
                        LayerWriteGroup makepending_wg =
                            tx.mylayer.newWriteGroup(type: LayerWriteGroup.WriteGroupType.DISK_ATOMIC_NOFLUSH);

                        // add a pending entry for this block
                        {
                            RecordKey key = new RecordKey().appendParsedKey(".ROOT/FREELIST/PENDING");
                            key.appendKeyPart(new RecordKeyType_Long(extent.start_addr));
                            makepending_wg.setValue(key, RecordUpdate.WithPayload(extent.pack()));
                        }

                        // remove the freelist entry
                        {
                            RecordKey key = new RecordKey().appendParsedKey(".ROOT/FREELIST/EXTENTS");
                            key.appendKeyPart(new RecordKeyType_Long(extent.end_addr));
                            makepending_wg.setValue(key, RecordUpdate.DeletionTombstone());
                        }

                        makepending_wg.finish();
                        return new NewUnusedSegment(store, extent);
                    } else if (extent.length() > length) {

                        // TODO: carve a piece off the extent and return the pending piece

                    }
                }

                // if we can't find a free segment, grow the heap
                return growHeap(tx, length);

                // TODO: then carve a segment out of the new grown heap

            }
        }
Example 16
        public void setValue(RecordKey key, RecordUpdate update)
        {
            if (this.is_frozen) {
                throw new Exception("snapshot not writable! " + this.frozen_at_snapshotnumber);
            }
            // add our snapshot_number to the end of the keyspace
            key.appendKeyPart(new RecordKeyType_AttributeTimestamp(this.current_snapshot));
            // wrap the update into a sub-update, mostly because tombstones need to be
            // "real" records to us
            var sub_update = RecordUpdate.WithPayload(update.encode());
            next_stage.setValue(key, sub_update);
        }
Example 17
        public void T02b_RecordKeyNesting()
        {
            String[] parts1 = { "aaaa", "bbbb" };
            String[] parts2 = { "xxxx", "yyyy", "zzzz" };

            RecordKey nestedkey = new RecordKey();
            nestedkey.appendKeyParts(parts2);

            RecordKey parentkey = new RecordKey();
            parentkey.appendKeyPart(parts1[0]); // "aaaa"
            parentkey.appendKeyPart(nestedkey.encode());  // (xxxx,yyyy,zzzz)
            parentkey.appendKeyPart(parts1[1]); // "bbbb"

            RecordKey decodedkey = new RecordKey(parentkey.encode());

            Assert.AreEqual(decodedkey.ToString(), parentkey.ToString(), "tostring versions of keys don't match");
            Assert.AreEqual(decodedkey.numParts(), parentkey.numParts(), "nested delimiters are changing number of keyparts");

            Assert.AreEqual(decodedkey, parentkey, "nested key encode/decode error");
        }