// Returns the first record at or after 'keytest'. When equal_ok is false a
// record whose key compares equal to keytest is skipped, so the result is
// strictly after keytest. Throws KeyNotFoundException when the forward scan
// yields nothing acceptable.
public KeyValuePair<RecordKey, RecordData> FindNext(IComparable<RecordKey> keytest, bool equal_ok) {
    var search_range = new ScanRange<RecordKey>(keytest, new ScanRange<RecordKey>.maxKey(), null);
    foreach (var candidate in this.scanForward(search_range)) {
        bool key_matches_exactly = keytest.CompareTo(candidate.Key) == 0;
        if (key_matches_exactly && !equal_ok) {
            // exact match not wanted; keep scanning for a strictly-greater key
            continue;
        }
        return candidate;
    }
    throw new KeyNotFoundException("SubSetStage.FindNext: no record found after: " + keytest + " equal_ok:" + equal_ok);
}
// Runs an S3 Select query against the given object and returns the event
// stream payload of the response.
//
// Parameters:
//   bucketName / key        - location of the object to query (required).
//   selectQuery             - SQL expression to run (required).
//   inputSerialization      - format of the stored object (required).
//   outputSerialization     - format of the query results (required).
//   scanRange               - optional byte range of the object to scan.
//
// NOTE(review): null, empty, and whitespace-only strings all raise
// ArgumentNullException here. Callers may already catch that type, so the
// exception type is preserved even though ArgumentException would be more
// conventional for non-null whitespace input.
private ISelectObjectContentEventStream GetSelectObjectContentEventStream(
    string bucketName, string key, string selectQuery,
    InputSerialization inputSerialization, OutputSerialization outputSerialization,
    ScanRange scanRange = null)
{
    // nameof keeps the reported parameter names in sync under refactoring;
    // the produced strings are identical to the old literals.
    if (string.IsNullOrWhiteSpace(bucketName)) {
        throw new ArgumentNullException(nameof(bucketName));
    }
    if (string.IsNullOrWhiteSpace(key)) {
        throw new ArgumentNullException(nameof(key));
    }
    if (string.IsNullOrWhiteSpace(selectQuery)) {
        throw new ArgumentNullException(nameof(selectQuery));
    }
    if (inputSerialization == null) {
        throw new ArgumentNullException(nameof(inputSerialization));
    }
    if (outputSerialization == null) {
        throw new ArgumentNullException(nameof(outputSerialization));
    }

    SelectObjectContentRequest selectObjectContentRequest = new SelectObjectContentRequest()
    {
        Bucket = bucketName,
        Key = key,
        ExpressionType = ExpressionType.SQL,
        Expression = selectQuery,
        InputSerialization = inputSerialization,
        OutputSerialization = outputSerialization
    };
    if (scanRange != null) {
        selectObjectContentRequest.ScanRange = scanRange;
    }

    // Only the streaming payload of the response is surfaced to the caller.
    return Client.SelectObjectContent(selectObjectContentRequest).Payload;
}
// Packs every log row in the range _logs/<guid>/<start> .. _logs/<guid>/<end>
// (end key made inclusive via AfterPrefix) into a single basic-encoded
// segment block and returns the raw block bytes.
private byte[] fetchLogEntries_block(string log_server_guid, string log_start_key, string log_end_key) {
    var range_start = new RecordKey()
        .appendKeyPart("_logs")
        .appendKeyPart(log_server_guid)
        .appendKeyPart(log_start_key);
    var range_end = RecordKey.AfterPrefix(new RecordKey()
        .appendKeyPart("_logs")
        .appendKeyPart(log_server_guid)
        .appendKeyPart(log_end_key));
    var log_range = new ScanRange<RecordKey>(range_start, range_end, null);

    // TODO: this seems like a really inefficient way to write out a key
    MemoryStream block_buffer = new MemoryStream();
    ISegmentBlockEncoder encoder = new SegmentBlockBasicEncoder();
    encoder.setStream(block_buffer);
    foreach (var logrow in next_stage.scanForward(log_range)) {
        encoder.add(logrow.Key, RecordUpdate.WithPayload(logrow.Value.data));
    }
    encoder.flush();

    // IF there are no log entries... BLOCK!
    // NOTE(review): the blocking described above does not appear to be
    // implemented here -- an empty range just yields an empty block.
    return block_buffer.ToArray();
}
// Yields the log status of this server followed by the status of every
// other server listed under _config/seeds (our own seed row is skipped so
// we are reported exactly once).
internal IEnumerable<LogStatus> getStatusForLogs() {
    var seeds_prefix = new RecordKey()
        .appendParsedKey("_config/seeds");
    var seed_range = new ScanRange<RecordKey>(seeds_prefix, RecordKey.AfterPrefix(seeds_prefix), null);

    // be sure to include myself
    yield return getStatusForLog(ctx.server_guid);

    foreach (var seed_row in next_stage.scanForward(seed_range)) {
        var key_parts = seed_row.Key.key_parts;
        string server_guid = ((RecordKeyType_String)key_parts[key_parts.Count - 1]).GetString();
        if (!server_guid.Equals(ctx.server_guid)) {   // skip ourselves
            yield return getStatusForLog(server_guid);
        }
    }
}
// Streams log entries for the given server's log, starting at
// log_start_key, in key order.
//
// Contract (as implemented below):
//  - The first row of the scan MUST have a logstamp equal to
//    log_start_key, otherwise there is a gap in the log and a
//    LogException is thrown. That matching first row is consumed, not
//    yielded -- callers receive only the entries AFTER log_start_key.
//  - limit == -1 means unlimited.
//  - When block==true and no rows follow the start key, the iterator
//    waits on this.logWaiters until another thread pulses it, then
//    rescans from the top (goto retry_log_fetch).
//
// NOTE(review): log_start_key is a RecordKeyType but is compared to the
// plain string "" -- unless RecordKeyType.Equals accepts strings this
// test is always false and the key part is always appended; confirm.
// NOTE(review): the limit check runs AFTER the yield and uses
// 'count > limit', so up to limit+1 rows can be yielded -- looks like an
// off-by-one; confirm against callers before changing.
internal IEnumerable<KeyValuePair<RecordKey, RecordData>> fetchLogEntries(
    string log_server_guid, RecordKeyType log_start_key,
    int limit = -1, bool block = false) {
    // scan starts at _logs/<guid>[/<start>] ...
    var rk_start = new RecordKey()
        .appendKeyPart("_logs")
        .appendKeyPart(log_server_guid);
    if (!log_start_key.Equals("")) {
        rk_start.appendKeyPart(log_start_key);
    }
    // ... and covers everything under the _logs/<guid> prefix.
    var rk_end = new RecordKey()
        .appendKeyPart("_logs")
        .appendKeyPart(log_server_guid);
    var scanrange = new ScanRange<RecordKey>(rk_start, RecordKey.AfterPrefix(rk_end), null);

    Console.WriteLine(" fetchLogEntries (block:{3}) for ({0}): start {1} end {2}",
        log_server_guid, rk_start, rk_end, block);

    bool matched_first = false;
    int count = 0;

retry_log_fetch:
    foreach (var logrow in next_stage.scanForward(scanrange)) {
        if (!matched_first) {
            // the first logrow needs to match the log_start_key, or there was a gap in the log!!
            var logstamp = logrow.Key.key_parts[2];
            if (logstamp.CompareTo(log_start_key) != 0) {
                throw new LogException(
                    String.Format("log start gap! guid:{0} log_start_key:{1} logstamp:{2}",
                        log_server_guid,log_start_key,logstamp));
            }
            matched_first = true;
            continue;   // the start-key row itself is consumed, not yielded
        }
        yield return logrow;
        count++;
        // if we're limiting the number of return rows...
        if (limit != -1) {
            if (count > limit) {
                yield break;
            }
        }
    }
    if (!matched_first) {
        throw new LogException("no log entries!");
    }

    // if we only matched one log row, then it should be the matching first row.
    if ((count == 0) && block) {
        // Park until another thread pulses logWaiters (new log activity),
        // then rescan the same range from the top.
        Console.WriteLine("++++++++ block on log tail");
        lock (this.logWaiters) {
            Monitor.Wait(this.logWaiters);
        }
        Console.WriteLine("++++++++ wakeup from log tail");
        goto retry_log_fetch;
    }
}
// HACK: deliberately punch a gap into every replica's log by deleting all
// entries strictly before that log's commit head, forcing peers into a
// full rebuild instead of incremental catch-up. The head entry itself is
// always preserved.
public void truncateLogs_Hack() {
    Console.WriteLine("*** ReplHandler({0}): truncateLogs_Hack!!", this.ctx.server_guid);

    foreach (var log_status in this.getStatusForLogs()) {
        var stale_entry_range = new ScanRange<RecordKey>(
            new RecordKey().appendKeyPart("_logs").appendKeyPart(log_status.server_guid),
            new RecordKey().appendKeyPart("_logs").appendKeyPart(log_status.server_guid).appendKeyPart(log_status.log_commit_head),
            null);

        foreach (var row in this.next_stage.scanForward(stale_entry_range)) {
            // make sure we stop before we delete the last entry
            LogEntry entry = _decodeLogEntry(row.Key, row.Value);
            if (entry.logstamp.Equals(log_status.log_commit_head)) {
                break;   // we reached the head... keep it
            }
            this.next_stage.setValue(row.Key, RecordUpdate.DeletionTombstone());
            Console.WriteLine(" truncateLogs({0}): deleting {1}", ctx.server_guid, row);
        }
    }
}
// Verifies that forward and backward scans over a two-key range visit the
// keys in the expected order, and that FindNext/FindPrev step past an
// exact-match key when equal_ok is false.
public void T002_ScanDirections() {
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\3");

    var rk_a = new RecordKey().appendParsedKey(".a");
    var rk_b = new RecordKey().appendParsedKey(".b");
    string[] keys = { ".a", ".b" };
    foreach (var key in keys) {
        db.setValueParsed(key, "valueof:" + key);
    }

    // FindNext/FindPrev with equal_ok=false must skip the exact match.
    {
        var rec = db.FindNext(rk_a, false);
        Assert.AreEqual(rk_b, rec.Key, "simple FindNext");
    }
    {
        var rec = db.FindPrev(rk_b, false);
        Assert.AreEqual(rk_a, rec.Key, "simple FindPrev");
    }

    var scan_range = new ScanRange<RecordKey>(rk_a, rk_b, null);

    // Forward scan: rows must appear as keys[0], keys[1], ...
    int pos = 0;
    foreach (var row in db.scanForward(scan_range)) {
        Console.WriteLine("forward scan: " + row);
        if (pos == keys.Length) {
            Assert.Fail("too many keys returned in scan");
        }
        Assert.AreEqual(new RecordKeyType_String(keys[pos]), row.Key.key_parts[0], "forward scan mistake");
        pos++;
    }
    if (pos != keys.Length) {
        Assert.Fail("not enough keys returned in scan");
    }

    // Backward scan: rows must appear as keys[last], ..., keys[0].
    pos = keys.Length;
    foreach (var row in db.scanBackward(scan_range)) {
        Console.WriteLine("backward scan: " + row);
        if (pos == 0) {
            Assert.Fail("too many keys returned in scan backward");
        }
        pos--;
        Assert.AreEqual(new RecordKeyType_String(keys[pos]), row.Key.key_parts[0], "backward scan mistake");
    }
    if (pos != 0) {
        Assert.Fail("not enough keys returned in scan");
    }
}
// Writes a single record, then checks that FindNext and a prefix scan see
// exactly that one record both before and after the working segment flush.
public void T001_WorkingSegmentReadWrite() {
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\3");

    var rk = new RecordKey().appendParsedKey(".a");
    db.setValueParsed(".a", "1");

    KeyValuePair<RecordKey, RecordData> record;

    // Point lookup before flush.
    try {
        record = db.FindNext(rk, true);
        Assert.AreEqual(rk, record.Key, "fetched key does not match");
    } catch (KeyNotFoundException) {
        Assert.Fail("couldn't find 'a' record");
    }

    // Prefix scan before flush should see exactly one row.
    var scan_range = new ScanRange<RecordKey>(rk, RecordKey.AfterPrefix(rk), null);
    int found_recs = 0;
    foreach (var row in db.scanForward(scan_range)) {
        found_recs++;
    }
    Assert.AreEqual(1, found_recs, "found the wrong number of records in working segment scan!");

    db.flushWorkingSegment();

    // Point lookup after flush.
    try {
        record = db.FindNext(rk, true);
        Assert.AreEqual(rk, record.Key, "fetched key does not match (after flush)");
    } catch (KeyNotFoundException) {
        Assert.Fail("couldn't find 'a' record");
    }

    // Prefix scan after flush should still see exactly one row.
    found_recs = 0;
    foreach (var row in db.scanForward(new ScanRange<RecordKey>(rk, RecordKey.AfterPrefix(rk), null))) {
        found_recs++;
    }
    Assert.AreEqual(1, found_recs, "found the wrong number of records after flush !");
}
// Writes one record, verifies visibility, then swaps in a second working
// segment (without flushing) and checks that point lookups and prefix scans
// merge results across both working segments.
public void T001_MultiWorkingSegmentReadWrite() {
    LayerManager db = new LayerManager(InitMode.NEW_REGION, "c:\\BENDtst\\3");

    var rk = new RecordKey().appendParsedKey(".data/a");
    db.setValueParsed(".data/a", "1");

    KeyValuePair<RecordKey, RecordData> record;

    // Lookup in the first working segment.
    try {
        record = db.FindNext(rk, true);
        Assert.AreEqual(rk, record.Key, "fetched key does not match");
    } catch (KeyNotFoundException) {
        Assert.Fail("couldn't find 'a' record");
    }

    var scan_range = new ScanRange<RecordKey>(rk, RecordKey.AfterPrefix(rk), null);
    int found_recs = 0;
    foreach (var row in db.scanForward(scan_range)) {
        found_recs++;
    }
    Assert.AreEqual(1, found_recs, "found the wrong number of records in working segment scan!");

    // Start a second working segment and write into it.
    db.DEBUG_addNewWorkingSegmentWithoutFlush();
    db.setValueParsed(".data/b", "2");

    Console.WriteLine("");
    Console.WriteLine("--- contents --");
    db.debugDump();
    Console.WriteLine("");

    // ------------------------------

    // The new record must be visible through the merged view.
    try {
        var rkb = new RecordKey().appendParsedKey(".data/b");
        record = db.FindNext(rkb, true);
        Assert.AreEqual(rkb, record.Key, "fetched key does not match (after flush)");
    } catch (KeyNotFoundException) {
        Assert.Fail("couldn't find 'b' record");
    }

    // A prefix scan over .data must see rows from both working segments.
    found_recs = 0;
    var rk_prefix = new RecordKey().appendParsedKey(".data");
    foreach (var row in db.scanForward(new ScanRange<RecordKey>(rk_prefix, RecordKey.AfterPrefix(rk_prefix), null))) {
        found_recs++;
    }
    Assert.AreEqual(2, found_recs, "found the wrong number of records after working segment addition !");
}
// Scans this subset backward. The caller's range is wrapped inside the
// subset's keyspace (subset_name prepended to both bounds); each returned
// record has the subset prefix stripped so callers see their original keys.
// Enumeration ends as soon as a row outside this subset appears.
public IEnumerable<KeyValuePair<RecordKey, RecordData>> scanBackward(IScanner<RecordKey> scanner) {
    var low_bound = new RecordKeyComparator().appendKeyPart(this.subset_name).appendKeyPart(scanner.genLowestKeyTest());
    var high_bound = new RecordKeyComparator().appendKeyPart(this.subset_name).appendKeyPart(scanner.genHighestKeyTest());
    var wrapped_range = new ScanRange<RecordKey>(low_bound, high_bound, null);

    foreach (var row in next_stage.scanBackward(wrapped_range)) {
        if (this.subset_name.CompareTo(row.Key.key_parts[0]) != 0) {
            // left our subset's keyspace -- no more records
            yield break;
        }
        var inner_key = (RecordKeyType_RecordKey)row.Key.key_parts[1];
        yield return new KeyValuePair<RecordKey, RecordData>(inner_key.GetRecordKey(), row.Value);
    }
}