public async Task Test_Case_10()
{
    using (var db = await OpenTestPartitionAsync())
    {
        db.SetDefaultLogHandler((log) => Log(log.GetTimingsReport(true)));

        // clear everything and write some values
        await db.WriteAsync(async tr =>
        {
            var subspace = await db.Root.Resolve(tr);
            tr.ClearRange(subspace.Encode("K0000"), subspace.Encode("K9999Z"));
            for (int i = 0; i < 100; i++)
            {
                tr.Set(subspace.Encode("K" + i.ToString("D4")), Value("V" + i.ToString("D4")));
            }
        }, this.Cancellation);

        using (var tr = await db.BeginTransactionAsync(this.Cancellation))
        {
            var subspace = await db.Root.Resolve(tr);

            tr.ClearRange(subspace.Encode("K0010"), subspace.Encode("K0020"));
            tr.ClearRange(subspace.Encode("K0050"), subspace.Encode("K0060"));

            _ = await tr.GetRangeAsync(
                KeySelector.FirstGreaterOrEqual(subspace.Encode("K0000")),
                KeySelector.LastLessOrEqual(subspace.Encode("K9999")),
                new FdbRangeOptions { Mode = FdbStreamingMode.WantAll, Reverse = true }
            );

            // no commit
        }
    }
}
private async Task<Slice> GetPreviousNodeAsync(IFdbTransaction trans, int level, Slice key)
{
    // GetPreviousNodeAsync looks for the previous node on a level, but "doesn't care"
    // about the contents of that node. It therefore uses a non-isolated (snapshot)
    // read and explicitly adds a conflict range that is exclusive of the actual,
    // found previous node. This allows an increment of that node not to trigger
    // a transaction conflict. We also add a conflict key on the found previous
    // key in level 0. This allows detection of erasures.

    var k = this.Subspace.Encode(level, key);
    //Console.WriteLine(k);
    //Console.WriteLine("GetPreviousNode(" + level + ", " + key + ")");
    //Console.WriteLine(KeySelector.LastLessThan(k) + " <= x < " + KeySelector.FirstGreaterOrEqual(k));

    var kv = await trans
        .Snapshot
        .GetRange(
            KeySelector.LastLessThan(k),
            KeySelector.FirstGreaterOrEqual(k)
        )
        .FirstAsync()
        .ConfigureAwait(false);
    //Console.WriteLine("Found " + FdbKey.Dump(kv.Key));

    var prevKey = this.Subspace.DecodeLast<Slice>(kv.Key);
    trans.AddReadConflictRange(kv.Key + FdbKey.MinValue, k);
    trans.AddReadConflictKey(this.Subspace.Encode(0, prevKey));
    return prevKey;
}
public async Task Test_Case_12()
{
    using (var zedb = await OpenTestDatabaseAsync())
    {
        var db = FoundationDB.Filters.Logging.FdbLoggingExtensions.Logged(zedb, (tr) => Log(tr.Log.GetTimingsReport(true)));
        var subspace = db.GlobalSpace;

        using (var tr = db.BeginTransaction(this.Cancellation))
        {
            await tr.GetAsync(subspace.Keys.Encode("KGET"));
            tr.AddReadConflictRange(subspace.Keys.Encode("KRC0"), subspace.Keys.Encode("KRC0"));
            tr.AddWriteConflictRange(subspace.Keys.Encode("KWRITECONFLICT0"), subspace.Keys.Encode("KWRITECONFLICT1"));
            tr.Set(subspace.Keys.Encode("KWRITE"), Slice.Empty);
            await tr.CommitAsync();
        }

        // once more with feelings
        using (var tr = db.BeginTransaction(this.Cancellation))
        {
            tr.SetOption(FdbTransactionOption.ReadYourWritesDisable);
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("KGETKEY")));
        }

        using (var tr = db.BeginTransaction(this.Cancellation))
        {
            tr.AddReadConflictRange(subspace.Keys.Encode("KRC0"), subspace.Keys.Encode("KRC1"));
            tr.Set(subspace.Keys.Encode("KWRITE"), Slice.Empty);
            await tr.CommitAsync();
        }
    }
}
public async Task Test_Case_10()
{
    using (var zedb = await OpenTestDatabaseAsync())
    {
        var db = FoundationDB.Filters.Logging.FdbLoggingExtensions.Logged(zedb, (tr) => Log(tr.Log.GetTimingsReport(true)));
        var subspace = db.GlobalSpace;

        // clear everything and write some values
        await db.WriteAsync((tr) =>
        {
            tr.ClearRange(subspace.Keys.Encode("K0000"), subspace.Keys.Encode("K9999Z"));
            for (int i = 0; i < 100; i++)
            {
                tr.Set(subspace.Keys.Encode("K" + i.ToString("D4")), Slice.FromString("V" + i.ToString("D4")));
            }
        }, this.Cancellation);

        using (var tr = db.BeginTransaction(this.Cancellation))
        {
            tr.ClearRange(subspace.Keys.Encode("K0010"), subspace.Keys.Encode("K0020"));
            tr.ClearRange(subspace.Keys.Encode("K0050"), subspace.Keys.Encode("K0060"));

            var chunk = await tr.GetRangeAsync(
                KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0000")),
                KeySelector.LastLessOrEqual(subspace.Keys.Encode("K9999")),
                new FdbRangeOptions { Mode = FdbStreamingMode.WantAll, Reverse = true }
            );

            // no commit
        }
    }
}
public async Task Test_Case_13()
{
    using (var db = await OpenTestPartitionAsync())
    {
        db.SetDefaultLogHandler((log) => Log(log.GetTimingsReport(true)));

        var location = db.Root;

        // clear everything and write some values
        await db.WriteAsync(async (tr) =>
        {
            var subspace = await location.Resolve(tr);
            tr.ClearRange(subspace.Encode("K0000"), subspace.Encode("K~~~~"));
            tr.Set(subspace.Encode("K000"), Value("BEGIN"));
            for (int i = 0; i < 5; i++)
            {
                tr.Set(subspace.Encode("K" + i + "A"), Value("V111"));
                tr.Set(subspace.Encode("K" + i + "B"), Value("V222"));
                tr.Set(subspace.Encode("K" + i + "C"), Value("V333"));
                tr.Set(subspace.Encode("K" + i + "D"), Value("V444"));
                tr.Set(subspace.Encode("K" + i + "E"), Value("V555"));
                tr.Set(subspace.Encode("K" + i + "F"), Value("V666"));
                tr.Set(subspace.Encode("K" + i + "G"), Value("V777"));
                tr.Set(subspace.Encode("K" + i + "H"), Value("V888"));
            }
            tr.Set(subspace.Encode("K~~~"), Value("END"));
        }, this.Cancellation);

        using (var tr = await db.BeginTransactionAsync(this.Cancellation))
        {
            var subspace = await location.Resolve(tr);

            tr.Set(subspace.Encode("KZZZ"), Value("V999"));

            var r = await tr.GetRangeAsync(
                KeySelector.FirstGreaterOrEqual(subspace.Encode("K0B")),
                KeySelector.FirstGreaterOrEqual(subspace.Encode("K0G"))
            );

            await tr.CommitAsync();
        }
    }
}
public async Task<long?> Rank(IFdbReadOnlyTransaction trans, Slice key)
{
    if (trans == null) throw new ArgumentNullException(nameof(trans));
    if (key.IsNull) throw new ArgumentException("Empty key not allowed in set", nameof(key));

    if (!(await ContainsAsync(trans, key).ConfigureAwait(false)))
    {
        return default(long?);
    }

    long r = 0;
    var rankKey = Slice.Empty;
    for (int level = MAX_LEVELS - 1; level >= 0; level--)
    {
        var lss = this.Subspace.Partition.ByKey(level);
        long lastCount = 0;
        var kcs = await trans.GetRange(
            KeySelector.FirstGreaterOrEqual(lss.Encode(rankKey)),
            KeySelector.FirstGreaterThan(lss.Encode(key))
        ).ToListAsync().ConfigureAwait(false);

        foreach (var kc in kcs)
        {
            rankKey = lss.Decode<Slice>(kc.Key);
            lastCount = DecodeCount(kc.Value);
            r += lastCount;
        }
        r -= lastCount;
        if (rankKey == key)
        {
            break;
        }
    }
    return r;
}
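// Usage sketch (not from the original source): how Rank() might be called through a
// read-only retry loop. The layer type name FdbRankedSet and the caller-provided 'db',
// 'key' and 'ct' are assumptions for illustration; adjust to the actual types in use.
private static Task<long?> GetRankSketch(IFdbDatabase db, FdbRankedSet set, Slice key, CancellationToken ct)
{
    // Rank() only reads from the transaction, so it can run inside db.ReadAsync(...),
    // which retries automatically on transient FoundationDB errors.
    // A null result means the key is not present in the set.
    return db.ReadAsync(tr => set.Rank(tr, key), ct);
}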
public FdbRangeQuery<TId> LookupLessThan([NotNull] IFdbReadOnlyTransaction trans, TValue value, bool orEqual, bool reverse = false)
{
    var prefix = this.Subspace.Keys.EncodePartial(value);
    if (orEqual) prefix = FdbKey.Increment(prefix);

    var space = new KeySelectorPair(
        KeySelector.FirstGreaterOrEqual(this.Subspace.ToRange().Begin),
        KeySelector.FirstGreaterThan(prefix)
    );

    return trans
        .GetRange(space, new FdbRangeOptions { Reverse = reverse })
        .Select((kvp) => this.Subspace.Keys.Decode(kvp.Key).Item2);
}
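// Usage sketch (not from the original source): the query returned by LookupLessThan is
// lazy and can be consumed with ToListAsync(). The index type name FdbIndex<TId, TValue>
// is an assumption for illustration; any open read-only transaction will do.
private static Task<List<TId>> QueryLessThanSketch<TId, TValue>(IFdbReadOnlyTransaction tr, FdbIndex<TId, TValue> index, TValue value)
{
    // Returns the ids of all entities whose indexed value sorts strictly before 'value';
    // pass orEqual: true to also include entities indexed exactly at 'value'.
    return index.LookupLessThan(tr, value, orEqual: false).ToListAsync();
}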
public async Task Test_Case_13()
{
    using (var zedb = await OpenTestDatabaseAsync())
    {
        var db = FoundationDB.Filters.Logging.FdbLoggingExtensions.Logged(zedb, (tr) => Log(tr.Log.GetTimingsReport(true)));
        var subspace = db.GlobalSpace;

        // clear everything and write some values
        await db.WriteAsync((tr) =>
        {
            tr.ClearRange(subspace.Keys.Encode("K0000"), subspace.Keys.Encode("K~~~~"));
            tr.Set(subspace.Keys.Encode("K000"), Slice.FromString("BEGIN"));
            for (int i = 0; i < 5; i++)
            {
                tr.Set(subspace.Keys.Encode("K" + i + "A"), Slice.FromString("V111"));
                tr.Set(subspace.Keys.Encode("K" + i + "B"), Slice.FromString("V222"));
                tr.Set(subspace.Keys.Encode("K" + i + "C"), Slice.FromString("V333"));
                tr.Set(subspace.Keys.Encode("K" + i + "D"), Slice.FromString("V444"));
                tr.Set(subspace.Keys.Encode("K" + i + "E"), Slice.FromString("V555"));
                tr.Set(subspace.Keys.Encode("K" + i + "F"), Slice.FromString("V666"));
                tr.Set(subspace.Keys.Encode("K" + i + "G"), Slice.FromString("V777"));
                tr.Set(subspace.Keys.Encode("K" + i + "H"), Slice.FromString("V888"));
            }
            tr.Set(subspace.Keys.Encode("K~~~"), Slice.FromString("END"));
        }, this.Cancellation);

        using (var tr = db.BeginTransaction(this.Cancellation))
        {
            tr.Set(subspace.Keys.Encode("KZZZ"), Slice.FromString("V999"));

            var r = await tr.GetRangeAsync(
                KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0B")),
                KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0G"))
            );

            await tr.CommitAsync();
        }
    }
}
public async Task Test_Case_14()
{
    using (var db = await OpenTestPartitionAsync())
    {
        db.SetDefaultLogHandler((log) => Log(log.GetTimingsReport(true)));

        var location = db.Root;

        // clear everything and write some values
        await db.WriteAsync(async tr =>
        {
            var subspace = await location.Resolve(tr);
            tr.ClearRange(subspace.Encode("K0000"), subspace.Encode("K~~~~"));
            tr.SetValues(Enumerable.Range(0, 100).Select(i => new KeyValuePair<Slice, Slice>(subspace.Encode("K" + i.ToString("D4")), Value("V" + i.ToString("D4")))));
            tr.Set(subspace.Encode("K~~~"), Value("END"));
        }, this.Cancellation);

        using (var tr = await db.BeginTransactionAsync(this.Cancellation))
        {
            var subspace = await location.Resolve(tr);

            tr.ClearRange(subspace.Encode("K0042"), subspace.Encode("K0069"));

            var r = await tr.GetRangeAsync(
                KeySelector.FirstGreaterOrEqual(subspace.Encode("K0040")),
                KeySelector.FirstGreaterOrEqual(subspace.Encode("K0080")),
                new FdbRangeOptions { Mode = FdbStreamingMode.WantAll }
            );
            // T 1
            // => GETRANGE( (< 'KAAA<00>' +1) .. (< LAST +1)
            Log($"Count={r.Count}, HasMore={r.HasMore}");
            foreach (var kvp in r)
            {
                Log($"{kvp.Key} = {kvp.Value}");
            }
        }
    }
}
public async Task Test_Case_12()
{
    using (var db = await OpenTestPartitionAsync())
    {
        db.SetDefaultLogHandler((log) => Log(log.GetTimingsReport(true)));

        var location = db.Root;

        using (var tr = await db.BeginTransactionAsync(this.Cancellation))
        {
            var subspace = await location.Resolve(tr);

            await tr.GetAsync(subspace.Encode("KGET"));
            tr.AddReadConflictRange(subspace.Encode("KRC0"), subspace.Encode("KRC0"));
            tr.AddWriteConflictRange(subspace.Encode("KWRITECONFLICT0"), subspace.Encode("KWRITECONFLICT1"));
            tr.Set(subspace.Encode("KWRITE"), Slice.Empty);
            await tr.CommitAsync();
        }

        // once more with feelings
        using (var tr = await db.BeginTransactionAsync(this.Cancellation))
        {
            var subspace = await location.Resolve(tr);

            tr.SetOption(FdbTransactionOption.ReadYourWritesDisable);
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("KGETKEY")));
        }

        using (var tr = await db.BeginTransactionAsync(this.Cancellation))
        {
            var subspace = await location.Resolve(tr);

            tr.AddReadConflictRange(subspace.Encode("KRC0"), subspace.Encode("KRC1"));
            tr.Set(subspace.Encode("KWRITE"), Slice.Empty);
            await tr.CommitAsync();
        }
    }
}
public async Task Test_Case_14()
{
    using (var zedb = await OpenTestDatabaseAsync())
    {
        var db = FoundationDB.Filters.Logging.FdbLoggingExtensions.Logged(zedb, (tr) => Log(tr.Log.GetTimingsReport(true)));
        var subspace = db.GlobalSpace;

        // clear everything and write some values
        await db.WriteAsync((tr) =>
        {
            tr.ClearRange(subspace.Keys.Encode("K0000"), subspace.Keys.Encode("K~~~~"));
            tr.SetValues(Enumerable.Range(0, 100).Select(i => new KeyValuePair<Slice, Slice>(subspace.Keys.Encode("K" + i.ToString("D4")), Slice.FromString("V" + i.ToString("D4")))));
            tr.Set(subspace.Keys.Encode("K~~~"), Slice.FromString("END"));
        }, this.Cancellation);

        using (var tr = db.BeginTransaction(this.Cancellation))
        {
            tr.ClearRange(subspace.Keys.Encode("K0042"), subspace.Keys.Encode("K0069"));

            var r = await tr.GetRangeAsync(
                KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0040")),
                KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0080")),
                new FdbRangeOptions { Mode = FdbStreamingMode.WantAll }
            );
            // T 1
            // => GETRANGE( (< 'KAAA<00>' +1) .. (< LAST +1)
            Log($"Count={r.Count}, HasMore={r.HasMore}");
            foreach (var kvp in r)
            {
                Log($"{kvp.Key} = {kvp.Value}");
            }
        }
    }
}
public static async Task Sampling(string[] path, IVarTuple extras, IFdbDatabase db, TextWriter log, CancellationToken ct)
{
    double ratio = 0.1d;
    bool auto = true;
    if (extras.Count > 0)
    {
        double x = extras.Get<double>(0);
        if (x > 0 && x <= 1) ratio = x;
        auto = false;
    }

    var folder = await TryOpenCurrentDirectoryAsync(path, db, ct);
    KeyRange span;
    if (folder is FdbDirectorySubspace)
    {
        span = KeyRange.StartsWith((folder as FdbDirectorySubspace).Copy().GetPrefix());
        log.WriteLine($"Reading list of shards for /{String.Join("/", path)} under {FdbKey.Dump(span.Begin)} ...");
    }
    else
    {
        log.WriteLine("Reading list of shards for the whole cluster ...");
        span = KeyRange.All;
    }

    // dump keyServers
    var ranges = await Fdb.System.GetChunksAsync(db, span, ct);
    log.WriteLine($"> Found {ranges.Count:N0} shard(s)");

    // take a sample
    var samples = new List<KeyRange>();

    if (ranges.Count <= 32)
    {
        // small enough to scan it all
        samples.AddRange(ranges);
        log.WriteLine($"Sampling all {samples.Count:N0} shards ...");
    }
    else
    {
        // need to take a random subset
        var rnd = new Random();
        int sz = Math.Max((int)Math.Ceiling(ratio * ranges.Count), 1);
        if (auto)
        {
            if (sz > 100) sz = 100; //SAFETY
            if (sz < 32) sz = Math.Max(sz, Math.Min(32, ranges.Count));
        }

        var population = new List<KeyRange>(ranges);
        for (int i = 0; i < sz; i++)
        {
            int p = rnd.Next(population.Count);
            samples.Add(population[p]);
            population.RemoveAt(p);
        }
        log.WriteLine($"Sampling {samples.Count:N0} out of {ranges.Count:N0} shards ({(100.0 * samples.Count / ranges.Count):N1}%) ...");
    }

    log.WriteLine();
    const string FORMAT_STRING = "{0,9} ║{1,10}{6,6} {2,-29} ║{3,10}{7,7} {4,-37} ║{5,10}";
    const string SCALE_KEY = "....--------========########M";
    const string SCALE_VAL = "....--------========########@@@@@@@@M";
    log.WriteLine(FORMAT_STRING, "Count", "Keys", SCALE_KEY, "Values", SCALE_VAL, "Total", "med.", "med.");

    var rangeOptions = new FdbRangeOptions { Mode = FdbStreamingMode.WantAll };

    samples = samples.OrderBy(x => x.Begin).ToList();

    long globalSize = 0;
    long globalCount = 0;
    int workers = 8; // Math.Max(4, Environment.ProcessorCount);

    var sw = Stopwatch.StartNew();
    var tasks = new List<Task>();
    int n = samples.Count;
    while (samples.Count > 0)
    {
        while (tasks.Count < workers && samples.Count > 0)
        {
            var range = samples[0];
            samples.RemoveAt(0);
            tasks.Add(Task.Run(async () =>
            {
                var kk = new RobustHistogram(RobustHistogram.TimeScale.Ticks);
                var vv = new RobustHistogram(RobustHistogram.TimeScale.Ticks);

                #region Method 1: get_range everything...

                using (var tr = db.BeginTransaction(ct))
                {
                    long keySize = 0;
                    long valueSize = 0;
                    long count = 0;

                    int iter = 0;
                    var beginSelector = KeySelector.FirstGreaterOrEqual(range.Begin);
                    var endSelector = KeySelector.FirstGreaterOrEqual(range.End);
                    while (true)
                    {
                        FdbRangeChunk data = default(FdbRangeChunk);
                        FdbException error = null;
                        try
                        {
                            data = await tr.Snapshot.GetRangeAsync(
                                beginSelector,
                                endSelector,
                                rangeOptions,
                                iter
                            ).ConfigureAwait(false);
                        }
                        catch (FdbException e)
                        {
                            error = e;
                        }

                        if (error != null)
                        {
                            await tr.OnErrorAsync(error.Code).ConfigureAwait(false);
                            continue;
                        }

                        if (data.Count == 0) break;

                        count += data.Count;
                        foreach (var kvp in data)
                        {
                            keySize += kvp.Key.Count;
                            valueSize += kvp.Value.Count;

                            kk.Add(TimeSpan.FromTicks(kvp.Key.Count));
                            vv.Add(TimeSpan.FromTicks(kvp.Value.Count));
                        }

                        if (!data.HasMore) break;

                        beginSelector = KeySelector.FirstGreaterThan(data.Last);
                        ++iter;
                    }

                    long totalSize = keySize + valueSize;
                    Interlocked.Add(ref globalSize, totalSize);
                    Interlocked.Add(ref globalCount, count);

                    lock (log)
                    {
                        log.WriteLine(FORMAT_STRING, count.ToString("N0"), FormatSize(keySize), kk.GetDistribution(begin: 1, end: 12000, fold: 2), FormatSize(valueSize), vv.GetDistribution(begin: 1, end: 120000, fold: 2), FormatSize(totalSize), FormatSize((int)Math.Ceiling(kk.Median)), FormatSize((int)Math.Ceiling(vv.Median)));
                    }
                }

                #endregion

                #region Method 2: estimate the count using key selectors...

                //long counter = await Fdb.System.EstimateCountAsync(db, range, ct);
                //Console.WriteLine("COUNT = " + counter.ToString("N0"));

                #endregion
            }, ct));
        }

        var done = await Task.WhenAny(tasks);
        tasks.Remove(done);
    }

    await Task.WhenAll(tasks);
    sw.Stop();

    log.WriteLine();
    if (n != ranges.Count)
    {
        log.WriteLine($"Sampled {FormatSize(globalSize)} ({globalSize:N0} bytes) and {globalCount:N0} keys in {sw.Elapsed.TotalSeconds:N1} sec");
        log.WriteLine($"> Estimated total size is {FormatSize(globalSize * ranges.Count / n)}");
    }
    else
    {
        log.WriteLine($"Found {FormatSize(globalSize)} ({globalSize:N0} bytes) and {globalCount:N0} keys in {sw.Elapsed.TotalSeconds:N1} sec");
        // compare to the whole cluster
        ranges = await Fdb.System.GetChunksAsync(db, FdbKey.MinValue, FdbKey.MaxValue, ct);
        log.WriteLine($"> This directory contains ~{(100.0 * n / ranges.Count):N2}% of all data");
    }
    log.WriteLine();
}
public async Task Test_Case_11()
{
    using (var zedb = await OpenTestDatabaseAsync())
    {
        var db = FoundationDB.Filters.Logging.FdbLoggingExtensions.Logged(zedb, (tr) => Log(tr.Log.GetTimingsReport(true)));
        var subspace = db.GlobalSpace;

        // clear everything and write some values
        await db.WriteAsync((tr) =>
        {
            tr.ClearRange(subspace.Keys.Encode("K0000"), subspace.Keys.Encode("K9999Z"));
            for (int i = 0; i < 100; i++)
            {
                tr.Set(subspace.Keys.Encode("K" + i.ToString("D4")), Slice.FromString("V" + i.ToString("D4")));
            }
        }, this.Cancellation);

        using (var tr = db.BeginTransaction(this.Cancellation))
        {
            tr.ClearRange(subspace.Keys.Encode("K0010"), subspace.Keys.Encode("K0020"));
            tr.ClearRange(subspace.Keys.Encode("K0050"), subspace.Keys.Encode("K0060"));
            tr.Set(subspace.Keys.Encode("K0021"), Slice.Empty);
            tr.Set(subspace.Keys.Encode("K0042"), Slice.Empty);

            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0005")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0010")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0015")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0022")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0049")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0050")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0055")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0061")));

            // no commit
        }

        using (var tr = db.BeginTransaction(this.Cancellation))
        {
            //tr.SetOption(FdbTransactionOption.ReadYourWritesDisable);
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0000"))); // equal=false, offset=1
            await tr.GetKeyAsync(KeySelector.FirstGreaterThan(subspace.Keys.Encode("K0011")));    // equal=true, offset=1
            await tr.GetKeyAsync(KeySelector.LastLessOrEqual(subspace.Keys.Encode("K0022")));     // equal=true, offset=0
            await tr.GetKeyAsync(KeySelector.LastLessThan(subspace.Keys.Encode("K0033")));        // equal=false, offset=0
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Keys.Encode("K0040")) + 1000); // equal=false, offset=7 ?
            await tr.GetKeyAsync(KeySelector.LastLessThan(subspace.Keys.Encode("K0050")) + 1000);        // equal=false, offset=6 ?
        }
    }
}
/// <summary>
/// Read from the blob, starting at <paramref name="offset"/>, retrieving up to <paramref name="n"/> bytes (fewer than n bytes are returned when the end of the blob is reached).
/// </summary>
public async Task<Slice> ReadAsync([NotNull] IFdbReadOnlyTransaction trans, long offset, int n)
{
    if (trans == null) throw new ArgumentNullException(nameof(trans));
    if (offset < 0) throw new ArgumentOutOfRangeException(nameof(offset), "Offset cannot be less than zero");

    long? size = await GetSizeAsync(trans).ConfigureAwait(false);
    if (size == null) return Slice.Nil; // not found

    if (offset >= size.Value) return Slice.Empty;

    // read all chunks matching the segment we need, and copy them in our buffer
    var buffer = new byte[Math.Min(n, size.Value - offset)];

    await trans
        .GetRange(
            KeySelector.LastLessOrEqual(DataKey(offset)),
            KeySelector.FirstGreaterOrEqual(DataKey(offset + n))
        )
        .ForEachAsync((chunk) =>
        {
            // get offset of this chunk
            long chunkOffset = DataKeyOffset(chunk.Key);
            Slice chunkData = chunk.Value;

            checked
            {
                // intersect chunk bounds with output
                int delta = (int)(chunkOffset - offset);
                int start = delta;
                int end = delta + chunkData.Count;
                if (start < 0) start = 0;
                if (end > n) end = n;

                // compute the relative offsets in the chunk
                int rStart = start - delta;
                int rEnd = end - delta;

                var intersect = chunkData[rStart, rEnd];
                if (intersect.IsPresent)
                {
                    // copy the data that fits
                    intersect.CopyTo(buffer, start);
                }
            }
        })
        .ConfigureAwait(false);

    return new Slice(buffer, 0, buffer.Length);
}
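// Usage sketch (not from the original source): reading a segment of the blob through a
// read-only retry loop. The blob layer type name (FdbBlob here) and the caller-provided
// 'db' / 'ct' are assumptions for illustration.
private static Task<Slice> ReadBlobSegmentSketch(IFdbDatabase db, FdbBlob blob, long offset, int count, CancellationToken ct)
{
    // Slice.Nil means the blob does not exist; a result shorter than 'count' means the
    // end of the blob was reached before 'count' bytes could be read.
    return db.ReadAsync(tr => blob.ReadAsync(tr, offset, count), ct);
}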
public async Task Run(IFdbDatabase db, TextWriter log, CancellationToken ct)
{
    // estimate the number of machines...
    Console.WriteLine("# Detecting cluster topology...");
    var servers = await db.QueryAsync(tr => tr
        .WithReadAccessToSystemKeys()
        .GetRange(KeyRange.StartsWith(Fdb.System.ServerList))
        .Select(kvp => new
        {
            Node = kvp.Value.Substring(8, 16).ToHexaString(),
            Machine = kvp.Value.Substring(24, 16).ToHexaString(),
            DataCenter = kvp.Value.Substring(40, 16).ToHexaString()
        }),
        ct
    );

    var numNodes = servers.Select(s => s.Node).Distinct().Count();
    var numMachines = servers.Select(s => s.Machine).Distinct().Count();
    var numDCs = servers.Select(s => s.DataCenter).Distinct().Count();

    Console.WriteLine("# > Found " + numNodes + " process(es) on " + numMachines + " machine(s) in " + numDCs + " datacenter(s)");
    Console.WriteLine("# Reading list of shards...");

    // dump keyServers
    var ranges = await Fdb.System.GetChunksAsync(db, FdbKey.MinValue, FdbKey.MaxValue, ct);
    Console.WriteLine("# > Found " + ranges.Count + " shards:");

    // take a sample
    var rnd = new Random(1234);
    int sz = Math.Max((int)Math.Ceiling(this.Ratio * ranges.Count), 1);
    if (sz > 500) sz = 500; //SAFETY
    if (sz < 50) sz = Math.Max(sz, Math.Min(50, ranges.Count));

    var samples = new List<KeyRange>();
    for (int i = 0; i < sz; i++)
    {
        int p = rnd.Next(ranges.Count);
        samples.Add(ranges[p]);
        ranges.RemoveAt(p);
    }

    Console.WriteLine("# Sampling " + sz + " out of " + ranges.Count + " shards (" + (100.0 * sz / ranges.Count).ToString("N1") + "%) ...");
    Console.WriteLine("{0,9}{1,10}{2,10}{3,10} : K+V size distribution", "Count", "Keys", "Values", "Total");

    var rangeOptions = new FdbRangeOptions { Mode = FdbStreamingMode.WantAll };

    samples = samples.OrderBy(x => x.Begin).ToList();

    long total = 0;
    int workers = Math.Min(numMachines, 8);

    var sw = Stopwatch.StartNew();
    var tasks = new List<Task>();
    while (samples.Count > 0)
    {
        while (tasks.Count < workers && samples.Count > 0)
        {
            var range = samples[0];
            samples.RemoveAt(0);
            tasks.Add(Task.Run(async () =>
            {
                var hh = new RobustHistogram(RobustHistogram.TimeScale.Ticks);

                #region Method 1: get_range everything...

                using (var tr = await db.BeginTransactionAsync(ct))
                {
                    long keySize = 0;
                    long valueSize = 0;
                    long count = 0;

                    int iter = 0;
                    var beginSelector = KeySelector.FirstGreaterOrEqual(range.Begin);
                    var endSelector = KeySelector.FirstGreaterOrEqual(range.End);
                    while (true)
                    {
                        FdbRangeChunk data = default(FdbRangeChunk);
                        FdbException error = null;
                        try
                        {
                            data = await tr.Snapshot.GetRangeAsync(
                                beginSelector,
                                endSelector,
                                rangeOptions,
                                iter
                            ).ConfigureAwait(false);
                        }
                        catch (FdbException e)
                        {
                            error = e;
                        }

                        if (error != null)
                        {
                            await tr.OnErrorAsync(error.Code).ConfigureAwait(false);
                            continue;
                        }

                        if (data.Count == 0) break;

                        count += data.Count;
                        foreach (var kvp in data)
                        {
                            keySize += kvp.Key.Count;
                            valueSize += kvp.Value.Count;

                            hh.Add(TimeSpan.FromTicks(kvp.Key.Count + kvp.Value.Count));
                        }

                        if (!data.HasMore) break;

                        beginSelector = KeySelector.FirstGreaterThan(data.Last);
                        ++iter;
                    }

                    long totalSize = keySize + valueSize;
                    Interlocked.Add(ref total, totalSize);

                    Console.WriteLine("{0,9}{1,10}{2,10}{3,10} : {4}", count.ToString("N0"), FormatSize(keySize), FormatSize(valueSize), FormatSize(totalSize), hh.GetDistribution(begin: 1, end: 10000, fold: 2));
                }

                #endregion

                #region Method 2: estimate the count using key selectors...

                //long counter = await Fdb.System.EstimateCountAsync(db, range, ct);
                //Console.WriteLine("COUNT = " + counter.ToString("N0"));

                #endregion
            }, ct));
        }

        var done = await Task.WhenAny(tasks);
        tasks.Remove(done);
    }

    await Task.WhenAll(tasks);
    sw.Stop();

    Console.WriteLine("> Sampled " + FormatSize(total) + " (" + total.ToString("N0") + " bytes) in " + sw.Elapsed.TotalSeconds.ToString("N1") + " sec");
    Console.WriteLine("> Estimated total size is " + FormatSize(total * ranges.Count / sz));
}
public async Task Test_Case_11()
{
    using (var db = await OpenTestPartitionAsync())
    {
        db.SetDefaultLogHandler((log) => Log(log.GetTimingsReport(true)));

        var location = db.Root;

        // clear everything and write some values
        await db.WriteAsync(async tr =>
        {
            var subspace = await location.Resolve(tr);
            tr.ClearRange(subspace.Encode("K0000"), subspace.Encode("K9999Z"));
            for (int i = 0; i < 100; i++)
            {
                tr.Set(subspace.Encode("K" + i.ToString("D4")), Value("V" + i.ToString("D4")));
            }
        }, this.Cancellation);

        using (var tr = await db.BeginTransactionAsync(this.Cancellation))
        {
            var subspace = await location.Resolve(tr);

            tr.ClearRange(subspace.Encode("K0010"), subspace.Encode("K0020"));
            tr.ClearRange(subspace.Encode("K0050"), subspace.Encode("K0060"));
            tr.Set(subspace.Encode("K0021"), Slice.Empty);
            tr.Set(subspace.Encode("K0042"), Slice.Empty);

            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0005")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0010")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0015")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0022")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0049")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0050")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0055")));
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0061")));

            // no commit
        }

        using (var tr = await db.BeginTransactionAsync(this.Cancellation))
        {
            var subspace = await location.Resolve(tr);

            //tr.SetOption(FdbTransactionOption.ReadYourWritesDisable);
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0000"))); // equal=false, offset=1
            await tr.GetKeyAsync(KeySelector.FirstGreaterThan(subspace.Encode("K0011")));    // equal=true, offset=1
            await tr.GetKeyAsync(KeySelector.LastLessOrEqual(subspace.Encode("K0022")));     // equal=true, offset=0
            await tr.GetKeyAsync(KeySelector.LastLessThan(subspace.Encode("K0033")));        // equal=false, offset=0
            await tr.GetKeyAsync(KeySelector.FirstGreaterOrEqual(subspace.Encode("K0040")) + 1000); // equal=false, offset=7 ?
            await tr.GetKeyAsync(KeySelector.LastLessThan(subspace.Encode("K0050")) + 1000);        // equal=false, offset=6 ?
        }
    }
}