public virtual string ResolveBegin(Slice key)
{
	return FdbKey.PrettyPrint(key, FdbKey.PrettyPrintMode.Begin);
}

public virtual string ResolveEnd(Slice key)
{
	return FdbKey.PrettyPrint(key, FdbKey.PrettyPrintMode.End);
}

protected virtual string ResolveKey(Slice key, Func<Slice, string> resolver)
{
	return resolver == null ? FdbKey.Dump(key) : resolver(key);
}

public virtual string Resolve(Slice key)
{
	return FdbKey.PrettyPrint(key, FdbKey.PrettyPrintMode.Single);
}
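// Minimal usage sketch, not part of the original source: the same (hypothetical)
// key rendered in the three PrettyPrintMode variants used by the resolvers above.
// The exact output depends on FdbKey.PrettyPrint's formatting rules.
Slice key = Slice.FromString("hello");
string asBegin  = FdbKey.PrettyPrint(key, FdbKey.PrettyPrintMode.Begin);  // rendered as the start of a range
string asEnd    = FdbKey.PrettyPrint(key, FdbKey.PrettyPrintMode.End);    // rendered as the end of a range
string asSingle = FdbKey.PrettyPrint(key, FdbKey.PrettyPrintMode.Single); // rendered as a standalone key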
public static async Task Dir(string[] path, IVarTuple extras, DirectoryBrowseOptions options, IFdbDatabase db, TextWriter log, CancellationToken ct)
{
	if (log == null) log = Console.Out;

	Program.Comment(log, $"# Listing /{string.Join("/", path)}:");

	var parent = await TryOpenCurrentDirectoryAsync(path, db, ct);
	if (parent == null)
	{
		Program.Error(log, "Directory not found.");
		return;
	}

	if (parent.Layer.IsPresent)
	{
		log.WriteLine($"# Layer: {parent.Layer:P}");
	}

	var folders = await Fdb.Directory.BrowseAsync(db, parent, ct);
	if (folders != null && folders.Count > 0)
	{
		foreach (var kvp in folders)
		{
			var name = kvp.Key;
			var subfolder = kvp.Value;
			if (subfolder != null)
			{
				if ((options & DirectoryBrowseOptions.ShowCount) != 0)
				{
					if (!(subfolder is FdbDirectoryPartition))
					{
						long count = await Fdb.System.EstimateCountAsync(db, subfolder.Keys.ToRange(), ct);
						Program.StdOut(log, $"  {FdbKey.Dump(subfolder.Copy().GetPrefix()),-12} {(subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">")),-12} {count,9:N0} {name}", ConsoleColor.White);
					}
					else
					{
						Program.StdOut(log, $"  {FdbKey.Dump(subfolder.Copy().GetPrefix()),-12} {(subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">")),-12} {"-",9} {name}", ConsoleColor.White);
					}
				}
				else
				{
					Program.StdOut(log, $"  {FdbKey.Dump(subfolder.Copy().GetPrefix()),-12} {(subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">")),-12} {name}", ConsoleColor.White);
				}
			}
			else
			{
				Program.Error(log, $"  WARNING: {name} seems to be missing!");
			}
		}
		log.WriteLine($"  {folders.Count} sub-directorie(s).");
	}
	else
	{
		//TODO: test if it contains data?
		log.WriteLine("No sub-directories.");
	}
	//TODO: check if there is at least one key?
}
private static async Task BenchBulkInsertThenBulkReadAsync(IFdbDatabase db, int N, int K, int B, CancellationToken ct, bool instrumented = false)
{
	// test that we can bulk write / bulk read
	var timings = instrumented ? new List<KeyValuePair<double, double>>() : null;

	// put test values inside a namespace
	var subspace = db.Partition.ByKey("BulkInsert");

	// cleanup everything
	using (var tr = db.BeginTransaction(ct))
	{
		tr.ClearRange(subspace);
		await tr.CommitAsync();
	}

	// insert all values (batched)
	Console.WriteLine("Inserting " + N.ToString("N0", CultureInfo.InvariantCulture) + " keys: ");

	var insert = Stopwatch.StartNew();
	int batches = 0;
	long bytes = 0;

	var start = Stopwatch.StartNew();

	var tasks = new List<Task>();
	foreach (var worker in FdbKey.Batched(0, N, K, B))
	{
		//hack
		tasks.Add(Task.Run(async () =>
		{
			foreach (var chunk in worker)
			{
				using (var tr = db.BeginTransaction(ct))
				{
					int z = 0;
					foreach (int i in Enumerable.Range(chunk.Key, chunk.Value))
					{
						tr.Set(subspace.Keys.Encode(i), Slice.Create(256));
						z++;
					}

					//Console.Write("#");
					//Console.WriteLine(" Committing batch (" + tr.Size.ToString("N0", CultureInfo.InvariantCulture) + " bytes) " + z + " keys");

					var localStart = start.Elapsed.TotalSeconds;
					await tr.CommitAsync();
					var localDuration = start.Elapsed.TotalSeconds - localStart;
					if (instrumented)
					{
						lock (timings) { timings.Add(new KeyValuePair<double, double>(localStart, localDuration)); }
					}
					Interlocked.Increment(ref batches);
					Interlocked.Add(ref bytes, tr.Size);
				}
			}
		}, ct));
	}
	await Task.WhenAll(tasks);

	insert.Stop();
	Console.WriteLine("Committed " + batches + " batches in " + FormatTimeMilli(insert.Elapsed.TotalMilliseconds) + " (" + FormatTimeMilli(insert.Elapsed.TotalMilliseconds / batches) + " / batch, " + FormatTimeMicro(insert.Elapsed.TotalMilliseconds / N) + " / item)");
	Console.WriteLine("Throughput " + FormatThroughput(bytes, insert.Elapsed.TotalSeconds));

	if (instrumented)
	{
		var sb = new StringBuilder();
		foreach (var kvp in timings)
		{
			sb.Append(kvp.Key.ToString()).Append(';').Append((kvp.Key + kvp.Value).ToString()).Append(';').Append(kvp.Value.ToString()).AppendLine();
		}
#if DEBUG
		System.IO.File.WriteAllText(@"c:\temp\fdb\timings_" + N + "_" + K + "_" + B + ".csv", sb.ToString());
#else
		Console.WriteLine(sb.ToString());
#endif
	}

	// Read values
	using (var tr = db.BeginTransaction(ct))
	{
		Console.WriteLine("Reading all keys...");
		var sw = Stopwatch.StartNew();
		var items = await tr.GetRangeStartsWith(subspace).ToListAsync();
		sw.Stop();
		Console.WriteLine("Took " + FormatTimeMilli(sw.Elapsed.TotalMilliseconds) + " to get " + items.Count.ToString("N0", CultureInfo.InvariantCulture) + " results");
	}
}
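// Hypothetical invocation, not part of the original source; the parameter values
// are illustrative only. N = total keys, K = concurrent workers, B = keys per
// transaction batch; instrumented: true captures per-commit timings.
private static Task RunBulkBenchAsync(IFdbDatabase db, CancellationToken ct)
{
	return BenchBulkInsertThenBulkReadAsync(db, 100000, 4, 1000, ct, instrumented: true);
}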
public static async Task Sampling(string[] path, IVarTuple extras, IFdbDatabase db, TextWriter log, CancellationToken ct)
{
	double ratio = 0.1d;
	bool auto = true;
	if (extras.Count > 0)
	{
		double x = extras.Get<double>(0);
		if (x > 0 && x <= 1) ratio = x;
		auto = false;
	}

	var folder = await TryOpenCurrentDirectoryAsync(path, db, ct);
	KeyRange span;
	if (folder is FdbDirectorySubspace)
	{
		span = KeyRange.StartsWith((folder as FdbDirectorySubspace).Copy().GetPrefix());
		log.WriteLine($"Reading list of shards for /{string.Join("/", path)} under {FdbKey.Dump(span.Begin)} ...");
	}
	else
	{
		log.WriteLine("Reading list of shards for the whole cluster ...");
		span = KeyRange.All;
	}

	// dump keyServers
	var ranges = await Fdb.System.GetChunksAsync(db, span, ct);
	log.WriteLine($"> Found {ranges.Count:N0} shard(s)");

	// take a sample
	var samples = new List<KeyRange>();
	if (ranges.Count <= 32)
	{
		// small enough to scan it all
		samples.AddRange(ranges);
		log.WriteLine($"Sampling all {samples.Count:N0} shards ...");
	}
	else
	{
		// need to take a random subset
		var rnd = new Random();
		int sz = Math.Max((int)Math.Ceiling(ratio * ranges.Count), 1);
		if (auto)
		{
			if (sz > 100) sz = 100; //SAFETY
			if (sz < 32) sz = Math.Max(sz, Math.Min(32, ranges.Count));
		}
		var population = new List<KeyRange>(ranges);
		for (int i = 0; i < sz; i++)
		{
			int p = rnd.Next(population.Count);
			samples.Add(population[p]);
			population.RemoveAt(p);
		}
		log.WriteLine($"Sampling {samples.Count:N0} out of {ranges.Count:N0} shards ({(100.0 * samples.Count / ranges.Count):N1}%) ...");
	}

	log.WriteLine();
	const string FORMAT_STRING = "{0,9} ║{1,10}{6,6} {2,-29} ║{3,10}{7,7} {4,-37} ║{5,10}";
	const string SCALE_KEY = "....--------========########M";
	const string SCALE_VAL = "....--------========########@@@@@@@@M";
	log.WriteLine(FORMAT_STRING, "Count", "Keys", SCALE_KEY, "Values", SCALE_VAL, "Total", "med.", "med.");

	var rangeOptions = new FdbRangeOptions { Mode = FdbStreamingMode.WantAll };

	samples = samples.OrderBy(x => x.Begin).ToList();

	long globalSize = 0;
	long globalCount = 0;
	int workers = 8; // Math.Max(4, Environment.ProcessorCount);

	var sw = Stopwatch.StartNew();
	var tasks = new List<Task>();
	int n = samples.Count;
	while (samples.Count > 0)
	{
		while (tasks.Count < workers && samples.Count > 0)
		{
			var range = samples[0];
			samples.RemoveAt(0);
			tasks.Add(Task.Run(async () =>
			{
				var kk = new RobustHistogram(RobustHistogram.TimeScale.Ticks);
				var vv = new RobustHistogram(RobustHistogram.TimeScale.Ticks);

				#region Method 1: get_range everything...

				using (var tr = db.BeginTransaction(ct))
				{
					long keySize = 0;
					long valueSize = 0;
					long count = 0;

					int iter = 0;
					var beginSelector = KeySelector.FirstGreaterOrEqual(range.Begin);
					var endSelector = KeySelector.FirstGreaterOrEqual(range.End);
					while (true)
					{
						FdbRangeChunk data = default(FdbRangeChunk);
						FdbException error = null;
						try
						{
							data = await tr.Snapshot.GetRangeAsync(
								beginSelector,
								endSelector,
								rangeOptions,
								iter
							).ConfigureAwait(false);
						}
						catch (FdbException e)
						{
							error = e;
						}

						if (error != null)
						{
							await tr.OnErrorAsync(error.Code).ConfigureAwait(false);
							continue;
						}

						if (data.Count == 0) break;

						count += data.Count;
						foreach (var kvp in data)
						{
							keySize += kvp.Key.Count;
							valueSize += kvp.Value.Count;

							kk.Add(TimeSpan.FromTicks(kvp.Key.Count));
							vv.Add(TimeSpan.FromTicks(kvp.Value.Count));
						}

						if (!data.HasMore) break;

						beginSelector = KeySelector.FirstGreaterThan(data.Last);
						++iter;
					}

					long totalSize = keySize + valueSize;
					Interlocked.Add(ref globalSize, totalSize);
					Interlocked.Add(ref globalCount, count);

					lock (log)
					{
						log.WriteLine(FORMAT_STRING, count.ToString("N0"), FormatSize(keySize), kk.GetDistribution(begin: 1, end: 12000, fold: 2), FormatSize(valueSize), vv.GetDistribution(begin: 1, end: 120000, fold: 2), FormatSize(totalSize), FormatSize((int)Math.Ceiling(kk.Median)), FormatSize((int)Math.Ceiling(vv.Median)));
					}
				}

				#endregion

				#region Method 2: estimate the count using key selectors...

				//long counter = await Fdb.System.EstimateCountAsync(db, range, ct);
				//Console.WriteLine("COUNT = " + counter.ToString("N0"));

				#endregion
			}, ct));
		}

		var done = await Task.WhenAny(tasks);
		tasks.Remove(done);
	}

	await Task.WhenAll(tasks);
	sw.Stop();

	log.WriteLine();
	if (n != ranges.Count)
	{
		log.WriteLine($"Sampled {FormatSize(globalSize)} ({globalSize:N0} bytes) and {globalCount:N0} keys in {sw.Elapsed.TotalSeconds:N1} sec");
		log.WriteLine($"> Estimated total size is {FormatSize(globalSize * ranges.Count / n)}");
	}
	else
	{
		log.WriteLine($"Found {FormatSize(globalSize)} ({globalSize:N0} bytes) and {globalCount:N0} keys in {sw.Elapsed.TotalSeconds:N1} sec");
		// compare to the whole cluster
		ranges = await Fdb.System.GetChunksAsync(db, FdbKey.MinValue, FdbKey.MaxValue, ct);
		log.WriteLine($"> This directory contains ~{(100.0 * n / ranges.Count):N2}% of all data");
	}
	log.WriteLine();
}
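// Note on the read loop in Sampling() above: catching FdbException and calling
// tr.OnErrorAsync(error.Code) is the standard FoundationDB retry idiom; it waits
// out retryable errors (such as transaction_too_old) and rethrows fatal ones
// before the loop re-issues the same GetRangeAsync. The `iter` counter only
// influences chunk sizing in Iterator streaming mode; with WantAll it is passed
// along but ignored by the native client.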
public async Task Test_FdbKey_Batched()
{
	// we want numbers from 0 to 999 split between 5 workers that will consume batches of 20 items at a time
	// > we get 5 enumerables that all take ranges from the same pool and all complete when there are no more values
	const int N = 1000;
	const int B = 20;
	const int W = 5;

	var query = FdbKey.Batched(0, N, W, B);
	Assert.That(query, Is.Not.Null);

	var batches = query.ToArray();
	Assert.That(batches, Is.Not.Null);
	Assert.That(batches.Length, Is.EqualTo(W));
	Assert.That(batches, Is.All.Not.Null);

	var used = new bool[N];

	var signal = new TaskCompletionSource<object>();

	// each batch should return new numbers
	var tasks = batches.Select(async (iterator, id) =>
	{
		// force async
		await signal.Task.ConfigureAwait(false);

		foreach (var chunk in iterator)
		{
			// chunk = (offset, count)
			// > count should always be 20
			// > offset should always be a multiple of 20
			// > there should never be any overlap between workers
			Assert.That(chunk.Value, Is.EqualTo(B), "{0}:{1}", chunk.Key, chunk.Value);
			Assert.That(chunk.Key % B, Is.EqualTo(0), "{0}:{1}", chunk.Key, chunk.Value);

			lock (used)
			{
				for (int i = chunk.Key; i < chunk.Key + chunk.Value; i++)
				{
					if (used[i])
						Assert.Fail("Duplicate index {0} chunk {1}:{2} for worker {3}", i, chunk.Key, chunk.Value, id);
					else
						used[i] = true;
				}
			}

			await Task.Delay(1).ConfigureAwait(false);
		}
	}).ToArray();

	ThreadPool.UnsafeQueueUserWorkItem((_) => signal.TrySetResult(null), null);

	await Task.WhenAll(tasks);

	Assert.That(used, Is.All.True);
}
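// Shape sketch, inferred from the test above rather than a definitive contract:
// FdbKey.Batched(offset, count, workers, batchSize) returns one lazy sequence per
// worker, and all sequences draw (offset, count) chunks from a shared cursor, so
// concurrently running workers never receive overlapping ranges. A single worker
// enumerated on its own, as below, will simply drain the entire pool:
foreach (var worker in FdbKey.Batched(0, 1000, 5, 20))
{
	foreach (var chunk in worker)
	{
		Console.WriteLine($"offset={chunk.Key}, count={chunk.Value}");
	}
}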
public async Task Test_GetKey_ReadConflicts()
{
	Slice key;

	using (var db = MemoryDatabase.CreateNew("FOO"))
	{
		var location = db.Keys;

		using (var tr = db.BeginTransaction(this.Cancellation))
		{
			tr.Set(location.Encode(42), Slice.FromString("42"));
			tr.Set(location.Encode(50), Slice.FromString("50"));
			tr.Set(location.Encode(60), Slice.FromString("60"));
			await tr.CommitAsync();
		}
		db.Debug_Dump();

		Func<FdbKeySelector, Slice, Task> check = async (selector, expected) =>
		{
			using (var tr = db.BeginTransaction(this.Cancellation))
			{
				key = await tr.GetKeyAsync(selector);
				await tr.CommitAsync();

				Assert.That(key, Is.EqualTo(expected), selector.ToString() + " => " + FdbKey.Dump(expected));
			}
		};

		await check(
			FdbKeySelector.FirstGreaterOrEqual(location.Encode(50)),
			location.Encode(50)
		);
		await check(
			FdbKeySelector.FirstGreaterThan(location.Encode(50)),
			location.Encode(60)
		);

		await check(
			FdbKeySelector.FirstGreaterOrEqual(location.Encode(49)),
			location.Encode(50)
		);
		await check(
			FdbKeySelector.FirstGreaterThan(location.Encode(49)),
			location.Encode(50)
		);

		await check(
			FdbKeySelector.FirstGreaterOrEqual(location.Encode(49)) + 1,
			location.Encode(60)
		);
		await check(
			FdbKeySelector.FirstGreaterThan(location.Encode(49)) + 1,
			location.Encode(60)
		);

		await check(
			FdbKeySelector.LastLessOrEqual(location.Encode(49)),
			location.Encode(42)
		);
		await check(
			FdbKeySelector.LastLessThan(location.Encode(49)),
			location.Encode(42)
		);
	}
}
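// Quick reference, derived from the assertions above, for a database containing
// the keys {42, 50, 60}:
//   FirstGreaterOrEqual(50)     => 50
//   FirstGreaterThan(50)        => 60
//   FirstGreaterOrEqual(49)     => 50
//   FirstGreaterThan(49)        => 50
//   FirstGreaterOrEqual(49) + 1 => 60   (the +1 offset skips one more key)
//   FirstGreaterThan(49) + 1    => 60
//   LastLessOrEqual(49)         => 42
//   LastLessThan(49)            => 42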
public static async Task Dir(string[] path, ITuple extras, DirectoryBrowseOptions options, IFdbDatabase db, TextWriter log, CancellationToken ct)
{
	if (log == null) log = Console.Out;

	log.WriteLine("# Listing {0}:", string.Join("/", path));

	var parent = await TryOpenCurrentDirectoryAsync(path, db, ct);
	if (parent == null)
	{
		log.WriteLine("  Directory not found.");
		return;
	}

	if (parent.Layer.IsPresent)
	{
		log.WriteLine("# Layer: {0:P}", parent.Layer);
	}

	var folders = await Fdb.Directory.BrowseAsync(db, parent, ct);
	if (folders != null && folders.Count > 0)
	{
		foreach (var kvp in folders)
		{
			var name = kvp.Key;
			var subfolder = kvp.Value;
			if (subfolder != null)
			{
				if ((options & DirectoryBrowseOptions.ShowCount) != 0)
				{
					if (!(subfolder is FdbDirectoryPartition))
					{
						long count = await Fdb.System.EstimateCountAsync(db, subfolder.Keys.ToRange(), ct);
						log.WriteLine("  {0,-12} {1,-12} {3,9:N0} {2}", FdbKey.Dump(subfolder.Copy().GetPrefix()), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, count);
					}
					else
					{
						log.WriteLine("  {0,-12} {1,-12} {3,9} {2}", FdbKey.Dump(subfolder.Copy().GetPrefix()), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, "-");
					}
				}
				else
				{
					log.WriteLine("  {0,-12} {1,-12} {2}", FdbKey.Dump(subfolder.Copy().GetPrefix()), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name);
				}
			}
			else
			{
				log.WriteLine("  WARNING: {0} seems to be missing!", name);
			}
		}
		log.WriteLine("  {0} sub-directorie(s).", folders.Count);
	}
	else
	{
		//TODO: test if it contains data?
		log.WriteLine("  No sub-directories.");
	}
}
public static FutureHandle TransactionWatch(TransactionHandle transaction, Slice key)
{
	if (key.IsNullOrEmpty) throw new ArgumentException("Key cannot be null or empty", "key");

	fixed (byte* ptrKey = key.Array)
	{
		var future = NativeMethods.fdb_transaction_watch(transaction, ptrKey + key.Offset, key.Count);
		Contract.Assert(future != null);
#if DEBUG_NATIVE_CALLS
		Debug.WriteLine("fdb_transaction_watch(0x" + transaction.Handle.ToString("x") + ", key: '" + FdbKey.Dump(key) + "') => 0x" + future.Handle.ToString("x"));
#endif
		return future;
	}
}
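// Note (behavior documented by the FoundationDB C API, not stated in this file):
// the future returned by fdb_transaction_watch becomes ready when the watched
// key's value changes from the value it had as of this transaction's read
// version, and the watch only becomes active once the transaction commits
// successfully.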