public CompactionRequest(string dbname, string tablename, CompactionType type) : this()
{
    this.Dbname = dbname;
    this.Tablename = tablename;
    this.Type = type;
}
public void LogCompactBasicCustomFctnTest([Values] CompactionType compactionType)
{
    MyInput input = new();

    const int totalRecords = 2000;
    var compactUntil = 0L;

    for (var i = 0; i < totalRecords; i++)
    {
        if (i == totalRecords / 2)
            compactUntil = fht.Log.TailAddress;

        var key1 = new MyKey { key = i };
        var value = new MyValue { value = i };
        session.Upsert(ref key1, ref value, 0, 0);
    }

    compactUntil = session.Compact(compactUntil, compactionType, default(EvenCompactionFunctions));
    fht.Log.Truncate();
    Assert.AreEqual(compactUntil, fht.Log.BeginAddress);

    // Read all keys - odd keys in the compacted region were dropped by EvenCompactionFunctions, the rest should be present
    for (var i = 0; i < totalRecords; i++)
    {
        var output = new MyOutput();
        var key1 = new MyKey { key = i };
        var value = new MyValue { value = i };

        var ctx = (i < (totalRecords / 2) && (i % 2 != 0)) ? 1 : 0;

        var status = session.Read(ref key1, ref input, ref output, ctx, 0);
        if (status.IsPending)
        {
            session.CompletePending(true);
        }
        else
        {
            if (ctx == 0)
            {
                Assert.IsTrue(status.Found);
                Assert.AreEqual(value.value, output.value.value);
            }
            else
            {
                Assert.IsFalse(status.Found);
            }
        }
    }
}
public void BlittableLogCompactionTest1([Values] CompactionType compactionType)
{
    using var session = fht.For(new FunctionsCompaction()).NewSession<FunctionsCompaction>();

    InputStruct input = default;
    const int totalRecords = 2000;
    var start = fht.Log.TailAddress;
    long compactUntil = 0;

    for (int i = 0; i < totalRecords; i++)
    {
        if (i == 1000)
            compactUntil = fht.Log.TailAddress;

        var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 };
        var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 };
        session.Upsert(ref key1, ref value, 0, 0);
    }

    compactUntil = session.Compact(compactUntil, compactionType);
    fht.Log.Truncate();
    Assert.AreEqual(compactUntil, fht.Log.BeginAddress);

    // Read 2000 keys - all should be present
    for (int i = 0; i < totalRecords; i++)
    {
        OutputStruct output = default;
        var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 };
        var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 };

        var status = session.Read(ref key1, ref input, ref output, 0, 0);
        if (status.IsPending)
        {
            session.CompletePending(true);
        }
        else
        {
            Assert.IsTrue(status.Found);
            Assert.AreEqual(value.vfield1, output.value.vfield1);
            Assert.AreEqual(value.vfield2, output.value.vfield2);
        }
    }
}
public void LogCompactBasicTest([Values] TestUtils.DeviceType deviceType, [Values] CompactionType compactionType)
{
    MyInput input = new();

    const int totalRecords = 500;
    long compactUntil = 0;

    for (int i = 0; i < totalRecords; i++)
    {
        if (i == 250)
            compactUntil = fht.Log.TailAddress;

        var key1 = new MyKey { key = i };
        var value = new MyValue { value = i };
        session.Upsert(ref key1, ref value, 0, 0);
    }

    compactUntil = session.Compact(compactUntil, compactionType);
    fht.Log.Truncate();
    Assert.AreEqual(compactUntil, fht.Log.BeginAddress);

    // Read all keys - all should be present
    for (int i = 0; i < totalRecords; i++)
    {
        MyOutput output = new();
        var key1 = new MyKey { key = i };
        var value = new MyValue { value = i };

        var status = session.Read(ref key1, ref input, ref output, 0, 0);
        if (status.IsPending)
        {
            // Drain the pending read and take its status/output instead of the original (pending) status.
            session.CompletePendingWithOutputs(out var completedOutputs, wait: true);
            Assert.IsTrue(completedOutputs.Next());
            Assert.IsTrue(completedOutputs.Current.Status.Found);
            output = completedOutputs.Current.Output;
            Assert.IsFalse(completedOutputs.Next());
            completedOutputs.Dispose();
        }
        else
        {
            Assert.IsTrue(status.Found);
        }
        Assert.AreEqual(value.value, output.value.value);
    }
}
public void DeleteCompactLookup([Values] CompactionType compactionType)
{
    using var session = fht.NewSession(new SimpleFunctions<long, long>());

    const int totalRecords = 2000;
    var start = fht.Log.TailAddress;
    long compactUntil = 0;

    for (int i = 0; i < totalRecords; i++)
    {
        if (i == 1010)
            compactUntil = fht.Log.TailAddress;
        session.Upsert(i, i);
    }

    for (int i = 0; i < totalRecords / 2; i++)
        session.Delete(i);

    compactUntil = session.Compact(compactUntil, compactionType);
    Assert.AreEqual(compactUntil, fht.Log.BeginAddress);

    using var session2 = fht.NewSession(new SimpleFunctions<long, long>());

    // Verify records by reading
    for (int i = 0; i < totalRecords; i++)
    {
        (var status, var output) = session2.Read(i);
        if (status.IsPending)
        {
            session2.CompletePendingWithOutputs(out var completedOutputs, true);
            Assert.IsTrue(completedOutputs.Next());
            (status, output) = (completedOutputs.Current.Status, completedOutputs.Current.Output);
            Assert.IsFalse(completedOutputs.Next());
        }

        if (i < totalRecords / 2)
        {
            Assert.IsTrue(status.NotFound);
        }
        else
        {
            Assert.IsTrue(status.Found);
            Assert.AreEqual(i, output);
        }
    }
}
public void BlittableLogCompactionCustomFunctionsTest2([Values] CompactionType compactionType)
{
    // Update: irrelevant as session compaction no longer uses Copy/CopyInPlace
    // This test checks if CopyInPlace returning false triggers call to Copy
    using var session = fht.For(new FunctionsCompaction()).NewSession<FunctionsCompaction>();

    var key = new KeyStruct { kfield1 = 100, kfield2 = 101 };
    var value = new ValueStruct { vfield1 = 10, vfield2 = 20 };
    session.Upsert(ref key, ref value, 0, 0);
    fht.Log.Flush(true);

    value = new ValueStruct { vfield1 = 11, vfield2 = 21 };
    session.Upsert(ref key, ref value, 0, 0);
    fht.Log.Flush(true);

    var compactUntil = session.Compact(fht.Log.TailAddress, compactionType);
    fht.Log.Truncate();

    var input = default(InputStruct);
    var output = default(OutputStruct);
    var status = session.Read(ref key, ref input, ref output, 0, 0);
    if (status.IsPending)
    {
        session.CompletePending(true);
    }
    else
    {
        Assert.IsTrue(status.Found);
        Assert.AreEqual(value.vfield1, output.value.vfield1);
        Assert.AreEqual(value.vfield2, output.value.vfield2);
    }
}
public void LogCompactCopyInPlaceCustomFctnTest([Values] CompactionType compactionType)
{
    // Update: irrelevant as session compaction no longer uses Copy/CopyInPlace
    // This test checks if CopyInPlace returning false triggers call to Copy
    using var session = fht.For(new MyFunctionsDelete()).NewSession<MyFunctionsDelete>();

    var key = new MyKey { key = 100 };
    var value = new MyValue { value = 20 };
    session.Upsert(ref key, ref value, 0, 0);
    fht.Log.Flush(true);

    value = new MyValue { value = 21 };
    session.Upsert(ref key, ref value, 0, 0);
    fht.Log.Flush(true);

    var compactionFunctions = new Test2CompactionFunctions();
    var compactUntil = session.Compact(fht.Log.TailAddress, compactionType, compactionFunctions);
    fht.Log.Truncate();

    var input = default(MyInput);
    var output = default(MyOutput);
    var status = session.Read(ref key, ref input, ref output, 0, 0);
    if (status.IsPending)
    {
        session.CompletePendingWithOutputs(out var outputs, wait: true);
        (status, output) = GetSinglePendingResult(outputs);
    }

    Assert.IsTrue(status.Found);
    Assert.AreEqual(value.value, output.value.value);
}
/// <summary>
/// Compact the log until the specified address, moving active records to the tail of the log. BeginAddress is shifted, but the physical log
/// is not deleted from disk. The caller is responsible for truncating the physical log on disk by taking a checkpoint or calling Log.Truncate
/// </summary>
/// <param name="functions">Functions used to manage key-values during compaction</param>
/// <param name="cf">User provided compaction functions (see <see cref="ICompactionFunctions{Key, Value}"/>).</param>
/// <param name="input">Input for SingleWriter</param>
/// <param name="output">Output from SingleWriter; it will be called for all records that are moved, before Compact() returns, so the user must supply buffering or process each output completely</param>
/// <param name="untilAddress">Compact log until this address</param>
/// <param name="compactionType">Compaction type (whether we lookup records or scan log for liveness checking)</param>
/// <param name="sessionVariableLengthStructSettings">Session variable length struct settings</param>
/// <returns>Address until which compaction was done</returns>
internal long Compact<Input, Output, Context, Functions, CompactionFunctions>(Functions functions, CompactionFunctions cf, ref Input input, ref Output output, long untilAddress, CompactionType compactionType, SessionVariableLengthStructSettings<Value, Input> sessionVariableLengthStructSettings = null)
    where Functions : IFunctions<Key, Value, Input, Output, Context>
    where CompactionFunctions : ICompactionFunctions<Key, Value>
{
    return compactionType switch
    {
        CompactionType.Scan => CompactScan<Input, Output, Context, Functions, CompactionFunctions>(functions, cf, ref input, ref output, untilAddress, sessionVariableLengthStructSettings),
        CompactionType.Lookup => CompactLookup<Input, Output, Context, Functions, CompactionFunctions>(functions, cf, ref input, ref output, untilAddress, sessionVariableLengthStructSettings),
        _ => throw new FasterException("Invalid compaction type"),
    };
}
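// Illustrative calling pattern only (not part of the original source): a minimal sketch assuming the
// same fixture members the tests above use (`fht`, `session`), with a hypothetical helper name. As the
// doc comment notes, Compact() moves live records to the tail and shifts BeginAddress, but the caller
// must still reclaim disk space afterwards, e.g. via Log.Truncate() as the tests do, or by checkpointing.
private void CompactOldestFifth(CompactionType compactionType)
{
    // Compact roughly the oldest fifth of the log, mirroring the address math in MemoryLogCompactionTest1.
    long compactUntil = fht.Log.BeginAddress + (fht.Log.TailAddress - fht.Log.BeginAddress) / 5;
    compactUntil = session.Compact(compactUntil, compactionType);

    // Physically drop the compacted prefix from disk; BeginAddress now points at the compaction boundary.
    fht.Log.Truncate();
    Assert.AreEqual(compactUntil, fht.Log.BeginAddress);
}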
public ShowCompactResponseElement(string dbname, string tablename, string partitionname, CompactionType type, string state, string workerid, long start, string runAs) : this()
{
    this.Dbname = dbname;
    this.Tablename = tablename;
    this.Partitionname = partitionname;
    this.Type = type;
    this.State = state;
    this.Workerid = workerid;
    this.Start = start;
    this.RunAs = runAs;
}
public void MemoryLogCompactionTest1([Values] TestUtils.DeviceType deviceType, [Values] CompactionType compactionType)
{
    string filename = path + "MemoryLogCompactionTests1" + deviceType.ToString() + ".log";
    log = TestUtils.CreateTestDevice(deviceType, filename);
    fht = new FasterKV<ReadOnlyMemory<int>, Memory<int>>(1L << 20,
        new LogSettings { LogDevice = log, MemorySizeBits = 12, PageSizeBits = 10, SegmentSizeBits = 22 });

    using var session = fht.For(new MemoryCompaction()).NewSession<MemoryCompaction>();

    var key = new Memory<int>(new int[20]);
    var value = new Memory<int>(new int[20]);

    const int totalRecords = 200;
    var start = fht.Log.TailAddress;
    for (int i = 0; i < totalRecords; i++)
    {
        key.Span.Fill(i);
        value.Span.Fill(i);
        session.Upsert(key, value);
        if (i < 5)
            session.Delete(key); // in-place delete
    }

    for (int i = 5; i < 10; i++)
    {
        key.Span.Fill(i);
        value.Span.Fill(i);
        session.Delete(key); // tombstone inserted
    }

    // Compact log
    var compactUntil = fht.Log.BeginAddress + (fht.Log.TailAddress - fht.Log.BeginAddress) / 5;
    compactUntil = session.Compact(compactUntil, compactionType);
    fht.Log.Truncate();
    Assert.AreEqual(compactUntil, fht.Log.BeginAddress);

    // Read all keys - all but the first 10 (deleted) should be present
    for (int i = 0; i < totalRecords; i++)
    {
        key.Span.Fill(i);

        var (status, output) = session.Read(key, userContext: i < 10 ? 1 : 0);
        if (status.IsCompleted)
        {
            if (i < 10)
            {
                Assert.IsFalse(status.Found);
            }
            else
            {
                Assert.IsTrue(status.Found);
                Assert.IsTrue(output.Item1.Memory.Span.Slice(0, output.Item2).SequenceEqual(key.Span));
                output.Item1.Dispose();
            }
        }
    }
    session.CompletePending(true);

    // Test iteration of distinct live keys
    using (var iter = session.Iterate())
    {
        int count = 0;
        while (iter.GetNext(out RecordInfo recordInfo))
        {
            var k = iter.GetKey();
            Assert.GreaterOrEqual(k.Span[0], 10);
            count++;
        }
        Assert.AreEqual(190, count);
    }

    // Test iteration of all log records
    using (var iter = fht.Log.Scan(fht.Log.BeginAddress, fht.Log.TailAddress))
    {
        int count = 0;
        while (iter.GetNext(out RecordInfo recordInfo))
        {
            var k = iter.GetKey();
            Assert.GreaterOrEqual(k.Span[0], 5);
            count++;
        }
        // Includes 190 live records + 5 deleted records
        Assert.AreEqual(195, count);
    }
}
public void BlittableLogCompactionCustomFunctionsTest1([Values] CompactionType compactionType)
{
    using var session = fht.For(new FunctionsCompaction()).NewSession<FunctionsCompaction>();

    InputStruct input = default;
    const int totalRecords = 2000;
    var start = fht.Log.TailAddress;
    var compactUntil = 0L;

    for (var i = 0; i < totalRecords; i++)
    {
        if (i == totalRecords / 2)
            compactUntil = fht.Log.TailAddress;

        var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 };
        var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 };
        session.Upsert(ref key1, ref value, 0, 0);
    }

    var tail = fht.Log.TailAddress;

    // Only leave records with even vfield1
    compactUntil = session.Compact(compactUntil, compactionType, default(EvenCompactionFunctions));
    fht.Log.Truncate();
    Assert.AreEqual(compactUntil, fht.Log.BeginAddress);

    // Read all keys - in the compacted region only records with even vfield1 should remain
    for (var i = 0; i < totalRecords; i++)
    {
        OutputStruct output = default;
        var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 };
        var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 };

        var ctx = (i < (totalRecords / 2) && (i % 2 != 0)) ? 1 : 0;

        var status = session.Read(ref key1, ref input, ref output, ctx, 0);
        if (status.IsPending)
        {
            session.CompletePending(true);
        }
        else
        {
            if (ctx == 0)
            {
                Assert.IsTrue(status.Found);
                Assert.AreEqual(value.vfield1, output.value.vfield1);
                Assert.AreEqual(value.vfield2, output.value.vfield2);
            }
            else
            {
                Assert.IsFalse(status.Found);
            }
        }
    }
}
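// Hedged sketch (not part of the original source) of the user-supplied compaction filter referenced
// above: EvenCompactionFunctions is assumed to implement ICompactionFunctions<KeyStruct, ValueStruct>
// through an IsDeleted-style predicate; the exact member name may differ across FASTER versions.
// Returning true lets Compact() drop the record, which is why odd vfield1 values below the compaction
// point read back as NotFound in the tests above.
private struct EvenCompactionFunctions : ICompactionFunctions<KeyStruct, ValueStruct>
{
    // Treat records with odd vfield1 as dead so compaction discards them.
    public bool IsDeleted(ref KeyStruct key, ref ValueStruct value) => value.vfield1 % 2 != 0;
}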
public void LogCompactAfterDeleteTest([Values] CompactionType compactionType)
{
    MyInput input = new();

    const int totalRecords = 2000;
    long compactUntil = 0;

    for (int i = 0; i < totalRecords; i++)
    {
        if (i == totalRecords / 2)
            compactUntil = fht.Log.TailAddress;

        var key1 = new MyKey { key = i };
        var value = new MyValue { value = i };
        session.Upsert(ref key1, ref value, 0, 0);

        if (i % 8 == 0)
        {
            int j = i / 4;
            key1 = new MyKey { key = j };
            session.Delete(ref key1, 0, 0);
        }
    }

    compactUntil = session.Compact(compactUntil, compactionType);
    fht.Log.Truncate();
    Assert.AreEqual(compactUntil, fht.Log.BeginAddress);

    // Read keys - even keys below 500 were deleted, everything else should be present
    for (int i = 0; i < totalRecords; i++)
    {
        MyOutput output = new();
        var key1 = new MyKey { key = i };
        var value = new MyValue { value = i };

        int ctx = ((i < 500) && (i % 2 == 0)) ? 1 : 0;

        var status = session.Read(ref key1, ref input, ref output, ctx, 0);
        if (status.IsPending)
        {
            session.CompletePending(true);
        }
        else
        {
            if (ctx == 0)
            {
                Assert.IsTrue(status.Found);
                Assert.AreEqual(value.value, output.value.value);
            }
            else
            {
                Assert.IsFalse(status.Found);
            }
        }
    }
}
public void LogCompactTestNewEntries([Values] CompactionType compactionType)
{
    MyInput input = new();

    const int totalRecords = 2000;
    long compactUntil = 0;

    for (int i = 0; i < totalRecords; i++)
    {
        if (i == 1000)
            compactUntil = fht.Log.TailAddress;

        var key1 = new MyKey { key = i };
        var value = new MyValue { value = i };
        session.Upsert(ref key1, ref value, 0, 0);
    }

    // Put fresh entries for the first 1000 records
    for (int i = 0; i < 1000; i++)
    {
        var key1 = new MyKey { key = i };
        var value = new MyValue { value = i };
        session.Upsert(ref key1, ref value, 0, 0);
    }

    fht.Log.Flush(true);

    var tail = fht.Log.TailAddress;
    compactUntil = session.Compact(compactUntil, compactionType);
    fht.Log.Truncate();
    Assert.AreEqual(compactUntil, fht.Log.BeginAddress);
    Assert.AreEqual(tail, fht.Log.TailAddress);

    // Read 2000 keys - all should be present
    for (int i = 0; i < totalRecords; i++)
    {
        MyOutput output = new();
        var key1 = new MyKey { key = i };
        var value = new MyValue { value = i };

        var status = session.Read(ref key1, ref input, ref output, 0, 0);
        if (status.IsPending)
        {
            session.CompletePending(true);
        }
        else
        {
            Assert.IsTrue(status.Found);
            Assert.AreEqual(value.value, output.value.value);
        }
    }
}