public void CouldWriteManyFinalizedChunksToTable()
{
#pragma warning disable 618
    Settings.DoAdditionalCorrectnessChecks = false;
    Settings.DoDetectBufferLeaks = true;
#pragma warning restore 618
    ProcessConfig.InitDefault();

    var path = TestUtils.GetPath();

    ulong count = 100_000;
    var rng = new Random(42);

    // Pre-generate payload values; index 0 is unused because versions are 1-based.
    var values = new SmallDecimal[count + 1];
    for (ulong i = 1; i <= count; i++)
    {
        values[(int)i] = new SmallDecimal(10 + Math.Round(rng.NextDouble() * 2, 4), 4);
    }

    var storage = new SQLiteStorage($@"Filename={Path.Combine(path, "blockstorage.db")}");

    //Settings.ZstdCompressionLevel = 1;
    //Settings.LZ4CompressionLevel = 1;
    //Settings.ZlibCompressionLevel = 5;

    using (Benchmark.Run("Chunks", (int)count))
    {
        var chunkSize = 4032;

        var rm = BufferPool.Retain(chunkSize, true);
        rm.Span.Clear();
        StreamBlock.TryInitialize(new DirectBuffer(rm), (StreamLogId)1, 8, 1);
        var block = new StreamBlock(rm, (StreamLogId)1, 8, 1);

        var chunkCount = 1;
        for (ulong i = 1; i <= count; i++)
        {
            var claim = block.Claim(i, 8);
            if (!claim.IsValid)
            {
                // The block is full: it must already be completed, so persist it
                // to the SQLite table and replace it with a fresh block that
                // starts at the current version.
                Assert.IsTrue(block.IsCompleted);

                var (inserted, rowid) = storage.InsertBlock(block);

                block.DisposeFree();
                chunkCount++;

                // Periodically checkpoint so the WAL does not grow unbounded.
                if (chunkCount % 100 == 0)
                {
                    storage.Checkpoint(true);
                }

                rm = BufferPool.Retain(chunkSize, true);
                rm.Span.Clear();
                StreamBlock.TryInitialize(new DirectBuffer(rm), (StreamLogId)1, 8, i);
                block = new StreamBlock(rm, (StreamLogId)1, 8, i);
                claim = block.Claim(i, 8);
            }

            claim.Write(0, values[i]);
            block.Commit();
        }

        // Insert the last, not-yet-completed block and read it back to verify
        // that the stored version matches.
        var (inserted1, rowid1) = storage.InsertBlock(block);
        Assert.IsTrue(inserted1);

        var blockR = storage.TryGetStreamBlock(block.StreamLogIdLong, block.FirstVersion);
        var lastVersion = blockR.CurrentVersion;
        Assert.AreEqual(block.CurrentVersion, lastVersion);
        blockR.DisposeFree();

        // Re-inserting after Complete() succeeds once more...
        block.Complete();
        (inserted1, rowid1) = storage.InsertBlock(block);
        Assert.IsTrue(inserted1);
        Assert.AreEqual(block.CurrentVersion, lastVersion);

        // ...but inserting the same completed block a second time is rejected.
        (inserted1, rowid1) = storage.InsertBlock(block);
        Assert.IsFalse(inserted1);

        block.DisposeFree();

        Console.WriteLine("Chunk count: " + chunkCount);
        Console.WriteLine("Useful size: " + count * 8);

        storage.Dispose();
    }
}