// When the chaser checkpoint has caught up with the writer checkpoint there is
// nothing to chase: TryReadNext must report false and leave the chaser
// checkpoint where it was.
public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_checksum()
{
    var writerCheckpoint = new InMemoryCheckpoint();
    var chaserCheckpoint = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var config = new TFChunkDbConfig(PathName,
                                     new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                     10000,
                                     0,
                                     writerCheckpoint,
                                     chaserCheckpoint,
                                     new InMemoryCheckpoint(-1),
                                     new InMemoryCheckpoint(-1));
    var db = new TFChunkDb(config);
    db.Open();

    // Put writer and chaser at the same position.
    writerCheckpoint.Write(12);
    writerCheckpoint.Flush();
    chaserCheckpoint.Write(12);
    chaserCheckpoint.Flush();

    var chaser = new TFChunkChaser(db, writerCheckpoint, chaserCheckpoint);
    chaser.Open();

    LogRecord record;
    Assert.IsFalse(chaser.TryReadNext(out record));
    Assert.AreEqual(12, chaserCheckpoint.Read());

    chaser.Close();
    db.Dispose();
}
// Writes one prepare record through the multifile writer, then verifies that
// the checkpoint advanced by exactly the record size and that the record can
// be read back from the first data file.
public void a_record_can_be_written()
{
    _checkpoint = new InMemoryCheckpoint(0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000,
                                                   _checkpoint, new List<ICheckpoint>());
    var writer = new MultifileTransactionFileWriter(config);
    writer.Open();

    var prepare = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       transactionPosition: 0,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       expectedVersion: 1234,
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] { 7, 17 });
    long newPosition;
    writer.Write(prepare, out newPosition);
    writer.Close();

    Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());

    using (var fileStream = File.Open(Path.Combine(PathName, "prefix.tf0"), FileMode.Open, FileAccess.Read))
    {
        var reader = new BinaryReader(fileStream);
        reader.ReadInt32(); // skip the length prefix
        var readRecord = LogRecord.ReadFrom(reader);
        Assert.AreEqual(prepare, readRecord);
    }
}
// Same as the plain write test, but the checkpoint starts at 137 inside a
// pre-existing 10000-byte file; the record must land at offset 137 and the
// checkpoint must advance by the record size on top of that.
public void a_record_can_be_written()
{
    var fileName = Path.Combine(PathName, "prefix.tf0");
    File.WriteAllBytes(fileName, new byte[10000]);

    _checkpoint = new InMemoryCheckpoint(137); // arbitrary non-zero starting offset
    var writer = new MultifileTransactionFileWriter(
        new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, _checkpoint, new List<ICheckpoint>()));
    writer.Open();

    var prepare = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       expectedVersion: 1234,
                                       transactionPosition: 0,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] { 7, 17 });
    long newPosition;
    writer.Write(prepare, out newPosition);
    writer.Flush();
    writer.Close();

    Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    using (var fileStream = File.Open(fileName, FileMode.Open, FileAccess.Read))
    {
        fileStream.Seek(137 + sizeof(int), SeekOrigin.Begin); // starting offset + length prefix
        var reader = new BinaryReader(fileStream);
        var readRecord = LogRecord.ReadFrom(reader);
        Assert.AreEqual(prepare, readRecord);
    }
}
// TFChunk variant: a chunk file with a valid header is laid down first, the
// checkpoint starts at 137, and the written record must be readable at
// ChunkHeader.Size + 137 in the chunk file.
public void a_record_can_be_written()
{
    var fileName = GetFilePathFor("chunk-000000.000000");
    var header = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, chunkId: Guid.NewGuid());
    var headerBytes = header.AsByteArray();
    var fileBytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(headerBytes, 0, fileBytes, 0, headerBytes.Length);
    File.WriteAllBytes(fileName, fileBytes);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                               10000,
                                               0,
                                               _checkpoint,
                                               new InMemoryCheckpoint(),
                                               new InMemoryCheckpoint(-1),
                                               new InMemoryCheckpoint(-1)));
    db.Open();

    var writer = new TFChunkWriter(db);
    var prepare = new PrepareLogRecord(logPosition: _checkpoint.Read(),
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       expectedVersion: 1234,
                                       transactionPosition: 0,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] { 7, 17 });
    long newPosition;
    writer.Write(prepare, out newPosition);
    writer.Close();
    db.Dispose();

    // 137 is the arbitrary starting offset of the checkpoint.
    Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    using (var fileStream = File.Open(fileName, FileMode.Open, FileAccess.Read))
    {
        fileStream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(fileStream);
        var readRecord = LogRecord.ReadFrom(reader);
        Assert.AreEqual(prepare, readRecord);
    }
}
// A record written from offset 40 in a 50-byte-per-file database must straddle
// three files; this test reassembles the bytes from all three files and checks
// the record round-trips.
public void a_record_can_be_written()
{
    var firstFile = Path.Combine(PathName, "prefix.tf0");
    var secondFile = Path.Combine(PathName, "prefix.tf1");
    var thirdFile = Path.Combine(PathName, "prefix.tf2");
    File.WriteAllBytes(firstFile, new byte[50]);

    _checkpoint = new InMemoryCheckpoint(40);
    var tf = new MultifileTransactionFileWriter(
        new TransactionFileDatabaseConfig(PathName, "prefix.tf", 50, _checkpoint, new ICheckpoint[0]));
    tf.Open();

    var record = new PrepareLogRecord(logPosition: 0,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      transactionPosition: 0,
                                      transactionOffset: 0,
                                      eventStreamId: "WorldEnding",
                                      expectedVersion: 1234,
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.None,
                                      eventType: "type",
                                      data: new byte[] { 1, 2, 3, 4, 5 },
                                      metadata: new byte[] { 7, 17 });
    long tmp;
    tf.Write(record, out tmp);
    tf.Close();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 40, _checkpoint.Read()); // + original checkpoint position
    Assert.IsTrue(File.Exists(secondFile));

    // Reassemble the record bytes scattered across the three files.
    var stream = new MemoryStream();
    CopyFileRange(firstFile, 40, 10, stream);
    CopyFileRange(secondFile, 0, 50, stream);
    CopyFileRange(thirdFile, 0, 50, stream);

    stream.Seek(0 + sizeof(int), SeekOrigin.Begin); // skip the length prefix
    var reader = new BinaryReader(stream);
    var read = LogRecord.ReadFrom(reader);
    Assert.AreEqual(record, read);
}

// Copies exactly `count` bytes starting at `offset` of `path` into `destination`.
// Loops on Stream.Read because a single call may legally return fewer bytes
// than requested (CA2022) — the original test ignored the return value.
private static void CopyFileRange(string path, int offset, int count, Stream destination)
{
    var buffer = new byte[count];
    using (var file = File.Open(path, FileMode.Open, FileAccess.Read))
    {
        file.Seek(offset, SeekOrigin.Begin);
        var total = 0;
        while (total < count)
        {
            var read = file.Read(buffer, total, count - total);
            Assert.IsTrue(read > 0, "Unexpected end of file: " + path);
            total += read;
        }
    }
    destination.Write(buffer, 0, count);
}
// Writes one large (8000-byte payload) prepare record into a pre-created chunk
// with the checkpoint starting at 137, then verifies checkpoint advance and
// round-trip of the record. (Stray Console.WriteLine debug output removed.)
public void a_record_can_be_written()
{
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(1, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               chunkHeader.ChunkSize,
                                               0,
                                               _checkpoint,
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 0,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      transactionPosition: 0,
                                      eventStreamId: "WorldEnding",
                                      expectedVersion: 1234,
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.None,
                                      eventType: "type",
                                      data: new byte[8000],
                                      metadata: new byte[] { 7, 17 });
    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();

    // 137 is the arbitrary starting offset of the checkpoint.
    Assert.AreEqual(record.GetSizeWithLengthPrefix() + 137, _checkpoint.Read());

    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
    {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
// Regression-style test: a record whose total size is exactly 4097 bytes
// (payload of 3994 random bytes) must still be written and read back
// correctly from offset 137 of a pre-created chunk.
public void a_record_can_be_written()
{
    var fileName = GetFilePathFor("chunk-000000.000000");
    var header = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
    var headerBytes = header.AsByteArray();
    var fileBytes = new byte[ChunkHeader.Size + ChunkFooter.Size + header.ChunkSize];
    Buffer.BlockCopy(headerBytes, 0, fileBytes, 0, headerBytes.Length);
    File.WriteAllBytes(fileName, fileBytes);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                               header.ChunkSize,
                                               0,
                                               _checkpoint,
                                               new InMemoryCheckpoint(),
                                               new InMemoryCheckpoint(-1),
                                               new InMemoryCheckpoint(-1)));
    db.Open();

    // This gives exactly 4097 bytes of record; with 3993 (record size 4096) everything works fine.
    var payload = new byte[3994];
    new Random().NextBytes(payload);

    var writer = new TFChunkWriter(db);
    var prepare = new PrepareLogRecord(logPosition: 137,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       transactionPosition: 789,
                                       transactionOffset: 543,
                                       eventStreamId: "WorldEnding",
                                       expectedVersion: 1234,
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.SingleWrite,
                                       eventType: "type",
                                       data: payload,
                                       metadata: new byte[] { 0x07, 0x17 });
    long newPosition;
    Assert.IsTrue(writer.Write(prepare, out newPosition));
    writer.Close();
    db.Dispose();

    Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    using (var fileStream = File.Open(fileName, FileMode.Open, FileAccess.Read))
    {
        fileStream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(fileStream);
        var readRecord = LogRecord.ReadFrom(reader);
        Assert.AreEqual(prepare, readRecord);
    }
}
// Multifile chaser variant: with reader and writer checkpoints both at 12
// there is nothing to read, and the reader checkpoint must stay at 12.
public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_checksum()
{
    var writerCheckpoint = new InMemoryCheckpoint(12);
    var readerCheckpoint = new InMemoryCheckpoint("reader", 12);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000,
                                                   writerCheckpoint, new[] { readerCheckpoint });
    File.WriteAllBytes(Path.Combine(PathName, "prefix.tf0"), new byte[10000]);

    var chaser = new MultifileTransactionFileChaser(config, "reader");
    chaser.Open();

    LogRecord record;
    Assert.IsFalse(chaser.TryReadNext(out record));
    Assert.AreEqual(12, readerCheckpoint.Read());

    chaser.Close();
}
// Writes one prepare record through TFChunkWriter into an empty database and
// reads it back from just past the chunk header of the first chunk file.
public void a_record_can_be_written()
{
    _checkpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                               1000,
                                               0,
                                               _checkpoint,
                                               new InMemoryCheckpoint(),
                                               new InMemoryCheckpoint(-1),
                                               new InMemoryCheckpoint(-1)));
    db.Open();

    var writer = new TFChunkWriter(db);
    writer.Open();
    var prepare = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       transactionPosition: 0,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       expectedVersion: 1234,
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] { 7, 17 });
    long newPosition;
    writer.Write(prepare, out newPosition);
    writer.Close();
    db.Dispose();

    Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());

    using (var fileStream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open, FileAccess.Read))
    {
        fileStream.Position = ChunkHeader.Size;
        var reader = new BinaryReader(fileStream);
        reader.ReadInt32(); // skip the length prefix
        var readRecord = LogRecord.ReadFrom(reader);
        Assert.AreEqual(prepare, readRecord);
    }
}
// The bulk retriever must only hand out data up to the writer checkpoint:
// bytes appended to the file are invisible until the checkpoint is advanced.
public void should_not_read_buffered_data()
{
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var readerCheckpoint = new InMemoryCheckpoint("reader", 0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000,
                                                   writerCheckpoint,
                                                   new List<ICheckpoint> { readerCheckpoint });
    var fileName = Path.Combine(PathName, "prefix.tf0");
    File.Create(fileName).Close();

    var retriever = new MultifileTransactionFileBulkRetriever(config);
    retriever.Open(0);

    // Nothing written yet -> nothing to read.
    Assert.IsTrue(retriever.ReadNextBulk().Length == 0);

    // First chunk of data: append, then publish via the writer checkpoint.
    var firstBatch = new byte[100];
    new Random().NextBytes(firstBatch);
    AppendToFile(fileName, firstBatch);
    writerCheckpoint.Write(firstBatch.Length);
    Assert.AreEqual(firstBatch, retriever.ReadNextBulk());

    // Second chunk behaves the same once the checkpoint is advanced.
    var secondBatch = new byte[100];
    new Random().NextBytes(secondBatch);
    AppendToFile(fileName, secondBatch);
    writerCheckpoint.Write(writerCheckpoint.Read() + secondBatch.Length);
    Assert.AreEqual(secondBatch, retriever.ReadNextBulk());

    retriever.Close();

    // Local helper: append bytes to the data file and force them to disk.
    void AppendToFile(string path, byte[] data)
    {
        using (var f = new FileStream(path, FileMode.Append, FileAccess.Write, FileShare.ReadWrite))
        {
            f.Write(data, 0, data.Length);
            f.Flush(flushToDisk: true);
        }
    }
}
// Fills the first chunk almost completely, then shows that a record which
// does not fit is rejected (Write returns false) while a smaller record is
// subsequently accepted into the next chunk.
public void a_record_is_not_written_at_first_but_written_on_second_try()
{
    var firstChunkFile = Path.Combine(PathName, "prefix.tf0");
    var secondChunkFile = Path.Combine(PathName, "prefix.tf1");
    var header = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var headerBytes = header.AsByteArray();
    var fileBytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(headerBytes, 0, fileBytes, 0, headerBytes.Length);
    File.WriteAllBytes(firstChunkFile, fileBytes);

    _checkpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               _checkpoint,
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();
    var writer = new TFChunkWriter(db);
    long pos;

    // Almost fill up the first chunk.
    var record1 = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       expectedVersion: 1234,
                                       transactionPosition: 0,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[8000]);
    Assert.IsTrue(writer.Write(record1, out pos));

    // Chunk has too little space left for another 8000-byte-metadata record.
    var record2 = new PrepareLogRecord(logPosition: pos,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       expectedVersion: 1234,
                                       transactionPosition: pos,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[8000]);
    Assert.IsFalse(writer.Write(record2, out pos));

    // A smaller record fits and goes into the next chunk.
    var record3 = new PrepareLogRecord(logPosition: pos,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       expectedVersion: 1234,
                                       transactionPosition: pos,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[2000]);
    Assert.IsTrue(writer.Write(record3, out pos));
    writer.Close();
    db.Dispose();

    // Checkpoint is the full first chunk (10000) plus the third record.
    Assert.AreEqual(record3.GetSizeWithLengthPrefixAndSuffix() + 10000, _checkpoint.Read());

    using (var fileStream = File.Open(secondChunkFile, FileMode.Open, FileAccess.Read))
    {
        fileStream.Seek(ChunkHeader.Size + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(fileStream);
        var readRecord = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record3, readRecord);
    }
}
// With the writer checkpoint ahead of the reader, the chaser must return the
// record that was written to the data file and advance the reader checkpoint
// by exactly the record's prefixed size.
public void try_read_returns_record_when_writerchecksum_ahead()
{
    var writerchk = new InMemoryCheckpoint(128);
    var readerchk = new InMemoryCheckpoint("reader", 0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000,
                                                   writerchk, new[] { readerchk });
    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });
    using (var fs = new FileStream(Path.Combine(PathName, "prefix.tf0"), FileMode.CreateNew, FileAccess.Write))
    {
        var writer = new BinaryWriter(fs);
        recordToWrite.WriteWithLengthPrefixTo(writer);
        fs.Close();
    }

    var reader = new MultifileTransactionFileChaser(config, "reader");
    reader.Open();
    LogRecord record; // 'out' parameter needs no initialization
    var recordRead = reader.TryReadNext(out record);
    reader.Close();

    // Assert the read succeeded BEFORE dereferencing 'record' — the original
    // order produced a NullReferenceException instead of a clean assert failure.
    Assert.IsTrue(recordRead);
    Assert.AreEqual(record.GetSizeWithLengthPrefix(), readerchk.Read());
    Assert.AreEqual(recordToWrite, record);
}
// Simulates a live writer racing the chaser: each time new bytes are appended
// and the writer checkpoint is advanced, the chaser must pick up exactly the
// newly published record.
public void try_read_returns_properly_when_writer_is_written_to_while_chasing()
{
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var readerCheckpoint = new InMemoryCheckpoint("reader", 0);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000,
                                                   writerCheckpoint, new[] { readerCheckpoint });
    var fileName = Path.Combine(PathName, "prefix.tf0");
    File.Create(fileName).Close();

    var chaser = new MultifileTransactionFileChaser(config, "reader");
    chaser.Open();

    // Nothing published yet.
    LogRecord record;
    Assert.IsFalse(chaser.TryReadNext(out record));

    var firstRecord = new PrepareLogRecord(logPosition: 0,
                                           correlationId: _correlationId,
                                           eventId: _eventId,
                                           transactionPosition: 0,
                                           eventStreamId: "WorldEnding",
                                           expectedVersion: 1234,
                                           timeStamp: new DateTime(2012, 12, 21),
                                           flags: PrepareFlags.None,
                                           eventType: "type",
                                           data: new byte[] { 1, 2, 3, 4, 5 },
                                           metadata: new byte[] { 7, 17 });
    var memStream = new MemoryStream();
    var binaryWriter = new BinaryWriter(memStream);
    firstRecord.WriteWithLengthPrefixTo(binaryWriter);
    using (var fs = new FileStream(fileName, FileMode.Append, FileAccess.Write, FileShare.ReadWrite))
    {
        fs.Write(memStream.ToArray(), 0, (int)memStream.Length);
        fs.Flush(flushToDisk: true);
    }
    writerCheckpoint.Write(memStream.Length); // publish the first record

    Assert.IsTrue(chaser.TryReadNext(out record));
    Assert.AreEqual(record, firstRecord);

    var secondRecord = new PrepareLogRecord(logPosition: 0,
                                            correlationId: _correlationId,
                                            eventId: _eventId,
                                            transactionPosition: 0,
                                            eventStreamId: "WorldEnding",
                                            expectedVersion: 4321,
                                            timeStamp: new DateTime(2012, 12, 21),
                                            flags: PrepareFlags.None,
                                            eventType: "type",
                                            data: new byte[] { 3, 2, 1 },
                                            metadata: new byte[] { 9 });
    memStream.SetLength(0); // reuse the buffer for the second record
    secondRecord.WriteWithLengthPrefixTo(binaryWriter);
    using (var fs = new FileStream(fileName, FileMode.Append, FileAccess.Write, FileShare.ReadWrite))
    {
        fs.Write(memStream.ToArray(), 0, (int)memStream.Length);
        fs.Flush(flushToDisk: true);
    }
    writerCheckpoint.Write(writerCheckpoint.Read() + memStream.Length); // publish the second record

    Assert.IsTrue(chaser.TryReadNext(out record));
    Assert.AreEqual(record, secondRecord);

    chaser.Close();
}
// A record whose 4-byte length prefix itself is split across two files (2 bytes
// at the end of file 0, the rest in file 1) must still be readable by the chaser.
public void can_read_a_record_with_length_straddling_multiple_files()
{
    var writerCheckpoint = new InMemoryCheckpoint(20020);
    var readerCheckpoint = new InMemoryCheckpoint("reader", 9998);
    var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000,
                                                   writerCheckpoint, new[] { readerCheckpoint });
    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });
    var memStream = new MemoryStream();
    var binaryWriter = new BinaryWriter(memStream);
    recordToWrite.WriteWithLengthPrefixTo(binaryWriter);
    var serialized = memStream.GetBuffer();

    // First two bytes of the record go at the very end of file 0...
    using (var fs = new FileStream(config.FileNamingStrategy.GetFilenameFor(0), FileMode.CreateNew, FileAccess.Write))
    {
        fs.Seek(9998, SeekOrigin.Begin);
        fs.Write(serialized, 0, 2);
        fs.Close();
    }
    // ...and the remainder at the start of file 1.
    using (var fs = new FileStream(config.FileNamingStrategy.GetFilenameFor(1), FileMode.CreateNew, FileAccess.Write))
    {
        fs.Seek(0, SeekOrigin.Begin);
        fs.Write(serialized, 2, recordToWrite.GetSizeWithLengthPrefix() - 2);
        fs.Close();
    }

    var chaser = new MultifileTransactionFileChaser(config, "reader");
    chaser.Open();
    LogRecord record;
    var readRecord = chaser.TryReadNext(out record);
    chaser.Close();

    Assert.IsTrue(readRecord);
    Assert.AreEqual(recordToWrite, record);
    Assert.AreEqual(9998 + recordToWrite.GetSizeWithLengthPrefix(), readerCheckpoint.Read());
}
// TFChunk chaser variant: a record is laid down by hand in a chunk file, the
// writer checkpoint is set ahead, and the chaser must read the record back and
// advance its checkpoint by the record's prefixed-and-suffixed size.
public void try_read_returns_record_when_writerchecksum_ahead()
{
    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             transactionOffset: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });
    using (var fs = new FileStream(GetFilePathFor("prefix.tf0"), FileMode.CreateNew, FileAccess.Write))
    {
        fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
        var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false).AsByteArray();
        var writer = new BinaryWriter(fs);
        writer.Write(chunkHeader);
        recordToWrite.WriteWithLengthPrefixAndSuffixTo(writer);
        fs.Close();
    }

    var writerchk = new InMemoryCheckpoint(128);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               writerchk,
                                               chaserchk,
                                               new[] { chaserchk }));
    db.OpenVerifyAndClean();

    var chaser = new TFChunkChaser(db, writerchk, chaserchk);
    chaser.Open();
    LogRecord record;
    var recordRead = chaser.TryReadNext(out record);
    chaser.Close();

    // Assert the read succeeded BEFORE dereferencing 'record' — the original
    // order produced a NullReferenceException instead of a clean assert failure.
    Assert.IsTrue(recordRead);
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
    Assert.AreEqual(recordToWrite, record);
    db.Close();
}
// After writing a record and setting the writer checkpoint exactly at its end,
// the chaser must read that one record and end up with its checkpoint equal to
// the record's prefixed-and-suffixed size.
public void try_read_returns_record_when_writerchecksum_equal()
{
    var writerCheckpoint = new InMemoryCheckpoint(0);
    var chaserCheckpoint = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               writerCheckpoint,
                                               chaserCheckpoint,
                                               new[] { chaserCheckpoint }));
    db.OpenVerifyAndClean();

    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             transactionOffset: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });
    var writer = new TFChunkWriter(db);
    writer.Open();
    long position;
    Assert.IsTrue(writer.Write(recordToWrite, out position));
    writer.Close();
    writerCheckpoint.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix()); // writer exactly at end of record

    var chaser = new TFChunkChaser(db, writerCheckpoint, chaserCheckpoint);
    chaser.Open();
    LogRecord record;
    var readRecord = chaser.TryReadNext(out record);
    chaser.Close();

    Assert.IsTrue(readRecord);
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserCheckpoint.Read());
    Assert.AreEqual(recordToWrite, record);
    db.Close();
}