public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;

            _record = LogRecord.Prepare(recordFactory, 0, _corrId, _eventId, 0, 0, streamId, 1,
                                        PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
            _chunk  = TFChunkHelper.CreateNewChunk(Filename);
            _result = _chunk.TryAppend(_record);
        }
Example #2
        public void try_read_returns_record_when_record_bigger_than_internal_buffer()
        {
            var writerchk = new InMemoryCheckpoint(0);
            var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);

            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));

            db.Open();

            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
            var eventTypeId   = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

            var recordToWrite = LogRecord.Prepare(
                factory: recordFactory,
                logPosition: 0,
                correlationId: _correlationId,
                eventId: _eventId,
                transactionPos: 0,
                transactionOffset: 0,
                eventStreamId: streamId,
                expectedVersion: 1234,
                timeStamp: new DateTime(2012, 12, 21),
                flags: PrepareFlags.None,
                eventType: eventTypeId,
                data: new byte[9000],
                metadata: new byte[] { 7, 17 });
            var writer = new TFChunkWriter(db);

            writer.Open();
            long pos;

            Assert.IsTrue(writer.Write(recordToWrite, out pos));
            writer.Close();

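            // advance the writer checkpoint past the record so the chaser is allowed to read it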
            writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

            var reader = new TFChunkChaser(db, writerchk, chaserchk, false);

            reader.Open();

            ILogRecord record;
            var        readRecord = reader.TryReadNext(out record);

            reader.Close();

            Assert.IsTrue(readRecord);
            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
            Assert.AreEqual(recordToWrite, record);

            db.Close();
        }
        public void try_read_returns_record_when_writerchecksum_ahead()
        {
            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;

            var recordToWrite = LogRecord.Prepare(
                factory: recordFactory,
                logPosition: 0,
                correlationId: _correlationId,
                eventId: _eventId,
                transactionPos: 0,
                transactionOffset: 0,
                eventStreamId: streamId,
                expectedVersion: 1234,
                timeStamp: new DateTime(2012, 12, 21),
                flags: PrepareFlags.None,
                eventType: "type",
                data: new byte[] { 1, 2, 3, 4, 5 },
                metadata: new byte[] { 7, 17 });

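            // hand-craft the chunk file on disk: a pre-sized file, the chunk header, then the record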
            using (var fs = new FileStream(GetFilePathFor("chunk-000000.000000"), FileMode.CreateNew,
                                           FileAccess.Write)) {
                fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
                var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid())
                                  .AsByteArray();
                var writer = new BinaryWriter(fs);
                writer.Write(chunkHeader);
                recordToWrite.WriteWithLengthPrefixAndSuffixTo(writer);
                fs.Close();
            }

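            // the writer checkpoint is deliberately 16 bytes ahead of the data actually on disk (the "writer checksum ahead" case)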
            var writerchk = new InMemoryCheckpoint(recordToWrite.GetSizeWithLengthPrefixAndSuffix() + 16);
            var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
            var db        = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));

            db.Open();

            var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);

            chaser.Open();

            ILogRecord record;
            var        recordRead = chaser.TryReadNext(out record);

            chaser.Close();

            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
            Assert.IsTrue(recordRead);
            Assert.AreEqual(recordToWrite, record);

            db.Close();
        }
Example #4
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            _db = new TFChunkDb(TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 16 * 1024));
            _db.Open();

            var chunk      = _db.Manager.GetChunkFor(0);
            var logFormat  = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;
            var streamName = "es-to-scavenge";

            logFormat.StreamNameIndex.GetOrAddId(streamName, out var streamId, out _, out _);
            var expectedVersion = ExpectedVersion.NoStream;

            _p1 = LogRecord.SingleWrite(logFormat.RecordFactory, 0, Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1    = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            _p2 = LogRecord.SingleWrite(logFormat.RecordFactory, _cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2    = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            _p3 = LogRecord.SingleWrite(logFormat.RecordFactory, _cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3    = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();
            _originalFileSize = chunk.FileSize;

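            // move the writer and chaser checkpoints to the end of the chunk so it is eligible for scavenging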
            _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.WriterCheckpoint.Flush();
            _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.ChaserCheckpoint.Flush();

            var scavenger = new TFChunkScavenger<TStreamId>(_db, new FakeTFScavengerLog(), new FakeTableIndex<TStreamId>(),
                                                            new FakeReadIndex<TLogFormat, TStreamId>(x => EqualityComparer<TStreamId>.Default.Equals(x, streamId)),
                                                            logFormat.SystemStreams);
            await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
Example #5
 public override void TestFixtureSetUp()
 {
     base.TestFixtureSetUp();
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk  = TFChunkHelper.CreateNewChunk(Filename);
     _result = _chunk.TryAppend(_record);
     _chunk.Flush();
     _chunk.Complete();
     _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false,
                                              initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false);
     _cachedChunk.CacheInMemory();
 }
Example #6
        public override void SetUp()
        {
            base.SetUp();

            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;

            var record = LogRecord.Prepare(recordFactory, 15556, _corrId, _eventId, 15556, 0, streamId, 1,
                                           PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));

            _chunk   = TFChunkHelper.CreateNewChunk(Filename, 20);
            _written = _chunk.TryAppend(record).Success;
        }
Example #7
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            var logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;

            logFormat.StreamNameIndex.GetOrAddId("test", out var streamId, out _, out _);

            _record = LogRecord.Prepare(logFormat.RecordFactory, 0, _corrId, _eventId, 0, 0, streamId, 1,
                                        PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
            _chunk  = TFChunkHelper.CreateNewChunk(Filename);
            _result = _chunk.TryAppend(_record);
        }
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            _db = new TFChunkDb(
                TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 4096));
            _db.Open();

            var chunk = _db.Manager.GetChunk(0);

            _records = new ILogRecord[RecordsCount];
            _results = new RecordWriteResult[RecordsCount];
            var recordFactory   = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId        = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
            var eventTypeId     = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;
            var expectedVersion = ExpectedVersion.NoStream;

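            // each record is written at the previous append's NewPosition, chaining the records through the chunk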
            for (int i = 0; i < _records.Length - 1; ++i)
            {
                _records[i] = LogRecord.SingleWrite(
                    recordFactory,
                    i == 0 ? 0 : _results[i - 1].NewPosition,
                    Guid.NewGuid(),
                    Guid.NewGuid(),
                    streamId,
                    expectedVersion++,
                    eventTypeId,
                    new byte[] { 0, 1, 2 },
                    new byte[] { 5, 7 });
                _results[i] = chunk.TryAppend(_records[i]);
            }

            _records[_records.Length - 1] = LogRecord.Prepare(
                recordFactory,
                _results[_records.Length - 1 - 1].NewPosition,
                Guid.NewGuid(),
                Guid.NewGuid(),
                _results[_records.Length - 1 - 1].NewPosition,
                0,
                streamId,
                expectedVersion++,
                PrepareFlags.Data,
                eventTypeId,
                new byte[] { 0, 1, 2 },
                new byte[] { 5, 7 });
            _results[_records.Length - 1] = chunk.TryAppend(_records[_records.Length - 1]);

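            // flush the chunk and advance the writer checkpoint to the last written position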
            chunk.Flush();
            _db.Config.WriterCheckpoint.Write(_results[RecordsCount - 1].NewPosition);
            _db.Config.WriterCheckpoint.Flush();
        }
        public void the_file_will_not_be_deleted_until_reader_released()
        {
            var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000);

            using (var reader = chunk.AcquireReader()) {
                chunk.MarkForDeletion();
                var buffer = new byte[1024];
                var result = reader.ReadNextRawBytes(1024, buffer);
                Assert.IsFalse(result.IsEOF);
                Assert.AreEqual(1024, result.BytesRead);
            }

            chunk.WaitForDestroy(5000);
        }
        public void a_read_on_new_file_can_be_performed()
        {
            var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000);

            using (var reader = chunk.AcquireReader()) {
                var buffer = new byte[1024];
                var result = reader.ReadNextRawBytes(1024, buffer);
                Assert.IsFalse(result.IsEOF);
                Assert.AreEqual(1024, result.BytesRead);
            }

            chunk.MarkForDeletion();
            chunk.WaitForDestroy(5000);
        }
        public void with_a_writer_checksum_of_zero_the_first_chunk_is_created_with_correct_name_and_is_aligned()
        {
            var config = TFChunkHelper.CreateDbConfig(PathName, 0);
            var db     = new TFChunkDb(config);

            db.Open();
            db.Dispose();

            Assert.AreEqual(1, Directory.GetFiles(PathName).Length);
            Assert.IsTrue(File.Exists(GetFilePathFor("chunk-000000.000000")));
            var fileInfo = new FileInfo(GetFilePathFor("chunk-000000.000000"));

            Assert.AreEqual(12288, fileInfo.Length);
        }
Example #12
        public void try_read_returns_false_when_writer_checksum_is_zero()
        {
            var writerchk = new InMemoryCheckpoint(0);
            var chaserchk = new InMemoryCheckpoint(0);
            var db        = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));

            db.Open();

            var reader = new TFChunkReader(db, writerchk, 0);

            Assert.IsFalse(reader.TryReadNext().Success);

            db.Close();
        }
        public void a_read_past_eof_returns_eof_and_no_footer()
        {
            var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 300);

            using (var reader = chunk.AcquireReader()) {
                var buffer = new byte[8092];
                var result = reader.ReadNextRawBytes(8092, buffer);
                Assert.IsTrue(result.IsEOF);
                Assert.AreEqual(4096, result.BytesRead);                 // does not include header and footer space
            }

            chunk.MarkForDeletion();
            chunk.WaitForDestroy(5000);
        }
Example #14
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            _db = new TFChunkDb(
                TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 4096));
            _db.Open();

            var chunk = _db.Manager.GetChunk(0);

            _records = new ILogRecord[RecordsCount];
            _results = new RecordWriteResult[RecordsCount];
            var logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;

            logFormat.StreamNameIndex.GetOrAddId("es1", out var streamId, out _, out _);

            for (int i = 0; i < _records.Length - 1; ++i)
            {
                _records[i] = LogRecord.SingleWrite(
                    logFormat.RecordFactory,
                    i == 0 ? 0 : _results[i - 1].NewPosition,
                    Guid.NewGuid(),
                    Guid.NewGuid(),
                    streamId,
                    ExpectedVersion.Any,
                    "et1",
                    new byte[] { 0, 1, 2 },
                    new byte[] { 5, 7 });
                _results[i] = chunk.TryAppend(_records[i]);
            }

            _records[_records.Length - 1] = LogRecord.Prepare(
                logFormat.RecordFactory,
                _results[_records.Length - 1 - 1].NewPosition,
                Guid.NewGuid(),
                Guid.NewGuid(),
                _results[_records.Length - 1 - 1].NewPosition,
                0,
                streamId,
                ExpectedVersion.Any,
                PrepareFlags.Data,
                "et1",
                new byte[] { 0, 1, 2 },
                new byte[] { 5, 7 });
            _results[_records.Length - 1] = chunk.TryAppend(_records[_records.Length - 1]);

            chunk.Flush();
            _db.Config.WriterCheckpoint.Write(_results[RecordsCount - 1].NewPosition);
            _db.Config.WriterCheckpoint.Flush();
        }
        public void if_asked_for_more_than_buffer_size_will_only_read_buffer_size()
        {
            var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 3000);

            using (var reader = chunk.AcquireReader()) {
                var buffer = new byte[1024];
                var result = reader.ReadNextRawBytes(3000, buffer);
                Assert.IsFalse(result.IsEOF);
                Assert.AreEqual(1024, result.BytesRead);
            }

            chunk.MarkForDeletion();
            chunk.WaitForDestroy(5000);
        }
Example #16
        public void try_read_returns_record_when_writerchecksum_equal()
        {
            var writerchk = new InMemoryCheckpoint(0);
            var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
            var db        = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));

            db.Open();

            _logFormat.StreamNameIndex.GetOrAddId("WorldEnding", out var streamId, out _, out _);

            var recordToWrite = LogRecord.Prepare(
                factory: _logFormat.RecordFactory,
                logPosition: 0,
                correlationId: _correlationId,
                eventId: _eventId,
                transactionPos: 0,
                transactionOffset: 0,
                eventStreamId: streamId,
                expectedVersion: 1234,
                timeStamp: new DateTime(2012, 12, 21),
                flags: PrepareFlags.None,
                eventType: "type",
                data: new byte[] { 1, 2, 3, 4, 5 },
                metadata: new byte[] { 7, 17 });
            var writer = new TFChunkWriter(db);

            writer.Open();
            long pos;

            Assert.IsTrue(writer.Write(recordToWrite, out pos));
            writer.Close();

            writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

            var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);

            chaser.Open();

            ILogRecord record;
            var        readRecord = chaser.TryReadNext(out record);

            chaser.Close();

            Assert.IsTrue(readRecord);
            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
            Assert.AreEqual(recordToWrite, record);

            db.Close();
        }
Example #17
        public void a_record_can_be_written()
        {
            var filename    = GetFilePathFor("chunk-000000.000000");
            var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, chunkId: Guid.NewGuid());
            var chunkBytes  = chunkHeader.AsByteArray();
            var bytes       = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];

            Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
            File.WriteAllBytes(filename, bytes);

            _checkpoint = new InMemoryCheckpoint(137);
            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));

            db.Open();

            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
            var eventTypeId   = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

            var tf     = new TFChunkWriter(db);
            var record = LogRecord.Prepare(
                factory: recordFactory,
                logPosition: _checkpoint.Read(),
                correlationId: _correlationId,
                eventId: _eventId,
                expectedVersion: 1234,
                transactionPos: 0,
                transactionOffset: 0,
                eventStreamId: streamId,
                timeStamp: new DateTime(2012, 12, 21),
                flags: PrepareFlags.None,
                eventType: eventTypeId,
                data: new byte[] { 1, 2, 3, 4, 5 },
                metadata: new byte[] { 7, 17 });
            long tmp;

            tf.Write(record, out tmp);
            tf.Close();
            db.Dispose();

            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137,
                            _checkpoint.Read());     //137 is fluff assigned to beginning of checkpoint
            using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
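                // skip the chunk header, the 137-byte checkpoint offset and the record's length prefix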
                filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
                var reader = new BinaryReader(filestream);
                var read   = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
                Assert.AreEqual(record, read);
            }
        }
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0, chunkSize: 16 * 1024));
            _db.Open();

            var chunk = _db.Manager.GetChunkFor(0);

            _p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1    = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            _p2 = LogRecord.SingleWrite(_cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2    = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            _p3 = LogRecord.SingleWrite(_cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3    = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();
            _originalFileSize = chunk.FileSize;

            _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.WriterCheckpoint.Flush();
            _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.ChaserCheckpoint.Flush();

            var bus          = new InMemoryBus("Bus");
            var ioDispatcher = new IODispatcher(bus, new PublishEnvelope(bus));
            var scavenger    = new TFChunkScavenger(_db, ioDispatcher, new FakeTableIndex(),
                                                    new FakeReadIndex(x => x == "es-to-scavenge"), Guid.NewGuid(), "fakeNodeIp");

            scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
        public void a_read_past_end_of_completed_chunk_does_not_include_footer()
        {
            var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 300);

            chunk.Complete();             // chunk has 0 bytes of actual data
            using (var reader = chunk.AcquireReader()) {
                var buffer = new byte[1024];
                var result = reader.ReadNextDataBytes(1024, buffer);
                Assert.IsTrue(result.IsEOF);
                Assert.AreEqual(0, result.BytesRead);
            }

            chunk.MarkForDeletion();
            chunk.WaitForDestroy(5000);
        }
        public void a_read_on_scavenged_chunk_does_not_include_map()
        {
            var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("afile"), 200, isScavenged: true);

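            // completing the scavenge writes the position map (PosMap entries) after the data section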
            chunk.CompleteScavenge(new[] { new PosMap(0, 0), new PosMap(1, 1) });
            using (var reader = chunk.AcquireReader()) {
                var buffer = new byte[1024];
                var result = reader.ReadNextDataBytes(1024, buffer);
                Assert.IsTrue(result.IsEOF);
                Assert.AreEqual(0, result.BytesRead);                 //header 128 + footer 128 + map 16
            }

            chunk.MarkForDeletion();
            chunk.WaitForDestroy(5000);
        }
        public void a_record_can_be_written()
        {
            var filename    = GetFilePathFor("chunk-000000.000000");
            var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
            var chunkBytes  = chunkHeader.AsByteArray();
            var buf         = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];

            Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
            File.WriteAllBytes(filename, buf);

            _checkpoint = new InMemoryCheckpoint(137);
            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint(),
                                                                chunkSize: chunkHeader.ChunkSize));

            db.Open();

            var bytes = new byte[3994]; // this gives a record size of exactly 4097; with 3993 (record size 4096) everything works fine!

            new Random().NextBytes(bytes);
            var writer = new TFChunkWriter(db);
            var record = new PrepareLogRecord(logPosition: 137,
                                              correlationId: _correlationId,
                                              eventId: _eventId,
                                              transactionPosition: 789,
                                              transactionOffset: 543,
                                              eventStreamId: "WorldEnding",
                                              expectedVersion: 1234,
                                              timeStamp: new DateTime(2012, 12, 21),
                                              flags: PrepareFlags.SingleWrite,
                                              eventType: "type",
                                              data: bytes,
                                              metadata: new byte[] { 0x07, 0x17 });

            long pos;

            Assert.IsTrue(writer.Write(record, out pos));
            writer.Close();
            db.Dispose();

            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
            using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
                filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
                var reader = new BinaryReader(filestream);
                var read   = LogRecord.ReadFrom(reader);
                Assert.AreEqual(record, read);
            }
        }
Example #22
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();
            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;

            _record = LogRecord.Prepare(recordFactory, 0, _corrId, _eventId, 0, 0, streamId, 1,
                                        PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
            _chunk  = TFChunkHelper.CreateNewChunk(Filename);
            _result = _chunk.TryAppend(_record);
            _chunk.Flush();
            _chunk.Complete();
            _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false,
                                                     initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false);
            _cachedChunk.CacheInMemory();
        }
Example #23
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            _db = new TFChunkDb(TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 16 * 1024));
            _db.Open();

            var chunk = _db.Manager.GetChunkFor(0);

            _p1 = LogRecord.SingleWrite(new LogV2RecordFactory(), 0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1    = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            _p2 = LogRecord.SingleWrite(new LogV2RecordFactory(), _cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2    = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            _p3 = LogRecord.SingleWrite(new LogV2RecordFactory(), _cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3    = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();
            _originalFileSize = chunk.FileSize;

            _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.WriterCheckpoint.Flush();
            _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.ChaserCheckpoint.Flush();

            var scavenger = new TFChunkScavenger<string>(_db, new FakeTFScavengerLog(), new FakeTableIndex<string>(),
                                                         new FakeReadIndex<string>(x => x == "es-to-scavenge"),
                                                         LogFormatAbstractor.V2.SystemStreams);
            await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
        public void SetUp()
        {
            _writerCheckpoint = new InMemoryCheckpoint();
            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _writerCheckpoint, new InMemoryCheckpoint(), 1024));
            _db.Open();
            _writer = new TFChunkWriter(_db);
            _writer.Open();
            _record = new CommitLogRecord(logPosition: 0,
                                          correlationId: _eventId,
                                          transactionPosition: 4321,
                                          timeStamp: new DateTime(2012, 12, 21),
                                          firstEventNumber: 10);
            long newPos;

            _writer.Write(_record, out newPos);
            _writer.Flush();
        }
Example #25
        public void a_read_past_eof_doesnt_return_eof_if_chunk_is_not_yet_completed()
        {
            var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 300);
            var rec   = LogRecord.Commit(0, Guid.NewGuid(), 0, 0);

            Assert.IsTrue(chunk.TryAppend(rec).Success, "Record was not appended");
            using (var reader = chunk.AcquireReader())
            {
                var buffer = new byte[1024];
                var result = reader.ReadNextDataBytes(1024, buffer);
                Assert.IsFalse(result.IsEOF, "EOF was returned.");
                //does not include header and footer space
                Assert.AreEqual(rec.GetSizeWithLengthPrefixAndSuffix(), result.BytesRead, "Read wrong number of bytes.");
            }
            chunk.MarkForDeletion();
            chunk.WaitForDestroy(5000);
        }
Example #26
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            _db = new TFChunkDb(
                TFChunkHelper.CreateDbConfig(PathName, 0, chunkSize: 4096));
            _db.Open();

            var chunk = _db.Manager.GetChunk(0);

            _records = new LogRecord[RecordsCount];
            _results = new RecordWriteResult[RecordsCount];

            for (int i = 0; i < _records.Length - 1; ++i)
            {
                _records[i] = LogRecord.SingleWrite(
                    i == 0 ? 0 : _results[i - 1].NewPosition,
                    Guid.NewGuid(),
                    Guid.NewGuid(),
                    "es1",
                    ExpectedVersion.Any,
                    "et1",
                    new byte[] { 0, 1, 2 },
                    new byte[] { 5, 7 });
                _results[i] = chunk.TryAppend(_records[i]);
            }

            _records[_records.Length - 1] = LogRecord.Prepare(
                _results[_records.Length - 1 - 1].NewPosition,
                Guid.NewGuid(),
                Guid.NewGuid(),
                _results[_records.Length - 1 - 1].NewPosition,
                0,
                "es1",
                ExpectedVersion.Any,
                PrepareFlags.Data,
                "et1",
                new byte[] { 0, 1, 2 },
                new byte[] { 5, 7 });
            _results[_records.Length - 1] = chunk.TryAppend(_records[_records.Length - 1]);

            chunk.Flush();
            _db.Config.WriterCheckpoint.Write(_results[RecordsCount - 1].NewPosition);
            _db.Config.WriterCheckpoint.Flush();
        }
Example #27
        public void if_asked_for_more_than_buffer_size_will_only_read_buffer_size()
        {
            var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 3000);

            var rec = LogRecord.Prepare(0, Guid.NewGuid(), Guid.NewGuid(), 0, 0, "ES", -1, PrepareFlags.None, "ET", new byte[2000], null);

            Assert.IsTrue(chunk.TryAppend(rec).Success, "Record was not appended");

            using (var reader = chunk.AcquireReader())
            {
                var buffer = new byte[1024];
                var result = reader.ReadNextDataBytes(3000, buffer);
                Assert.IsFalse(result.IsEOF);
                Assert.AreEqual(1024, result.BytesRead);
            }
            chunk.MarkForDeletion();
            chunk.WaitForDestroy(5000);
        }
        public void a_record_can_be_written()
        {
            _checkpoint = new InMemoryCheckpoint(0);
            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));

            db.Open();
            var tf = new TFChunkWriter(db);

            tf.Open();

            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
            var eventTypeId   = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

            var record = LogRecord.Prepare(
                factory: recordFactory,
                logPosition: 0,
                correlationId: _correlationId,
                eventId: _eventId,
                transactionPos: 0,
                transactionOffset: 0,
                eventStreamId: streamId,
                expectedVersion: 1234,
                timeStamp: new DateTime(2012, 12, 21),
                flags: PrepareFlags.None,
                eventType: eventTypeId,
                data: new byte[] { 1, 2, 3, 4, 5 },
                metadata: new byte[] { 7, 17 });
            long tmp;

            tf.Write(record, out tmp);
            tf.Close();
            db.Dispose();

            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());
            using (var filestream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open, FileAccess.Read)) {
                filestream.Position = ChunkHeader.Size;

                var reader = new BinaryReader(filestream);
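                // skip the record's length prefix before reading the record itself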
                reader.ReadInt32();
                var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
                Assert.AreEqual(record, read);
            }
        }
        public void try_read_returns_false_when_writer_checkpoint_is_zero()
        {
            var writerchk = new InMemoryCheckpoint(0);
            var chaserchk = new InMemoryCheckpoint();
            var db        = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));

            db.Open();

            var chaser = new TFChunkChaser(db, writerchk, new InMemoryCheckpoint(), false);

            chaser.Open();

            ILogRecord record;

            Assert.IsFalse(chaser.TryReadNext(out record));

            chaser.Close();
            db.Dispose();
        }
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();
            _chunk = TFChunkHelper.CreateNewChunk(Filename);

            _prepare1 = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                             PrepareFlags.None, "Foo", new byte[12], new byte[15]);
            var r1 = _chunk.TryAppend(_prepare1);

            _written1  = r1.Success;
            _position1 = r1.OldPosition;

            _prepare2 = new PrepareLogRecord(r1.NewPosition, _corrId, _eventId, 0, 0, "test2", 2, new DateTime(2000, 1, 1, 12, 0, 0),
                                             PrepareFlags.None, "Foo2", new byte[12], new byte[15]);
            var r2 = _chunk.TryAppend(_prepare2);

            _written2  = r2.Success;
            _position2 = r2.OldPosition;
            _chunk.Flush();
        }