public void try_read_returns_record_when_writerchecksum_ahead()
        {
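            // Hand-crafts a chunk file containing a single prepare record, opens the db with the
            // writer checkpoint (128) ahead of the chaser checkpoint (0), and verifies the chaser
            // reads the record back and advances its checkpoint by the record's full size.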
            var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                                     correlationId: _correlationId,
                                                     eventId: _eventId,
                                                     transactionPosition: 0,
                                                     transactionOffset: 0,
                                                     eventStreamId: "WorldEnding",
                                                     expectedVersion: 1234,
                                                     timeStamp: new DateTime(2012, 12, 21),
                                                     flags: PrepareFlags.None,
                                                     eventType: "type",
                                                     data: new byte[] { 1, 2, 3, 4, 5 },
                                                     metadata: new byte[] { 7, 17 });

            using (var fs = new FileStream(GetFilePathFor("chunk-000000.000000"), FileMode.CreateNew,
                                           FileAccess.Write)) {
                fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
                var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid())
                                  .AsByteArray();
                var writer = new BinaryWriter(fs);
                writer.Write(chunkHeader);
                recordToWrite.WriteWithLengthPrefixAndSuffixTo(writer);
                fs.Close();
            }

            var writerchk = new InMemoryCheckpoint(128);
            var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
            var db        = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));

            db.Open();

            var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);

            chaser.Open();

            ILogRecord record;
            var        recordRead = chaser.TryReadNext(out record);

            chaser.Close();

            Assert.IsTrue(recordRead);
            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
            Assert.AreEqual(recordToWrite, record);

            db.Close();
        }
        public void a_record_can_be_written()
        {
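            // Pre-creates a chunk file on disk, opens the db with the writer checkpoint at 137,
            // writes one large prepare record, and verifies both the advanced checkpoint and the
            // record read back from the file.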
            var filename    = GetFilePathFor("chunk-000000.000000");
            var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
            var chunkBytes  = chunkHeader.AsByteArray();
            var buf         = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];

            Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
            File.WriteAllBytes(filename, buf);

            _checkpoint = new InMemoryCheckpoint(137);
            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint(), chunkSize: chunkHeader.ChunkSize));

            db.Open();

            var bytes = new byte[3994]; // 3994 data bytes gives a record size of exactly 4097; with 3993 (record size 4096) everything works fine!

            new Random().NextBytes(bytes);
            var writer = new TFChunkWriter(db);
            var record = new PrepareLogRecord(logPosition: 137,
                                              correlationId: _correlationId,
                                              eventId: _eventId,
                                              transactionPosition: 789,
                                              transactionOffset: 543,
                                              eventStreamId: "WorldEnding",
                                              expectedVersion: 1234,
                                              timeStamp: new DateTime(2012, 12, 21),
                                              flags: PrepareFlags.SingleWrite,
                                              eventType: "type",
                                              data: bytes,
                                              metadata: new byte[] { 0x07, 0x17 });

            long pos;

            Assert.IsTrue(writer.Write(record, out pos));
            writer.Close();
            db.Dispose();

            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
            using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
            {
                filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
                var reader = new BinaryReader(filestream);
                var read   = LogRecord.ReadFrom(reader);
                Assert.AreEqual(record, read);
            }
        }
        public void try_read_returns_record_when_record_bigger_than_internal_buffer()
        {
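            // Writes a prepare record with a 9000-byte payload (bigger than the chaser's internal
            // read buffer, per the test name) and verifies the chaser can still read it back.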
            var writerchk = new InMemoryCheckpoint(0);
            var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);

            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));

            db.Open();

            var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                                     correlationId: _correlationId,
                                                     eventId: _eventId,
                                                     transactionPosition: 0,
                                                     transactionOffset: 0,
                                                     eventStreamId: "WorldEnding",
                                                     expectedVersion: 1234,
                                                     timeStamp: new DateTime(2012, 12, 21),
                                                     flags: PrepareFlags.None,
                                                     eventType: "type",
                                                     data: new byte[9000],
                                                     metadata: new byte[] { 7, 17 });
            var writer = new TFChunkWriter(db);

            writer.Open();
            long pos;

            Assert.IsTrue(writer.Write(recordToWrite, out pos));
            writer.Close();

            writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix());

            var reader = new TFChunkChaser(db, writerchk, chaserchk, false);

            reader.Open();

            LogRecord record;
            var       readRecord = reader.TryReadNext(out record);

            reader.Close();

            Assert.IsTrue(readRecord);
            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
            Assert.AreEqual(recordToWrite, record);

            db.Close();
        }
        public override async Task TestFixtureSetUp()
        {
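            // Appends three prepare/commit pairs for "es-to-scavenge" to the first chunk, completes it,
            // advances the writer and chaser checkpoints to the chunk's end, runs the scavenger, and
            // keeps the scavenged chunk for the tests.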
            await base.TestFixtureSetUp();

            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0, chunkSize: 16 * 1024));
            _db.Open();

            var chunk = _db.Manager.GetChunkFor(0);

            _p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1    = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            _p2 = LogRecord.SingleWrite(_cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2    = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            _p3 = LogRecord.SingleWrite(_cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3    = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();
            _originalFileSize = chunk.FileSize;

            _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.WriterCheckpoint.Flush();
            _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.ChaserCheckpoint.Flush();

            var scavenger = new TFChunkScavenger(_db, new FakeTFScavengerLog(), new FakeTableIndex(),
                                                 new FakeReadIndex(x => x == "es-to-scavenge"));
            await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
        public void SetUp()
        {
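            // Opens a fresh database with 1024-byte chunks and writes a single commit record through the writer.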
            _writerCheckpoint = new InMemoryCheckpoint();
            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _writerCheckpoint, new InMemoryCheckpoint(), 1024));
            _db.Open();
            _writer = new TFChunkWriter(_db);
            _writer.Open();
            _record = new CommitLogRecord(logPosition: 0,
                                          correlationId: _eventId,
                                          transactionPosition: 4321,
                                          timeStamp: new DateTime(2012, 12, 21),
                                          firstEventNumber: 10);
            long newPos;

            _writer.Write(_record, out newPos);
            _writer.Flush();
        }
        public override void TestFixtureSetUp()
        {
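            // Appends RecordsCount records to the first 4096-byte chunk (the last one as an explicit
            // prepare), flushes the chunk, and advances the writer checkpoint to the final write position.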
            base.TestFixtureSetUp();

            _db = new TFChunkDb(
                TFChunkHelper.CreateDbConfig(PathName, 0, chunkSize: 4096));
            _db.Open();

            var chunk = _db.Manager.GetChunk(0);

            _records = new LogRecord[RecordsCount];
            _results = new RecordWriteResult[RecordsCount];

            for (int i = 0; i < _records.Length - 1; ++i)
            {
                _records[i] = LogRecord.SingleWrite(
                    i == 0 ? 0 : _results[i - 1].NewPosition,
                    Guid.NewGuid(),
                    Guid.NewGuid(),
                    "es1",
                    ExpectedVersion.Any,
                    "et1",
                    new byte[] { 0, 1, 2 },
                    new byte[] { 5, 7 });
                _results[i] = chunk.TryAppend(_records[i]);
            }

            _records[_records.Length - 1] = LogRecord.Prepare(
                _results[_records.Length - 1 - 1].NewPosition,
                Guid.NewGuid(),
                Guid.NewGuid(),
                _results[_records.Length - 1 - 1].NewPosition,
                0,
                "es1",
                ExpectedVersion.Any,
                PrepareFlags.Data,
                "et1",
                new byte[] { 0, 1, 2 },
                new byte[] { 5, 7 });
            _results[_records.Length - 1] = chunk.TryAppend(_records[_records.Length - 1]);

            chunk.Flush();
            _db.Config.WriterCheckpoint.Write(_results[RecordsCount - 1].NewPosition);
            _db.Config.WriterCheckpoint.Flush();
        }
        public void a_record_can_be_written()
        {
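            // Writes a single prepare record built via the log-format-specific record factory and
            // verifies the checkpoint advance and the record read back from the chunk file.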
            _checkpoint = new InMemoryCheckpoint(0);
            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));

            db.Open();
            var tf = new TFChunkWriter(db);

            tf.Open();

            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
            var eventTypeId   = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

            var record = LogRecord.Prepare(
                factory: recordFactory,
                logPosition: 0,
                correlationId: _correlationId,
                eventId: _eventId,
                transactionPos: 0,
                transactionOffset: 0,
                eventStreamId: streamId,
                expectedVersion: 1234,
                timeStamp: new DateTime(2012, 12, 21),
                flags: PrepareFlags.None,
                eventType: eventTypeId,
                data: new byte[] { 1, 2, 3, 4, 5 },
                metadata: new byte[] { 7, 17 });
            long tmp;

            tf.Write(record, out tmp);
            tf.Close();
            db.Dispose();

            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());
            using (var filestream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open, FileAccess.Read)) {
                filestream.Position = ChunkHeader.Size;

                var reader = new BinaryReader(filestream);
                reader.ReadInt32();
                var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
                Assert.AreEqual(record, read);
            }
        }
        public void try_read_returns_false_when_writer_checkpoint_is_zero()
        {
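            // With the writer checkpoint still at zero there is nothing to read, so TryReadNext returns false.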
            var writerchk = new InMemoryCheckpoint(0);
            var chaserchk = new InMemoryCheckpoint();
            var db        = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));

            db.Open();

            var chaser = new TFChunkChaser(db, writerchk, new InMemoryCheckpoint(), false);

            chaser.Open();

            ILogRecord record;

            Assert.IsFalse(chaser.TryReadNext(out record));

            chaser.Close();
            db.Dispose();
        }
        public void a_record_can_be_written()
        {
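            // Pre-creates a chunk file, opens the db with the checkpoint at offset 137, writes one
            // prepare record at that position, and verifies the checkpoint advance and the on-disk record.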
            var filename    = GetFilePathFor("chunk-000000.000000");
            var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, chunkId: Guid.NewGuid());
            var chunkBytes  = chunkHeader.AsByteArray();
            var bytes       = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];

            Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
            File.WriteAllBytes(filename, bytes);

            _checkpoint = new InMemoryCheckpoint(137);
            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));

            db.Open();
            var tf     = new TFChunkWriter(db);
            var record = new PrepareLogRecord(logPosition: _checkpoint.Read(),
                                              correlationId: _correlationId,
                                              eventId: _eventId,
                                              expectedVersion: 1234,
                                              transactionPosition: 0,
                                              transactionOffset: 0,
                                              eventStreamId: "WorldEnding",
                                              timeStamp: new DateTime(2012, 12, 21),
                                              flags: PrepareFlags.None,
                                              eventType: "type",
                                              data: new byte[] { 1, 2, 3, 4, 5 },
                                              metadata: new byte[] { 7, 17 });
            long tmp;

            tf.Write(record, out tmp);
            tf.Close();
            db.Dispose();

            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137,
                            _checkpoint.Read());     // 137 is the initial offset the checkpoint was created with
            using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
                filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
                var reader = new BinaryReader(filestream);
                var read   = LogRecord.ReadFrom(reader);
                Assert.AreEqual(record, read);
            }
        }
        public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_checksum()
        {
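            // When the writer and chaser checkpoints are equal (both 12) there is nothing new to read,
            // so TryReadNext returns false and the chaser checkpoint is left unchanged.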
            var writerchk = new InMemoryCheckpoint();
            var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
            var db        = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk));

            db.Open();
            writerchk.Write(12);
            writerchk.Flush();
            chaserchk.Write(12);
            chaserchk.Flush();

            var chaser = new TFChunkChaser(db, writerchk, chaserchk, false);

            chaser.Open();

            ILogRecord record;

            Assert.IsFalse(chaser.TryReadNext(out record));
            Assert.AreEqual(12, chaserchk.Read());

            chaser.Close();
            db.Dispose();
        }
        public void SetUp()
        {
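            // Opens a fresh database with 1024-byte chunks and writes a single prepare record through the writer.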
            _writerCheckpoint = new InMemoryCheckpoint();
            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _writerCheckpoint, new InMemoryCheckpoint(), 1024));
            _db.Open();
            _writer = new TFChunkWriter(_db);
            _writer.Open();
            _record = new PrepareLogRecord(logPosition: 0,
                                           eventId: _eventId,
                                           correlationId: _correlationId,
                                           transactionPosition: 0xDEAD,
                                           transactionOffset: 0xBEEF,
                                           eventStreamId: "WorldEnding",
                                           expectedVersion: 1234,
                                           timeStamp: new DateTime(2012, 12, 21),
                                           flags: PrepareFlags.SingleWrite,
                                           eventType: "type",
                                           data: new byte[] { 1, 2, 3, 4, 5 },
                                           metadata: new byte[] { 7, 17 });
            long newPos;

            _writer.Write(_record, out newPos);
            _writer.Flush();
        }
        public override void TestFixtureSetUp()
        {
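            // Spreads RecordsCount records across several 4096-byte chunks, completing the current chunk
            // and starting a new one after every third record, then advances the writer checkpoint past
            // the last successful write.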
            base.TestFixtureSetUp();

            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0, chunkSize: 4096));
            _db.Open();

            var chunk = _db.Manager.GetChunk(0);

            _records = new LogRecord[RecordsCount];
            _results = new RecordWriteResult[RecordsCount];

            var pos = 0;

            for (int i = 0; i < RecordsCount; ++i)
            {
                if (i > 0 && i % 3 == 0)
                {
                    pos = i / 3 * _db.Config.ChunkSize;
                    chunk.Complete();
                    chunk = _db.Manager.AddNewChunk();
                }

                _records[i] = LogRecord.SingleWrite(pos,
                                                    Guid.NewGuid(), Guid.NewGuid(), "es1", ExpectedVersion.Any, "et1",
                                                    new byte[1200], new byte[] { 5, 7 });
                _results[i] = chunk.TryAppend(_records[i]);

                pos += _records[i].GetSizeWithLengthPrefixAndSuffix();
            }

            chunk.Flush();
            _db.Config.WriterCheckpoint.Write((RecordsCount / 3) * _db.Config.ChunkSize +
                                              _results[RecordsCount - 1].NewPosition);
            _db.Config.WriterCheckpoint.Flush();
        }
        public void a_record_is_not_written_at_first_but_written_on_second_try()
        {
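            // The first record nearly fills the chunk, the second does not fit and is rejected, and a
            // smaller third record is then written into the next chunk and read back from its file.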
            var filename1   = GetFilePathFor("chunk-000000.000000");
            var filename2   = GetFilePathFor("chunk-000001.000000");
            var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
            var chunkBytes  = chunkHeader.AsByteArray();
            var bytes       = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];

            Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
            File.WriteAllBytes(filename1, bytes);

            _checkpoint = new InMemoryCheckpoint(0);
            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));

            db.Open();
            var  tf = new TFChunkWriter(db);
            long pos;

            var record1 = new PrepareLogRecord(logPosition: 0,
                                               correlationId: _correlationId,
                                               eventId: _eventId,
                                               expectedVersion: 1234,
                                               transactionPosition: 0,
                                               transactionOffset: 0,
                                               eventStreamId: "WorldEnding",
                                               timeStamp: new DateTime(2012, 12, 21),
                                               flags: PrepareFlags.None,
                                               eventType: "type",
                                               data: new byte[] { 1, 2, 3, 4, 5 },
                                               metadata: new byte[8000]);

            Assert.IsTrue(tf.Write(record1, out pos));             // almost fill up first chunk

            var record2 = new PrepareLogRecord(logPosition: pos,
                                               correlationId: _correlationId,
                                               eventId: _eventId,
                                               expectedVersion: 1234,
                                               transactionPosition: pos,
                                               transactionOffset: 0,
                                               eventStreamId: "WorldEnding",
                                               timeStamp: new DateTime(2012, 12, 21),
                                               flags: PrepareFlags.None,
                                               eventType: "type",
                                               data: new byte[] { 1, 2, 3, 4, 5 },
                                               metadata: new byte[8000]);

            Assert.IsFalse(tf.Write(record2, out pos));             // not enough space left in this chunk

            var record3 = new PrepareLogRecord(logPosition: pos,
                                               correlationId: _correlationId,
                                               eventId: _eventId,
                                               expectedVersion: 1234,
                                               transactionPosition: pos,
                                               transactionOffset: 0,
                                               eventStreamId: "WorldEnding",
                                               timeStamp: new DateTime(2012, 12, 21),
                                               flags: PrepareFlags.None,
                                               eventType: "type",
                                               data: new byte[] { 1, 2, 3, 4, 5 },
                                               metadata: new byte[2000]);

            Assert.IsTrue(tf.Write(record3, out pos));
            tf.Close();
            db.Dispose();

            Assert.AreEqual(record3.GetSizeWithLengthPrefixAndSuffix() + 10000, _checkpoint.Read());
            using (var filestream = File.Open(filename2, FileMode.Open, FileAccess.Read)) {
                filestream.Seek(ChunkHeader.Size + sizeof(int), SeekOrigin.Begin);
                var reader = new BinaryReader(filestream);
                var read   = LogRecord.ReadFrom(reader);
                Assert.AreEqual(record3, read);
            }
        }
        public void a_null_chaser_checksum_throws_argument_null_exception()
        {
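            // Passing a null chaser checkpoint to the TFChunkChaser constructor must throw.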
            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0));

            Assert.Throws<ArgumentNullException>(() => new TFChunkChaser(db, new InMemoryCheckpoint(), null));
        }