public void SetUp()
 {
     // Arrange: a fresh chunk db backed by in-memory checkpoints and 1024-byte
     // chunks, with a single prepare record written and flushed through a
     // TFChunkWriter so tests can read it back.
     _writerCheckpoint = new InMemoryCheckpoint();
     var config = new TFChunkDbConfig(PathName,
                                      new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                      1024,
                                      0,
                                      _writerCheckpoint,
                                      new InMemoryCheckpoint(),
                                      new InMemoryCheckpoint(-1),
                                      new InMemoryCheckpoint(-1));
     _db = new TFChunkDb(config);
     _db.Open();

     _writer = new TFChunkWriter(_db);
     _writer.Open();

     _record = new PrepareLogRecord(logPosition: 0,
                                    correlationId: _correlationId,
                                    eventId: _eventId,
                                    transactionPosition: 0xDEAD,
                                    transactionOffset: 0xBEEF,
                                    eventStreamId: "WorldEnding",
                                    expectedVersion: 1234,
                                    timeStamp: new DateTime(2012, 12, 21),
                                    flags: PrepareFlags.SingleWrite,
                                    eventType: "type",
                                    data: new byte[] { 1, 2, 3, 4, 5 },
                                    metadata: new byte[] { 7, 17 });

     long writtenPosition;
     _writer.Write(_record, out writtenPosition);
     _writer.Flush();
 }
 protected override void WriteTestScenario()
 {
     // Three prepares on the same stream, all passing the same value (0) for the
     // second argument — presumably the expected version, so this exercises
     // conflicting/duplicate writes; confirm against the fixture's asserts.
     WriteStreamCreated("ES");
     _prepare0 = WritePrepare("ES", 0);
     _prepare1 = WritePrepare("ES", 0);
     _prepare2 = WritePrepare("ES", 0);
 }
 public void Setup()
 {
     // Create a prepare record and append it to a brand-new 4096-byte chunk;
     // _result carries the append outcome for the tests to inspect.
     // NOTE(review): arguments are positional and this PrepareLogRecord overload
     // differs from the named-argument one used elsewhere — parameter meanings
     // follow this revision of the API.
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk = Core.TransactionLog.Chunks.TFChunk.CreateNew(_filename, 4096, 0, 0);
     _result = _chunk.TryAppend(_record);
 }
 protected override void WriteTestScenario()
 {
     // Three prepares for a not-yet-existing stream (expectedVersion: -1), then
     // a commit for the *second* prepare only — the first and third remain
     // uncommitted.
     _prepare0 = WritePrepare("ES", expectedVersion: -1);
     _prepare1 = WritePrepare("ES", expectedVersion: -1);
     _prepare2 = WritePrepare("ES", expectedVersion: -1);
     WriteCommit(_prepare1.LogPosition, "ES", eventNumber: 0);
 }
 public void Setup()
 {
     // The chunk is deliberately tiny (20 bytes), so the record cannot fit;
     // _written records whether TryAppend succeeded — presumably false here.
     // TODO(review): confirm the fixture asserts on append failure.
     var record = new PrepareLogRecord(15556, _corrId, _eventId, 15556, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                       PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk = TFChunk.CreateNew(_filename, 20, 0, 0);
     _written = _chunk.TryAppend(record).Success;
 }
 public void a_record_can_be_written()
 {
     // A pre-existing 10000-byte data file plus a checkpoint at 137 simulates a
     // partially-written transaction file; the new record must land at offset 137
     // and the checkpoint must advance by the record's full on-disk size.
     var dataFileName = Path.Combine(PathName, "prefix.tf0");
     File.WriteAllBytes(dataFileName, new byte[10000]);
     _checkpoint = new InMemoryCheckpoint(137);

     var writer = new MultifileTransactionFileWriter(
         new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, _checkpoint, new List<ICheckpoint>()));
     writer.Open();

     var prepare = new PrepareLogRecord(logPosition: 0,
                                        correlationId: _correlationId,
                                        eventId: _eventId,
                                        transactionPosition: 0,
                                        transactionOffset: 0,
                                        eventStreamId: "WorldEnding",
                                        expectedVersion: 1234,
                                        timeStamp: new DateTime(2012, 12, 21),
                                        flags: PrepareFlags.None,
                                        eventType: "type",
                                        data: new byte[] { 1, 2, 3, 4, 5 },
                                        metadata: new byte[] { 7, 17 });
     long newPosition;
     writer.Write(prepare, out newPosition);
     writer.Flush();
     writer.Close();

     // 137 is the fluff assigned to the beginning of the checkpoint.
     Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

     // Read the record back, skipping the 137-byte fluff and the length prefix.
     using (var dataStream = File.Open(dataFileName, FileMode.Open, FileAccess.Read))
     {
         dataStream.Seek(137 + sizeof(int), SeekOrigin.Begin);
         var reader = new BinaryReader(dataStream);
         var readRecord = LogRecord.ReadFrom(reader);
         Assert.AreEqual(prepare, readRecord);
     }
 }
 public void a_record_can_be_written()
 {
     // Start from a clean checkpoint; after Close the checkpoint must equal the
     // full on-disk size of the single record written, and the record must be
     // readable from the first data file.
     _checkpoint = new InMemoryCheckpoint(0);
     var writer = new MultifileTransactionFileWriter(
         new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, _checkpoint, new List<ICheckpoint>()));
     writer.Open();

     var prepare = new PrepareLogRecord(logPosition: 0,
                                        correlationId: _correlationId,
                                        eventId: _eventId,
                                        transactionPosition: 0,
                                        transactionOffset: 0,
                                        eventStreamId: "WorldEnding",
                                        expectedVersion: 1234,
                                        timeStamp: new DateTime(2012, 12, 21),
                                        flags: PrepareFlags.None,
                                        eventType: "type",
                                        data: new byte[] { 1, 2, 3, 4, 5 },
                                        metadata: new byte[] { 7, 17 });
     long newPosition;
     writer.Write(prepare, out newPosition);
     writer.Close();

     Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());

     using (var dataStream = File.Open(Path.Combine(PathName, "prefix.tf0"), FileMode.Open, FileAccess.Read))
     {
         var reader = new BinaryReader(dataStream);
         reader.ReadInt32(); // skip the record's length prefix
         var readRecord = LogRecord.ReadFrom(reader);
         Assert.AreEqual(prepare, readRecord);
     }
 }
 protected override void WriteTestScenario()
 {
     // NOTE(review): the commit points at _prepare0's log position but uses
     // eventNumber 1 — this looks like a deliberate mismatch scenario; confirm
     // against the fixture's asserts before "fixing" it.
     WriteStreamCreated("ES");
     _prepare0 = WritePrepare("ES", expectedVersion: 0);
     _prepare1 = WritePrepare("ES", expectedVersion: 1);
     _prepare2 = WritePrepare("ES", expectedVersion: 2);
     WriteCommit(_prepare0.LogPosition, "ES", eventNumber: 1);
 }
 public override void TestFixtureSetUp()
 {
     // Append a single positional-argument prepare record to a fresh 4096-byte
     // chunk; _result carries the append outcome for the tests.
     // NOTE(review): the trailing 'false' argument's meaning is not visible from
     // this file — it follows this revision's CreateNew overload; confirm.
     base.TestFixtureSetUp();
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, false);
     _result = _chunk.TryAppend(_record);
 }
 public StreamEventAppeared(int eventNumber, PrepareLogRecord prepareLogRecord, long commitPosition)
 {
     // Projects the fields of a prepare record (plus its commit position) into
     // this message's properties.
     // Fail fast with a descriptive exception instead of letting the first
     // property access below raise a bare NullReferenceException.
     if (prepareLogRecord == null)
         throw new ArgumentNullException("prepareLogRecord");

     EventStreamId = prepareLogRecord.EventStreamId;
     EventNumber = eventNumber;
     EventId = prepareLogRecord.EventId.ToByteArray();
     EventType = prepareLogRecord.EventType;
     Data = prepareLogRecord.Data;
     Metadata = prepareLogRecord.Metadata;
     PreparePosition = prepareLogRecord.LogPosition;
     CommitPosition = commitPosition;
 }
        public override void TestFixtureSetUp()
        {
            // Builds a single completed chunk containing three committed events in
            // stream "es-to-scavenge", then scavenges the db; the scavenged chunk
            // is kept in _scavengedChunk for the tests to inspect.
            base.TestFixtureSetUp();

            _db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                    new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                                    16 * 1024,
                                                    0,
                                                    new InMemoryCheckpoint(),
                                                    new InMemoryCheckpoint(),
                                                    new InMemoryCheckpoint(-1),
                                                    new InMemoryCheckpoint(-1)));
            _db.Open();

            var chunk = _db.Manager.GetChunkFor(0);

            // First event: prepare + commit at event number 0. Each record's
            // position is chained off the previous append's NewPosition.
            _p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                          new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            // Second event (event number 1).
            _p2 = LogRecord.SingleWrite(_cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            // Third event (event number 2).
            _p3 = LogRecord.SingleWrite(_cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();

            // Advance writer and chaser checkpoints past the completed chunk so the
            // scavenger sees it as fully written and chased.
            _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.WriterCheckpoint.Flush();
            _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.ChaserCheckpoint.Flush();

            // FakeReadIndex is parameterized with the predicate x => x == "es-to-scavenge";
            // presumably it reports that stream as scavengeable — confirm FakeReadIndex's
            // semantics before relying on this.
            var bus = new InMemoryBus("Bus");
            var ioDispatcher = new IODispatcher(bus, new PublishEnvelope(bus));
            var scavenger = new TFChunkScavenger(_db, ioDispatcher, new FakeTableIndex(),
                                                 new FakeReadIndex(x => x == "es-to-scavenge"), Guid.NewGuid(), "fakeNodeIp");
            scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
 public void Setup()
 {
     // Write one record into a new chunk, complete it, then re-open the
     // completed chunk from file (verifying its hash) and cache it in memory;
     // tests compare the cached chunk's behavior against the original.
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk = TFChunk.CreateNew(_filename, 4096, 0, 0);
     _result = _chunk.TryAppend(_record);
     _chunk.Flush();
     _chunk.Complete();
     _cachedChunk = TFChunk.FromCompletedFile(_filename, verifyHash: true);
     _cachedChunk.CacheInMemory();
 }
        private CommitLogRecord WriteDeleteCommit(PrepareLogRecord prepare)
        {
            // Commits the given delete-prepare at the current (non-flushed) writer
            // position, marking the stream deleted via EventNumber.DeletedStream.
            // Returns the commit record that was written.
            var commit = LogRecord.Commit(WriterChecksum.ReadNonFlushed(),
                                          prepare.CorrelationId,
                                          prepare.LogPosition,
                                          EventNumber.DeletedStream);
            long writtenPos;
            Assert.IsTrue(Writer.Write(commit, out writtenPos));
            return commit;
        }
 public void a_record_can_be_written()
 {
     // The checkpoint sits at 40 in a 50-byte file, leaving only 10 bytes in the
     // first file — the record is larger than that, so the write must spill into
     // the following file(s). After Close, stitch the bytes from all three files
     // back together and verify the record round-trips.
     var filename = Path.Combine(PathName, "prefix.tf0");
     var secondfilename = Path.Combine(PathName, "prefix.tf1");
     var thirdfilename = Path.Combine(PathName, "prefix.tf2");
     File.WriteAllBytes(filename, new byte[50]);
     _checkpoint = new InMemoryCheckpoint(40);
     var tf = new MultifileTransactionFileWriter(
         new TransactionFileDatabaseConfig(PathName, "prefix.tf", 50, _checkpoint, new ICheckpoint[0]));
     tf.Open();
     var record = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       transactionPosition: 0,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       expectedVersion: 1234,
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[] {7, 17});
     long tmp;
     tf.Write(record, out tmp);
     tf.Close();
     Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 40, _checkpoint.Read()); //+orginal checkpoint position
     Assert.IsTrue(File.Exists(secondfilename));
     // Reassemble the record: 10 bytes from the tail of file 0, then 50 bytes
     // each from files 1 and 2 (the read lengths/offsets mirror how the writer
     // split the record across file boundaries).
     var stream = new MemoryStream();
     var buffer = new byte[256];
     using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
     {
         filestream.Seek(40, SeekOrigin.Begin);
         filestream.Read(buffer, 0, 10);
         stream.Write(buffer, 0, 10);
     }
     using (var filestream = File.Open(secondfilename, FileMode.Open, FileAccess.Read))
     {
         filestream.Seek(0, SeekOrigin.Begin);
         filestream.Read(buffer, 0, 50);
         stream.Write(buffer, 0, 50);
     }
     using (var filestream = File.Open(thirdfilename, FileMode.Open, FileAccess.Read))
     {
         filestream.Seek(0, SeekOrigin.Begin);
         filestream.Read(buffer, 0, 50);
         stream.Write(buffer, 0, 50);
     }
     // Skip the length prefix, then decode and compare.
     stream.Seek(0 + sizeof(int), SeekOrigin.Begin);
     var reader = new BinaryReader(stream);
     var read = LogRecord.ReadFrom(reader);
     Assert.AreEqual(record, read);
 }
 public override void TestFixtureSetUp()
 {
     // Write one record, complete the chunk, then re-open it as a completed
     // chunk and cache + immediately uncache it, so the tests exercise the
     // uncached read path against _uncachedChunk.
     // NOTE(review): this CreateNew overload takes 4 args (others in this suite
     // take 5) — it follows this revision's API; confirm the 'false' flag.
     base.TestFixtureSetUp();
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk = TFChunk.CreateNew(Filename, 4096, 0, false);
     _result = _chunk.TryAppend(_record);
     _chunk.Flush();
     _chunk.Complete();
     _uncachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true);
     _uncachedChunk.CacheInMemory();
     _uncachedChunk.UnCacheFromMemory();
 }
        public void a_record_can_be_written()
        {
            // Lay down a single physical chunk file (header + zeroed data area +
            // footer) so the db can open it as an existing in-progress chunk, then
            // write one record and verify the checkpoint advance and the on-disk bytes.
            // (Leftover Console.WriteLine debug output removed — it only polluted
            // the test run log.)
            var filename = Path.Combine(PathName, "prefix.tf0");
            var chunkHeader = new ChunkHeader(1, 10000, 0, 0, 0);
            var chunkBytes = chunkHeader.AsByteArray();
            var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
            Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
            File.WriteAllBytes(filename, buf);

            // Writer checkpoint starts at 137 to simulate previously-written fluff.
            _checkpoint = new InMemoryCheckpoint(137);
            var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                       new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                                       chunkHeader.ChunkSize,
                                                       0,
                                                       _checkpoint,
                                                       new ICheckpoint[0]));
            db.OpenVerifyAndClean();

            var writer = new TFChunkWriter(db);
            var record = new PrepareLogRecord(logPosition: 0,
                                              correlationId: _correlationId,
                                              eventId: _eventId,
                                              transactionPosition: 0,
                                              eventStreamId: "WorldEnding",
                                              expectedVersion: 1234,
                                              timeStamp: new DateTime(2012, 12, 21),
                                              flags: PrepareFlags.None,
                                              eventType: "type",
                                              data: new byte[8000],
                                              metadata: new byte[] { 7, 17 });

            long pos;
            Assert.IsTrue(writer.Write(record, out pos));
            writer.Close();
            db.Dispose();

            // Checkpoint advances by exactly the record's on-disk size.
            Assert.AreEqual(record.GetSizeWithLengthPrefix() + 137, _checkpoint.Read());
            using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
            {
                // Skip the chunk header, the 137-byte fluff, and the length prefix.
                filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
                var reader = new BinaryReader(filestream);
                var read = LogRecord.ReadFrom(reader);
                Assert.AreEqual(record, read);
            }
        }
        public void a_record_can_be_written()
        {
            // Pre-create a chunk file on disk (header + data area + footer) so the
            // db opens it as the active chunk, then write one large record and
            // verify both the checkpoint advance and the bytes on disk.
            var filename = GetFilePathFor("chunk-000000.000000");
            var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
            var headerBytes = chunkHeader.AsByteArray();
            var fileImage = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
            Buffer.BlockCopy(headerBytes, 0, fileImage, 0, headerBytes.Length);
            File.WriteAllBytes(filename, fileImage);

            _checkpoint = new InMemoryCheckpoint(137);
            var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                       new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                                       chunkHeader.ChunkSize,
                                                       0,
                                                       _checkpoint,
                                                       new InMemoryCheckpoint(),
                                                       new InMemoryCheckpoint(-1),
                                                       new InMemoryCheckpoint(-1)));
            db.Open();

            // 3994 payload bytes give exactly a 4097-byte record; with 3993
            // (record size 4096) everything works fine.
            var payload = new byte[3994];
            new Random().NextBytes(payload);

            var chunkWriter = new TFChunkWriter(db);
            var prepare = new PrepareLogRecord(logPosition: 137,
                                               correlationId: _correlationId,
                                               eventId: _eventId,
                                               transactionPosition: 789,
                                               transactionOffset: 543,
                                               eventStreamId: "WorldEnding",
                                               expectedVersion: 1234,
                                               timeStamp: new DateTime(2012, 12, 21),
                                               flags: PrepareFlags.SingleWrite,
                                               eventType: "type",
                                               data: payload,
                                               metadata: new byte[] { 0x07, 0x17 });

            long writtenPos;
            Assert.IsTrue(chunkWriter.Write(prepare, out writtenPos));
            chunkWriter.Close();
            db.Dispose();

            Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
            using (var fileStream = File.Open(filename, FileMode.Open, FileAccess.Read))
            {
                // Skip the chunk header, the 137-byte offset, and the length prefix.
                fileStream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
                var binaryReader = new BinaryReader(fileStream);
                var readRecord = LogRecord.ReadFrom(binaryReader);
                Assert.AreEqual(prepare, readRecord);
            }
        }
        public void try_read_returns_record_when_writerchecksum_ahead()
        {
            // Hand-craft a chunk file containing one record, set the writer
            // checkpoint to 128 — ahead of the chaser checkpoint (0) — and verify
            // the chaser still reads the record and advances its checkpoint by the
            // record's full on-disk size.
            var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                                     correlationId: _correlationId,
                                                     eventId: _eventId,
                                                     transactionPosition: 0,
                                                     transactionOffset: 0,
                                                     eventStreamId: "WorldEnding",
                                                     expectedVersion: 1234,
                                                     timeStamp: new DateTime(2012, 12, 21),
                                                     flags: PrepareFlags.None,
                                                     eventType: "type",
                                                     data: new byte[] { 1, 2, 3, 4, 5 },
                                                     metadata: new byte[] { 7, 17 });

            // Write a valid chunk header followed by the record into a file sized
            // like a real chunk (header + 10000-byte data area + footer).
            using (var fs = new FileStream(GetFilePathFor("chunk-000000.000000"), FileMode.CreateNew, FileAccess.Write))
            {
                fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
                var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid()).AsByteArray();
                var writer = new BinaryWriter(fs);
                writer.Write(chunkHeader);
                recordToWrite.WriteWithLengthPrefixAndSuffixTo(writer);
                fs.Close();
            }

            // NOTE(review): 128 is presumably at or beyond the end of the written
            // record — confirm against the record's actual serialized size.
            var writerchk = new InMemoryCheckpoint(128);
            var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
            var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                       new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                                       10000,
                                                       0,
                                                       writerchk,
                                                       chaserchk,
                                                       new InMemoryCheckpoint(-1),
                                                       new InMemoryCheckpoint(-1)));
            db.Open();

            var chaser = new TFChunkChaser(db, writerchk, chaserchk);
            chaser.Open();

            LogRecord record;
            var recordRead = chaser.TryReadNext(out record);
            chaser.Close();

            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
            Assert.IsTrue(recordRead);
            Assert.AreEqual(recordToWrite, record);

            db.Close();
        }
        protected override void WriteTestScenario()
        {
            // Two prepares on stream "test1" followed by their commits (event
            // numbers 0 and 1). PrepareLogRecord arguments are positional and
            // follow this revision's overload; the second prepare's positions are
            // chained off the first write's returned position.
            _id1 = Guid.NewGuid();
            _id2 = Guid.NewGuid();

            long pos1, pos2, pos3, pos4;
            _prepare1 = new PrepareLogRecord(0, _id1, _id1, 0, 0, "test1", ExpectedVersion.NoStream, DateTime.UtcNow,
                                             PrepareFlags.SingleWrite, "type", new byte[0], new byte[0]);
            Writer.Write(_prepare1, out pos1);
            _prepare2 = new PrepareLogRecord(pos1, _id2, _id2, pos1, 0, "test1", 0, DateTime.UtcNow,
                                             PrepareFlags.SingleWrite, "type", new byte[0], new byte[0]);
            Writer.Write(_prepare2, out pos2);
            Writer.Write(new CommitLogRecord(pos2, _id1, 0, DateTime.UtcNow, 0), out pos3);
            Writer.Write(new CommitLogRecord(pos3, _id2, pos1, DateTime.UtcNow, 1), out pos4);
        }
        public void Setup()
        {
            // Append two records to one chunk and remember each append's success
            // flag and OldPosition (presumably the position at which the record
            // was placed — confirm TryAppend's contract), then flush.
            _prepare1 = new PrepareLogRecord(0, _corrId, _eventId, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                             PrepareFlags.None, "Foo", new byte[12], new byte[15]);
            _prepare2 = new PrepareLogRecord(0, _corrId, _eventId, 0, "test2", 2, new DateTime(2000, 1, 1, 12, 0, 0),
                                             PrepareFlags.None, "Foo2", new byte[12], new byte[15]);

            _chunk = TFChunk.CreateNew(_filename, 4096, 0, 0);
            var r1 = _chunk.TryAppend(_prepare1);
            _written1 = r1.Success;
            _position1 = r1.OldPosition;
            var r2 = _chunk.TryAppend(_prepare2);
            _written2 = r2.Success;
            _position2 = r2.OldPosition;
            _chunk.Flush();
        }
        public void a_record_can_be_written()
        {
            // Pre-create a full chunk file (header + 10000-byte data area + footer)
            // with the writer checkpoint at 137, write one record through a
            // TFChunkWriter, and verify the checkpoint advance and the on-disk bytes.
            var filename = GetFilePathFor("prefix.tf0");
            var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false);
            var chunkBytes = chunkHeader.AsByteArray();
            var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
            Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
            File.WriteAllBytes(filename, bytes);

            _checkpoint = new InMemoryCheckpoint(137);
            var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                       new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                                       10000,
                                                       0,
                                                       _checkpoint,
                                                       new InMemoryCheckpoint(),
                                                       new ICheckpoint[0]));
            db.OpenVerifyAndClean();
            var tf = new TFChunkWriter(db);
            var record = new PrepareLogRecord(logPosition: 0,
                                              correlationId: _correlationId,
                                              eventId: _eventId,
                                              expectedVersion: 1234,
                                              transactionPosition: 0,
                                              transactionOffset: 0,
                                              eventStreamId: "WorldEnding",
                                              timeStamp: new DateTime(2012, 12, 21),
                                              flags: PrepareFlags.None,
                                              eventType: "type",
                                              data: new byte[] { 1, 2, 3, 4, 5 },
                                              metadata: new byte[] { 7, 17 });
            long tmp;
            tf.Write(record, out tmp);
            // no explicit Flush here — presumably Close flushes; confirm TFChunkWriter's contract
            //tf.Flush();
            tf.Close();
            db.Dispose();

            Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read()); //137 is fluff assigned to beginning of checkpoint
            // Read the record back: skip chunk header, the 137-byte fluff, and the
            // record's length prefix.
            using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read))
            {
                filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
                var reader = new BinaryReader(filestream);
                var read = LogRecord.ReadFrom(reader);
                Assert.AreEqual(record, read);
            }
        }
        protected override void WriteTestScenario()
        {
            // Fill chunk 1 with three ~3000-byte events, chunk 2 with two more plus
            // a deletion of "ES" (prepare + commit), then start "ES2" in chunk 3 and
            // scavenge without completing the last chunk or merging chunks.
            WriteSingleEvent("ES", 0, new string('.', 3000)); // chunk 1
            WriteSingleEvent("ES", 1, new string('.', 3000));
            WriteSingleEvent("ES", 2, new string('.', 3000));

            WriteSingleEvent("ES", 3, new string('.', 3000), retryOnFail: true); // chunk 2
            WriteSingleEvent("ES", 4, new string('.', 3000));

            // Delete stream "ES": the delete-commit marks it with EventNumber.DeletedStream.
            _event7prepare = WriteDeletePrepare("ES");
            _event7commit = WriteDeleteCommit(_event7prepare);
            _event7 = new EventRecord(EventNumber.DeletedStream, _event7prepare);

            _event9 = WriteSingleEvent("ES2", 0, new string('.', 5000), retryOnFail: true); //chunk 3

            Scavenge(completeLast: false, mergeChunks: false);
        }
        public override void TestFixtureSetUp()
        {
            // Builds a completed chunk with three committed events in stream
            // "es-to-scavenge" and scavenges it (older scavenger API: no dispatcher
            // or table index, alwaysKeepScavenged forces the scavenged chunk to be
            // kept). The result is exposed via _scavengedChunk.
            base.TestFixtureSetUp();

            _db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                    new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                                    16 * 1024,
                                                    0,
                                                    new InMemoryCheckpoint(),
                                                    new InMemoryCheckpoint(),
                                                    new ICheckpoint[0]));
            _db.OpenVerifyAndClean();

            var chunk = _db.Manager.GetChunk(0);

            // First event: prepare + commit at event number 0; subsequent records
            // chain their positions off the previous append's NewPosition.
            _p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                          new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1 = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            // Second event (event number 1).
            _p2 = LogRecord.SingleWrite(_cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2 = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            // Third event (event number 2).
            _p3 = LogRecord.SingleWrite(_cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3 = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();

            // FakeReadIndex is parameterized with x => x == "es-to-scavenge";
            // presumably that marks the stream as scavengeable — confirm its semantics.
            var scavenger = new TFChunkScavenger(_db, new FakeReadIndex(x => x == "es-to-scavenge"));
            scavenger.Scavenge(alwaysKeepScavenged: true);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
        public override void TestFixtureSetUp()
        {
            // Append two prepares back-to-back: the second's first positional
            // argument is the NewPosition returned by the first append. Success
            // flags and OldPosition values are kept for the tests, then the chunk
            // is flushed.
            base.TestFixtureSetUp();
            _chunk = TFChunk.CreateNew(Filename, 4096, 0, 0, false);

            _prepare1 = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                             PrepareFlags.None, "Foo", new byte[12], new byte[15]);
            var r1 = _chunk.TryAppend(_prepare1);
            _written1 = r1.Success;
            _position1 = r1.OldPosition;

            _prepare2 = new PrepareLogRecord(r1.NewPosition, _corrId, _eventId, 0, 0, "test2", 2, new DateTime(2000, 1, 1, 12, 0, 0),
                                             PrepareFlags.None, "Foo2", new byte[12], new byte[15]);
            var r2 = _chunk.TryAppend(_prepare2);
            _written2 = r2.Success;
            _position2 = r2.OldPosition;
            _chunk.Flush();
        }
        public void can_read_a_record_straddling_multiple_files()
        {
            // The reader checkpoint sits 10 bytes before the end of file 0 (9990 of
            // 10000), so the next record straddles the file boundary; the chaser
            // must reassemble it and advance its checkpoint by the full record size.
            var writerchk = new InMemoryCheckpoint(20020);
            var readerchk = new InMemoryCheckpoint("reader", 9990);
            var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new[] {readerchk});

            var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                                     correlationId: _correlationId,
                                                     eventId: _eventId,
                                                     transactionPosition: 0,
                                                     transactionOffset: 0,
                                                     eventStreamId: "WorldEnding",
                                                     expectedVersion: 1234,
                                                     timeStamp: new DateTime(2012, 12, 21),
                                                     flags: PrepareFlags.None,
                                                     eventType: "type",
                                                     data: new byte[] { 1, 2, 3, 4, 5 },
                                                     metadata: new byte[] {7, 17});

            // Serialize the record once, then split its bytes by hand across two
            // files: the first 10 bytes at the tail of file 0, the rest at the head
            // of file 1.
            var serializationStream = new MemoryStream();
            var recordWriter = new BinaryWriter(serializationStream);
            recordToWrite.WriteWithLengthPrefixAndSuffixTo(recordWriter);
            var serialized = serializationStream.GetBuffer();

            using (var firstFile = new FileStream(config.FileNamingStrategy.GetFilenameFor(0), FileMode.CreateNew, FileAccess.Write))
            {
                firstFile.Seek(9990, SeekOrigin.Begin);
                firstFile.Write(serialized, 0, 10);
                firstFile.Close();
            }
            using (var secondFile = new FileStream(config.FileNamingStrategy.GetFilenameFor(1), FileMode.CreateNew, FileAccess.Write))
            {
                secondFile.Seek(0, SeekOrigin.Begin);
                secondFile.Write(serialized, 10, recordToWrite.GetSizeWithLengthPrefixAndSuffix() - 10);
                secondFile.Close();
            }

            var chaser = new MultifileTransactionFileChaser(config, readerchk);
            chaser.Open();
            LogRecord readRecord;
            var wasRead = chaser.TryReadNext(out readRecord);
            chaser.Close();

            Assert.IsTrue(wasRead);
            Assert.That(recordToWrite, Is.EqualTo(readRecord));
            Assert.AreEqual(9990 + recordToWrite.GetSizeWithLengthPrefixAndSuffix(), readerchk.Read());
        }
        public void a_record_can_be_written()
        {
            // Arrange: fresh single-writer chunk database with a zeroed checkpoint.
            _checkpoint = new InMemoryCheckpoint(0);
            var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                       new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                                       1000,
                                                       0,
                                                       _checkpoint,
                                                       new InMemoryCheckpoint(),
                                                       new InMemoryCheckpoint(-1),
                                                       new InMemoryCheckpoint(-1)));
            db.Open();
            var writer = new TFChunkWriter(db);
            writer.Open();

            var prepare = new PrepareLogRecord(logPosition: 0,
                                               correlationId: _correlationId,
                                               eventId: _eventId,
                                               transactionPosition: 0,
                                               transactionOffset: 0,
                                               eventStreamId: "WorldEnding",
                                               expectedVersion: 1234,
                                               timeStamp: new DateTime(2012, 12, 21),
                                               flags: PrepareFlags.None,
                                               eventType: "type",
                                               data: new byte[] { 1, 2, 3, 4, 5 },
                                               metadata: new byte[] { 7, 17 });
            long newPosition;
            writer.Write(prepare, out newPosition);
            writer.Close();
            db.Dispose();

            // The checkpoint must have advanced by exactly one framed record.
            Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix(), _checkpoint.Read());

            // Read the record back from disk, skipping the chunk header and length prefix.
            using (var chunkStream = File.Open(GetFilePathFor("chunk-000000.000000"), FileMode.Open, FileAccess.Read))
            {
                chunkStream.Position = ChunkHeader.Size;

                var binaryReader = new BinaryReader(chunkStream);
                binaryReader.ReadInt32(); // length prefix
                var roundTripped = LogRecord.ReadFrom(binaryReader);
                Assert.AreEqual(prepare, roundTripped);
            }
        }
        // Lays events out across three chunks (the ~3000/5000-char payloads force chunk
        // rollover at the marked points), hard-deletes stream "ES", creates a second
        // stream, then scavenges without completing the last chunk.
        protected override void WriteTestScenario()
        {
            _event1 = WriteStreamCreated("ES");                                            // chunk 1

            _event2 = WriteSingleEvent("ES", 1, new string('.', 3000));
            _event3 = WriteSingleEvent("ES", 2, new string('.', 3000));
            _event4 = WriteSingleEvent("ES", 3, new string('.', 3000));

            _event5 = WriteSingleEvent("ES", 4, new string('.', 3000), retryOnFail: true); // chunk 2
            _event6 = WriteSingleEvent("ES", 5, new string('.', 3000));

            // Hard delete of "ES": prepare + commit, then the synthetic deleted-stream event record.
            _event7prepare = WriteDeletePrepare("ES");
            _event7commit = WriteDeleteCommit(_event7prepare);
            _event7 = new EventRecord(EventNumber.DeletedStream, _event7prepare);

            _event8 = WriteStreamCreated("ES2");
            _event9 = WriteSingleEvent("ES2", 1, new string('.', 5000), retryOnFail: true); //chunk 3

            // Scavenge with the last chunk left open.
            Scavenge(completeLast: false);
        }
// Beispiel #28 (stray listing artifact from the example scraper; not part of the code)

        
        // Creates a fresh chunk and appends two prepares back to back; the second
        // starts at the position where the first append ended.
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();
            _chunk = TFChunkHelper.CreateNewChunk(Filename);

            _prepare1 = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                             PrepareFlags.None, "Foo", new byte[12], new byte[15]);
            var firstAppend = _chunk.TryAppend(_prepare1);
            _written1 = firstAppend.Success;
            _position1 = firstAppend.OldPosition;

            _prepare2 = new PrepareLogRecord(firstAppend.NewPosition, _corrId, _eventId, 0, 0, "test2", 2,
                                             new DateTime(2000, 1, 1, 12, 0, 0),
                                             PrepareFlags.None, "Foo2", new byte[12], new byte[15]);
            var secondAppend = _chunk.TryAppend(_prepare2);
            _written2 = secondAppend.Success;
            _position2 = secondAppend.OldPosition;

            _chunk.Flush();
        }
// Beispiel #30 (stray listing artifact from the example scraper; not part of the code)
        public void try_read_returns_record_when_writerchecksum_equal()
        {
            // A chaser may consume a record as soon as the writer checkpoint covers it exactly.
            var writerCheckpoint = new InMemoryCheckpoint(0);
            var readerCheckpoint = new InMemoryCheckpoint("reader", 0);
            var config = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerCheckpoint,
                                                           new List<ICheckpoint> { readerCheckpoint });
            var expected = new PrepareLogRecord(logPosition: 0,
                                                correlationId: _correlationId,
                                                eventId: _eventId,
                                                transactionPosition: 0,
                                                eventStreamId: "WorldEnding",
                                                expectedVersion: 1234,
                                                timeStamp: new DateTime(2012, 12, 21),
                                                flags: PrepareFlags.None,
                                                eventType: "type",
                                                data: new byte[] { 1, 2, 3, 4, 5 },
                                                metadata: new byte[] { 7, 17 });

            // Write one framed record directly into the first transaction file.
            using (var fileStream = new FileStream(Path.Combine(PathName, "prefix.tf0"), FileMode.CreateNew, FileAccess.Write))
            {
                expected.WriteWithLengthPrefixTo(new BinaryWriter(fileStream));
                fileStream.Close();
            }
            // Move the writer checkpoint to the exact end of that record.
            writerCheckpoint.Write(expected.GetSizeWithLengthPrefix());

            var chaser = new MultifileTransactionFileChaser(config, "reader");
            chaser.Open();
            LogRecord actual = null;
            var wasRead = chaser.TryReadNext(out actual);
            chaser.Close();

            Assert.IsTrue(wasRead);
            Assert.AreEqual(actual.GetSizeWithLengthPrefix(), readerCheckpoint.Read());
            Assert.AreEqual(expected, actual);
        }
        // Writes and flushes a single prepare so downstream assertions can observe it.
        public override void When()
        {
            // Fresh identifiers per run so records are unique across test executions.
            _eventId = Guid.NewGuid();
            _transactionId = Guid.NewGuid();

            var prepare = new PrepareLogRecord(logPosition: 0,
                                               eventId: _eventId,
                                               correlationId: _transactionId,
                                               transactionPosition: 0xDEAD,
                                               transactionOffset: 0xBEEF,
                                               eventStreamId: "WorldEnding",
                                               expectedVersion: 1234,
                                               timeStamp: new DateTime(2012, 12, 21),
                                               flags: PrepareFlags.SingleWrite,
                                               eventType: "type",
                                               data: new byte[] { 1, 2, 3, 4, 5 },
                                               metadata: new byte[] { 7, 17 });

            // The write must succeed; flush so the record is visible to readers.
            Assert.True(Writer.Write(prepare, out _));
            Writer.Flush();
        }
// Beispiel #32 (stray listing artifact from the example scraper; not part of the code)
        // Reads the stream's metadata by fetching the latest event of its metastream and
        // deserializing it from JSON. Returns StreamMetadata.Empty when there is no
        // metastream, when the metadata record carries no JSON payload, or when parsing fails.
        private StreamMetadata GetStreamMetadataUncached(TFReaderLease reader, string streamId)
        {
            var metastreamId    = SystemStreams.MetastreamOf(streamId);
            var metaEventNumber = GetStreamLastEventNumberCached(reader, metastreamId);

            // No metastream, or the metastream itself was deleted => no metadata.
            if (metaEventNumber == ExpectedVersion.NoStream || metaEventNumber == EventNumber.DeletedStream)
            {
                return(StreamMetadata.Empty);
            }

            PrepareLogRecord prepare = ReadPrepareInternal(reader, metastreamId, metaEventNumber);

            if (prepare == null)
            {
                // The index claims the event exists; a missing prepare means the log and
                // index disagree, which indicates corruption.
                throw new Exception(string.Format("ReadPrepareInternal could not find metaevent #{0} on metastream '{1}'. "
                                                  + "That should never happen.", metaEventNumber, metastreamId));
            }

            // Metadata must be a non-empty JSON payload; anything else is treated as "no metadata".
            if (prepare.Data.Length == 0 || prepare.Flags.HasNoneOf(PrepareFlags.IsJson))
            {
                return(StreamMetadata.Empty);
            }

            try
            {
                var metadata = StreamMetadata.FromJsonBytes(prepare.Data);
                // Compatibility shim for V0 log records: a TruncateBefore of int.MaxValue is
                // rewritten to EventNumber.DeletedStream.
                if (prepare.Version == LogRecordVersion.LogRecordV0 && metadata.TruncateBefore == int.MaxValue)
                {
                    metadata = new StreamMetadata(metadata.MaxCount, metadata.MaxAge, EventNumber.DeletedStream,
                                                  metadata.TempStream, metadata.CacheControl, metadata.Acl);
                }
                return(metadata);
            }
            catch (Exception)
            {
                // Malformed metadata JSON is deliberately tolerated: fall back to empty metadata.
                return(StreamMetadata.Empty);
            }
        }
// Beispiel #34 (stray listing artifact from the example scraper; not part of the code)
        // Validates that the transaction starting at transactionPosition can be committed
        // at commitPosition. Resolves the stream and expected version from the first
        // prepare, then delegates the idempotency decision to CheckCommit.
        public CommitCheckResult CheckCommitStartingAt(long transactionPosition, long commitPosition)
        {
            string streamId;
            long expectedVersion;

            using (var reader = _indexBackend.BorrowReader())
            {
                try
                {
                    var firstPrepare = GetPrepare(reader, transactionPosition);
                    if (firstPrepare == null)
                    {
                        Log.Error("Could not read first prepare of to-be-committed transaction. "
                                  + "Transaction pos: {transactionPosition}, commit pos: {commitPosition}.",
                                  transactionPosition, commitPosition);
                        var message = String.Format("Could not read first prepare of to-be-committed transaction. "
                                                    + "Transaction pos: {0}, commit pos: {1}.",
                                                    transactionPosition, commitPosition);
                        throw new InvalidOperationException(message);
                    }
                    streamId = firstPrepare.EventStreamId;
                    expectedVersion = firstPrepare.ExpectedVersion;
                }
                catch (InvalidOperationException)
                {
                    // Unreadable first prepare => the whole transaction is invalid.
                    return new CommitCheckResult(CommitDecision.InvalidTransaction, string.Empty, -1, -1, -1, false);
                }
            }

            // Prepares without data carry nothing for idempotency checks and are skipped;
            // deletes must be considered, otherwise every delete would look idempotent.
            var eventIds = GetTransactionPrepares(transactionPosition, commitPosition)
                .Where(p => p.Flags.HasAnyOf(PrepareFlags.Data | PrepareFlags.StreamDelete))
                .Select(p => p.EventId);

            return CheckCommit(streamId, expectedVersion, eventIds);
        }
// Beispiel #35 (stray listing artifact from the example scraper; not part of the code)
        // Interleaves two explicit V0 transactions on two streams (begin, alternating
        // events, end), commits them in reverse order of their begin records, completes
        // the chunk, writes a filler event into a fresh chunk so checkpoints stay valid,
        // then scavenges with chunk merging.
        protected override void WriteTestScenario()
        {
            // One transaction-begin record per stream.
            var t1 = WriteTransactionBeginV0(Guid.NewGuid(), WriterCheckpoint.ReadNonFlushed(), _streamIdOne,
                                             ExpectedVersion.NoStream);
            var t2 = WriteTransactionBeginV0(Guid.NewGuid(), WriterCheckpoint.ReadNonFlushed(), _streamIdTwo,
                                             ExpectedVersion.NoStream);

            // Events of the two transactions deliberately interleave in the log.
            _p1 = WriteTransactionEventV0(t1.CorrelationId, WriterCheckpoint.ReadNonFlushed(), t1.LogPosition, 0,
                                          t1.EventStreamId, 0, "es1", PrepareFlags.Data);
            _p2 = WriteTransactionEventV0(t2.CorrelationId, WriterCheckpoint.ReadNonFlushed(), t2.LogPosition, 0,
                                          t2.EventStreamId, 0, "abc1", PrepareFlags.Data);
            _p3 = WriteTransactionEventV0(t1.CorrelationId, WriterCheckpoint.ReadNonFlushed(), t1.LogPosition, 1,
                                          t1.EventStreamId, 1, "es1", PrepareFlags.Data);
            _p4 = WriteTransactionEventV0(t2.CorrelationId, WriterCheckpoint.ReadNonFlushed(), t2.LogPosition, 1,
                                          t2.EventStreamId, 1, "abc1", PrepareFlags.Data);
            _p5 = WriteTransactionEventV0(t1.CorrelationId, WriterCheckpoint.ReadNonFlushed(), t1.LogPosition, 2,
                                          t1.EventStreamId, 2, "es1", PrepareFlags.Data);

            WriteTransactionEndV0(t2.CorrelationId, WriterCheckpoint.ReadNonFlushed(), t2.TransactionPosition,
                                  t2.EventStreamId);
            WriteTransactionEndV0(t1.CorrelationId, WriterCheckpoint.ReadNonFlushed(), t1.TransactionPosition,
                                  t1.EventStreamId);

            // Commits land in the opposite order of the begins: t2 first, then t1.
            _t2CommitPos = WriteCommitV0(t2.CorrelationId, WriterCheckpoint.ReadNonFlushed(), t2.TransactionPosition,
                                         t2.EventStreamId, 0, out _postCommitPos);
            _t1CommitPos = WriteCommitV0(t1.CorrelationId, WriterCheckpoint.ReadNonFlushed(), t1.TransactionPosition,
                                         t1.EventStreamId, 0, out _postCommitPos);

            Writer.CompleteChunk();

            // Need to have a second chunk as otherwise the checkpoints will be off
            _random1 = WriteSingleEventWithLogVersion0(Guid.NewGuid(), "random-stream",
                                                       WriterCheckpoint.ReadNonFlushed(), 0);

            Scavenge(completeLast: false, mergeChunks: true);
        }
// Beispiel #36 (stray listing artifact from the example scraper; not part of the code)
        public void a_record_can_be_written()
        {
            var filename = Path.Combine(PathName, "prefix.tf0");
            File.WriteAllBytes(filename, new byte[10000]);

            // The checkpoint deliberately starts at a non-zero offset inside the file.
            _checkpoint = new InMemoryCheckpoint(137);
            var writer = new MultifileTransactionFileWriter(
                new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, _checkpoint, new List<ICheckpoint>()));
            writer.Open();

            var prepare = new PrepareLogRecord(logPosition: 0,
                                               correlationId: _correlationId,
                                               eventId: _eventId,
                                               expectedVersion: 1234,
                                               transactionPosition: 0,
                                               transactionOffset: 0,
                                               eventStreamId: "WorldEnding",
                                               timeStamp: new DateTime(2012, 12, 21),
                                               flags: PrepareFlags.None,
                                               eventType: "type",
                                               data: new byte[] { 1, 2, 3, 4, 5 },
                                               metadata: new byte[] { 7, 17 });
            long newPosition;
            writer.Write(prepare, out newPosition);
            writer.Flush();
            writer.Close();

            // Checkpoint = record size plus the 137-byte initial offset ("fluff").
            Assert.AreEqual(prepare.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
            //TODO actually read the event
            using (var fileStream = File.Open(filename, FileMode.Open, FileAccess.Read))
            {
                // Skip the 137-byte offset and the int32 length prefix, then read back.
                fileStream.Seek(137 + sizeof(int), SeekOrigin.Begin);
                var roundTripped = LogRecord.ReadFrom(new BinaryReader(fileStream));
                Assert.AreEqual(prepare, roundTripped);
            }
        }
// Beispiel #37 (stray listing artifact from the example scraper; not part of the code)
        // Builds a 1024-byte-chunk database, opens a writer, and flushes one prepare
        // record so each test starts from a log containing exactly one record.
        public void SetUp()
        {
            _writerCheckpoint = new InMemoryCheckpoint();
            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _writerCheckpoint, new InMemoryCheckpoint(), 1024));
            _db.Open();

            _writer = new TFChunkWriter(_db);
            _writer.Open();

            _record = new PrepareLogRecord(logPosition: 0,
                                           eventId: _eventId,
                                           correlationId: _correlationId,
                                           transactionPosition: 0xDEAD,
                                           transactionOffset: 0xBEEF,
                                           eventStreamId: "WorldEnding",
                                           expectedVersion: 1234,
                                           timeStamp: new DateTime(2012, 12, 21),
                                           flags: PrepareFlags.SingleWrite,
                                           eventType: "type",
                                           data: new byte[] { 1, 2, 3, 4, 5 },
                                           metadata: new byte[] { 7, 17 });

            long writtenPosition;
            _writer.Write(_record, out writtenPosition);
            _writer.Flush();
        }
        // Decides during scavenge whether a prepare record must be physically kept in the
        // chunk spanning [chunkStart, chunkEnd). Returns true to keep the prepare, false to
        // allow its removal. Side effect: votes on the fate of the corresponding commit via
        // commitInfo.TryNotToKeep() / ForciblyKeep() (commitInfo is a value looked up from
        // 'commits' by transaction position; the vote is a no-op when no commit was found).
        private bool ShouldKeepPrepare(PrepareLogRecord prepare, Dictionary<long, CommitInfo> commits, long chunkStart, long chunkEnd)
        {
            CommitInfo commitInfo;
            // A prepare counts as committed if a commit record was collected for its
            // transaction, or if it is self-committed (IsCommitted flag).
            bool isCommitted = commits.TryGetValue(prepare.TransactionPosition, out commitInfo)
                               || prepare.Flags.HasAnyOf(PrepareFlags.IsCommitted);

            // Stream tombstones are kept forever unless hard-delete removal was explicitly allowed.
            if (prepare.Flags.HasAnyOf(PrepareFlags.StreamDelete))
            {
                if(_unsafeIgnoreHardDeletes) {
                    Log.Info("Removing hard deleted stream tombstone for stream {0} at position {1}", prepare.EventStreamId, prepare.TransactionPosition);
                    commitInfo.TryNotToKeep();
                } else {
                    commitInfo.ForciblyKeep();
                }
                return !_unsafeIgnoreHardDeletes;
            }

            if (!isCommitted && prepare.Flags.HasAnyOf(PrepareFlags.TransactionBegin))
            {
                // So here we have prepare which commit is in the following chunks or prepare is not committed at all.
                // Now, whatever heuristic on prepare scavenge we use, we should never delete the very first prepare
                // in transaction, as in some circumstances we need it.
                // For instance, this prepare could be part of ongoing transaction and though we sometimes can determine
                // that prepare wouldn't ever be needed (e.g., stream was deleted, $maxAge or $maxCount rule it out)
                // we still need the first prepare to find out StreamId for possible commit in StorageWriterService.WriteCommit method.
                // There could be other reasons where it is needed, so we just safely filter it out to not bother further.
                return true;
            }

            var lastEventNumber = _readIndex.GetStreamLastEventNumber(prepare.EventStreamId);
            if (lastEventNumber == EventNumber.DeletedStream)
            {
                // When all prepares and commit of transaction belong to single chunk and the stream is deleted,
                // we can safely delete both prepares and commit.
                // Even if this prepare is not committed, but its stream is deleted, then as long as it is
                // not TransactionBegin prepare we can remove it, because any transaction should fail either way on commit stage.
                commitInfo.TryNotToKeep();
                return false;
            }

            if (!isCommitted)
            {
                // If we could somehow figure out (from read index) the event number of this prepare
                // (if it is actually committed, but commit is in another chunk) then we can apply same scavenging logic.
                // Unfortunately, if it is not committed prepare we can say nothing for now, so should conservatively keep it.
                return true;
            }

            if (prepare.Flags.HasNoneOf(PrepareFlags.Data))
            {
                // We encountered system prepare with no data. As of now it can appear only in explicit
                // transactions so we can safely remove it. The performance shouldn't hurt, because
                // TransactionBegin prepare is never needed either way and TransactionEnd should be in most
                // circumstances close to commit, so shouldn't hurt performance too much.
                // The advantage of getting rid of system prepares is ability to completely eliminate transaction
                // prepares and commit, if transaction events are completely ruled out by $maxAge/$maxCount.
                // Otherwise we'd have to either keep prepare not requiring to keep commit, which could leave
                // this prepare as never discoverable garbage, or we could insist on keeping commit forever
                // even if all events in transaction are scavenged.
                commitInfo.TryNotToKeep();
                return false;
            }

            if (IsSoftDeletedTempStreamWithinSameChunk(prepare.EventStreamId, chunkStart, chunkEnd))
            {
                commitInfo.TryNotToKeep();
                return false;
            }

            // Resolve the event number: self-committed prepares carry it explicitly
            // (ExpectedVersion + 1); otherwise derive it from the commit's base number.
            var eventNumber = prepare.Flags.HasAnyOf(PrepareFlags.IsCommitted)
                                      ? prepare.ExpectedVersion + 1 // IsCommitted prepares always have explicit expected version
                                      : commitInfo.EventNumber + prepare.TransactionOffset;

            // Duplicates of an already-indexed event at a different log position are dropped.
            if (!KeepOnlyFirstEventOfDuplicate(_tableIndex, prepare, eventNumber)){
                commitInfo.TryNotToKeep();
                return false;
            }

            // We should always physically keep the very last prepare in the stream.
            // Otherwise we get into trouble when trying to resolve LastStreamEventNumber, for instance.
            // That is because our TableIndex doesn't keep EventStreamId, only hash of it, so on doing some operations
            // that needs TableIndex, we have to make sure we have prepare records in TFChunks when we need them.
            if (eventNumber >= lastEventNumber)
            {
                // Definitely keep commit, otherwise current prepare wouldn't be discoverable.
                commitInfo.ForciblyKeep();
                return true;
            }

            // Apply the stream's retention metadata: $maxCount, $tb (truncate before), $maxAge.
            var meta = _readIndex.GetStreamMetadata(prepare.EventStreamId);
            bool canRemove = (meta.MaxCount.HasValue && eventNumber < lastEventNumber - meta.MaxCount.Value + 1)
                          || (meta.TruncateBefore.HasValue && eventNumber < meta.TruncateBefore.Value)
                          || (meta.MaxAge.HasValue && prepare.TimeStamp < DateTime.UtcNow - meta.MaxAge.Value);

            if (canRemove)
                commitInfo.TryNotToKeep();
            else
                commitInfo.ForciblyKeep();
            return !canRemove;
        }
// Beispiel #39 (stray listing artifact from the example scraper; not part of the code)
 // Stub lookup: never finds a record at any position.
 public bool TryGetRecord(long pos, out PrepareLogRecord record)
 {
     record = null;
     return false;
 }
 // Captures the outcome of a single log write: where the record landed (writtenPos),
 // where the writer now is (newPos), and the prepare that was written.
 public WriteResult(long writtenPos, long newPos, PrepareLogRecord prepare)
 {
     Prepare = prepare;
     WrittenPos = writtenPos;
     NewPos = newPos;
 }
// Beispiel #41 (stray listing artifact from the example scraper; not part of the code)
        // Verifies the chaser's live-tail behavior: with an empty file TryReadNext returns
        // false; after each append + writer-checkpoint advance it returns exactly the newly
        // appended record. The chaser stays open across both appends.
        public void try_read_returns_properly_when_writer_is_written_to_while_chasing()
        {
            var writerchk = new InMemoryCheckpoint(0);
            var readerchk = new InMemoryCheckpoint("reader", 0);
            var config    = new TransactionFileDatabaseConfig(PathName, "prefix.tf", 10000, writerchk, new[] { readerchk });

            var fileName = Path.Combine(PathName, "prefix.tf0");

            // Start with an empty transaction file.
            File.Create(fileName).Close();

            var reader = new MultifileTransactionFileChaser(config, "reader");

            reader.Open();

            LogRecord record;

            // Nothing written yet and writer checkpoint is 0 => nothing to chase.
            Assert.IsFalse(reader.TryReadNext(out record));

            var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                                     correlationId: _correlationId,
                                                     eventId: _eventId,
                                                     transactionPosition: 0,
                                                     eventStreamId: "WorldEnding",
                                                     expectedVersion: 1234,
                                                     timeStamp: new DateTime(2012, 12, 21),
                                                     flags: PrepareFlags.None,
                                                     eventType: "type",
                                                     data: new byte[] { 1, 2, 3, 4, 5 },
                                                     metadata: new byte[] { 7, 17 });
            var memstream = new MemoryStream();
            var writer    = new BinaryWriter(memstream);

            recordToWrite.WriteWithLengthPrefixTo(writer);

            // Append the serialized record to the live file while the chaser is open.
            using (var fs = new FileStream(fileName, FileMode.Append, FileAccess.Write, FileShare.ReadWrite))
            {
                fs.Write(memstream.ToArray(), 0, (int)memstream.Length);
                fs.Flush(flushToDisk: true);
            }
            // Advance the writer checkpoint so the chaser considers the record readable.
            writerchk.Write(memstream.Length);

            Assert.IsTrue(reader.TryReadNext(out record));
            Assert.AreEqual(record, recordToWrite);

            var recordToWrite2 = new PrepareLogRecord(logPosition: 0,
                                                      correlationId: _correlationId,
                                                      eventId: _eventId,
                                                      transactionPosition: 0,
                                                      eventStreamId: "WorldEnding",
                                                      expectedVersion: 4321,
                                                      timeStamp: new DateTime(2012, 12, 21),
                                                      flags: PrepareFlags.None,
                                                      eventType: "type",
                                                      data: new byte[] { 3, 2, 1 },
                                                      metadata: new byte[] { 9 });

            // Reuse the buffer for the second record.
            memstream.SetLength(0);
            recordToWrite2.WriteWithLengthPrefixTo(writer);

            using (var fs = new FileStream(fileName, FileMode.Append, FileAccess.Write, FileShare.ReadWrite))
            {
                fs.Write(memstream.ToArray(), 0, (int)memstream.Length);
                fs.Flush(flushToDisk: true);
            }
            // Second advance is cumulative: previous checkpoint plus the new record's size.
            writerchk.Write(writerchk.Read() + memstream.Length);

            Assert.IsTrue(reader.TryReadNext(out record));
            Assert.AreEqual(record, recordToWrite2);

            reader.Close();
        }
 // Writes three prepares to the same stream, each with expected version -1
 // (presumably ExpectedVersion.NoStream, i.e. "stream must not exist" -- confirm).
 protected override void WriteTestScenario()
 {
     _prepare0 = WritePrepare("ES", -1);
     _prepare1 = WritePrepare("ES", -1);
     _prepare2 = WritePrepare("ES", -1);
 }
        // A record too large for the remaining chunk space is rejected on the first try,
        // the writer rolls to the next chunk, and a smaller retry succeeds there.
        public void a_record_is_not_written_at_first_but_written_on_second_try()
        {
            var filename1 = Path.Combine(PathName, "prefix.tf0");
            var filename2 = Path.Combine(PathName, "prefix.tf1");

            // Pre-create the first chunk file with a valid header and a full-size body.
            var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
            var headerBytes = chunkHeader.AsByteArray();
            var fileBytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
            Buffer.BlockCopy(headerBytes, 0, fileBytes, 0, headerBytes.Length);
            File.WriteAllBytes(filename1, fileBytes);

            _checkpoint = new InMemoryCheckpoint(0);
            var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                       new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                                       10000,
                                                       0,
                                                       _checkpoint,
                                                       new ICheckpoint[0]));
            db.OpenVerifyAndClean();
            var writer = new TFChunkWriter(db);
            long pos;

            // The three records differ only in position fields and metadata size.
            PrepareLogRecord MakePrepare(long logPosition, int metadataSize)
            {
                return new PrepareLogRecord(logPosition: logPosition,
                                            correlationId: _correlationId,
                                            eventId: _eventId,
                                            expectedVersion: 1234,
                                            transactionPosition: logPosition,
                                            transactionOffset: 0,
                                            eventStreamId: "WorldEnding",
                                            timeStamp: new DateTime(2012, 12, 21),
                                            flags: PrepareFlags.None,
                                            eventType: "type",
                                            data: new byte[] { 1, 2, 3, 4, 5 },
                                            metadata: new byte[metadataSize]);
            }

            var record1 = MakePrepare(0, 8000);
            Assert.IsTrue(writer.Write(record1, out pos));  // almost fills the first chunk

            var record2 = MakePrepare(pos, 8000);
            Assert.IsFalse(writer.Write(record2, out pos)); // too little space left in the chunk

            var record3 = MakePrepare(pos, 2000);
            Assert.IsTrue(writer.Write(record3, out pos));  // fits after rolling to the next chunk

            writer.Close();
            db.Dispose();

            // Checkpoint covers the full first chunk (10000) plus the one record in the second.
            Assert.AreEqual(record3.GetSizeWithLengthPrefixAndSuffix() + 10000, _checkpoint.Read());
            using (var fileStream = File.Open(filename2, FileMode.Open, FileAccess.Read))
            {
                // Skip the chunk header and the int32 length prefix, then read the record back.
                fileStream.Seek(ChunkHeader.Size + sizeof(int), SeekOrigin.Begin);
                var roundTripped = LogRecord.ReadFrom(new BinaryReader(fileStream));
                Assert.AreEqual(record3, roundTripped);
            }
        }
 // Wraps a prepare record; chains to the parameterless ctor so any default
 // initialization runs before the record is captured.
 public PrepareInfo(PrepareLogRecord prepareRecord) : this() => _prepareRecord = prepareRecord;
Beispiel #45
0
 // Message noting that the event at the given log position / event number was committed.
 // NOTE(review): the type name's spelling ("Commited") is part of the public API and is kept.
 public EventCommited(long commitPosition, int eventNumber, PrepareLogRecord prepare)
 {
     Prepare = prepare;
     EventNumber = eventNumber;
     CommitPosition = commitPosition;
 }
 // Returns true when this prepare is the record to keep for its event number:
 // either no committed event is readable at that number yet, or the committed one
 // lives at this prepare's own log position. A committed duplicate at a different
 // position yields false (this prepare is the later copy and can be dropped).
 // NOTE(review): the tableIndex parameter is unused here; kept for interface compatibility.
 private bool KeepOnlyFirstEventOfDuplicate(ITableIndex tableIndex, PrepareLogRecord prepare, long eventNumber)
 {
     var lookup = _readIndex.ReadEvent(prepare.EventStreamId, eventNumber);
     var isLaterDuplicate = lookup.Result == ReadEventResult.Success
                            && lookup.Record.LogPosition != prepare.LogPosition;
     return !isLaterDuplicate;
 }
Beispiel #47
0
 protected override void WriteTestScenario()
 {
     // Writes three prepares to the same stream, all claiming expected version -1
     // ("stream does not exist yet") — presumably setting up duplicate/conflicting
     // first events for the surrounding test fixture; TODO confirm against the fixture.
     _prepare0 = WritePrepare("ES", -1);
     _prepare1 = WritePrepare("ES", -1);
     _prepare2 = WritePrepare("ES", -1);
 }
Beispiel #48
0
 // Message carrying a newly appeared stream event for the subscription identified
 // by the correlation id.
 public StreamEventAppeared(Guid correlationId, int eventNumber, PrepareLogRecord @event)
 {
     Event = @event;
     EventNumber = eventNumber;
     CorrelationId = correlationId;
 }
        /// <summary>
        /// Writes one transactional prepare for <paramref name="eventStreamId"/> at the current
        /// (non-flushed) writer checkpoint and wraps it in an <c>EventRecord</c>.
        /// With <paramref name="retryOnFail"/> a failed write is retried exactly once at the
        /// position reported by the failed attempt (e.g. after the writer switched chunks).
        /// </summary>
        protected EventRecord WriteTransactionEvent(Guid correlationId,
                                                    long transactionPos,
                                                    int transactionOffset,
                                                    string eventStreamId,
                                                    int eventNumber,
                                                    string eventData,
                                                    PrepareFlags flags,
                                                    bool retryOnFail = false)
        {
            var prepare = LogRecord.Prepare(WriterCheckpoint.ReadNonFlushed(),
                                            correlationId,
                                            Guid.NewGuid(),
                                            transactionPos,
                                            transactionOffset,
                                            eventStreamId,
                                            ExpectedVersion.Any,
                                            flags,
                                            "some-type",
                                            Helper.UTF8NoBom.GetBytes(eventData),
                                            null);

            if (retryOnFail)
            {
                long firstPos = prepare.LogPosition;
                long newPos;
                if (!Writer.Write(prepare, out newPos))
                {
                    // First attempt failed; rebuild the record at the position the writer
                    // reported. If this prepare opened its own transaction (transaction
                    // position == its own log position), the transaction position moves
                    // along with it; otherwise the original transaction position is kept.
                    var tPos = prepare.TransactionPosition == prepare.LogPosition ? newPos : prepare.TransactionPosition;
                    prepare = new PrepareLogRecord(newPos,
                                                   prepare.CorrelationId,
                                                   prepare.EventId,
                                                   tPos,
                                                   prepare.TransactionOffset,
                                                   prepare.EventStreamId,
                                                   prepare.ExpectedVersion,
                                                   prepare.TimeStamp,
                                                   prepare.Flags,
                                                   prepare.EventType,
                                                   prepare.Data,
                                                   prepare.Metadata);
                    // Only one retry is attempted; a second failure fails the test outright.
                    if (!Writer.Write(prepare, out newPos))
                        Assert.Fail("Second write try failed when first writing prepare at {0}, then at {1}.", firstPos, prepare.LogPosition);
                }
                return new EventRecord(eventNumber, prepare);
            }

            // No-retry path: the write is expected to succeed on the first attempt.
            long pos;
            Assert.IsTrue(Writer.Write(prepare, out pos));
            return new EventRecord(eventNumber, prepare);
        }
        public void a_record_is_not_written_at_first_but_written_on_second_try()
        {
            // Precreate the first chunk on disk, full-sized (10000 bytes of data space)
            // but empty, so a large record nearly fills it and a second one cannot fit.
            var firstChunkFile = GetFilePathFor("chunk-000000.000000");
            var secondChunkFile = GetFilePathFor("chunk-000001.000000");
            var header = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
            var headerBytes = header.AsByteArray();
            var chunkFileBytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
            Buffer.BlockCopy(headerBytes, 0, chunkFileBytes, 0, headerBytes.Length);
            File.WriteAllBytes(firstChunkFile, chunkFileBytes);

            _checkpoint = new InMemoryCheckpoint(0);
            var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
            db.Open();
            var writer = new TFChunkWriter(db);

            long pos;
            var first = CreateTestPrepare(0, 8000);
            Assert.IsTrue(writer.Write(first, out pos));   // almost fills up first chunk

            var second = CreateTestPrepare(pos, 8000);
            Assert.IsFalse(writer.Write(second, out pos)); // chunk has too small space

            var third = CreateTestPrepare(pos, 2000);
            Assert.IsTrue(writer.Write(third, out pos));   // lands in the next chunk
            writer.Close();
            db.Dispose();

            // The checkpoint should sit just past the third record in the second chunk
            // (10000 = full logical size of the first chunk).
            Assert.AreEqual(third.GetSizeWithLengthPrefixAndSuffix() + 10000, _checkpoint.Read());
            using (var stream = File.Open(secondChunkFile, FileMode.Open, FileAccess.Read))
            {
                stream.Seek(ChunkHeader.Size + sizeof(int), SeekOrigin.Begin);
                var read = LogRecord.ReadFrom(new BinaryReader(stream));
                Assert.AreEqual(third, read);
            }
        }

        // Builds a prepare at the given log/transaction position with a metadata blob of
        // the given size; all other fields are the fixed test values shared by the three
        // records written in the test above.
        private PrepareLogRecord CreateTestPrepare(long logPos, int metadataSize)
        {
            return new PrepareLogRecord(logPosition: logPos,
                                        correlationId: _correlationId,
                                        eventId: _eventId,
                                        expectedVersion: 1234,
                                        transactionPosition: logPos,
                                        transactionOffset: 0,
                                        eventStreamId: "WorldEnding",
                                        timeStamp: new DateTime(2012, 12, 21),
                                        flags: PrepareFlags.None,
                                        eventType: "type",
                                        data: new byte[] { 1, 2, 3, 4, 5 },
                                        metadata: new byte[metadataSize]);
        }
Beispiel #51
0
 // Intentional no-op: this implementation ignores the record entirely — presumably a
 // stub/null-object for an interface that requires a PutRecord member; TODO confirm
 // against the interface this class implements.
 public void PutRecord(long pos, PrepareLogRecord record)
 {
 }
Beispiel #52
0
        // Decides during scavenge whether a prepare record must be physically kept.
        // Two regimes:
        //   (1) the prepare's commit is known (hit in the commits dictionary) — we can reason
        //       about its event number against stream metadata ($maxAge/$maxCount) and the
        //       stream's last event number;
        //   (2) the commit is in a later chunk or absent — we keep conservatively.
        // Side effect: may set commitInfo.KeepCommit to signal whether the matching commit
        // record must also be kept (true forces keep; false means "removable unless someone
        // else already insisted on keeping it").
        private bool ShouldKeepPrepare(PrepareLogRecord prepare, Dictionary <long, CommitInfo> commits)
        {
            CommitInfo commitInfo;

            if (commits.TryGetValue(prepare.TransactionPosition, out commitInfo))
            {
                if ((prepare.Flags & PrepareFlags.StreamDelete) != 0) // we always keep delete tombstones
                {
                    commitInfo.KeepCommit = true;                     // see notes below
                    return(true);
                }

                if (_readIndex.IsStreamDeleted(prepare.EventStreamId))
                {
                    // When all prepares and commit of transaction belong to single chunk and the stream is deleted,
                    // we can safely delete both prepares and commit.

                    // If someone decided definitely to keep corresponding commit then we shouldn't interfere.
                    // Otherwise we should point that yes, you can remove commit for this prepare.
                    commitInfo.KeepCommit = commitInfo.KeepCommit ?? false;
                    return(false);
                }

                if ((prepare.Flags & PrepareFlags.Data) == 0)
                {
                    // We encountered system prepare with no data. As of now it can appear only in explicit
                    // transactions so we can safely remove it. The performance shouldn't hurt, because
                    // TransactionBegin prepare is never needed either way and TransactionEnd should be in most
                    // circumstances close to commit, so shouldn't hurt performance too much.
                    // The advantage of getting rid of system prepares is ability to completely eliminate transaction
                    // prepares and commit, if transaction events are completely ruled out by $maxAge/$maxCount.
                    // Otherwise we'd have to either keep prepare not requiring to keep commit, which could leave
                    // this prepare as never discoverable garbage, or we could insist on keeping commit forever
                    // even if all events in transaction are scavenged.
                    commitInfo.KeepCommit = commitInfo.KeepCommit ?? false;
                    return(false);
                }

                // The event's number within its stream is the commit's base event number offset
                // by this prepare's position within the transaction.
                var lastEventNumber = _readIndex.GetLastStreamEventNumber(prepare.EventStreamId);
                var streamMetadata  = _readIndex.GetStreamMetadata(prepare.EventStreamId);
                var eventNumber     = commitInfo.EventNumber + prepare.TransactionOffset;

                // We should always physically keep the very last prepare in the stream.
                // Otherwise we get into trouble when trying to resolve LastStreamEventNumber, for instance.
                // That is because our TableIndex doesn't keep EventStreamId, only hash of it, so on doing some operations
                // that needs TableIndex, we have to make sure we have prepare records in TFChunks when we need them.
                if (eventNumber >= lastEventNumber)
                {
                    // Definitely keep commit, otherwise current prepare wouldn't be discoverable.
                    // TODO AN I should think more carefully about this stuff with prepare/commits relations
                    // TODO AN and whether we can keep prepare without keeping commit (problems on index rebuild).
                    // TODO AN What can save us -- some effective and clever way to mark prepare as committed,
                    // TODO AN but place updated prepares in the place of commit, so they are discoverable during
                    // TODO AN index rebuild or ReadAllForward/Backward queries EXACTLY in the same order as they
                    // TODO AN were originally written to TF
                    commitInfo.KeepCommit = true;
                    return(true);
                }

                // Apply stream metadata: either $maxCount (event number fell off the kept
                // window) or $maxAge (event is older than the retention period) can rule
                // this prepare out.
                bool keep = true;
                if (streamMetadata.MaxCount.HasValue)
                {
                    int maxKeptEventNumber = lastEventNumber - streamMetadata.MaxCount.Value + 1;
                    if (eventNumber < maxKeptEventNumber)
                    {
                        keep = false;
                    }
                }

                if (streamMetadata.MaxAge.HasValue)
                {
                    if (prepare.TimeStamp < DateTime.UtcNow - streamMetadata.MaxAge.Value)
                    {
                        keep = false;
                    }
                }

                if (keep)
                {
                    commitInfo.KeepCommit = true;
                }
                else
                {
                    commitInfo.KeepCommit = commitInfo.KeepCommit ?? false;
                }
                return(keep);
            }
            else
            {
                if ((prepare.Flags & PrepareFlags.StreamDelete) != 0) // we always keep delete tombstones
                {
                    return(true);
                }

                // So here we have prepare which commit is in the following chunks or prepare is not committed at all.
                // Now, whatever heuristic on prepare scavenge we use, we should never delete the very first prepare
                // in transaction, as in some circumstances we need it.
                // For instance, this prepare could be part of ongoing transaction and though we sometimes can determine
                // that prepare wouldn't ever be needed (e.g., stream was deleted, $maxAge or $maxCount rule it out)
                // we still need the first prepare to find out StreamId for possible commit in StorageWriterService.WriteCommit method.
                // There could be other reasons where it is needed, so we just safely filter it out to not bother further.
                if ((prepare.Flags & PrepareFlags.TransactionBegin) != 0)
                {
                    return(true);
                }

                // If stream of this prepare is deleted, then we can safely delete this prepare.
                if (_readIndex.IsStreamDeleted(prepare.EventStreamId))
                {
                    return(false);
                }

                // TODO AN we can try to figure out if this prepare is committed, and if yes, what is its event number.
                // TODO AN only then we can actually do something here, unfortunately.
                return(true);

                // NOTE(review): everything below this return is unreachable; it is kept as
                // the original author's design notes for a future event-number-aware path.

                // We should always physically keep the very last prepare in the stream.
                // Otherwise we get into trouble when trying to resolve LastStreamEventNumber, for instance.
                // That is because our TableIndex doesn't keep EventStreamId, only hash of it, so on doing some operations
                // that needs TableIndex, we have to make sure we have prepare records in TFChunks when we need them.

                /*
                 * var lastEventNumber = _readIndex.GetLastStreamEventNumber(prepare.EventStreamId);
                 * var streamMetadata = _readIndex.GetStreamMetadata(prepare.EventStreamId);
                 * if (streamMetadata.MaxCount.HasValue)
                 * {
                 *  // nothing to do here until we know prepare's event number
                 * }
                 * if (streamMetadata.MaxAge.HasValue)
                 * {
                 *  // nothing to do here until we know prepare's event number
                 * }
                 * return false;
                 */
            }
        }