Example #1
        public RecordWriteResult TryAppend(LogRecord record)
        {
            if (_isReadonly)
            {
                throw new InvalidOperationException("Cannot write to a read-only block.");
            }

            var workItem     = _writerWorkItem;
            var buffer       = workItem.Buffer;
            var bufferWriter = workItem.BufferWriter;
            var stream       = workItem.Stream;

            buffer.SetLength(4);
            buffer.Position = 4;
            record.WriteTo(bufferWriter);
            buffer.Position = 0;
            var toWriteLength = (int)buffer.Length;

            bufferWriter.Write(toWriteLength - 4);

            if (!VerifyStreamLength(stream, toWriteLength))
            {
                return RecordWriteResult.Failed(GetLogicalPosition(workItem));
            }

            var oldPosition = WriteRawData(buffer);

            _actualDataSize = GetLogicalPosition(workItem);
            return RecordWriteResult.Successful(oldPosition, _actualDataSize);
        }
Example #2
        public RecordWriteResult TryAppend(LogRecord record)
        {
            if (_isReadonly)
            {
                throw new InvalidOperationException("Cannot write to a read-only block.");
            }

            var workItem     = _writerWorkItem;
            var buffer       = workItem.Buffer;
            var bufferWriter = workItem.BufferWriter;
            var stream       = workItem.Stream;

            buffer.SetLength(4);
            buffer.Position = 4;
            record.WriteTo(bufferWriter);
            var length = (int)buffer.Length - 4;

            bufferWriter.Write(length); // length suffix
            buffer.Position = 0;
            bufferWriter.Write(length); // length prefix

            if (stream.Position + length + 8 > ChunkHeader.Size + _chunkHeader.ChunkSize)
            {
                return RecordWriteResult.Failed(GetLogicalPosition(workItem));
            }

            var oldPosition = WriteRawData(buffer);

            _actualDataSize = GetLogicalPosition(workItem);
            return RecordWriteResult.Successful(oldPosition, _actualDataSize);
        }
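
Both variants above frame each record with a 4-byte length field; the later one writes it both after the payload (suffix) and at the start (prefix), so the log can be scanned forward via the prefix and backward via the suffix. Below is a minimal standalone sketch of that framing, using only MemoryStream/BinaryWriter rather than the actual writer work item, with the serialized record stood in by a plain byte array:

        using System.IO;

        // Standalone illustration of the prefix/suffix framing built in the buffer above.
        // Not the EventStore types: recordBytes stands in for record.WriteTo(bufferWriter).
        static class RecordFraming
        {
            public static byte[] Frame(byte[] recordBytes)
            {
                using var buffer = new MemoryStream();
                using var writer = new BinaryWriter(buffer);

                buffer.SetLength(4);        // reserve the 4-byte length prefix
                buffer.Position = 4;
                writer.Write(recordBytes);  // serialized record payload

                var length = (int)buffer.Length - 4;
                writer.Write(length);       // length suffix (backward reads)
                buffer.Position = 0;
                writer.Write(length);       // length prefix (forward reads)

                return buffer.ToArray();    // prefix + payload + suffix
            }
        }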
 public void Setup()
 {
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk  = Core.TransactionLog.Chunks.TFChunk.CreateNew(_filename, 4096, 0, 0);
     _result = _chunk.TryAppend(_record);
 }
Example #4
        public void Should_delegate_to_base_writer(RecordWriteResult result)
        {
            SetupResult(result, 35);

            Write(out var size).Should().Be(result);

            size.Should().Be(35);
        }
 public override void TestFixtureSetUp()
 {
     base.TestFixtureSetUp();
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk  = TFChunk.CreateNew(Filename, 4096, 0, 0, false);
     _result = _chunk.TryAppend(_record);
 }
Example #6
 private void SetupResult(RecordWriteResult result, int recordSize)
     => baseWriter
        .TryWrite(null, null, out _)
        .ReturnsForAnyArgs(info =>
        {
            info[2] = recordSize;
            return result;
        });
Example #7
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            _db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                    new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                                    16 * 1024,
                                                    0,
                                                    new InMemoryCheckpoint(),
                                                    new InMemoryCheckpoint(),
                                                    new InMemoryCheckpoint(-1),
                                                    new InMemoryCheckpoint(-1)));
            _db.Open();

            var chunk = _db.Manager.GetChunkFor(0);

            _p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1    = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            _p2 = LogRecord.SingleWrite(_cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2    = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            _p3 = LogRecord.SingleWrite(_cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3    = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();

            _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.WriterCheckpoint.Flush();
            _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.ChaserCheckpoint.Flush();

            var bus          = new InMemoryBus("Bus");
            var ioDispatcher = new IODispatcher(bus, new PublishEnvelope(bus));
            var scavenger    = new TFChunkScavenger(_db, ioDispatcher, new FakeTableIndex(),
                                                    new FakeReadIndex(x => x == "es-to-scavenge"), Guid.NewGuid(), "fakeNodeIp");

            scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
 public void Setup()
 {
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk  = TFChunk.CreateNew(filename, 4096, 0, 0);
     _result = _chunk.TryAppend(_record);
     _chunk.Flush();
     _chunk.Complete();
     _cachedChunk = TFChunk.FromCompletedFile(filename);
     _cachedChunk.CacheInMemory();
 }
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;

            _record = LogRecord.Prepare(recordFactory, 0, _corrId, _eventId, 0, 0, streamId, 1,
                                        PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
            _chunk  = TFChunkHelper.CreateNewChunk(Filename);
            _result = _chunk.TryAppend(_record);
        }
 public override void TestFixtureSetUp()
 {
     base.TestFixtureSetUp();
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk  = TFChunkHelper.CreateNewChunk(Filename);
     _result = _chunk.TryAppend(_record);
     _chunk.Flush();
     _chunk.Complete();
     _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, initialReaderCount: 5, reduceFileCachePressure: false);
     _cachedChunk.CacheInMemory();
 }
 public override void TestFixtureSetUp()
 {
     base.TestFixtureSetUp();
     _record = new PrepareLogRecord(0, _corrId, _eventId, 0, 0, "test", 1, new DateTime(2000, 1, 1, 12, 0, 0),
                                    PrepareFlags.None, "Foo", new byte[12], new byte[15]);
     _chunk  = TFChunk.CreateNew(Filename, 4096, 0, 0, isScavenged: false, inMem: false, unbuffered: false, writethrough: false);
     _result = _chunk.TryAppend(_record);
     _chunk.Flush();
     _chunk.Complete();
     _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false);
     _cachedChunk.CacheInMemory();
 }
Example #12
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            _db = new TFChunkDb(TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 16 * 1024));
            _db.Open();

            var chunk      = _db.Manager.GetChunkFor(0);
            var logFormat  = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;
            var streamName = "es-to-scavenge";

            logFormat.StreamNameIndex.GetOrAddId(streamName, out var streamId, out _, out _);
            var expectedVersion = ExpectedVersion.NoStream;

            _p1 = LogRecord.SingleWrite(logFormat.RecordFactory, 0, Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1    = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            _p2 = LogRecord.SingleWrite(logFormat.RecordFactory, _cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2    = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            _p3 = LogRecord.SingleWrite(logFormat.RecordFactory, _cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3    = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();
            _originalFileSize = chunk.FileSize;

            _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.WriterCheckpoint.Flush();
            _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.ChaserCheckpoint.Flush();

            var scavenger = new TFChunkScavenger<TStreamId>(_db, new FakeTFScavengerLog(), new FakeTableIndex<TStreamId>(),
                                                             new FakeReadIndex<TLogFormat, TStreamId>(x => EqualityComparer<TStreamId>.Default.Equals(x, streamId)),
                                                             logFormat.SystemStreams);
            await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
Example #13
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            var logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;

            logFormat.StreamNameIndex.GetOrAddId("test", out var streamId, out _, out _);

            _record = LogRecord.Prepare(logFormat.RecordFactory, 0, _corrId, _eventId, 0, 0, streamId, 1,
                                        PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
            _chunk  = TFChunkHelper.CreateNewChunk(Filename);
            _result = _chunk.TryAppend(_record);
        }
Example #14
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();
            var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
            var streamId      = LogFormatHelper<TLogFormat, TStreamId>.StreamId;

            _record = LogRecord.Prepare(recordFactory, 0, _corrId, _eventId, 0, 0, streamId, 1,
                                        PrepareFlags.None, "Foo", new byte[12], new byte[15], new DateTime(2000, 1, 1, 12, 0, 0));
            _chunk  = TFChunkHelper.CreateNewChunk(Filename);
            _result = _chunk.TryAppend(_record);
            _chunk.Flush();
            _chunk.Complete();
            _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false,
                                                     initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false);
            _cachedChunk.CacheInMemory();
        }
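
The fixtures above that build a cached chunk all share one lifecycle: append to a fresh chunk, flush, complete it, then reopen the finished file and pin it in memory. A condensed sketch of that sequence follows; it reuses only calls that appear in those fixtures and assumes the same EventStore.Core test helpers are available (record and Filename are the fixture's own fields, and the FromCompletedFile parameter list varies between the versions shown):

            // Condensed sketch of the lifecycle shared by the cached-chunk fixtures above.
            var chunk  = TFChunkHelper.CreateNewChunk(Filename);   // writable chunk
            var result = chunk.TryAppend(record);                  // append one record
            chunk.Flush();                                         // persist the appended data
            chunk.Complete();                                      // seal the chunk; read-only from here on

            var cached = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false,
                                                   initialReaderCount: 5, reduceFileCachePressure: false);
            cached.CacheInMemory();                                // load the completed chunk into memory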
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            _db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0, chunkSize: 16 * 1024));
            _db.Open();

            var chunk = _db.Manager.GetChunkFor(0);

            _p1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1    = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            _p2 = LogRecord.SingleWrite(_cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2    = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            _p3 = LogRecord.SingleWrite(_cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                        new byte[2048], new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3    = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();
            _originalFileSize = chunk.FileSize;

            _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.WriterCheckpoint.Flush();
            _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.ChaserCheckpoint.Flush();

            var scavenger = new TFChunkScavenger(_db, new FakeTFScavengerLog(), new FakeTableIndex(),
                                                 new FakeReadIndex(x => x == "es-to-scavenge"));

            scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false).Wait();

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
Example #16
        public RecordWriteResult TryAppend(LogRecord record)
        {
            if (_isReadOnly)
            {
                throw new InvalidOperationException("Cannot write to a read-only block.");
            }

            var workItem     = _writerWorkItem;
            var buffer       = workItem.Buffer;
            var bufferWriter = workItem.BufferWriter;
            var stream       = workItem.Stream;

            buffer.SetLength(4);
            buffer.Position = 4;
            record.WriteTo(bufferWriter);
            var length = (int)buffer.Length - 4;

            bufferWriter.Write(length); // length suffix
            buffer.Position = 0;
            bufferWriter.Write(length); // length prefix

            if (stream.Position + length + 2 * sizeof(int) > ChunkHeader.Size + _chunkHeader.ChunkSize)
            {
                return RecordWriteResult.Failed(GetDataPosition(workItem));
            }

            var oldPosition = WriteRawData(workItem, buffer);

            _physicalDataSize = (int)GetDataPosition(workItem); // should fit 32 bits
            _logicalDataSize  = ChunkHeader.GetLocalLogPosition(record.LogPosition + length + 2 * sizeof(int));

            // for non-scavenged chunk _physicalDataSize should be the same as _logicalDataSize
            // for scavenged chunk _logicalDataSize should be at least the same as _physicalDataSize
            if ((!ChunkHeader.IsScavenged && _logicalDataSize != _physicalDataSize) ||
                (ChunkHeader.IsScavenged && _logicalDataSize < _physicalDataSize))
            {
                throw new Exception(string.Format("Data sizes violation. Chunk: {0}, IsScavenged: {1}, LogicalDataSize: {2}, PhysicalDataSize: {3}.",
                                                  FileName, ChunkHeader.IsScavenged, _logicalDataSize, _physicalDataSize));
            }

            return RecordWriteResult.Successful(oldPosition, _physicalDataSize);
        }
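
This version makes the capacity check explicit: a record fits only if the writer's current position plus the record body plus both 4-byte length fields stays within the header size plus the configured chunk data size. A tiny standalone helper restating just that arithmetic (headerSize and chunkDataSize are plain parameters here; in the chunk code they come from ChunkHeader.Size and _chunkHeader.ChunkSize):

        // Standalone restatement of the capacity check used in TryAppend above.
        static class ChunkCapacity
        {
            const int LengthFieldSize = sizeof(int);   // 4-byte prefix + 4-byte suffix

            public static bool Fits(long streamPosition, int recordLength, int headerSize, int chunkDataSize)
                => streamPosition + recordLength + 2 * LengthFieldSize <= headerSize + chunkDataSize;
        }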
Example #17
        public override void TestFixtureSetUp()
        {
            base.TestFixtureSetUp();

            _db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                                    new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                                    16 * 1024,
                                                    0,
                                                    new InMemoryCheckpoint(),
                                                    new InMemoryCheckpoint(),
                                                    new ICheckpoint[0]));
            _db.OpenVerifyAndClean();

            var chunk = _db.Manager.GetChunk(0);

            _rec1 = LogRecord.SingleWrite(0, Guid.NewGuid(), Guid.NewGuid(), "es1", ExpectedVersion.Any, "et1",
                                          new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_rec1);

            _rec2 = LogRecord.SingleWrite(_res1.NewPosition,
                                          Guid.NewGuid(), Guid.NewGuid(), "es-to-scavenge", ExpectedVersion.Any, "et1",
                                          new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_rec2);

            _rec3 = LogRecord.SingleWrite(_res2.NewPosition,
                                          Guid.NewGuid(), Guid.NewGuid(), "es1", ExpectedVersion.Any, "et1",
                                          new byte[] { 0, 1, 2 }, new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_rec3);

            chunk.Complete();

            var scavenger = new TFChunkScavenger(_db, new FakeReadIndex(x => x == "es-to-scavenge"));

            scavenger.Scavenge(alwaysKeepScavenged: true);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }
Example #18
        public override async Task TestFixtureSetUp()
        {
            await base.TestFixtureSetUp();

            _logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
                IndexDirectory = GetFilePathFor("index"),
            });

            _db = new TFChunkDb(TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 16 * 1024));
            _db.Open();

            var chunk      = _db.Manager.GetChunkFor(0);
            var streamName = "es-to-scavenge";
            var pos        = 0L;

            _logFormat.StreamNameIndex.GetOrReserve(_logFormat.RecordFactory, streamName, 0, out var streamId, out var streamRecord);
            if (streamRecord is not null)
            {
                var res = chunk.TryAppend(streamRecord);
                pos = res.NewPosition;
            }

            _logFormat.EventTypeIndex.GetOrReserveEventType(_logFormat.RecordFactory, "et1", pos, out var eventTypeId, out var eventTypeRecord);
            if (eventTypeRecord is not null)
            {
                var res = chunk.TryAppend(eventTypeRecord);
                pos = res.NewPosition;
            }

            var expectedVersion = ExpectedVersion.NoStream;

            _p1 = LogRecord.SingleWrite(_logFormat.RecordFactory, pos, Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, eventTypeId,
                                        new byte[2048], new byte[] { 5, 7 });
            _res1 = chunk.TryAppend(_p1);

            _c1    = LogRecord.Commit(_res1.NewPosition, Guid.NewGuid(), _p1.LogPosition, 0);
            _cres1 = chunk.TryAppend(_c1);

            _p2 = LogRecord.SingleWrite(_logFormat.RecordFactory, _cres1.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, eventTypeId,
                                        new byte[2048], new byte[] { 5, 7 });
            _res2 = chunk.TryAppend(_p2);

            _c2    = LogRecord.Commit(_res2.NewPosition, Guid.NewGuid(), _p2.LogPosition, 1);
            _cres2 = chunk.TryAppend(_c2);

            _p3 = LogRecord.SingleWrite(_logFormat.RecordFactory, _cres2.NewPosition,
                                        Guid.NewGuid(), Guid.NewGuid(), streamId, expectedVersion++, eventTypeId,
                                        new byte[2048], new byte[] { 5, 7 });
            _res3 = chunk.TryAppend(_p3);

            _c3    = LogRecord.Commit(_res3.NewPosition, Guid.NewGuid(), _p3.LogPosition, 2);
            _cres3 = chunk.TryAppend(_c3);

            chunk.Complete();
            _originalFileSize = chunk.FileSize;

            _db.Config.WriterCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.WriterCheckpoint.Flush();
            _db.Config.ChaserCheckpoint.Write(chunk.ChunkHeader.ChunkEndPosition);
            _db.Config.ChaserCheckpoint.Flush();

            var scavenger = new TFChunkScavenger<TStreamId>(_db, new FakeTFScavengerLog(), new FakeTableIndex<TStreamId>(),
                                                             new FakeReadIndex<TLogFormat, TStreamId>(x => EqualityComparer<TStreamId>.Default.Equals(x, streamId), _logFormat.Metastreams),
                                                             _logFormat.Metastreams);
            await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);

            _scavengedChunk = _db.Manager.GetChunk(0);
        }