// todo: rename to GetOrReserveStream when we generalise to EventTypes too.
/// Generates a StreamRecord if necessary
public static bool GetOrReserve<TStreamId>(
	this INameIndex<TStreamId> streamNameIndex,
	IRecordFactory<TStreamId> recordFactory,
	string streamName,
	long logPosition,
	out TStreamId streamId,
	out IPrepareLogRecord<TStreamId> streamRecord) {

	// Look up (or reserve a slot for) the stream name; reports whether it already existed.
	var existed = streamNameIndex.GetOrReserve(streamName, out streamId, out var reservedNumber, out var reservedName);

	streamRecord = null;
	// Only materialise a stream record when the log format requires explicit
	// stream creation AND this name was not already known to the index.
	if (recordFactory.ExplicitStreamCreation && !existed) {
		streamRecord = recordFactory.CreateStreamRecord(
			streamId: Guid.NewGuid(),
			logPosition: logPosition,
			timeStamp: DateTime.UtcNow,
			streamNumber: reservedNumber,
			streamName: reservedName);
	}

	return existed;
}
/// Generates an EventType record if necessary.
public static bool GetOrReserveEventType<TStreamId>(
	this INameIndex<TStreamId> eventTypeIndex,
	IRecordFactory<TStreamId> recordFactory,
	string eventType,
	long logPosition,
	out TStreamId eventTypeId,
	out IPrepareLogRecord<TStreamId> eventTypeRecord) {

	// Look up (or reserve a slot for) the event type; reports whether it already existed.
	var existed = eventTypeIndex.GetOrReserve(eventType, out eventTypeId, out var reservedNumber, out var reservedName);

	eventTypeRecord = null;
	// Only materialise an event-type record when the log format requires explicit
	// event-type creation AND this name was not already known to the index.
	if (recordFactory.ExplicitEventTypeCreation && !existed) {
		eventTypeRecord = recordFactory.CreateEventTypeRecord(
			eventTypeId: Guid.NewGuid(),
			parentEventTypeId: Guid.Empty,
			eventType: reservedName,
			eventTypeNumber: reservedNumber,
			eventTypeVersion: 0,
			logPosition: logPosition,
			timeStamp: DateTime.UtcNow);
	}

	return existed;
}
// Decorates a stream name index with metastream awareness via the supplied lookup.
// NOTE(review): arguments are not null-checked here — presumably guaranteed by the caller; confirm.
public StreamNameIndexMetastreamDecorator(
	INameIndex<StreamId> wrapped,
	IMetastreamLookup<StreamId> metastreams) {
	_wrapped = wrapped;
	_metastreams = metastreams;
}
// Constructs the epoch manager: validates its core dependencies, then wires up
// the reader pool, writer, checkpoint, and log-format collaborators.
// Fix: the null-check for `writer` previously reported the wrong parameter name
// ("chunkWriter"); all Ensure messages now use nameof() so they stay refactor-safe.
public EpochManager(IPublisher bus,
	int cachedEpochCount,
	ICheckpoint checkpoint,
	ITransactionFileWriter writer,
	int initialReaderCount,
	int maxReaderCount,
	Func<ITransactionFileReader> readerFactory,
	IRecordFactory<TStreamId> recordFactory,
	INameIndex<TStreamId> streamNameIndex,
	INameIndex<TStreamId> eventTypeIndex,
	IPartitionManager partitionManager,
	Guid instanceId) {
	Ensure.NotNull(bus, nameof(bus));
	Ensure.Nonnegative(cachedEpochCount, nameof(cachedEpochCount));
	Ensure.NotNull(checkpoint, nameof(checkpoint));
	Ensure.NotNull(writer, nameof(writer));
	Ensure.Nonnegative(initialReaderCount, nameof(initialReaderCount));
	Ensure.Positive(maxReaderCount, nameof(maxReaderCount));
	// The pool cannot start with more readers than it is allowed to grow to.
	if (initialReaderCount > maxReaderCount)
		throw new ArgumentOutOfRangeException(nameof(initialReaderCount),
			"initialReaderCount is greater than maxReaderCount.");
	Ensure.NotNull(readerFactory, nameof(readerFactory));

	_bus = bus;
	_cacheSize = cachedEpochCount;
	_checkpoint = checkpoint;
	_readers = new ObjectPool<ITransactionFileReader>("EpochManager readers pool",
		initialReaderCount, maxReaderCount, readerFactory);
	_writer = writer;
	_recordFactory = recordFactory;
	_streamNameIndex = streamNameIndex;
	_eventTypeIndex = eventTypeIndex;
	_partitionManager = partitionManager;
	_instanceId = instanceId;
}
// Builds the full test fixture: log format, chunk db, writes the scenario via the
// writer, then constructs the table/read index over the written data, and finally
// (optionally) scavenges. Order matters throughout: the scenario must be written
// and checkpoints flushed before the read index is initialised, and scavenging
// must run after the read index exists.
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();

	// Log format (record factory + name indexes) backed by an on-disk index dir.
	var indexDirectory = GetFilePathFor("index");
	_logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormatFactory.Create(new() {
		IndexDirectory = indexDirectory,
	});
	_recordFactory = _logFormat.RecordFactory;
	_streamNameIndex = _logFormat.StreamNameIndex;
	_eventTypeIndex = _logFormat.EventTypeIndex;

	WriterCheckpoint = new InMemoryCheckpoint(0);
	ChaserCheckpoint = new InMemoryCheckpoint(0);

	// create db
	Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, WriterCheckpoint, ChaserCheckpoint,
		replicationCheckpoint: new InMemoryCheckpoint(-1), chunkSize: _chunkSize));
	Db.Open();

	// Write the test scenario, then tear the writer down so the data is complete.
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	// Advance the chaser to everything the writer produced.
	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
	var lowHasher = _logFormat.LowHasher;
	var highHasher = _logFormat.HighHasher;
	var emptyStreamId = _logFormat.EmptyStreamId;
	TableIndex = new TableIndex<TStreamId>(indexDirectory, lowHasher, highHasher, emptyStreamId,
		() => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		IndexBitnessVersion,
		int.MaxValue,
		Constants.PTableMaxReaderCountDefault,
		MaxEntriesInMemTable);
	_logFormat.StreamNamesProvider.SetTableIndex(TableIndex);

	var readIndex = new ReadIndex<TStreamId>(new NoopPublisher(),
		readers,
		TableIndex,
		_logFormat.StreamNameIndexConfirmer,
		_logFormat.StreamIds,
		_logFormat.StreamNamesProvider,
		_logFormat.EmptyStreamId,
		_logFormat.StreamIdValidator,
		_logFormat.StreamIdSizer,
		_logFormat.StreamExistenceFilter,
		_logFormat.StreamExistenceFilterReader,
		_logFormat.EventTypeIndexConfirmer,
		streamInfoCacheCapacity: StreamInfoCacheCapacity,
		additionalCommitChecks: PerformAdditionalCommitChecks,
		metastreamMaxCount: MetastreamMaxCount,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
		skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
		replicationCheckpoint: Db.Config.ReplicationCheckpoint,
		indexCheckpoint: Db.Config.IndexCheckpoint);
	readIndex.IndexCommitter.Init(ChaserCheckpoint.Read());
	ReadIndex = readIndex;

	// scavenge must run after readIndex is built
	if (_scavenge) {
		if (_completeLastChunkOnScavenge)
			Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
		_scavenger = new TFChunkScavenger<TStreamId>(Db, new FakeTFScavengerLog(), TableIndex, ReadIndex,
			_logFormat.Metastreams);
		await _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
	}
}
// Decorates an event type index with handling for system event types.
// NOTE(review): `wrapped` is not null-checked — presumably guaranteed by the caller; confirm.
public EventTypeIndexSystemTypesDecorator(INameIndex<uint> wrapped) {
	_wrapped = wrapped;
}