// Wires up the system under test: an in-memory transaction-file reader, a real
// TableIndex backed by it, the initializer being tested, and a mock existence
// filter to receive the initialization.
public LogV2StreamExistenceFilterInitializerTests() {
	_log = new FakeInMemoryTfReader(recordOffset: _recordOffset);

	_tableIndex = new TableIndex<string>(
		directory: Fixture.Directory,
		lowHasher: new XXHashUnsafe(),
		highHasher: new Murmur3AUnsafe(),
		emptyStreamId: string.Empty,
		memTableFactory: () => new HashListMemTable(
			version: PTableVersions.IndexV4,
			maxSize: 1_000_000 * 2),
		maxSizeForMemory: 100_000,
		tfReaderFactory: () => new TFReaderLease(_log),
		ptableVersion: PTableVersions.IndexV4,
		maxAutoMergeIndexLevel: int.MaxValue,
		pTableMaxReaderCount: 5);
	_tableIndex.Initialize(0);

	_sut = new LogV2StreamExistenceFilterInitializer(
		tfReaderFactory: () => new TFReaderLease(_log),
		tableIndex: _tableIndex);

	// Use the same low/high hash pair as the index so the filter computes
	// identical stream hashes.
	var streamHasher = new CompositeHasher<string>(new XXHashUnsafe(), new Murmur3AUnsafe());
	_filter = new MockExistenceFilter(streamHasher);
}
// Populates the index with enough events to force ptable merges while the
// filter is initialized slowly, checking that initialization survives
// concurrent merge activity.
// NOTE(review): method name carries a pre-existing typo ("initalize");
// kept as-is since renaming a discovered test changes its public identity.
public void can_initalize_during_merge() {
	const int eventsPerStream = 1_000;
	const int numStreams = 1_000;
	const int numEvents = eventsPerStream * numStreams;

	// Append events stream-by-stream, in the same order the original did.
	for (var stream = 0; stream < numStreams; stream++) {
		for (var eventNumber = 0; eventNumber < eventsPerStream; eventNumber++) {
			AddEventToSut(stream: $"stream-{stream}", eventNumber: eventNumber);
		}
	}

	var hasher = new CompositeHasher<string>(new XXHashUnsafe(), new Murmur3AUnsafe());
	// addDelayMs: we want to initialize the filter slowly, to give the ptables longer to move around
	var slowFilter = new MockExistenceFilter(hasher, addDelayMs: 1);

	_sut.Initialize(slowFilter, 0);

	// Checkpoint must have advanced past every record, every stream must have
	// been hashed, and the log must have been read exactly twice.
	Assert.Equal(numEvents * _recordOffset, slowFilter.CurrentCheckpoint);
	Assert.Equal(numStreams, slowFilter.Hashes.Count);
	Assert.Equal(2, _log.NumReads);
}