/// <summary>
/// Creates a scavenger over the given chunk database and read index.
/// </summary>
/// <param name="db">The transaction-file chunk database to scavenge.</param>
/// <param name="readIndex">Read index used to decide which records are still live.</param>
public TFChunkScavenger(TFChunkDb db, IReadIndex readIndex) {
	// Validate before assigning: the original assigned the fields first and only
	// then ran the null checks, which is inconsistent with every other
	// constructor in this file (all of which validate first).
	Ensure.NotNull(db, "db");
	Ensure.NotNull(readIndex, "readIndex");
	_db = db;
	_readIndex = readIndex;
}
// Flush the output chunk every 32 pages: max 65536 pages to write resulting in
// 2048 flushes per chunk.
private const int FlushPageInterval = 32;

/// <summary>
/// Creates a multi-threaded scavenger.
/// </summary>
/// <param name="db">The transaction-file chunk database to scavenge.</param>
/// <param name="scavengerLog">Sink for scavenge progress/result reporting.</param>
/// <param name="tableIndex">Index whose entries are scavenged alongside chunks.</param>
/// <param name="readIndex">Read index used to decide which records are still live.</param>
/// <param name="maxChunkDataSize">Data-size cap for merged chunks; defaults to the configured chunk size.</param>
/// <param name="unsafeIgnoreHardDeletes">When true, tombstones themselves are also removed (unsafe).</param>
/// <param name="threads">Requested degree of parallelism; capped at MaxThreadCount.</param>
public TFChunkScavenger(TFChunkDb db, ITFChunkScavengerLog scavengerLog, ITableIndex tableIndex,
	IReadIndex readIndex, long? maxChunkDataSize = null, bool unsafeIgnoreHardDeletes = false,
	int threads = 1) {
	// NOTE: in the collapsed source this whole constructor had been swallowed by
	// the trailing '//' comment on the const above — restored to its own lines.
	Ensure.NotNull(db, "db");
	Ensure.NotNull(scavengerLog, "scavengerLog");
	Ensure.NotNull(tableIndex, "tableIndex");
	Ensure.NotNull(readIndex, "readIndex");
	Ensure.Positive(threads, "threads");

	// Cap rather than fail: scavenging should still proceed with fewer threads.
	if (threads > MaxThreadCount) {
		Log.Warning(
			"{numThreads} scavenging threads not allowed. Max threads allowed for scavenging is {maxThreadCount}. Capping.",
			threads, MaxThreadCount);
		threads = MaxThreadCount;
	}

	_db = db;
	_scavengerLog = scavengerLog;
	_tableIndex = tableIndex;
	_readIndex = readIndex;
	_maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
	_unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes;
	_threads = threads;
}
/// <summary>
/// Scavenger service over a chunk database and its read index.
/// </summary>
public StorageScavenger(TFChunkDb db, IReadIndex readIndex) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_readIndex = readIndex;
}
/// <summary>
/// Subscription over the $all stream starting from <paramref name="position"/>.
/// </summary>
public AllSubscription(IPublisher bus, Position position, bool resolveLinks, IPrincipal user,
	IReadIndex readIndex, CancellationToken cancellationToken) {
	if (bus == null) {
		throw new ArgumentNullException(nameof(bus));
	}

	if (readIndex == null) {
		throw new ArgumentNullException(nameof(readIndex));
	}

	_bus = bus;
	_nextPosition = position;
	_resolveLinks = resolveLinks;
	_user = user;
	_readIndex = readIndex;
	_disposedTokenSource = new CancellationTokenSource();
	_buffer = new ConcurrentQueue<ResolvedEvent>();
	// NOTE(review): the CancellationTokenRegistration returned here is discarded,
	// so it is never explicitly disposed — confirm intentional (the catch-up
	// variant stores it in a field).
	cancellationToken.Register(_disposedTokenSource.Dispose);
}
/// <summary>
/// Wires the storage writer: opens the chunk writer, creates the private writer
/// bus/queue, and subscribes to every write-related message type.
/// </summary>
public StorageWriter(IPublisher bus, ISubscriber subscriber, TFChunkWriter writer, IReadIndex readIndex) {
	Ensure.NotNull(bus, "bus");
	Ensure.NotNull(subscriber, "subscriber");
	Ensure.NotNull(writer, "writer");
	Ensure.NotNull(readIndex, "readIndex");

	Bus = bus;
	_subscriber = subscriber;
	ReadIndex = readIndex;

	_flushDelay = 0;
	_lastFlush = _watch.ElapsedTicks;

	Writer = writer;
	Writer.Open();

	_writerBus = new InMemoryBus("StorageWriterBus", watchSlowMsg: true, slowMsgThresholdMs: 500);
	_storageWriterQueue = new QueuedHandler(_writerBus, "StorageWriterQueue", watchSlowMsg: false);
	_storageWriterQueue.Start();

	SubscribeToMessage<SystemMessage.SystemInit>();
	SubscribeToMessage<SystemMessage.BecomeShuttingDown>();
	SubscribeToMessage<StorageMessage.WritePrepares>();
	SubscribeToMessage<StorageMessage.WriteDelete>();
	SubscribeToMessage<StorageMessage.WriteTransactionStart>();
	SubscribeToMessage<StorageMessage.WriteTransactionData>();
	SubscribeToMessage<StorageMessage.WriteTransactionPrepare>();
	SubscribeToMessage<StorageMessage.WriteCommit>();
}
/// <summary>
/// Catch-up subscription to $all: reads from a concrete start position until it
/// reaches the end of the log.
/// </summary>
public CatchupAllSubscription(Guid subscriptionId, IPublisher bus, Position position, bool resolveLinks,
	ClaimsPrincipal user, bool requiresLeader, IReadIndex readIndex, CancellationToken cancellationToken) {
	if (bus == null) {
		throw new ArgumentNullException(nameof(bus));
	}

	if (readIndex == null) {
		throw new ArgumentNullException(nameof(readIndex));
	}

	_subscriptionId = subscriptionId;
	_bus = bus;
	// Position.End means "from the current end of the log": resolve it to the
	// last indexed position now so reads have a concrete starting point.
	_nextPosition = position == Position.End
		? Position.FromInt64(readIndex.LastIndexedPosition, readIndex.LastIndexedPosition)
		: position;
	_startPosition = position == Position.End ? Position.Start : position;
	_resolveLinks = resolveLinks;
	_user = user;
	_requiresLeader = requiresLeader;
	_disposedTokenSource = new CancellationTokenSource();
	_buffer = new ConcurrentQueue<ResolvedEvent>();
	_tokenRegistration = cancellationToken.Register(_disposedTokenSource.Dispose);
	_currentPosition = _startPosition;
	Log.Information("Catch-up subscription {subscriptionId} to $all running...", _subscriptionId);
}
/// <summary>
/// $all subscription facade: starts live immediately when subscribing from the
/// end, otherwise catches up first.
/// </summary>
public AllSubscription(IPublisher bus, Position? startPosition, bool resolveLinks, ClaimsPrincipal user,
	bool requiresLeader, IReadIndex readIndex, CancellationToken cancellationToken) {
	if (bus == null) {
		throw new ArgumentNullException(nameof(bus));
	}

	if (readIndex == null) {
		throw new ArgumentNullException(nameof(readIndex));
	}

	_subscriptionId = Guid.NewGuid();
	_bus = bus;
	_resolveLinks = resolveLinks;
	_user = user;
	_requiresLeader = requiresLeader;
	_readIndex = readIndex;
	_cancellationToken = cancellationToken;
	_subscriptionStarted = new TaskCompletionSource<bool>();
	_subscriptionStarted.SetResult(true);

	// From-the-end subscriptions skip catch-up and go straight to live.
	_inner = startPosition == Position.End
		? (IStreamSubscription)new LiveStreamSubscription(_subscriptionId, _bus,
			Position.FromInt64(_readIndex.LastIndexedPosition, _readIndex.LastIndexedPosition),
			_resolveLinks, _user, _requiresLeader, _cancellationToken)
		: new CatchupAllSubscription(_subscriptionId, bus, startPosition ?? Position.Start,
			resolveLinks, user, _requiresLeader, readIndex, cancellationToken);
}
// Test-fixture helper: builds a fresh chunk database from the given records,
// flushes/syncs the writer and chaser checkpoints, then wires a TableIndex
// (IndexV3) and a log-format-generic ReadIndex over the new db and initializes
// the index committer from the chaser checkpoint. Any previously created db is
// closed first. Left byte-identical: the long positional constructor calls make
// a restyle risky; only this header comment was added.
public void CreateDb(params Rec[] records) { if (DbRes != null) { DbRes.Db.Close(); } var indexDirectory = GetFilePathFor("index"); _logFormat = LogFormatHelper <TLogFormat, TStreamId> .LogFormatFactory.Create(new() { IndexDirectory = indexDirectory, }); var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024); var dbHelper = new TFChunkDbCreationHelper <TLogFormat, TStreamId>(dbConfig, _logFormat); DbRes = dbHelper.Chunk(records).CreateDb(); DbRes.Db.Config.WriterCheckpoint.Flush(); DbRes.Db.Config.ChaserCheckpoint.Write(DbRes.Db.Config.WriterCheckpoint.Read()); DbRes.Db.Config.ChaserCheckpoint.Flush(); var readers = new ObjectPool <ITransactionFileReader>( "Readers", 2, 2, () => new TFChunkReader(DbRes.Db, DbRes.Db.Config.WriterCheckpoint)); var lowHasher = _logFormat.LowHasher; var highHasher = _logFormat.HighHasher; var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex <TStreamId>(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2), () => new TFReaderLease(readers), PTableVersions.IndexV3, int.MaxValue, Constants.PTableMaxReaderCountDefault, MaxEntriesInMemTable); _logFormat.StreamNamesProvider.SetTableIndex(TableIndex); var readIndex = new ReadIndex <TStreamId>(new NoopPublisher(), readers, TableIndex, _logFormat.StreamNameIndexConfirmer, _logFormat.StreamIds, _logFormat.StreamNamesProvider, _logFormat.EmptyStreamId, _logFormat.StreamIdValidator, _logFormat.StreamIdSizer, _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterReader, 0, additionalCommitChecks: true, metastreamMaxCount: _metastreamMaxCount, hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault, skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault, replicationCheckpoint: DbRes.Db.Config.ReplicationCheckpoint, indexCheckpoint: DbRes.Db.Config.IndexCheckpoint); readIndex.IndexCommitter.Init(DbRes.Db.Config.ChaserCheckpoint.Read()); ReadIndex = readIndex; 
}
// Fixture setup: creates a sized chunk db from the scenario's records, syncs
// the chaser to the writer checkpoint, builds TableIndex (IndexV3) + ReadIndex,
// then runs a scavenge with alwaysKeepScavenged:true and no chunk merging.
// NOTE(review): .Wait() blocks synchronously on the async Scavenge — acceptable
// in a void test fixture, but a deadlock risk if this ever runs under a
// synchronization context. Left byte-identical apart from this header comment.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); var dbConfig = TFChunkHelper.CreateDbConfig(PathName, 0, chunkSize: 1024 * 1024); var dbCreationHelper = new TFChunkDbCreationHelper(dbConfig); _dbResult = CreateDb(dbCreationHelper); _keptRecords = KeptRecords(_dbResult); _dbResult.Db.Config.WriterCheckpoint.Flush(); _dbResult.Db.Config.ChaserCheckpoint.Write(_dbResult.Db.Config.WriterCheckpoint.Read()); _dbResult.Db.Config.ChaserCheckpoint.Flush(); var indexPath = Path.Combine(PathName, "index"); var readerPool = new ObjectPool <ITransactionFileReader>( "ReadIndex readers pool", ESConsts.PTableInitialReaderCount, ESConsts.PTableMaxReaderCount, () => new TFChunkReader(_dbResult.Db, _dbResult.Db.Config.WriterCheckpoint)); var lowHasher = new XXHashUnsafe(); var highHasher = new Murmur3AUnsafe(); var tableIndex = new TableIndex(indexPath, lowHasher, highHasher, () => new HashListMemTable(PTableVersions.IndexV3, maxSize: 200), () => new TFReaderLease(readerPool), PTableVersions.IndexV3, maxSizeForMemory: 100, maxTablesPerLevel: 2); ReadIndex = new ReadIndex(new NoopPublisher(), readerPool, tableIndex, 100, true, _metastreamMaxCount, Opts.HashCollisionReadLimitDefault, Opts.SkipIndexScanOnReadsDefault, _dbResult.Db.Config.ReplicationCheckpoint); ReadIndex.Init(_dbResult.Db.Config.WriterCheckpoint.Read()); var scavenger = new TFChunkScavenger(_dbResult.Db, new FakeTFScavengerLog(), tableIndex, ReadIndex, unsafeIgnoreHardDeletes: UnsafeIgnoreHardDelete()); scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false).Wait(); }
/// <summary>
/// Captures the dependencies used to configure the cluster node's HTTP pipeline.
/// </summary>
public ClusterVNodeStartup(
	ISubsystem[] subsystems,
	IPublisher mainQueue,
	ISubscriber mainBus,
	MultiQueuedHandler httpMessageHandler,
	IReadOnlyList<IHttpAuthenticationProvider> httpAuthenticationProviders,
	IAuthorizationProvider authorizationProvider,
	IReadIndex readIndex,
	int maxAppendSize,
	KestrelHttpService httpService) {
	// Validation order preserved from the original, so the same exception
	// surfaces first when several arguments are invalid.
	if (subsystems == null) {
		throw new ArgumentNullException(nameof(subsystems));
	}

	if (mainQueue == null) {
		throw new ArgumentNullException(nameof(mainQueue));
	}

	if (httpAuthenticationProviders == null) {
		throw new ArgumentNullException(nameof(httpAuthenticationProviders));
	}

	if (authorizationProvider == null) {
		throw new ArgumentNullException(nameof(authorizationProvider));
	}

	if (readIndex == null) {
		throw new ArgumentNullException(nameof(readIndex));
	}

	Ensure.Positive(maxAppendSize, nameof(maxAppendSize));

	if (httpService == null) {
		throw new ArgumentNullException(nameof(httpService));
	}

	if (mainBus == null) {
		throw new ArgumentNullException(nameof(mainBus));
	}

	_subsystems = subsystems;
	_mainQueue = mainQueue;
	_mainBus = mainBus;
	_httpMessageHandler = httpMessageHandler;
	_httpAuthenticationProviders = httpAuthenticationProviders;
	_authorizationProvider = authorizationProvider;
	_readIndex = readIndex;
	_maxAppendSize = maxAppendSize;
	_httpService = httpService;

	_statusCheck = new StatusCheck(this);
}
/// <summary>
/// Async fixture setup: writes the test scenario into a fresh chunk db, syncs
/// checkpoints, builds TableIndex + ReadIndex, then optionally scavenges.
/// In the collapsed source the trailing '//' comments had swallowed the code
/// that followed them — restored here.
/// </summary>
public override async Task TestFixtureSetUp() {
	await base.TestFixtureSetUp();
	WriterCheckpoint = new InMemoryCheckpoint(0);
	ChaserCheckpoint = new InMemoryCheckpoint(0);
	ReplicationCheckpoint = new InMemoryCheckpoint(-1);
	Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, WriterCheckpoint, ChaserCheckpoint,
		replicationCheckpoint: ReplicationCheckpoint));
	Db.Open(); // create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
	var lowHasher = new XXHashUnsafe();
	var highHasher = new Murmur3AUnsafe();
	TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher,
		() => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		IndexBitnessVersion,
		int.MaxValue,
		Constants.PTableMaxReaderCountDefault,
		MaxEntriesInMemTable);
	ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, 0,
		additionalCommitChecks: PerformAdditionalCommitChecks,
		metastreamMaxCount: MetastreamMaxCount,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault,
		skipIndexScanOnReads: Opts.SkipIndexScanOnReadsDefault,
		replicationCheckpoint: Db.Config.ReplicationCheckpoint);
	ReadIndex.Init(ChaserCheckpoint.Read());

	// scavenge must run after readIndex is built
	if (_scavenge) {
		if (_completeLastChunkOnScavenge) {
			Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
		}

		_scavenger = new TFChunkScavenger(Db, new FakeTFScavengerLog(), TableIndex, ReadIndex);
		await _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
	}
}
/// <summary>
/// Reader worker bound to a read index and the writer checkpoint.
/// </summary>
public StorageReaderWorker(IReadIndex readIndex, ICheckpoint writerCheckpoint) {
	Ensure.NotNull(readIndex, "readIndex");
	Ensure.NotNull(writerCheckpoint, "writerCheckpoint");

	_readIndex = readIndex;
	_writerCheckpoint = writerCheckpoint;
}
/// <summary>
/// Fixture setup (legacy index API): writes the scenario, syncs checkpoints,
/// builds a ByLengthHasher-based ReadIndex, then optionally scavenges. The
/// collapsed source's '//' comments had swallowed the following code — restored.
/// </summary>
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	WriterCheckpoint = new InMemoryCheckpoint(0);
	ChaserCheckpoint = new InMemoryCheckpoint(0);
	Db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		WriterCheckpoint,
		ChaserCheckpoint,
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	Db.Open(); // create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 2,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
	TableIndex = new TableIndex(GetFilePathFor("index"),
		() => new HashListMemTable(MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		MaxEntriesInMemTable);
	var hasher = new ByLengthHasher();
	ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, hasher, 0,
		additionalCommitChecks: true,
		metastreamMaxCount: MetastreamMaxCount);
	ReadIndex.Init(ChaserCheckpoint.Read());

	// scavenge must run after readIndex is built
	if (_scavenge) {
		if (_completeLastChunkOnScavenge) {
			Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
		}

		_scavenger = new TFChunkScavenger(Db, TableIndex, hasher, ReadIndex);
		_scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
	}
}
/// <summary>
/// Scavenger service with explicit keep/merge policy flags.
/// </summary>
public StorageScavenger(TFChunkDb db, IReadIndex readIndex, bool alwaysKeepScavenged, bool mergeChunks) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_readIndex = readIndex;
	_alwaysKeepScavenged = alwaysKeepScavenged;
	_mergeChunks = mergeChunks;
}
// Async fixture setup (log-format-generic): builds a sized chunk db from the
// scenario, syncs checkpoints, wires a TableIndex<TStreamId> + ReadIndex<TStreamId>,
// initializes the index committer from the writer checkpoint, and finally runs a
// scavenge (alwaysKeepScavenged:true, no merging) honoring UnsafeIgnoreHardDelete().
// Left byte-identical: the very long positional ReadIndex constructor makes a
// restyle risky; only this header comment was added.
public override async Task TestFixtureSetUp() { await base.TestFixtureSetUp(); var indexDirectory = GetFilePathFor("index"); _logFormat = LogFormatHelper <TLogFormat, TStreamId> .LogFormatFactory.Create(new() { IndexDirectory = indexDirectory, }); var dbConfig = TFChunkHelper.CreateSizedDbConfig(PathName, 0, chunkSize: 1024 * 1024); var dbCreationHelper = new TFChunkDbCreationHelper <TLogFormat, TStreamId>(dbConfig, _logFormat); _dbResult = CreateDb(dbCreationHelper); _keptRecords = KeptRecords(_dbResult); _dbResult.Db.Config.WriterCheckpoint.Flush(); _dbResult.Db.Config.ChaserCheckpoint.Write(_dbResult.Db.Config.WriterCheckpoint.Read()); _dbResult.Db.Config.ChaserCheckpoint.Flush(); var readerPool = new ObjectPool <ITransactionFileReader>( "ReadIndex readers pool", Constants.PTableInitialReaderCount, Constants.PTableMaxReaderCountDefault, () => new TFChunkReader(_dbResult.Db, _dbResult.Db.Config.WriterCheckpoint)); var lowHasher = _logFormat.LowHasher; var highHasher = _logFormat.HighHasher; var emptyStreamId = _logFormat.EmptyStreamId; var tableIndex = new TableIndex <TStreamId>(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, maxSize: 200), () => new TFReaderLease(readerPool), PTableVersions.IndexV3, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 100, maxTablesPerLevel: 2); _logFormat.StreamNamesProvider.SetTableIndex(tableIndex); var readIndex = new ReadIndex <TStreamId>(new NoopPublisher(), readerPool, tableIndex, _logFormat.StreamNameIndexConfirmer, _logFormat.StreamIds, _logFormat.StreamNamesProvider, _logFormat.EmptyStreamId, _logFormat.StreamIdValidator, _logFormat.StreamIdSizer, _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterReader, _logFormat.EventTypeIndexConfirmer, 100, true, _metastreamMaxCount, Opts.HashCollisionReadLimitDefault, Opts.SkipIndexScanOnReadsDefault, _dbResult.Db.Config.ReplicationCheckpoint, _dbResult.Db.Config.IndexCheckpoint); 
readIndex.IndexCommitter.Init(_dbResult.Db.Config.WriterCheckpoint.Read()); ReadIndex = readIndex; var scavenger = new TFChunkScavenger <TStreamId>(_dbResult.Db, new FakeTFScavengerLog(), tableIndex, ReadIndex, _logFormat.Metastreams, unsafeIgnoreHardDeletes: UnsafeIgnoreHardDelete()); await scavenger.Scavenge(alwaysKeepScavenged : true, mergeChunks : false); }
/// <summary>
/// Scavenger with an optional cap on merged-chunk data size.
/// </summary>
public TFChunkScavenger(TFChunkDb db, IReadIndex readIndex, long? maxChunkDataSize = null) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_readIndex = readIndex;
	// Default to one full chunk's worth of data when no explicit cap is given.
	_maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
}
/// <summary>
/// Scavenger with an optional cap on merged-chunk data size.
/// </summary>
public TFChunkScavenger(TFChunkDb db, IReadIndex readIndex, long? maxChunkDataSize = null) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_readIndex = readIndex;
	// Fall back to the configured chunk size when no explicit cap is supplied.
	_maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
}
/// <summary>
/// Spins up <paramref name="threadCount"/> reader workers, each with its own
/// in-memory bus, behind a multi-queued handler, and routes all read-related
/// client/storage messages to them.
/// </summary>
public StorageReaderService(
	IPublisher bus,
	ISubscriber subscriber,
	IReadIndex readIndex,
	int threadCount,
	IReadOnlyCheckpoint writerCheckpoint,
	QueueStatsManager queueStatsManager) {
	Ensure.NotNull(bus, "bus");
	Ensure.NotNull(subscriber, "subscriber");
	Ensure.NotNull(readIndex, "readIndex");
	Ensure.Positive(threadCount, "threadCount");
	Ensure.NotNull(writerCheckpoint, "writerCheckpoint");

	_bus = bus;
	_readIndex = readIndex;
	_threadCount = threadCount;

	var workers = new StorageReaderWorker[threadCount];
	var workerBuses = new InMemoryBus[threadCount];
	for (var i = 0; i < threadCount; i++) {
		workers[i] = new StorageReaderWorker(bus, readIndex, writerCheckpoint, i);
		workerBuses[i] = new InMemoryBus("StorageReaderBus", watchSlowMsg: false);
		workerBuses[i].Subscribe<ClientMessage.ReadEvent>(workers[i]);
		workerBuses[i].Subscribe<ClientMessage.ReadStreamEventsBackward>(workers[i]);
		workerBuses[i].Subscribe<ClientMessage.ReadStreamEventsForward>(workers[i]);
		workerBuses[i].Subscribe<ClientMessage.ReadAllEventsForward>(workers[i]);
		workerBuses[i].Subscribe<ClientMessage.ReadAllEventsBackward>(workers[i]);
		workerBuses[i].Subscribe<ClientMessage.FilteredReadAllEventsForward>(workers[i]);
		workerBuses[i].Subscribe<ClientMessage.FilteredReadAllEventsBackward>(workers[i]);
		workerBuses[i].Subscribe<StorageMessage.BatchLogExpiredMessages>(workers[i]);
		workerBuses[i].Subscribe<StorageMessage.EffectiveStreamAclRequest>(workers[i]);
		workerBuses[i].Subscribe<StorageMessage.StreamIdFromTransactionIdRequest>(workers[i]);
	}

	_workersMultiHandler = new MultiQueuedHandler(
		_threadCount,
		queueNum => new QueuedHandlerThreadPool(workerBuses[queueNum],
			string.Format("StorageReaderQueue #{0}", queueNum + 1),
			queueStatsManager,
			groupName: "StorageReaderQueue",
			watchSlowMsg: true,
			slowMsgThreshold: TimeSpan.FromMilliseconds(200)));
	_workersMultiHandler.Start();

	subscriber.Subscribe(_workersMultiHandler.WidenFrom<ClientMessage.ReadEvent, Message>());
	subscriber.Subscribe(_workersMultiHandler.WidenFrom<ClientMessage.ReadStreamEventsBackward, Message>());
	subscriber.Subscribe(_workersMultiHandler.WidenFrom<ClientMessage.ReadStreamEventsForward, Message>());
	subscriber.Subscribe(_workersMultiHandler.WidenFrom<ClientMessage.ReadAllEventsForward, Message>());
	subscriber.Subscribe(_workersMultiHandler.WidenFrom<ClientMessage.ReadAllEventsBackward, Message>());
	subscriber.Subscribe(_workersMultiHandler.WidenFrom<ClientMessage.FilteredReadAllEventsForward, Message>());
	subscriber.Subscribe(_workersMultiHandler.WidenFrom<ClientMessage.FilteredReadAllEventsBackward, Message>());
	subscriber.Subscribe(_workersMultiHandler.WidenFrom<StorageMessage.BatchLogExpiredMessages, Message>());
	subscriber.Subscribe(_workersMultiHandler.WidenFrom<StorageMessage.EffectiveStreamAclRequest, Message>());
	subscriber.Subscribe(_workersMultiHandler.WidenFrom<StorageMessage.StreamIdFromTransactionIdRequest, Message>());
}
/// <summary>
/// Reader worker that publishes results via <paramref name="publisher"/>.
/// </summary>
public StorageReaderWorker(IPublisher publisher, IReadIndex readIndex, ICheckpoint writerCheckpoint) {
	Ensure.NotNull(publisher, "publisher");
	Ensure.NotNull(readIndex, "readIndex");
	Ensure.NotNull(writerCheckpoint, "writerCheckpoint");

	_publisher = publisher;
	_readIndex = readIndex;
	_writerCheckpoint = writerCheckpoint;
}
/// <summary>
/// Subscription service: wraps the bus in a publish envelope for replies.
/// </summary>
public SubscriptionsService(IPublisher bus, IQueuedHandler queuedHandler, IReadIndex readIndex) {
	Ensure.NotNull(bus, "bus");
	Ensure.NotNull(queuedHandler, "queuedHandler");
	Ensure.NotNull(readIndex, "readIndex");

	_bus = bus;
	_busEnvelope = new PublishEnvelope(bus);
	_queuedHandler = queuedHandler;
	_readIndex = readIndex;
}
/// <summary>
/// Fixture setup (legacy index API): writes the scenario, syncs checkpoints,
/// builds a ByLengthHasher-based ReadIndex, then optionally scavenges. The
/// collapsed source's '//' comments had swallowed the following code — restored.
/// </summary>
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	WriterCheckpoint = new InMemoryCheckpoint(0);
	ChaserCheckpoint = new InMemoryCheckpoint(0);
	Db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		WriterCheckpoint,
		ChaserCheckpoint,
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	Db.Open(); // create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();

	var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 2,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint));
	TableIndex = new TableIndex(GetFilePathFor("index"),
		() => new HashListMemTable(MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readers),
		MaxEntriesInMemTable);
	var hasher = new ByLengthHasher();
	ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, hasher, 0,
		additionalCommitChecks: true,
		metastreamMaxCount: MetastreamMaxCount);
	ReadIndex.Init(ChaserCheckpoint.Read());

	// scavenge must run after readIndex is built
	if (_scavenge) {
		if (_completeLastChunkOnScavenge) {
			Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
		}

		_scavenger = new TFChunkScavenger(Db, TableIndex, hasher, ReadIndex);
		_scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
	}
}
/// <summary>
/// gRPC Streams service wiring.
/// NOTE(review): only <paramref name="publisher"/> is null-checked here —
/// confirm the remaining parameters are guaranteed non-null by the caller.
/// </summary>
public Streams(IPublisher publisher, IReadIndex readIndex, int maxAppendSize, IAuthorizationProvider provider) {
	if (publisher == null) {
		throw new ArgumentNullException(nameof(publisher));
	}

	_publisher = publisher;
	_readIndex = readIndex;
	_maxAppendSize = maxAppendSize;
	_provider = provider;
}
/// <summary>
/// Streams service wiring over a queued handler.
/// NOTE(review): only <paramref name="queue"/> is null-checked — confirm the
/// other parameters are guaranteed non-null by the caller.
/// </summary>
public Streams(IQueuedHandler queue, IReadIndex readIndex, int maxAppendSize) {
	if (queue == null) {
		throw new ArgumentNullException(nameof(queue));
	}

	_queue = queue;
	_readIndex = readIndex;
	_maxAppendSize = maxAppendSize;
}
/// <summary>
/// Scavenger that also maintains the table index via the supplied hasher.
/// </summary>
public TFChunkScavenger(TFChunkDb db, ITableIndex tableIndex, IHasher hasher, IReadIndex readIndex,
	long? maxChunkDataSize = null) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(tableIndex, "tableIndex");
	Ensure.NotNull(hasher, "hasher");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_tableIndex = tableIndex;
	_hasher = hasher;
	_readIndex = readIndex;
	// Default to the configured chunk size when no explicit cap is supplied.
	_maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
}
/// <summary>
/// Scavenger that also maintains the table index via the supplied hasher.
/// </summary>
public TFChunkScavenger(TFChunkDb db, ITableIndex tableIndex, IHasher hasher, IReadIndex readIndex,
	long? maxChunkDataSize = null) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(tableIndex, "tableIndex");
	Ensure.NotNull(hasher, "hasher");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_tableIndex = tableIndex;
	_hasher = hasher;
	_readIndex = readIndex;
	// Fall back to one full chunk's worth of data when no cap is given.
	_maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
}
/// <summary>
/// Generic Streams service wiring with a write timeout.
/// NOTE(review): only <paramref name="publisher"/> is null-checked — confirm
/// the other parameters are guaranteed non-null by the caller.
/// </summary>
public Streams(IPublisher publisher, IReadIndex<TStreamId> readIndex, int maxAppendSize,
	TimeSpan writeTimeout, IAuthorizationProvider provider) {
	if (publisher == null) {
		throw new ArgumentNullException(nameof(publisher));
	}

	_publisher = publisher;
	_readIndex = readIndex;
	_maxAppendSize = maxAppendSize;
	_writeTimeout = writeTimeout;
	_provider = provider;
}
/// <summary>
/// Storage chaser: follows the transaction file behind the writer.
/// </summary>
public StorageChaser(IPublisher masterBus, ITransactionFileChaser chaser, IReadIndex readIndex) {
	Ensure.NotNull(masterBus, "masterBus");
	Ensure.NotNull(chaser, "chaser");
	Ensure.NotNull(readIndex, "readIndex");

	_masterBus = masterBus;
	_chaser = chaser;
	_readIndex = readIndex;
	_flushDelay = 0;
	_lastFlush = _watch.ElapsedTicks;
}
/// <summary>
/// Storage reader front-end; delegates message wiring to SetupMessaging.
/// </summary>
public StorageReader(IPublisher bus, ISubscriber subscriber, IReadIndex readIndex, int threadCount) {
	Ensure.NotNull(bus, "bus");
	Ensure.NotNull(subscriber, "subscriber");
	Ensure.NotNull(readIndex, "readIndex");
	Ensure.Positive(threadCount, "threadCount");

	_bus = bus;
	_readIndex = readIndex;
	_threadCount = threadCount;
	SetupMessaging(subscriber);
}
/// <summary>
/// Scavenger with result logging and optional removal of hard-delete tombstones.
/// </summary>
public TFChunkScavenger(TFChunkDb db, ITFChunkScavengerLog scavengerLog, ITableIndex tableIndex,
	IReadIndex readIndex, long? maxChunkDataSize = null, bool unsafeIgnoreHardDeletes = false) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(scavengerLog, "scavengerLog");
	Ensure.NotNull(tableIndex, "tableIndex");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_scavengerLog = scavengerLog;
	_tableIndex = tableIndex;
	_readIndex = readIndex;
	// Default the merged-chunk cap to the configured chunk size.
	_maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
	_unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes;
}
/// <summary>
/// Startup wiring for the cluster node's external/internal HTTP services.
/// The internal HTTP service is optional and therefore not null-checked.
/// </summary>
public ClusterVNodeStartup(
	ISubsystem[] subsystems,
	IQueuedHandler mainQueue,
	IAuthenticationProvider internalAuthenticationProvider,
	IReadIndex readIndex,
	ClusterVNodeSettings vNodeSettings,
	KestrelHttpService externalHttpService,
	KestrelHttpService internalHttpService = null) {
	// Validation order preserved from the original.
	if (subsystems == null) {
		throw new ArgumentNullException(nameof(subsystems));
	}

	if (mainQueue == null) {
		throw new ArgumentNullException(nameof(mainQueue));
	}

	if (internalAuthenticationProvider == null) {
		throw new ArgumentNullException(nameof(internalAuthenticationProvider));
	}

	if (readIndex == null) {
		throw new ArgumentNullException(nameof(readIndex));
	}

	if (vNodeSettings == null) {
		throw new ArgumentNullException(nameof(vNodeSettings));
	}

	if (externalHttpService == null) {
		throw new ArgumentNullException(nameof(externalHttpService));
	}

	_subsystems = subsystems;
	_mainQueue = mainQueue;
	_internalAuthenticationProvider = internalAuthenticationProvider;
	_readIndex = readIndex;
	_vNodeSettings = vNodeSettings;
	_externalHttpService = externalHttpService;
	_internalHttpService = internalHttpService;

	_statusCheck = new StatusCheck(this);
}
/// <summary>
/// Fixture setup (oldest API shape): writes the scenario via OpenVerifyAndClean,
/// builds the sequential-reader ReadIndex, then optionally scavenges. The
/// collapsed source's '//' comments had swallowed the following code — restored.
/// </summary>
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	WriterChecksum = new InMemoryCheckpoint(0);
	ChaserChecksum = new InMemoryCheckpoint(0);
	Db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		WriterChecksum,
		ChaserChecksum,
		new[] { WriterChecksum, ChaserChecksum }));
	Db.OpenVerifyAndClean(); // create db
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	WriterChecksum.Flush();
	ChaserChecksum.Write(WriterChecksum.Read());
	ChaserChecksum.Flush();

	TableIndex = new TableIndex(Path.Combine(PathName, "index"),
		() => new HashListMemTable(MaxEntriesInMemTable * 2),
		MaxEntriesInMemTable);
	var reader = new TFChunkReader(Db, Db.Config.WriterCheckpoint);
	ReadIndex = new ReadIndex(new NoopPublisher(),
		2,
		() => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0),
		() => reader,
		TableIndex,
		new ByLengthHasher(),
		new NoLRUCache<string, StreamCacheInfo>());
	ReadIndex.Build();

	// scavenge must run after readIndex is built
	if (_scavenge) {
		_scavenger = new TFChunkScavenger(Db, ReadIndex);
		_scavenger.Scavenge(alwaysKeepScavenged: true);
	}
}
/// <summary>
/// Scavenger service with a log manager and full policy flags.
/// </summary>
public StorageScavenger(TFChunkDb db, ITableIndex tableIndex, IReadIndex readIndex,
	ITFChunkScavengerLogManager logManager, bool alwaysKeepScavenged, bool mergeChunks,
	bool unsafeIgnoreHardDeletes) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(logManager, "logManager");
	Ensure.NotNull(tableIndex, "tableIndex");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_tableIndex = tableIndex;
	_readIndex = readIndex;
	_alwaysKeepScavenged = alwaysKeepScavenged;
	_mergeChunks = mergeChunks;
	_unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes;
	_logManager = logManager;
}
/// <summary>
/// Scavenger service that also maintains the table index via the given hasher.
/// </summary>
public StorageScavenger(TFChunkDb db, ITableIndex tableIndex, IHasher hasher, IReadIndex readIndex,
	bool alwaysKeepScavenged, bool mergeChunks) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(tableIndex, "tableIndex");
	Ensure.NotNull(hasher, "hasher");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_tableIndex = tableIndex;
	_hasher = hasher;
	_readIndex = readIndex;
	_alwaysKeepScavenged = alwaysKeepScavenged;
	_mergeChunks = mergeChunks;
}
/// <summary>
/// Scavenger that reports progress through an IO dispatcher, tagged with a
/// scavenge id and the node endpoint.
/// NOTE(review): scavengeId has no null-style guard (it is a value type); the
/// original checks nodeEndpoint before readIndex — order preserved.
/// </summary>
public TFChunkScavenger(TFChunkDb db, IODispatcher ioDispatcher, ITableIndex tableIndex, IReadIndex readIndex,
	Guid scavengeId, string nodeEndpoint, long? maxChunkDataSize = null, bool unsafeIgnoreHardDeletes = false) {
	Ensure.NotNull(db, "db");
	Ensure.NotNull(ioDispatcher, "ioDispatcher");
	Ensure.NotNull(tableIndex, "tableIndex");
	Ensure.NotNull(nodeEndpoint, "nodeEndpoint");
	Ensure.NotNull(readIndex, "readIndex");

	_db = db;
	_ioDispatcher = ioDispatcher;
	_tableIndex = tableIndex;
	_scavengeId = scavengeId;
	_nodeEndpoint = nodeEndpoint;
	_readIndex = readIndex;
	_maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
	_unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes;
}
/// <summary>
/// Fixture setup: builds a db from the scenario, wires 64-bit TableIndex +
/// ReadIndex, then scavenges via the IODispatcher-based scavenger. In the
/// collapsed source the '//var scavengeReadIndex…' comment had swallowed the
/// live code after it — restored here.
/// </summary>
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	var dbConfig = new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		1024 * 1024,
		0,
		new InMemoryCheckpoint(0),
		new InMemoryCheckpoint(0),
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1));
	var dbCreationHelper = new TFChunkDbCreationHelper(dbConfig);
	_dbResult = CreateDb(dbCreationHelper);
	_keptRecords = KeptRecords(_dbResult);

	_dbResult.Db.Config.WriterCheckpoint.Flush();
	_dbResult.Db.Config.ChaserCheckpoint.Write(_dbResult.Db.Config.WriterCheckpoint.Read());
	_dbResult.Db.Config.ChaserCheckpoint.Flush();

	var indexPath = Path.Combine(PathName, "index");
	var readerPool = new ObjectPool<ITransactionFileReader>(
		"ReadIndex readers pool", ESConsts.PTableInitialReaderCount, ESConsts.PTableMaxReaderCount,
		() => new TFChunkReader(_dbResult.Db, _dbResult.Db.Config.WriterCheckpoint));
	var lowHasher = new XXHashUnsafe();
	var highHasher = new Murmur3AUnsafe();
	var tableIndex = new TableIndex(indexPath, lowHasher, highHasher,
		() => new HashListMemTable(PTableVersions.Index64Bit, maxSize: 200),
		() => new TFReaderLease(readerPool),
		PTableVersions.Index64Bit,
		maxSizeForMemory: 100,
		maxTablesPerLevel: 2);
	ReadIndex = new ReadIndex(new NoopPublisher(), readerPool, tableIndex, 100, true, _metastreamMaxCount,
		Opts.HashCollisionReadLimitDefault);
	ReadIndex.Init(_dbResult.Db.Config.WriterCheckpoint.Read());

	//var scavengeReadIndex = new ScavengeReadIndex(_dbResult.Streams, _metastreamMaxCount);
	var bus = new InMemoryBus("Bus");
	var ioDispatcher = new IODispatcher(bus, new PublishEnvelope(bus));
	var scavenger = new TFChunkScavenger(_dbResult.Db, ioDispatcher, tableIndex, ReadIndex, Guid.NewGuid(),
		"fakeNodeIp", unsafeIgnoreHardDeletes: UnsafeIgnoreHardDelete());
	scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);
}
// Fixture setup (no scavenge): builds a db from the scenario, syncs the chaser
// to the writer checkpoint, wires a 64-bit TableIndex + ReadIndex, and
// initializes the read index from the CHASER checkpoint (equal to the writer's
// at this point, since it was just written from it). Left byte-identical apart
// from this header comment.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); var dbConfig = new TFChunkDbConfig(PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"), 1024*1024, 0, new InMemoryCheckpoint(0), new InMemoryCheckpoint(0), new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1)); var dbCreationHelper = new TFChunkDbCreationHelper(dbConfig); DbRes = CreateDb(dbCreationHelper); DbRes.Db.Config.WriterCheckpoint.Flush(); DbRes.Db.Config.ChaserCheckpoint.Write(DbRes.Db.Config.WriterCheckpoint.Read()); DbRes.Db.Config.ChaserCheckpoint.Flush(); var readers = new ObjectPool<ITransactionFileReader>( "Readers", 2, 2, () => new TFChunkReader(DbRes.Db, DbRes.Db.Config.WriterCheckpoint)); var lowHasher = new XXHashUnsafe(); var highHasher = new Murmur3AUnsafe(); TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher, () => new HashListMemTable(PTableVersions.Index64Bit, MaxEntriesInMemTable * 2), () => new TFReaderLease(readers), PTableVersions.Index64Bit, MaxEntriesInMemTable); ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, 0, additionalCommitChecks: true, metastreamMaxCount: _metastreamMaxCount, hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault); ReadIndex.Init(DbRes.Db.Config.ChaserCheckpoint.Read()); }
/// <summary>
/// Fixture setup (legacy single-hasher index): builds a db from the scenario,
/// wires TableIndex + ReadIndex, then scavenges. In the collapsed source the
/// '//var scavengeReadIndex…' comment had swallowed the live code after it —
/// restored here.
/// </summary>
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();
	var dbConfig = new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		1024 * 1024,
		0,
		new InMemoryCheckpoint(0),
		new InMemoryCheckpoint(0),
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1));
	var dbCreationHelper = new TFChunkDbCreationHelper(dbConfig);
	_dbResult = CreateDb(dbCreationHelper);
	_keptRecords = KeptRecords(_dbResult);

	_dbResult.Db.Config.WriterCheckpoint.Flush();
	_dbResult.Db.Config.ChaserCheckpoint.Write(_dbResult.Db.Config.WriterCheckpoint.Read());
	_dbResult.Db.Config.ChaserCheckpoint.Flush();

	var indexPath = Path.Combine(PathName, "index");
	var readerPool = new ObjectPool<ITransactionFileReader>(
		"ReadIndex readers pool", ESConsts.PTableInitialReaderCount, ESConsts.PTableMaxReaderCount,
		() => new TFChunkReader(_dbResult.Db, _dbResult.Db.Config.WriterCheckpoint));
	var tableIndex = new TableIndex(indexPath,
		() => new HashListMemTable(maxSize: 200),
		() => new TFReaderLease(readerPool),
		maxSizeForMemory: 100,
		maxTablesPerLevel: 2);
	var hasher = new XXHashUnsafe();
	ReadIndex = new ReadIndex(new NoopPublisher(), readerPool, tableIndex, hasher, 100, true, _metastreamMaxCount);
	ReadIndex.Init(_dbResult.Db.Config.WriterCheckpoint.Read());

	//var scavengeReadIndex = new ScavengeReadIndex(_dbResult.Streams, _metastreamMaxCount);
	var scavenger = new TFChunkScavenger(_dbResult.Db, tableIndex, hasher, ReadIndex);
	scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false);
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();

	// 1MB chunks; writer/chaser start at 0, epoch/truncate checkpoints unset (-1).
	var config = new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		1024 * 1024,
		0,
		new InMemoryCheckpoint(0),
		new InMemoryCheckpoint(0),
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1));
	var creator = new TFChunkDbCreationHelper(config);

	DbRes = CreateDb(creator);

	// Persist the writer position, then bring the chaser up to date with it.
	DbRes.Db.Config.WriterCheckpoint.Flush();
	DbRes.Db.Config.ChaserCheckpoint.Write(DbRes.Db.Config.WriterCheckpoint.Read());
	DbRes.Db.Config.ChaserCheckpoint.Flush();

	TableIndex = new TableIndex(GetFilePathFor("index"),
		() => new HashListMemTable(MaxEntriesInMemTable * 2),
		MaxEntriesInMemTable);

	ReadIndex = new ReadIndex(new NoopPublisher(),
		2,
		2,
		() => new TFChunkReader(DbRes.Db, DbRes.Db.Config.WriterCheckpoint),
		TableIndex,
		new ByLengthHasher(),
		new NoLRUCache<string, StreamCacheInfo>(),
		additionalCommitChecks: true,
		metastreamMaxCount: _metastreamMaxCount);
	ReadIndex.Init(DbRes.Db.Config.WriterCheckpoint.Read(), DbRes.Db.Config.ChaserCheckpoint.Read());
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();

	WriterChecksum = new InMemoryCheckpoint(0);
	ChaserChecksum = new InMemoryCheckpoint(0);
	Db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		WriterChecksum,
		ChaserChecksum,
		new[] { WriterChecksum, ChaserChecksum }));
	Db.OpenVerifyAndClean(); // create db

	// Write the test scenario's records, then release the writer.
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	// Persist the writer position, then bring the chaser up to date with it.
	WriterChecksum.Flush();
	ChaserChecksum.Write(WriterChecksum.Read());
	ChaserChecksum.Flush();

	TableIndex = new TableIndex(Path.Combine(PathName, "index"),
		() => new HashListMemTable(MaxEntriesInMemTable * 2),
		MaxEntriesInMemTable);

	var chunkReader = new TFChunkReader(Db, Db.Config.WriterCheckpoint);
	ReadIndex = new ReadIndex(new NoopPublisher(),
		2,
		() => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0),
		() => chunkReader,
		TableIndex,
		new ByLengthHasher(),
		new NoLRUCache<string, StreamCacheInfo>());
	ReadIndex.Build();

	// Scavenge must run only after the read index has been built.
	if (_scavenge) {
		_scavenger = new TFChunkScavenger(Db, ReadIndex);
		_scavenger.Scavenge(alwaysKeepScavenged: true);
	}
}
public override void TestFixtureSetUp() {
	base.TestFixtureSetUp();

	WriterCheckpoint = new InMemoryCheckpoint(0);
	ChaserCheckpoint = new InMemoryCheckpoint(0);
	Bus = new InMemoryBus("bus");
	IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus));

	// 10KB chunks; epoch/truncate checkpoints unset (-1).
	Db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
		10000,
		0,
		WriterCheckpoint,
		ChaserCheckpoint,
		new InMemoryCheckpoint(-1),
		new InMemoryCheckpoint(-1)));
	Db.Open(); // create db

	// Write the test scenario's records, then release the writer.
	Writer = new TFChunkWriter(Db);
	Writer.Open();
	WriteTestScenario();
	Writer.Close();
	Writer = null;

	// Persist the writer position, then bring the chaser up to date with it.
	WriterCheckpoint.Flush();
	ChaserCheckpoint.Write(WriterCheckpoint.Read());
	ChaserCheckpoint.Flush();

	var readerPool = new ObjectPool<ITransactionFileReader>(
		"Readers", 2, 5,
		() => new TFChunkReader(Db, Db.Config.WriterCheckpoint));

	var xxHash = new XXHashUnsafe();
	var murmur = new Murmur3AUnsafe();
	TableIndex = new TableIndex(GetFilePathFor("index"), xxHash, murmur,
		() => new HashListMemTable(PTableVersions.Index64Bit, MaxEntriesInMemTable * 2),
		() => new TFReaderLease(readerPool),
		PTableVersions.Index64Bit,
		MaxEntriesInMemTable);

	ReadIndex = new ReadIndex(new NoopPublisher(),
		readerPool,
		TableIndex,
		0,
		additionalCommitChecks: true,
		metastreamMaxCount: MetastreamMaxCount,
		hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault);
	ReadIndex.Init(ChaserCheckpoint.Read());

	// Scavenge must run only after the read index has been built.
	if (_scavenge) {
		if (_completeLastChunkOnScavenge)
			Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete();
		_scavenger = new TFChunkScavenger(Db, IODispatcher, TableIndex, ReadIndex, Guid.NewGuid(), "fakeNodeIp");
		_scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks);
	}
}