//TODO GFY add fluent builder for this
/// <summary>
/// Immutable configuration for a multifile transaction-file database:
/// location, file naming, segment size, writer checkpoint and named checkpoints.
/// </summary>
public TransactionFileDatabaseConfig(string path,
                                     string filePrefix,
                                     long segmentSize,
                                     ICheckpoint writerCheckpoint,
                                     IEnumerable<ICheckpoint> namedCheckpoints)
{
    // Fail fast on invalid construction arguments.
    if (path == null)
        throw new ArgumentNullException("path");
    if (filePrefix == null)
        throw new ArgumentNullException("filePrefix");
    if (segmentSize <= 0)
        throw new ArgumentOutOfRangeException("segmentSize");
    if (writerCheckpoint == null)
        throw new ArgumentNullException("writerCheckpoint");
    if (namedCheckpoints == null)
        throw new ArgumentNullException("namedCheckpoints");
    // if ((segmentSize & (segmentSize-1)) != 0)
    //     throw new ArgumentException("Segment size should be the power of 2.", "segmentSize");

    Path = path;
    FilePrefix = filePrefix;
    FileNamingStrategy = new PrefixFileNamingStrategy(path, filePrefix);
    SegmentSize = segmentSize;
    WriterCheckpoint = writerCheckpoint;
    // Index the named checkpoints by name for direct lookup.
    _namedCheckpoints = namedCheckpoints.ToDictionary(x => x.Name);
}
// Fixture setup: creates a TFChunk DB with in-memory writer/chaser checkpoints,
// runs the scenario's writes through a TFChunkWriter, advances the chaser
// checkpoint to the writer position, then builds the table index and read index
// over the written log.
// NOTE(review): ordering matters here — the writer must be closed and the
// checkpoints flushed before the chaser checkpoint is written and the index built.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); WriterCheckpoint = new InMemoryCheckpoint(0); var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0); Db = new TFChunkDb(new TFChunkDbConfig(PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"), 10000, 0, WriterCheckpoint, new[] {chaserchk})); Db.OpenVerifyAndClean(); // create db Writer = new TFChunkWriter(Db); Writer.Open(); WriteTestScenario(); Writer.Close(); Writer = null; WriterCheckpoint.Flush(); chaserchk.Write(WriterCheckpoint.Read()); chaserchk.Flush(); TableIndex = new TableIndex(Path.Combine(PathName, "index"), () => new HashListMemTable(), _maxEntriesInMemTable); TableIndex.Initialize(); var reader = new TFChunkReader(Db, Db.Config.WriterCheckpoint); ReadIndex = new ReadIndex(new NoopPublisher(), 2, () => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0), () => reader, TableIndex, new ByLengthHasher()); ReadIndex.Build(); }
/// <summary>
/// Configuration for a TFChunk database: path, naming strategy, chunk sizing,
/// cache budget and the four core checkpoints.
/// </summary>
public TFChunkDbConfig(string path,
                       IFileNamingStrategy fileNamingStrategy,
                       int chunkSize,
                       long maxChunksCacheSize,
                       ICheckpoint writerCheckpoint,
                       ICheckpoint chaserCheckpoint,
                       ICheckpoint epochCheckpoint,
                       ICheckpoint truncateCheckpoint,
                       bool inMemDb = false)
{
    // Validate everything up front so a config instance is always usable.
    Ensure.NotNullOrEmpty(path, "path");
    Ensure.NotNull(fileNamingStrategy, "fileNamingStrategy");
    Ensure.Positive(chunkSize, "chunkSize");
    Ensure.Nonnegative(maxChunksCacheSize, "maxChunksCacheSize");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint");
    Ensure.NotNull(epochCheckpoint, "epochCheckpoint");
    Ensure.NotNull(truncateCheckpoint, "truncateCheckpoint");

    Path = path;
    FileNamingStrategy = fileNamingStrategy;
    ChunkSize = chunkSize;
    MaxChunksCacheSize = maxChunksCacheSize;
    WriterCheckpoint = writerCheckpoint;
    ChaserCheckpoint = chaserCheckpoint;
    EpochCheckpoint = epochCheckpoint;
    TruncateCheckpoint = truncateCheckpoint;
    InMemDb = inMemDb;
}
/// <summary>
/// Configuration for a TFChunk database with a variable set of named
/// checkpoints in addition to the writer and chaser checkpoints.
/// </summary>
public TFChunkDbConfig(string path,
                       IFileNamingStrategy fileNamingStrategy,
                       int chunkSize,
                       int cachedChunkCount,
                       ICheckpoint writerCheckpoint,
                       ICheckpoint chaserCheckpoint,
                       params ICheckpoint[] namedCheckpoints)
{
    Ensure.NotNullOrEmpty(path, "path");
    Ensure.NotNull(fileNamingStrategy, "fileNamingStrategy");
    Ensure.Positive(chunkSize, "chunkSize");
    Ensure.Nonnegative(cachedChunkCount, "cachedChunkCount");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint");
    Ensure.NotNull(namedCheckpoints, "namedCheckpoints");
    // if ((chunkSize & (chunkSize-1)) != 0)
    //     throw new ArgumentException("Segment size should be the power of 2.", "chunkSize");

    Path = path;
    FileNamingStrategy = fileNamingStrategy;
    ChunkSize = chunkSize;
    CachedChunkCount = cachedChunkCount;
    WriterCheckpoint = writerCheckpoint;
    ChaserCheckpoint = chaserCheckpoint;
    // Named checkpoints are looked up by their name.
    _namedCheckpoints = namedCheckpoints.ToDictionary(x => x.Name);
}
// Fixture setup: builds a TFChunk DB with in-memory checkpoints, writes the
// scenario through a TFChunkWriter, syncs the chaser checkpoint to the writer,
// then wires a pooled-reader TableIndex/ReadIndex (64-bit PTables, XXHash +
// Murmur3A). Optionally runs a scavenge after the read index is initialized,
// completing the last chunk first when requested.
// NOTE(review): scavenge must run after ReadIndex.Init — see trailing comment.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); WriterCheckpoint = new InMemoryCheckpoint(0); ChaserCheckpoint = new InMemoryCheckpoint(0); Bus = new InMemoryBus("bus"); IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus)); Db = new TFChunkDb(new TFChunkDbConfig(PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"), 10000, 0, WriterCheckpoint, ChaserCheckpoint, new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1))); Db.Open(); // create db Writer = new TFChunkWriter(Db); Writer.Open(); WriteTestScenario(); Writer.Close(); Writer = null; WriterCheckpoint.Flush(); ChaserCheckpoint.Write(WriterCheckpoint.Read()); ChaserCheckpoint.Flush(); var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 5, () => new TFChunkReader(Db, Db.Config.WriterCheckpoint)); var lowHasher = new XXHashUnsafe(); var highHasher = new Murmur3AUnsafe(); TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher, () => new HashListMemTable(PTableVersions.Index64Bit, MaxEntriesInMemTable * 2), () => new TFReaderLease(readers), PTableVersions.Index64Bit, MaxEntriesInMemTable); ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, 0, additionalCommitChecks: true, metastreamMaxCount: MetastreamMaxCount, hashCollisionReadLimit: Opts.HashCollisionReadLimitDefault); ReadIndex.Init(ChaserCheckpoint.Read()); // scavenge must run after readIndex is built if (_scavenge) { if (_completeLastChunkOnScavenge) Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete(); _scavenger = new TFChunkScavenger(Db, IODispatcher, TableIndex, ReadIndex, Guid.NewGuid(), "fakeNodeIp"); _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks); } }
/// <summary>
/// Creates a reader over the given chunk database, bounded by the given checkpoint.
/// </summary>
public TFChunkReader(TFChunkDb db, ICheckpoint checkpoint) {
    // Fix: the Ensure calls previously reported the wrong parameter names
    // ("dbConfig" and "writerCheckpoint") in the thrown exceptions; report
    // the actual parameter names so failures point at the right argument.
    Ensure.NotNull(db, "db");
    Ensure.NotNull(checkpoint, "checkpoint");
    _db = db;
    _checkpoint = checkpoint;
}
// Arrange/Act: build a SQL-backed persistence engine and capture its
// checkpoint for the assertions that follow.
protected override void Because() {
    var factory = new SqlPersistenceFactory("Connection", new BinarySerializer(), new MsSqlDialect());
    var persistence = factory.Build();
    _checkpoint = persistence.GetCheckpoint();
}
/// <summary>
/// Helper that gathers system statistics; needs a logger and the writer
/// checkpoint, and wraps performance-counter access in PerfCounterHelper.
/// </summary>
public SystemStatsHelper(ILogger log, ICheckpoint writerCheckpoint) {
    Ensure.NotNull(log, "log");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");

    _log = log;
    _writerCheckpoint = writerCheckpoint;
    _perfCounter = new PerfCounterHelper(_log);
}
// Sanity check: no reader checkpoint may be ahead of the writer checkpoint.
// Throws CorruptDatabaseException naming the first offending checkpoint.
private void ValidateReaderChecksumsMustBeLess(ICheckpoint writerCheckpoint, IEnumerable<ICheckpoint> readerCheckpoints)
{
    var writerPosition = writerCheckpoint.Read();
    foreach (var readerCheckpoint in readerCheckpoints)
    {
        if (readerCheckpoint.Read() > writerPosition)
            throw new CorruptDatabaseException(new ReaderCheckpointHigherThanWriterException(readerCheckpoint.Name));
    }
}
/// <summary>
/// Creates a chaser that reads the log from the chaser checkpoint's last
/// persisted position, bounded by the writer checkpoint.
/// </summary>
public TFChunkChaser(TFChunkDb db, ICheckpoint writerCheckpoint, ICheckpoint chaserCheckpoint) {
    // Fix: Ensure previously reported "dbConfig" for the 'db' argument.
    Ensure.NotNull(db, "db");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint");
    _chaserCheckpoint = chaserCheckpoint;
    // Resume reading from where the chaser last left off.
    _reader = new TFChunkReader(db, writerCheckpoint, _chaserCheckpoint.Read());
}
/// <summary>
/// Creates a reader over the given chunk database, starting at
/// <paramref name="initialPosition"/> and bounded by the writer checkpoint.
/// </summary>
public TFChunkReader(TFChunkDb db, ICheckpoint writerCheckpoint, long initialPosition = 0) {
    // Fix: Ensure previously reported "dbConfig" for the 'db' argument.
    Ensure.NotNull(db, "db");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.Nonnegative(initialPosition, "initialPosition");
    _db = db;
    _writerCheckpoint = writerCheckpoint;
    _curPos = initialPosition;
}
/// <summary>
/// Creates a writer positioned at the chunk that covers the database's
/// current writer-checkpoint position.
/// </summary>
public TFChunkWriter(TFChunkDb db) {
    Ensure.NotNull(db, "db");
    _db = db;
    _writerCheckpoint = db.Config.WriterCheckpoint;
    var writerPosition = _writerCheckpoint.Read();
    _currentChunk = db.Manager.GetChunkFor(writerPosition);
    // A missing chunk for an existing position means the DB is inconsistent.
    if (_currentChunk == null)
        throw new InvalidOperationException("No chunk given for existing position.");
}
/// <summary>
/// Creates a chaser over the given chunk database; reading resumes from the
/// chaser checkpoint's last persisted position.
/// </summary>
public TFChunkChaser(TFChunkDb db, ICheckpoint writerCheckpoint, ICheckpoint chaserCheckpoint) {
    // Fix: Ensure previously reported "dbConfig" for the 'db' argument.
    Ensure.NotNull(db, "db");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint");
    _db = db;
    _writerCheckpoint = writerCheckpoint;
    _chaserCheckpoint = chaserCheckpoint;
    _curPos = _chaserCheckpoint.Read();
}
// Fixture setup: TFChunk DB with in-memory checkpoints, scenario written via
// TFChunkWriter, chaser checkpoint synced to the writer, then a pooled-reader
// TableIndex/ReadIndex built over the log (ByLengthHasher variant). Optionally
// scavenges afterwards, completing the last chunk first if requested.
// NOTE(review): statement order is significant — writer closed and checkpoints
// flushed before the index is built; scavenge strictly after ReadIndex.Init.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); WriterCheckpoint = new InMemoryCheckpoint(0); ChaserCheckpoint = new InMemoryCheckpoint(0); Db = new TFChunkDb(new TFChunkDbConfig(PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"), 10000, 0, WriterCheckpoint, ChaserCheckpoint, new InMemoryCheckpoint(-1), new InMemoryCheckpoint(-1))); Db.Open(); // create db Writer = new TFChunkWriter(Db); Writer.Open(); WriteTestScenario(); Writer.Close(); Writer = null; WriterCheckpoint.Flush(); ChaserCheckpoint.Write(WriterCheckpoint.Read()); ChaserCheckpoint.Flush(); var readers = new ObjectPool<ITransactionFileReader>("Readers", 2, 2, () => new TFChunkReader(Db, Db.Config.WriterCheckpoint)); TableIndex = new TableIndex(GetFilePathFor("index"), () => new HashListMemTable(MaxEntriesInMemTable * 2), () => new TFReaderLease(readers), MaxEntriesInMemTable); var hasher = new ByLengthHasher(); ReadIndex = new ReadIndex(new NoopPublisher(), readers, TableIndex, hasher, 0, additionalCommitChecks: true, metastreamMaxCount: MetastreamMaxCount); ReadIndex.Init(ChaserCheckpoint.Read()); // scavenge must run after readIndex is built if (_scavenge) { if (_completeLastChunkOnScavenge) Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete(); _scavenger = new TFChunkScavenger(Db, TableIndex, hasher, ReadIndex); _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks); } }
/// <summary>
/// Writer for a multifile transaction log; records are staged into an
/// in-memory buffer before being flushed to disk.
/// </summary>
public MultifileTransactionFileWriter(TransactionFileDatabaseConfig config, Int32 bufferSize) {
    if (config == null)
        throw new ArgumentNullException("config");

    _config = config;
    _segmentSize = config.SegmentSize;
    _writerCheckpoint = config.WriterCheckpoint;
    _bufferSize = bufferSize;
    // Staging buffer starts small and grows on demand.
    _buffer = new MemoryStream(1024);
    _bufferWriter = new BinaryWriter(_buffer);
}
// Fixture setup (older API shape): TFChunk DB with in-memory checksums passed
// both as writer/chaser and as named checkpoints, scenario written via
// TFChunkWriter, chaser synced to writer, then TableIndex/ReadIndex built
// (sequential-reader variant with a no-op stream cache). Optionally scavenges
// afterwards, completing the last chunk first when requested.
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); WriterChecksum = new InMemoryCheckpoint(0); ChaserChecksum = new InMemoryCheckpoint(0); Db = new TFChunkDb(new TFChunkDbConfig(PathName, new VersionedPatternFileNamingStrategy(PathName, "chunk-"), 10000, 0, WriterChecksum, ChaserChecksum, new[] { WriterChecksum, ChaserChecksum })); Db.OpenVerifyAndClean(); // create db Writer = new TFChunkWriter(Db); Writer.Open(); WriteTestScenario(); Writer.Close(); Writer = null; WriterChecksum.Flush(); ChaserChecksum.Write(WriterChecksum.Read()); ChaserChecksum.Flush(); TableIndex = new TableIndex(GetFilePathFor("index"), () => new HashListMemTable(MaxEntriesInMemTable * 2), MaxEntriesInMemTable); var reader = new TFChunkReader(Db, Db.Config.WriterCheckpoint); ReadIndex = new ReadIndex(new NoopPublisher(), 2, () => new TFChunkSequentialReader(Db, Db.Config.WriterCheckpoint, 0), () => reader, TableIndex, new ByLengthHasher(), new NoLRUCache<string, StreamCacheInfo>()); ReadIndex.Build(); // scavenge must run after readIndex is built if (_scavenge) { if (_completeLastChunkOnScavenge) Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete(); _scavenger = new TFChunkScavenger(Db, ReadIndex); _scavenger.Scavenge(alwaysKeepScavenged: true); } }
/// <summary>
/// Reader for a multifile transaction log that resumes from the given
/// checkpoint's last persisted position.
/// </summary>
public MultifileTransactionFileReader(TransactionFileDatabaseConfig config, ICheckpoint checkpoint, int bufferSize) {
    Ensure.NotNull(config, "config");
    Ensure.NotNull(checkpoint, "checkpoint");

    _config = config;
    _segmentSize = config.SegmentSize;
    _checkpoint = checkpoint;
    // Cache the checkpoint position; refreshed as reading progresses.
    _lastCheck = _checkpoint.Read();

    // Read-side buffers: a raw scratch buffer plus a stream for record parsing.
    _bufferSize = bufferSize;
    _tmpBuffer = new byte[bufferSize];
    _buffer = new MemoryStream();
    _bufferReader = new BinaryReader(_buffer);
}
// Test helper: builds a TFChunkDbConfig with sensible defaults — versioned
// "chunk-" naming, no chunk cache, fresh epoch/truncate checkpoints and an
// optional replication checkpoint (defaulting to -1, i.e. nothing replicated).
public static TFChunkDbConfig CreateDbConfig(string pathName, ICheckpoint writerCheckpoint, ICheckpoint chaserCheckpoint, int chunkSize = 10000, ICheckpoint replicationCheckpoint = null) {
    replicationCheckpoint = replicationCheckpoint ?? new InMemoryCheckpoint(-1);
    return new TFChunkDbConfig(
        pathName,
        new VersionedPatternFileNamingStrategy(pathName, "chunk-"),
        chunkSize,
        0,
        writerCheckpoint,
        chaserCheckpoint,
        new InMemoryCheckpoint(-1),
        new InMemoryCheckpoint(-1),
        replicationCheckpoint,
        Opts.ChunkInitialReaderCountDefault);
}
/// <summary>
/// Full configuration for a TFChunk database: location and naming, chunk
/// sizing and cache budget, the five checkpoints, reader-pool bounds and
/// the various I/O behavior flags.
/// </summary>
public TFChunkDbConfig(string path,
                       IFileNamingStrategy fileNamingStrategy,
                       int chunkSize,
                       long maxChunksCacheSize,
                       ICheckpoint writerCheckpoint,
                       ICheckpoint chaserCheckpoint,
                       ICheckpoint epochCheckpoint,
                       ICheckpoint truncateCheckpoint,
                       ICheckpoint replicationCheckpoint,
                       int initialReaderCount,
                       int maxReaderCount,
                       bool inMemDb = false,
                       bool unbuffered = false,
                       bool writethrough = false,
                       bool optimizeReadSideCache = false,
                       bool reduceFileCachePressure = false)
{
    // Validate everything up front so a config instance is always usable.
    Ensure.NotNullOrEmpty(path, "path");
    Ensure.NotNull(fileNamingStrategy, "fileNamingStrategy");
    Ensure.Positive(chunkSize, "chunkSize");
    Ensure.Nonnegative(maxChunksCacheSize, "maxChunksCacheSize");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint");
    Ensure.NotNull(epochCheckpoint, "epochCheckpoint");
    Ensure.NotNull(truncateCheckpoint, "truncateCheckpoint");
    Ensure.NotNull(replicationCheckpoint, "replicationCheckpoint");
    Ensure.Positive(initialReaderCount, "initialReaderCount");
    Ensure.Positive(maxReaderCount, "maxReaderCount");

    Path = path;
    FileNamingStrategy = fileNamingStrategy;
    ChunkSize = chunkSize;
    MaxChunksCacheSize = maxChunksCacheSize;
    WriterCheckpoint = writerCheckpoint;
    ChaserCheckpoint = chaserCheckpoint;
    EpochCheckpoint = epochCheckpoint;
    TruncateCheckpoint = truncateCheckpoint;
    ReplicationCheckpoint = replicationCheckpoint;
    InitialReaderCount = initialReaderCount;
    MaxReaderCount = maxReaderCount;
    InMemDb = inMemDb;
    Unbuffered = unbuffered;
    WriteThrough = writethrough;
    OptimizeReadSideCache = optimizeReadSideCache;
    ReduceFileCachePressure = reduceFileCachePressure;
}
/// <summary>
/// Creates a reader over the given chunk database, starting at
/// <paramref name="initialPosition"/> and bounded by the writer checkpoint;
/// optionally enables the read-side exists-at cache optimizer.
/// </summary>
public TFChunkReader(TFChunkDb db, ICheckpoint writerCheckpoint, long initialPosition = 0, bool optimizeReadSideCache = false) {
    // Fix: Ensure previously reported "dbConfig" for the 'db' argument.
    Ensure.NotNull(db, "db");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.Nonnegative(initialPosition, "initialPosition");
    _db = db;
    _writerCheckpoint = writerCheckpoint;
    _curPos = initialPosition;
    _optimizeReadSideCache = optimizeReadSideCache;
    if (_optimizeReadSideCache) {
        _existsAtOptimizer = TFChunkReaderExistsAtOptimizer.Instance;
    }
}
// Async fixture setup: creates an on-disk DB (memory-mapped writer/chaser
// checkpoint files) in a unique directory, writes the scenario, syncs the
// chaser checkpoint to the writer, closes the DB, then starts a MiniNode
// over that same database directory and awaits the scenario's Given().
// NOTE(review): the DB must be closed before the MiniNode opens it.
public override async Task TestFixtureSetUp() { await base.TestFixtureSetUp(); string dbPath = Path.Combine(PathName, string.Format("mini-node-db-{0}", Guid.NewGuid())); Bus = new InMemoryBus("bus"); IODispatcher = new IODispatcher(Bus, new PublishEnvelope(Bus)); if (!Directory.Exists(dbPath)) { Directory.CreateDirectory(dbPath); } var writerCheckFilename = Path.Combine(dbPath, Checkpoint.Writer + ".chk"); var chaserCheckFilename = Path.Combine(dbPath, Checkpoint.Chaser + ".chk"); WriterCheckpoint = new MemoryMappedFileCheckpoint(writerCheckFilename, Checkpoint.Writer, cached: true); ChaserCheckpoint = new MemoryMappedFileCheckpoint(chaserCheckFilename, Checkpoint.Chaser, cached: true); Db = new TFChunkDb(TFChunkHelper.CreateDbConfig(dbPath, WriterCheckpoint, ChaserCheckpoint, TFConsts.ChunkSize)); Db.Open(); // create DB Writer = new TFChunkWriter(Db); Writer.Open(); WriteTestScenario(); Writer.Close(); Writer = null; WriterCheckpoint.Flush(); ChaserCheckpoint.Write(WriterCheckpoint.Read()); ChaserCheckpoint.Flush(); Db.Close(); // start node with our created DB Node = new MiniNode(PathName, inMemDb: false, dbPath: dbPath); await Node.Start(); try { await Given().WithTimeout(); } catch (Exception ex) { throw new Exception("Given Failed", ex); } }
// Integration setup: creates a temp DB directory with file-backed checkpoints
// (plain FileCheckpoint on Mono, memory-mapped otherwise), builds a single
// vnode over a 256 MB-chunk TFChunk DB, subscribes for SystemStart and blocks
// until the node reports it has started.
// NOTE(review): temp directory does not appear to be cleaned up here —
// presumably a TearDown elsewhere handles it; confirm.
protected virtual void SetUp() { var dbPath = Path.Combine(Path.GetTempPath(), "EventStoreTests", Guid.NewGuid().ToString()); Directory.CreateDirectory(dbPath); var chunkSize = 256 * 1024 * 1024; var chunksToCache = 2; if (Runtime.IsMono) { _writerChk = new FileCheckpoint(Path.Combine(dbPath, Checkpoint.Writer + ".chk"), Checkpoint.Writer, cached: true); _chaserChk = new FileCheckpoint(Path.Combine(dbPath, Checkpoint.Chaser + ".chk"), Checkpoint.Chaser, cached: true); } else { _writerChk = new MemoryMappedFileCheckpoint(Path.Combine(dbPath, Checkpoint.Writer + ".chk"), Checkpoint.Writer, cached: true); _chaserChk = new MemoryMappedFileCheckpoint(Path.Combine(dbPath, Checkpoint.Chaser + ".chk"), Checkpoint.Chaser, cached: true); } var nodeConfig = new TFChunkDbConfig(dbPath, new VersionedPatternFileNamingStrategy(dbPath, "chunk-"), chunkSize, chunksToCache, _writerChk, new[] { _chaserChk }); var settings = new SingleVNodeSettings(new IPEndPoint(IPAddress.Loopback, 1111), new IPEndPoint(IPAddress.Loopback, 2111), new[] { new IPEndPoint(IPAddress.Loopback, 2111).ToHttpUrl() }); var appsets = new SingleVNodeAppSettings(TimeSpan.FromDays(1)); _db = new TFChunkDb(nodeConfig); _vNode = new SingleVNode(_db, settings, appsets); var startCallback = new EnvelopeCallback <SystemMessage.SystemStart>(); _vNode.Bus.Subscribe <SystemMessage.SystemStart>(startCallback); _vNode.Start(); startCallback.Wait(); }
/// <summary>
/// Elections service for a cluster node: validates its dependencies, captures
/// them, and seeds the known-servers list with this node's own member info.
/// </summary>
public ElectionsService(IPublisher publisher, VNodeInfo nodeInfo, int clusterSize, ICheckpoint writerCheckpoint, ICheckpoint chaserCheckpoint, IEpochManager epochManager, Func <long> getLastCommitPosition, int nodePriority, ITimeProvider timeProvider) {
    Ensure.NotNull(publisher, nameof(publisher));
    Ensure.NotNull(nodeInfo, nameof(nodeInfo));
    Ensure.Positive(clusterSize, nameof(clusterSize));
    Ensure.NotNull(writerCheckpoint, nameof(writerCheckpoint));
    Ensure.NotNull(chaserCheckpoint, nameof(chaserCheckpoint));
    Ensure.NotNull(epochManager, nameof(epochManager));
    Ensure.NotNull(getLastCommitPosition, nameof(getLastCommitPosition));
    Ensure.NotNull(timeProvider, nameof(timeProvider));

    _publisher = publisher;
    _publisherEnvelope = new PublishEnvelope(_publisher);
    _nodeInfo = nodeInfo;
    _clusterSize = clusterSize;
    _writerCheckpoint = writerCheckpoint;
    _chaserCheckpoint = chaserCheckpoint;
    _epochManager = epochManager;
    _getLastCommitPosition = getLastCommitPosition;
    _nodePriority = nodePriority;
    _timeProvider = timeProvider;

    // Seed the member list with this node's own current state.
    var selfInfo = GetOwnInfo();
    _servers = new[] {
        MemberInfo.ForVNode(
            nodeInfo.InstanceId, _timeProvider.UtcNow, VNodeState.Initializing, true,
            nodeInfo.InternalTcp, nodeInfo.InternalSecureTcp,
            nodeInfo.ExternalTcp, nodeInfo.ExternalSecureTcp,
            nodeInfo.InternalHttp, nodeInfo.ExternalHttp,
            selfInfo.LastCommitPosition, selfInfo.WriterCheckpoint, selfInfo.ChaserCheckpoint,
            selfInfo.EpochPosition, selfInfo.EpochNumber, selfInfo.EpochId,
            selfInfo.NodePriority, nodeInfo.IsReadOnlyReplica)
    };
}
// Storage reader service: spins up 'threadCount' StorageReaderWorkers, each on
// its own InMemoryBus, fronted by a MultiQueuedHandler that round-robins read
// requests across them; then widens and subscribes all supported read/access
// message types from the outer subscriber into the worker handler.
// NOTE(review): each worker bus must be subscribed to every message type the
// outer subscriber forwards, or widened messages would be dropped.
public StorageReaderService(IPublisher bus, ISubscriber subscriber, IReadIndex readIndex, int threadCount, ICheckpoint writerCheckpoint) { Ensure.NotNull(bus, "bus"); Ensure.NotNull(subscriber, "subscriber"); Ensure.NotNull(readIndex, "readIndex"); Ensure.Positive(threadCount, "threadCount"); Ensure.NotNull(writerCheckpoint, "writerCheckpoint"); _bus = bus; _readIndex = readIndex; _threadCount = threadCount; StorageReaderWorker[] readerWorkers = new StorageReaderWorker[threadCount]; InMemoryBus[] storageReaderBuses = new InMemoryBus[threadCount]; for (var i = 0; i < threadCount; i++) { readerWorkers[i] = new StorageReaderWorker(bus, readIndex, writerCheckpoint, i); storageReaderBuses[i] = new InMemoryBus("StorageReaderBus", watchSlowMsg: false); storageReaderBuses[i].Subscribe <ClientMessage.ReadEvent>(readerWorkers[i]); storageReaderBuses[i].Subscribe <ClientMessage.ReadStreamEventsBackward>(readerWorkers[i]); storageReaderBuses[i].Subscribe <ClientMessage.ReadStreamEventsForward>(readerWorkers[i]); storageReaderBuses[i].Subscribe <ClientMessage.ReadAllEventsForward>(readerWorkers[i]); storageReaderBuses[i].Subscribe <ClientMessage.ReadAllEventsBackward>(readerWorkers[i]); storageReaderBuses[i].Subscribe <StorageMessage.CheckStreamAccess>(readerWorkers[i]); storageReaderBuses[i].Subscribe <StorageMessage.BatchLogExpiredMessages>(readerWorkers[i]); } _workersMultiHandler = new MultiQueuedHandler( _threadCount, queueNum => new QueuedHandlerThreadPool(storageReaderBuses[queueNum], string.Format("StorageReaderQueue #{0}", queueNum + 1), groupName: "StorageReaderQueue", watchSlowMsg: true, slowMsgThreshold: TimeSpan.FromMilliseconds(200))); _workersMultiHandler.Start(); subscriber.Subscribe(_workersMultiHandler.WidenFrom <ClientMessage.ReadEvent, Message>()); subscriber.Subscribe(_workersMultiHandler.WidenFrom <ClientMessage.ReadStreamEventsBackward, Message>()); subscriber.Subscribe(_workersMultiHandler.WidenFrom <ClientMessage.ReadStreamEventsForward, Message>()); 
subscriber.Subscribe(_workersMultiHandler.WidenFrom <ClientMessage.ReadAllEventsForward, Message>()); subscriber.Subscribe(_workersMultiHandler.WidenFrom <ClientMessage.ReadAllEventsBackward, Message>()); subscriber.Subscribe(_workersMultiHandler.WidenFrom <StorageMessage.CheckStreamAccess, Message>()); subscriber.Subscribe(_workersMultiHandler.WidenFrom <StorageMessage.BatchLogExpiredMessages, Message>()); }
// Records, per checkpoint name, the minimum observed time from a checkpoint
// to its successor. Iterates adjacent pairs (i, i+1), so the last checkpoint
// (which has no successor) is intentionally skipped via j = Length - 1.
// Errored checkpoints and those in the ignore list are excluded.
// NOTE(review): _row[key] (integral) is compared against TotalMilliseconds
// (double) before Convert.ToInt64 rounds the stored value — sub-millisecond
// precision is lost on store; confirm that is acceptable.
protected override void AddResult(IResult result) { ICheckpoint[] checkpoints = result.Checkpoints; for (int i = 0, j = checkpoints.Length - 1; i < j; i++) { ICheckpoint checkpoint = checkpoints[i]; if (checkpoint.Error == null && _ignoredCheckpoints.All(name => name != checkpoint.Name)) { string key = "Min: " + checkpoint.Name; TimeSpan momentDiff = checkpoint.Diff(checkpoints[i + 1]); if (_row[key] > momentDiff.TotalMilliseconds) { _row[key] = Convert.ToInt64(momentDiff.TotalMilliseconds); } } } }
/// <summary>
/// Retrieves records from a multifile transaction log in bulks of
/// <paramref name="bulkSize"/> bytes, buffered by <paramref name="fileBufferSize"/>.
/// </summary>
public MultifileTransactionFileBulkRetriever(TransactionFileDatabaseConfig config, int bulkSize, int fileBufferSize) {
    if (config == null)
        throw new ArgumentNullException("config");
    if (fileBufferSize <= 0)
        throw new ArgumentOutOfRangeException("fileBufferSize");
    // Fix: a non-positive bulkSize previously slipped past validation — a
    // negative value only failed later at the array allocation below.
    if (bulkSize <= 0)
        throw new ArgumentOutOfRangeException("bulkSize");
    if (bulkSize > fileBufferSize)
        throw new ArgumentOutOfRangeException("bulkSize");

    _config = config;
    _bulkSize = bulkSize;
    _bulkBuffer = new byte[_bulkSize];
    _fileBufferSize = fileBufferSize;
    _segmentSize = config.SegmentSize;
    _writerCheckpoint = config.WriterCheckpoint;
    // Cache the writer position; retrieval never reads past it.
    _lastWriterCheck = _writerCheckpoint.Read();
}
// Counts one error per failed checkpoint in the result, keyed by checkpoint
// name; optionally also bumps the overall error total.
void IMetric <IResult> .Add(IResult result) {
    foreach (ICheckpoint checkpoint in result.Checkpoints) {
        if (checkpoint.Error == null)
            continue;
        string key = "Errors: " + checkpoint.Name;
        _row[key]++;
        if (_includeTotals) {
            _row["Errors: Totals"]++;
        }
    }
}
// Composes the read side of the store: validates dependencies, then wires
// IndexBackend -> IndexReader -> IndexWriter -> IndexCommitter -> AllReader.
// NOTE(review): construction order is significant — _indexReader feeds the
// writer and committer, and _indexCommitter feeds the AllReader; do not reorder.
// NOTE(review): streamInfoCacheCapacity is passed twice to IndexBackend —
// presumably separate caches of the same size; confirm against IndexBackend.
public ReadIndex(IPublisher bus, ObjectPool <ITransactionFileReader> readerPool, ITableIndex <TStreamId> tableIndex, IStreamIdLookup <TStreamId> streamIds, IStreamNamesProvider <TStreamId> streamNamesProvider, TStreamId emptyStreamName, IValidator <TStreamId> streamIdValidator, ISizer <TStreamId> sizer, int streamInfoCacheCapacity, bool additionalCommitChecks, long metastreamMaxCount, int hashCollisionReadLimit, bool skipIndexScanOnReads, IReadOnlyCheckpoint replicationCheckpoint, ICheckpoint indexCheckpoint) { Ensure.NotNull(bus, "bus"); Ensure.NotNull(readerPool, "readerPool"); Ensure.NotNull(tableIndex, "tableIndex"); Ensure.NotNull(streamIds, nameof(streamIds)); Ensure.NotNull(streamNamesProvider, nameof(streamNamesProvider)); Ensure.NotNull(streamIdValidator, nameof(streamIdValidator)); Ensure.NotNull(sizer, nameof(sizer)); Ensure.Nonnegative(streamInfoCacheCapacity, "streamInfoCacheCapacity"); Ensure.Positive(metastreamMaxCount, "metastreamMaxCount"); Ensure.NotNull(replicationCheckpoint, "replicationCheckpoint"); Ensure.NotNull(indexCheckpoint, "indexCheckpoint"); var metastreamMetadata = new StreamMetadata(maxCount: metastreamMaxCount); var indexBackend = new IndexBackend <TStreamId>(readerPool, streamInfoCacheCapacity, streamInfoCacheCapacity); _indexReader = new IndexReader <TStreamId>(indexBackend, tableIndex, streamNamesProvider, streamIdValidator, metastreamMetadata, hashCollisionReadLimit, skipIndexScanOnReads); _streamIds = streamIds; _streamNames = streamNamesProvider.StreamNames; var systemStreams = streamNamesProvider.SystemStreams; _indexWriter = new IndexWriter <TStreamId>(indexBackend, _indexReader, _streamIds, _streamNames, systemStreams, emptyStreamName, sizer); _indexCommitter = new IndexCommitter <TStreamId>(bus, indexBackend, _indexReader, tableIndex, _streamNames, systemStreams, indexCheckpoint, additionalCommitChecks); _allReader = new AllReader <TStreamId>(indexBackend, _indexCommitter, _streamNames); }
/// <summary>
/// Elections service (pre-ITimeProvider variant): validates dependencies,
/// captures them, and seeds the known-servers list with this node's own info.
/// </summary>
public ElectionsService(IPublisher publisher, VNodeInfo nodeInfo, int clusterSize, ICheckpoint writerCheckpoint, ICheckpoint chaserCheckpoint, IEpochManager epochManager, Func <long> getLastCommitPosition, int nodePriority) {
    Ensure.NotNull(publisher, "publisher");
    Ensure.NotNull(nodeInfo, "nodeInfo");
    Ensure.Positive(clusterSize, "clusterSize");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint");
    Ensure.NotNull(epochManager, "epochManager");
    Ensure.NotNull(getLastCommitPosition, "getLastCommitPosition");

    _publisher = publisher;
    _publisherEnvelope = new PublishEnvelope(_publisher);
    _nodeInfo = nodeInfo;
    _clusterSize = clusterSize;
    _writerCheckpoint = writerCheckpoint;
    _chaserCheckpoint = chaserCheckpoint;
    _epochManager = epochManager;
    _getLastCommitPosition = getLastCommitPosition;
    _nodePriority = nodePriority;

    // Seed the member list with this node's own current state.
    var selfInfo = GetOwnInfo();
    _servers = new[] {
        MemberInfo.ForVNode(
            nodeInfo.InstanceId, DateTime.UtcNow, VNodeState.Initializing, true,
            nodeInfo.InternalTcp, nodeInfo.InternalSecureTcp,
            nodeInfo.ExternalTcp, nodeInfo.ExternalSecureTcp,
            nodeInfo.InternalHttp, nodeInfo.ExternalHttp,
            selfInfo.LastCommitPosition, selfInfo.WriterCheckpoint, selfInfo.ChaserCheckpoint,
            selfInfo.EpochPosition, selfInfo.EpochNumber, selfInfo.EpochId,
            selfInfo.NodePriority)
    };
}
/// <summary>
/// Chases the transaction log behind the writer and feeds results to the
/// master bus / read index / epoch manager.
/// </summary>
public StorageChaser(IPublisher masterBus, ICheckpoint writerCheckpoint, ITransactionFileChaser chaser, IReadIndex readIndex, IEpochManager epochManager) {
    Ensure.NotNull(masterBus, "masterBus");
    // Fix: writerCheckpoint was the only dependency not null-checked here,
    // which deferred the failure to first use instead of construction.
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaser, "chaser");
    Ensure.NotNull(readIndex, "readIndex");
    Ensure.NotNull(epochManager, "epochManager");

    _masterBus = masterBus;
    _writerCheckpoint = writerCheckpoint;
    _chaser = chaser;
    _readIndex = readIndex;
    _epochManager = epochManager;

    _flushDelay = 0;
    _lastFlush = _watch.ElapsedTicks;
}
// Records, per checkpoint name, the maximum observed time since the previous
// non-ignored checkpoint (the first diff is taken against BlankCheckpoint).
// Ignored checkpoints do not advance 'previousCheckpoint', so a diff spans
// across them to the next tracked checkpoint.
// NOTE(review): _row[key] (integral) is compared against TotalMilliseconds
// (double) before Convert.ToInt64 rounds the stored value.
public void Add(IResult result) { ICheckpoint previousCheckpoint = BlankCheckpoint; foreach (ICheckpoint checkpoint in result.Checkpoints) { if (_ignoredCheckpoints.All(name => name != checkpoint.Name)) { string key = "Max: " + checkpoint.Name; TimeSpan momentDiff = TimeSpan.FromTicks(checkpoint.TimePoint.Ticks - previousCheckpoint.TimePoint.Ticks); if (_row[key] < momentDiff.TotalMilliseconds) { _row[key] = Convert.ToInt64(momentDiff.TotalMilliseconds); } previousCheckpoint = checkpoint; } } }
// Validates a checkpoint trigger against the expected ordered sequence.
// Returns false with no state change when the same checkpoint fires again or
// the run has already deviated. Accepts the trigger (returns true, advances
// 'current') when it is: the very first checkpoint and a start checkpoint,
// the immediate successor of the current one, or a wrap-around (current is
// the last checkpoint and a start checkpoint fires). Anything else marks the
// run as deviated and returns false.
// NOTE(review): 'checkpoints' appears to map each checkpoint to its ordinal
// position in the sequence — confirm against the field's declaration.
public bool TriggerCheckpoint(ICheckpoint checkpoint) { if (current == checkpoint || deviated) { return(false); } else if ( current == null && checkpoint.IsStart() || current != null && (checkpoints[checkpoint] == checkpoints[current] + 1 || checkpoints[current] == checkpoints.Count - 1 && checkpoint.IsStart())) { current = checkpoint; return(true); } else { Deviated(); return(false); } }
// Integration setup (duplicate variant of the SetUp above in this file, with
// only whitespace differences): temp DB directory, file-backed checkpoints
// (FileCheckpoint on Mono, memory-mapped otherwise), a single vnode over a
// 256 MB-chunk TFChunk DB, blocking until SystemStart is received.
// NOTE(review): consider consolidating with the near-identical SetUp variant
// elsewhere in this codebase.
protected virtual void SetUp() { var dbPath = Path.Combine(Path.GetTempPath(), "EventStoreTests", Guid.NewGuid().ToString()); Directory.CreateDirectory(dbPath); var chunkSize = 256*1024*1024; var chunksToCache = 2; if (Runtime.IsMono) { _writerChk = new FileCheckpoint(Path.Combine(dbPath, Checkpoint.Writer + ".chk"), Checkpoint.Writer, cached: true); _chaserChk = new FileCheckpoint(Path.Combine(dbPath, Checkpoint.Chaser + ".chk"), Checkpoint.Chaser, cached: true); } else { _writerChk = new MemoryMappedFileCheckpoint(Path.Combine(dbPath, Checkpoint.Writer + ".chk"), Checkpoint.Writer, cached: true); _chaserChk = new MemoryMappedFileCheckpoint(Path.Combine(dbPath, Checkpoint.Chaser + ".chk"), Checkpoint.Chaser, cached: true); } var nodeConfig = new TFChunkDbConfig(dbPath, new VersionedPatternFileNamingStrategy(dbPath, "chunk-"), chunkSize, chunksToCache, _writerChk, new[] {_chaserChk}); var settings = new SingleVNodeSettings(new IPEndPoint(IPAddress.Loopback, 1111), new IPEndPoint(IPAddress.Loopback, 2111), new[] {new IPEndPoint(IPAddress.Loopback, 2111).ToHttpUrl()}); var appsets = new SingleVNodeAppSettings(TimeSpan.FromDays(1)); _db = new TFChunkDb(nodeConfig); _vNode = new SingleVNode(_db, settings, appsets); var startCallback = new EnvelopeCallback<SystemMessage.SystemStart>(); _vNode.Bus.Subscribe<SystemMessage.SystemStart>(startCallback); _vNode.Start(); startCallback.Wait(); }
/// <summary>
/// Manages the epoch record cache; reads go through a pooled set of
/// transaction-file readers, writes through the given writer.
/// </summary>
public EpochManager(int cachedEpochCount, ICheckpoint checkpoint, ITransactionFileWriter writer, int initialReaderCount, int maxReaderCount, Func<ITransactionFileReader> readerFactory) {
    Ensure.Nonnegative(cachedEpochCount, "cachedEpochCount");
    Ensure.NotNull(checkpoint, "checkpoint");
    // Fix: Ensure previously reported "chunkWriter" for the 'writer' argument.
    Ensure.NotNull(writer, "writer");
    Ensure.Nonnegative(initialReaderCount, "initialReaderCount");
    Ensure.Positive(maxReaderCount, "maxReaderCount");
    if (initialReaderCount > maxReaderCount)
        throw new ArgumentOutOfRangeException("initialReaderCount", "initialReaderCount is greater than maxReaderCount.");
    Ensure.NotNull(readerFactory, "readerFactory");

    CachedEpochCount = cachedEpochCount;
    _checkpoint = checkpoint;
    _readers = new ObjectPool<ITransactionFileReader>("EpochManager readers pool", initialReaderCount, maxReaderCount, readerFactory);
    _writer = writer;
}
/// <summary>
/// Gossip service for a node: extends the base gossip service with the
/// checkpoints, epoch manager and commit-position source used to build
/// this node's gossip payload.
/// </summary>
public NodeGossipService(IPublisher bus, IGossipSeedSource gossipSeedSource, VNodeInfo nodeInfo, ICheckpoint writerCheckpoint, ICheckpoint chaserCheckpoint, IEpochManager epochManager, Func<long> getLastCommitPosition, int nodePriority)
    : base(bus, gossipSeedSource, nodeInfo)
{
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint");
    Ensure.NotNull(epochManager, "epochManager");
    Ensure.NotNull(getLastCommitPosition, "getLastCommitPosition");

    _writerCheckpoint = writerCheckpoint;
    _chaserCheckpoint = chaserCheckpoint;
    _epochManager = epochManager;
    _getLastCommitPosition = getLastCommitPosition;
    _nodePriority = nodePriority;
}
// Forwards a checkpoint event to the async subscribers. Bails out early when
// no async handlers are registered. The checkpoint and entity are wrapped in
// reference holders whose debug ref-counts bracket the handler invocation,
// and both holders are disposed once the async call completes.
// NOTE(review): Task.Run here is fire-and-forget — exceptions thrown by
// handlers are unobserved; confirm that is intended.
public override void OnCheckPointEvent(ICheckpoint checkpoint, IEntity entity, bool state) { base.OnCheckPointEvent(checkpoint, entity, state); if (!CheckpointAsyncEventHandler.HasEvents()) { return; } var checkpointReference = new CheckpointRef(checkpoint); var entityReference = new BaseObjectRef(entity); Task.Run(async() => { checkpointReference.DebugCountUp(); entityReference.DebugCountUp(); await CheckpointAsyncEventHandler.CallAsync(@delegate => @delegate(checkpoint, entity, state)); entityReference.DebugCountDown(); checkpointReference.DebugCountDown(); entityReference.Dispose(); checkpointReference.Dispose(); }); }
/// <summary>
/// Chases the transaction log behind the writer and feeds results to the
/// master bus / index committer / epoch manager.
/// </summary>
public StorageChaser(IPublisher masterBus, ICheckpoint writerCheckpoint, ITransactionFileChaser chaser, IIndexCommitter indexCommitter, IEpochManager epochManager)
{
    Ensure.NotNull(masterBus, "masterBus");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaser, "chaser");
    Ensure.NotNull(indexCommitter, "indexCommitter");
    Ensure.NotNull(epochManager, "epochManager");

    _masterBus = masterBus;
    _writerCheckpoint = writerCheckpoint;
    _chaser = chaser;
    _indexCommitter = indexCommitter;
    _epochManager = epochManager;

    // Flush bookkeeping starts from "now" with no accumulated delay.
    _flushDelay = 0;
    _lastFlush = _watch.ElapsedTicks;
}
// Gossip service for a node (near-duplicate of the other overload in this
// file): validates the node-state dependencies and stores them for building
// gossip payloads.
public NodeGossipService(IPublisher bus, IGossipSeedSource gossipSeedSource, VNodeInfo nodeInfo, ICheckpoint writerCheckpoint, ICheckpoint chaserCheckpoint, IEpochManager epochManager, Func <long> getLastCommitPosition, int nodePriority) : base(bus, gossipSeedSource, nodeInfo)
{
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint");
    Ensure.NotNull(epochManager, "epochManager");
    Ensure.NotNull(getLastCommitPosition, "getLastCommitPosition");

    _writerCheckpoint = writerCheckpoint;
    _chaserCheckpoint = chaserCheckpoint;
    _epochManager = epochManager;
    _getLastCommitPosition = getLastCommitPosition;
    _nodePriority = nodePriority;
}
/// <summary>
/// Manages projections: wires write/read request-response dispatchers over the
/// core output bus and initializes the projection registries.
/// </summary>
public ProjectionManager(IPublisher coreOutput, ICheckpoint checkpointForStatistics) {
    if (coreOutput == null) {
        throw new ArgumentNullException("coreOutput");
    }

    // Both dispatchers correlate requests and responses by CorrelationId.
    _writeDispatcher =
        new RequestResponseDispatcher <ClientMessage.WriteEvents, ClientMessage.WriteEventsCompleted>(
            coreOutput, v => v.CorrelationId, v => v.CorrelationId);
    _readDispatcher =
        new RequestResponseDispatcher <ClientMessage.ReadEventsBackwards, ClientMessage.ReadEventsBackwardsCompleted>(
            coreOutput, v => v.CorrelationId, v => v.CorrelationId);

    _coreOutput = coreOutput;
    // NOTE(review): checkpointForStatistics is not null-checked — presumably optional; confirm.
    _checkpointForStatistics = checkpointForStatistics;
    _projectionStateHandlerFactory = new ProjectionStateHandlerFactory();
    _projections = new Dictionary <string, ManagedProjection>();
    _projectionsMap = new Dictionary <Guid, string>();
}
/// <summary>
/// Creates a table index rooted at <paramref name="directory"/>.
/// </summary>
/// <param name="directory">Directory holding the persisted index tables.</param>
/// <param name="memTableFactory">Factory producing fresh in-memory tables.</param>
/// <param name="commitCheckpoint">Checkpoint tracking the commit position.</param>
/// <param name="maxSizeForMemory">Entry count at which a memtable is persisted.</param>
/// <param name="maxTablesPerLevel">Tables per level before merging; must be &gt; 1.</param>
/// <param name="additionalReclaim">Enables extra memory reclamation work.</param>
public TableIndex(string directory,
                  Func<IMemTable> memTableFactory,
                  ICheckpoint commitCheckpoint,
                  int maxSizeForMemory = 1000000,
                  int maxTablesPerLevel = 4,
                  bool additionalReclaim = false)
{
    Ensure.NotNullOrEmpty(directory, "directory");
    Ensure.NotNull(memTableFactory, "memTableFactory");
    // Fixed: report the actual parameter name ("commitCheckpoint", not "CommitCheckpoint").
    Ensure.NotNull(commitCheckpoint, "commitCheckpoint");
    if (maxTablesPerLevel <= 1)
        throw new ArgumentOutOfRangeException("maxTablesPerLevel");

    _directory = directory;
    _memTableFactory = memTableFactory;
    _fileNameProvider = new GuidFilenameProvider(directory);
    _commitCheckpoint = commitCheckpoint;
    _maxSizeForMemory = maxSizeForMemory;
    _maxTablesPerLevel = maxTablesPerLevel;
    _additionalReclaim = additionalReclaim;
    // Seed with one empty memtable; (-1, -1) marks positions as not-yet-known.
    _awaitingMemTables = new List<TableItem> { new TableItem(_memTableFactory(), -1, -1) };
}
/// <summary>
/// Creates an epoch manager that caches up to <paramref name="cachedEpochCount"/> epochs
/// and reads/writes epoch records through the transaction file.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">
/// When <paramref name="initialReaderCount"/> exceeds <paramref name="maxReaderCount"/>.
/// </exception>
public EpochManager(IPublisher bus,
                    int cachedEpochCount,
                    ICheckpoint checkpoint,
                    ITransactionFileWriter writer,
                    int initialReaderCount,
                    int maxReaderCount,
                    Func<ITransactionFileReader> readerFactory,
                    IRecordFactory<TStreamId> recordFactory,
                    INameIndex<TStreamId> streamNameIndex,
                    INameIndex<TStreamId> eventTypeIndex,
                    IPartitionManager partitionManager,
                    Guid instanceId)
{
    // Use nameof throughout so argument-exception messages always name the real parameter.
    // (The original reported "chunkWriter" for the "writer" parameter.)
    Ensure.NotNull(bus, nameof(bus));
    Ensure.Nonnegative(cachedEpochCount, nameof(cachedEpochCount));
    Ensure.NotNull(checkpoint, nameof(checkpoint));
    Ensure.NotNull(writer, nameof(writer));
    Ensure.Nonnegative(initialReaderCount, nameof(initialReaderCount));
    Ensure.Positive(maxReaderCount, nameof(maxReaderCount));
    if (initialReaderCount > maxReaderCount)
        throw new ArgumentOutOfRangeException(nameof(initialReaderCount),
            "initialReaderCount is greater than maxReaderCount.");
    Ensure.NotNull(readerFactory, nameof(readerFactory));

    _bus = bus;
    _cacheSize = cachedEpochCount;
    _checkpoint = checkpoint;
    // Pooled readers avoid re-opening the transaction file on every epoch read.
    _readers = new ObjectPool<ITransactionFileReader>("EpochManager readers pool",
        initialReaderCount, maxReaderCount, readerFactory);
    _writer = writer;
    _recordFactory = recordFactory;
    _streamNameIndex = streamNameIndex;
    _eventTypeIndex = eventTypeIndex;
    _partitionManager = partitionManager;
    _instanceId = instanceId;
}
/// <summary>
/// Creates a storage chaser that follows the writer checkpoint on the leader,
/// handing chased records to the index committer service and epoch manager,
/// and reporting queue statistics under "Storage Chaser".
/// </summary>
public StorageChaser(IPublisher leaderBus,
                     ICheckpoint writerCheckpoint,
                     ITransactionFileChaser chaser,
                     IIndexCommitterService indexCommitterService,
                     IEpochManager epochManager,
                     QueueStatsManager queueStatsManager)
{
    Ensure.NotNull(leaderBus, "leaderBus");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(chaser, "chaser");
    Ensure.NotNull(indexCommitterService, "indexCommitterService");
    Ensure.NotNull(epochManager, "epochManager");

    _leaderBus = leaderBus;
    _writerCheckpoint = writerCheckpoint;
    _chaser = chaser;
    _indexCommitterService = indexCommitterService;
    _epochManager = epochManager;
    _queueStats = queueStatsManager.CreateQueueStatsCollector("Storage Chaser");

    // No flush delay initially; record the current tick as the last flush time.
    _flushDelay = 0;
    _lastFlush = _watch.ElapsedTicks;
}
/// <summary>
/// Creates an epoch manager caching up to <paramref name="cachedEpochCount"/> epochs,
/// with a pooled set of transaction-file readers.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">
/// When <paramref name="initialReaderCount"/> exceeds <paramref name="maxReaderCount"/>.
/// </exception>
public EpochManager(int cachedEpochCount,
                    ICheckpoint checkpoint,
                    ITransactionFileWriter writer,
                    int initialReaderCount,
                    int maxReaderCount,
                    Func<ITransactionFileReader> readerFactory)
{
    Ensure.Nonnegative(cachedEpochCount, "cachedEpochCount");
    Ensure.NotNull(checkpoint, "checkpoint");
    // Fixed: report the actual parameter name ("writer"; previously said "chunkWriter").
    Ensure.NotNull(writer, "writer");
    Ensure.Nonnegative(initialReaderCount, "initialReaderCount");
    Ensure.Positive(maxReaderCount, "maxReaderCount");
    if (initialReaderCount > maxReaderCount)
        throw new ArgumentOutOfRangeException("initialReaderCount",
            "initialReaderCount is greater than maxReaderCount.");
    Ensure.NotNull(readerFactory, "readerFactory");

    CachedEpochCount = cachedEpochCount;
    _checkpoint = checkpoint;
    // Pooled readers avoid re-opening the transaction file on every epoch read.
    _readers = new ObjectPool<ITransactionFileReader>("EpochManager readers pool",
        initialReaderCount, maxReaderCount, readerFactory);
    _writer = writer;
}
/// <summary>
/// Creates the service that commits records to the index once they are
/// sufficiently replicated, tracking stats under "Index Committer".
/// </summary>
public IndexCommitterService(
    IIndexCommitter indexCommitter,
    IPublisher publisher,
    ICheckpoint writerCheckpoint,
    ICheckpoint replicationCheckpoint,
    int commitCount,
    ITableIndex tableIndex,
    QueueStatsManager queueStatsManager)
{
    Ensure.NotNull(indexCommitter, nameof(indexCommitter));
    Ensure.NotNull(publisher, nameof(publisher));
    Ensure.NotNull(writerCheckpoint, nameof(writerCheckpoint));
    Ensure.NotNull(replicationCheckpoint, nameof(replicationCheckpoint));
    Ensure.Positive(commitCount, nameof(commitCount));
    // Added for consistency with the other arguments: both are dereferenced/stored,
    // so validate them up front rather than failing later with an NRE.
    Ensure.NotNull(tableIndex, nameof(tableIndex));
    Ensure.NotNull(queueStatsManager, nameof(queueStatsManager));

    _indexCommitter = indexCommitter;
    _publisher = publisher;
    _writerCheckpoint = writerCheckpoint;
    _replicationCheckpoint = replicationCheckpoint;
    _commitCount = commitCount;
    _tableIndex = tableIndex;
    _queueStats = queueStatsManager.CreateQueueStatsCollector("Index Committer");
}
/// <summary>
/// Creates a projection manager that coordinates projections across the given worker queues.
/// </summary>
/// <param name="inputQueue">Queue on which this manager receives its own messages.</param>
/// <param name="publisher">Bus used to issue write/read requests.</param>
/// <param name="queues">Worker queues; must contain at least one element.</param>
/// <param name="checkpointForStatistics">Checkpoint sampled for statistics; not validated,
/// so it is presumably allowed to be null — confirm against callers.</param>
public ProjectionManager(IPublisher inputQueue,
                         IPublisher publisher,
                         IPublisher[] queues,
                         ICheckpoint checkpointForStatistics)
{
    if (inputQueue == null)
        throw new ArgumentNullException("inputQueue");
    if (publisher == null)
        throw new ArgumentNullException("publisher");
    if (queues == null)
        throw new ArgumentNullException("queues");
    // Fixed: the single-string ArgumentException ctor takes a MESSAGE, not a parameter
    // name — the original threw with message "queues" and no ParamName set.
    if (queues.Length == 0)
        throw new ArgumentException("Queues collection must not be empty.", "queues");

    _inputQueue = inputQueue;
    _publisher = publisher;
    _checkpointForStatistics = checkpointForStatistics;
    _queues = queues;

    // Replies are routed back to this manager's input queue and correlated by CorrelationId.
    _writeDispatcher =
        new RequestResponseDispatcher<ClientMessage.WriteEvents, ClientMessage.WriteEventsCompleted>(
            publisher, v => v.CorrelationId, v => v.CorrelationId, new PublishEnvelope(_inputQueue));
    _readDispatcher =
        new RequestResponseDispatcher<ClientMessage.ReadStreamEventsBackward, ClientMessage.ReadStreamEventsBackwardCompleted>(
            publisher, v => v.CorrelationId, v => v.CorrelationId, new PublishEnvelope(_inputQueue));

    _projectionStateHandlerFactory = new ProjectionStateHandlerFactory();
    _projections = new Dictionary<string, ManagedProjection>();
    _projectionsMap = new Dictionary<Guid, string>();
}
//TODO GFY add fluent builder for this
/// <summary>
/// Configuration for a multi-file transaction file database: location, file
/// naming, segment size, the writer checkpoint and any named checkpoints.
/// </summary>
public TransactionFileDatabaseConfig(string path,
                                     string filePrefix,
                                     long segmentSize,
                                     ICheckpoint writerCheckpoint,
                                     IEnumerable<ICheckpoint> namedCheckpoints)
{
    if (path == null)
        throw new ArgumentNullException("path");
    if (filePrefix == null)
        throw new ArgumentNullException("filePrefix");
    if (segmentSize <= 0)
        throw new ArgumentOutOfRangeException("segmentSize");
    if (writerCheckpoint == null)
        throw new ArgumentNullException("writerCheckpoint");
    if (namedCheckpoints == null)
        throw new ArgumentNullException("namedCheckpoints");
    // NOTE: power-of-2 segment size validation is intentionally disabled:
    // if ((segmentSize & (segmentSize-1)) != 0)
    //     throw new ArgumentException("Segment size should be the power of 2.", "segmentSize");

    Path = path;
    FilePrefix = filePrefix;
    FileNamingStrategy = new PrefixFileNamingStrategy(path, filePrefix);
    SegmentSize = segmentSize;
    WriterCheckpoint = writerCheckpoint;
    // Named checkpoints are looked up by name later (see GetNamedCheckpoint).
    _namedCheckpoints = namedCheckpoints.ToDictionary(x => x.Name);
}
/// <summary>
/// Configuration for a chunked transaction file database: location, chunk
/// naming/size, cache size, the writer checkpoint and any named checkpoints.
/// </summary>
public TFChunkDbConfig(string path,
                       IFileNamingStrategy fileNamingStrategy,
                       int chunkSize,
                       int cachedChunkCount,
                       ICheckpoint writerCheckpoint,
                       IEnumerable<ICheckpoint> namedCheckpoints)
{
    Ensure.NotNullOrEmpty(path, "path");
    Ensure.NotNull(fileNamingStrategy, "fileNamingStrategy");
    Ensure.Positive(chunkSize, "chunkSize");
    Ensure.Nonnegative(cachedChunkCount, "cachedChunkCount");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.NotNull(namedCheckpoints, "namedCheckpoints");
    // NOTE: power-of-2 chunk size validation is intentionally disabled:
    // if ((chunkSize & (chunkSize-1)) != 0)
    //     throw new ArgumentException("Segment size should be the power of 2.", "chunkSize");

    Path = path;
    ChunkSize = chunkSize;
    CachedChunkCount = cachedChunkCount;
    WriterCheckpoint = writerCheckpoint;
    FileNamingStrategy = fileNamingStrategy;
    // Named checkpoints are indexed by name for later lookup.
    _namedCheckpoints = namedCheckpoints.ToDictionary(x => x.Name);
}
/// <summary>
/// Creates a bulk retriever that reads the transaction file in chunks of
/// <paramref name="bulkSize"/> bytes through a file buffer of
/// <paramref name="fileBufferSize"/> bytes.
/// </summary>
/// <exception cref="ArgumentNullException">When <paramref name="config"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">
/// When <paramref name="fileBufferSize"/> is not positive, or
/// <paramref name="bulkSize"/> is not positive or exceeds the file buffer size.
/// </exception>
public MultifileTransactionFileBulkRetriever(TransactionFileDatabaseConfig config,
                                             int bulkSize,
                                             int fileBufferSize)
{
    if (config == null)
        throw new ArgumentNullException("config");
    if (fileBufferSize <= 0)
        throw new ArgumentOutOfRangeException("fileBufferSize");
    // Fixed: bulkSize was never checked for positivity, so a zero/negative value
    // slipped through to "new byte[bulkSize]" (throwing OverflowException for
    // negatives) instead of a clear argument error.
    if (bulkSize <= 0)
        throw new ArgumentOutOfRangeException("bulkSize");
    if (bulkSize > fileBufferSize)
        throw new ArgumentOutOfRangeException("bulkSize");

    _config = config;
    _bulkSize = bulkSize;
    _bulkBuffer = new byte[_bulkSize];
    _fileBufferSize = fileBufferSize;
    _segmentSize = config.SegmentSize;
    _writerCheckpoint = config.WriterCheckpoint;
    // Snapshot the writer position so reads can detect subsequent writes.
    _lastWriterCheck = _writerCheckpoint.Read();
}
/// <summary>
/// Folds one checkpoint sample into the running aggregate: increments the
/// count, records any error, accumulates times and updates min/max bounds.
/// </summary>
public void AggregateCheckpoint(TimeSpan momentDuration, ICheckpoint checkpoint)
{
    Count++;
    if (checkpoint.Error != null)
        Errors.Add(checkpoint.Error);

    var totalTime = checkpoint.TimePoint;
    SummedMomentTime += momentDuration;
    SummedTotalTime += totalTime;

    // Track the extremes of both the per-moment and total durations.
    MomentMin = momentDuration < MomentMin ? momentDuration : MomentMin;
    MomentMax = momentDuration > MomentMax ? momentDuration : MomentMax;
    TotalMin = totalTime < TotalMin ? totalTime : TotalMin;
    TotalMax = totalTime > TotalMax ? totalTime : TotalMax;
}
// Convenience overload using a default buffer size of 8096 bytes.
// NOTE(review): 8096 looks like a typo for 8192 (8 KiB) — confirm intent before changing.
public MultifileTransactionFileReader(TransactionFileDatabaseConfig config, ICheckpoint checkpoint) : this(config, checkpoint, 8096) { }
/// <summary>
/// Builds a throwaway chunk-db config rooted at <paramref name="dbPath"/>,
/// creating writer/chaser checkpoints as side effects in the instance fields.
/// Memory-mapped checkpoints are used except on Mono, which gets plain file
/// checkpoints instead.
/// </summary>
private TFChunkDbConfig CreateOneTimeDbConfig(int chunkSize, string dbPath, int chunksToCache)
{
    var writerChkPath = Path.Combine(dbPath, Checkpoint.Writer + ".chk");
    var chaserChkPath = Path.Combine(dbPath, Checkpoint.Chaser + ".chk");

    if (Runtime.IsMono)
    {
        _writerChk = new FileCheckpoint(writerChkPath, Checkpoint.Writer, cached: true);
        _chaserChk = new FileCheckpoint(chaserChkPath, Checkpoint.Chaser, cached: true);
    }
    else
    {
        _writerChk = new MemoryMappedFileCheckpoint(writerChkPath, Checkpoint.Writer, cached: true);
        _chaserChk = new MemoryMappedFileCheckpoint(chaserChkPath, Checkpoint.Chaser, cached: true);
    }

    return new TFChunkDbConfig(dbPath,
                               new VersionedPatternFileNamingStrategy(dbPath, "chunk-"),
                               chunkSize,
                               chunksToCache,
                               _writerChk,
                               _chaserChk,
                               new[] { _writerChk, _chaserChk });
}
/// <summary>
/// Persists a commit attempt in memory: detects duplicates and concurrency
/// conflicts, records the commit, marks it undispatched and refreshes the
/// stream head. All mutation happens under the commits lock.
/// </summary>
/// <exception cref="ConcurrencyException">When a conflicting commit already exists.</exception>
public ICommit Commit(CommitAttempt attempt, ICheckpoint checkpoint)
{
    lock (_commits)
    {
        DetectDuplicate(attempt);

        var newCommit = new InMemoryCommit(attempt.BucketId,
                                           attempt.StreamId,
                                           attempt.StreamRevision,
                                           attempt.CommitId,
                                           attempt.CommitSequence,
                                           attempt.CommitStamp,
                                           checkpoint.Value,
                                           attempt.Headers,
                                           attempt.Events,
                                           checkpoint);
        if (_potentialConflicts.Contains(new IdentityForConcurrencyConflictDetection(newCommit)))
            throw new ConcurrencyException();

        _stamps[newCommit.CommitId] = newCommit.CommitStamp;
        _commits.Add(newCommit);
        _potentialDuplicates.Add(new IdentityForDuplicationDetection(newCommit));
        _potentialConflicts.Add(new IdentityForConcurrencyConflictDetection(newCommit));
        _undispatched.Add(newCommit);

        // Replace the stream head, carrying over the old snapshot revision if any.
        var existingHead = _heads.FirstOrDefault(h => h.StreamId == newCommit.StreamId);
        _heads.Remove(existingHead);
        Logger.Debug(Resources.UpdatingStreamHead, newCommit.StreamId);
        var snapshotRev = existingHead == null ? 0 : existingHead.SnapshotRevision;
        _heads.Add(new StreamHead(newCommit.BucketId, newCommit.StreamId, newCommit.StreamRevision, snapshotRev));

        return newCommit;
    }
}
/// <summary>
/// Returns all commits strictly after the given checkpoint.
/// When no stored commit matches the checkpoint, IndexOf yields -1 and
/// Skip(0) returns every commit — i.e. reading from the very beginning.
/// </summary>
public IEnumerable<ICommit> GetFrom(ICheckpoint checkpoint)
{
    var anchor = _commits.FirstOrDefault(c => c.Checkpoint.CompareTo(checkpoint) == 0);
    var anchorIndex = _commits.IndexOf(anchor);
    return _commits.Skip(anchorIndex + 1); // GetFrom => after the checkpoint
}
/// <summary>
/// An in-memory commit: the base commit data plus the checkpoint object
/// it was committed at (kept for checkpoint-based range queries).
/// </summary>
public InMemoryCommit(string bucketId,
                      string streamId,
                      int streamRevision,
                      Guid commitId,
                      int commitSequence,
                      DateTime commitStamp,
                      string checkpointToken,
                      IDictionary<string, object> headers,
                      IEnumerable<EventMessage> events,
                      ICheckpoint checkpoint)
    : base(bucketId, streamId, streamRevision, commitId, commitSequence, commitStamp, checkpointToken, headers, events)
{
    _checkpoint = checkpoint;
}
/// <summary>
/// Wraps a ProjectionManager together with its dedicated output bus.
/// </summary>
private ProjectionManagerNode(IPublisher inputQueue, IPublisher[] queues, ICheckpoint checkpointForStatistics)
{
    // The manager publishes its results onto this private in-memory bus.
    _output = new InMemoryBus("ProjectionManagerOutput");
    _projectionManager = new ProjectionManager(inputQueue, _output, queues, checkpointForStatistics);
}
/// <summary>
/// Schedules <see cref="ICheckpoint.IsVehicleIn"/> onto the AltV async
/// scheduler and returns the pending result.
/// </summary>
public static Task<bool> IsVehicleInAsync(this ICheckpoint checkpoint, IVehicle vehicle)
{
    return AltVAsync.Schedule(() => checkpoint.IsVehicleIn(vehicle));
}
/// <summary>
/// Schedules <see cref="ICheckpoint.IsEntityIn"/> onto the AltV async
/// scheduler and returns the pending result.
/// </summary>
public static Task<bool> IsEntityInAsync(this ICheckpoint checkpoint, IEntity entity)
{
    return AltVAsync.Schedule(() => checkpoint.IsEntityIn(entity));
}
/// <summary>
/// Schedules the checkpoint's removal onto the AltV async scheduler.
/// </summary>
public static Task RemoveAsync(this ICheckpoint checkpoint)
{
    return AltVAsync.Schedule(checkpoint.Remove);
}
/// <summary>
/// Creates the service that commits records to the index once they have
/// reached the replication checkpoint.
/// </summary>
public IndexCommitterService(IIndexCommitter indexCommitter,
                             IPublisher publisher,
                             ICheckpoint replicationCheckpoint,
                             ICheckpoint writerCheckpoint,
                             int commitCount)
{
    Ensure.NotNull(indexCommitter, "indexCommitter");
    Ensure.NotNull(publisher, "publisher");
    Ensure.NotNull(replicationCheckpoint, "replicationCheckpoint");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.Positive(commitCount, "commitCount");

    _indexCommitter = indexCommitter;
    _publisher = publisher;
    _replicationCheckpoint = replicationCheckpoint;
    _writerCheckpoint = writerCheckpoint;
    _commitCount = commitCount;
}
/// <summary>
/// Creates a chaser over a multi-file transaction database. When
/// <paramref name="checksumName"/> is given, progress is tracked in the named
/// checkpoint from the config; otherwise an in-memory checkpoint starting at 0 is used.
/// </summary>
/// <exception cref="ArgumentNullException">When <paramref name="config"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">When <paramref name="bufferSize"/> is not positive.</exception>
/// <exception cref="ArgumentException">When no checkpoint with the given name exists.</exception>
public MultifileTransactionFileChaser(TransactionFileDatabaseConfig config, string checksumName, int bufferSize)
{
    if (config == null)
        throw new ArgumentNullException("config");
    // Fixed: bufferSize was unvalidated, so a negative value surfaced as an
    // OverflowException from "new byte[bufferSize]" instead of a clear argument error.
    if (bufferSize <= 0)
        throw new ArgumentOutOfRangeException("bufferSize");

    _config = config;
    _segmentSize = config.SegmentSize;
    _checksumName = checksumName;
    _bufferSize = bufferSize;
    _tmpBuffer = new byte[bufferSize];
    _buffer = new MemoryStream();
    _bufferReader = new BinaryReader(_buffer);

    if (_checksumName != null)
    {
        _myReaderCheckpoint = config.GetNamedCheckpoint(checksumName);
        // Fixed: the single-string ArgumentException ctor takes a MESSAGE, not a
        // parameter name — the original threw with the bare message "checksumName".
        if (_myReaderCheckpoint == null)
            throw new ArgumentException(
                string.Format("No checkpoint named '{0}' exists in the database config.", checksumName),
                "checksumName");
    }
    else
    {
        // No named checkpoint requested: chase from the beginning, tracked in memory.
        _myReaderCheckpoint = new InMemoryCheckpoint(0);
    }
}