/// <summary>
/// Creates a read index whose transaction-file readers come from a lazily
/// grown pool bounded by <paramref name="maxReaderCount"/>.
/// </summary>
public ReadIndex(IPublisher bus, int initialReaderCount, int maxReaderCount, Func<ITransactionFileReader> readerFactory, ITableIndex tableIndex, IHasher hasher, ILRUCache<string, StreamCacheInfo> streamInfoCache, bool additionalCommitChecks, int metastreamMaxCount)
{
    // Guard clauses first; validation order matches the parameter order
    // so failure messages point at the earliest bad argument.
    Ensure.NotNull(bus, "bus");
    Ensure.Positive(initialReaderCount, "initialReaderCount");
    Ensure.Positive(maxReaderCount, "maxReaderCount");
    if (initialReaderCount > maxReaderCount)
        throw new ArgumentOutOfRangeException("initialReaderCount", "initialReaderCount is greater than maxReaderCount.");
    Ensure.NotNull(readerFactory, "readerFactory");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");
    Ensure.NotNull(streamInfoCache, "streamInfoCache");
    Ensure.Positive(metastreamMaxCount, "metastreamMaxCount");

    _tableIndex = tableIndex;
    _hasher = hasher;
    _bus = bus;
    _streamInfoCache = streamInfoCache;
    _readers = new ObjectPool<ITransactionFileReader>("ReadIndex readers pool", initialReaderCount, maxReaderCount, readerFactory);
    _additionalCommitChecks = additionalCommitChecks;
    // Metastreams share one fixed metadata: keep at most metastreamMaxCount events.
    _metastreamMetadata = new StreamMetadata(metastreamMaxCount, null, null, null);
}
/// <summary>
/// Creates a read index with <paramref name="readerCount"/> pre-built
/// sequential and random-access transaction-file readers.
/// </summary>
public ReadIndex(IPublisher bus, int readerCount, Func<ITransactionFileSequentialReader> seqReaderFactory, Func<ITransactionFileReader> readerFactory, ITableIndex tableIndex, IHasher hasher, ILRUCache<string, StreamCacheInfo> streamInfoCache)
{
    Ensure.NotNull(bus, "bus");
    Ensure.Positive(readerCount, "readerCount");
    Ensure.NotNull(seqReaderFactory, "seqReaderFactory");
    Ensure.NotNull(readerFactory, "readerFactory");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");
    Ensure.NotNull(streamInfoCache, "streamInfoCache");

    _bus = bus;
    _tableIndex = tableIndex;
    _hasher = hasher;
    _streamInfoCache = streamInfoCache;

    // Eagerly materialize both reader stacks so no allocation happens on the read path.
    for (int readerIndex = 0; readerIndex < readerCount; ++readerIndex)
    {
        _seqReaders.Push(seqReaderFactory());
        _readers.Push(readerFactory());
    }
}
/// <summary>
/// Initializes the index committer with its collaborators.
/// </summary>
/// <param name="bus">Bus used to publish commit-related messages.</param>
/// <param name="backend">Index backend the committer writes through.</param>
/// <param name="indexReader">Reader used to consult existing index state.</param>
/// <param name="tableIndex">Underlying table index.</param>
/// <param name="additionalCommitChecks">Whether extra duplicate/consistency checks run on commit.</param>
public IndexCommitter(IPublisher bus, IIndexBackend backend, IIndexReader indexReader, ITableIndex tableIndex, bool additionalCommitChecks)
{
    // Validate reference arguments, matching the Ensure.* guard-clause
    // convention used by the other constructors in this codebase.
    Ensure.NotNull(bus, "bus");
    Ensure.NotNull(backend, "backend");
    Ensure.NotNull(indexReader, "indexReader");
    Ensure.NotNull(tableIndex, "tableIndex");

    _bus = bus;
    _backend = backend;
    _indexReader = indexReader;
    _tableIndex = tableIndex;
    _additionalCommitChecks = additionalCommitChecks;
}
/// <summary>
/// Creates an index reader over the given backend and table index.
/// </summary>
public IndexReader(IIndexBackend backend, ITableIndex tableIndex, StreamMetadata metastreamMetadata, int hashCollisionReadLimit)
{
    Ensure.NotNull(backend, "backend");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(metastreamMetadata, "metastreamMetadata");

    _backend = backend;
    _tableIndex = tableIndex;
    _metastreamMetadata = metastreamMetadata;
    // Cap on how many colliding index entries are read before giving up.
    _hashCollisionReadLimit = hashCollisionReadLimit;
}
/// <summary>
/// Creates an index reader that hashes stream names with the given hasher.
/// </summary>
public IndexReader(IIndexBackend backend, IHasher hasher, ITableIndex tableIndex, StreamMetadata metastreamMetadata)
{
    Ensure.NotNull(backend, "backend");
    Ensure.NotNull(hasher, "hasher");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(metastreamMetadata, "metastreamMetadata");

    _backend = backend;
    _hasher = hasher;
    _tableIndex = tableIndex;
    _metastreamMetadata = metastreamMetadata;
}
/// <summary>
/// Creates an index reader over the given index cache.
/// </summary>
/// <param name="cache">Cache backing index lookups.</param>
/// <param name="hasher">Hasher for stream names.</param>
/// <param name="tableIndex">Underlying table index.</param>
/// <param name="metastreamMetadata">Fixed metadata applied to metastreams.</param>
public IndexReader(IIndexCache cache, IHasher hasher, ITableIndex tableIndex, StreamMetadata metastreamMetadata)
{
    // Fix: the guard previously reported the wrong parameter name ("backend"),
    // producing a misleading exception message when `cache` was null.
    Ensure.NotNull(cache, "cache");
    Ensure.NotNull(hasher, "hasher");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(metastreamMetadata, "metastreamMetadata");

    _cache = cache;
    _hasher = hasher;
    _tableIndex = tableIndex;
    _metastreamMetadata = metastreamMetadata;
}
/// <summary>
/// Creates a chunk scavenger; when <paramref name="maxChunkDataSize"/> is null
/// the database's configured chunk size is used.
/// </summary>
public TFChunkScavenger(TFChunkDb db, ITableIndex tableIndex, IHasher hasher, IReadIndex readIndex, long? maxChunkDataSize = null)
{
    Ensure.NotNull(db, "db");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");
    Ensure.NotNull(readIndex, "readIndex");

    _db = db;
    _tableIndex = tableIndex;
    _hasher = hasher;
    _readIndex = readIndex;
    // Default the data-size cap to the configured chunk size.
    _maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
}
/// <summary>
/// Initializes the index committer service.
/// </summary>
/// <param name="indexCommitter">Committer that writes prepared entries into the index.</param>
/// <param name="publisher">Bus for outgoing messages.</param>
/// <param name="replicationCheckpoint">Checkpoint of replicated data.</param>
/// <param name="writerCheckpoint">Checkpoint of written data.</param>
/// <param name="commitCount">Required number of commit acknowledgements; must be positive.</param>
/// <param name="tableIndex">Underlying table index.</param>
public IndexCommitterService(IIndexCommitter indexCommitter, IPublisher publisher, ICheckpoint replicationCheckpoint, ICheckpoint writerCheckpoint, int commitCount, ITableIndex tableIndex)
{
    Ensure.NotNull(indexCommitter, "indexCommitter");
    Ensure.NotNull(publisher, "publisher");
    Ensure.NotNull(replicationCheckpoint, "replicationCheckpoint");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.Positive(commitCount, "commitCount");
    // Fix: tableIndex was stored without validation while every other
    // reference argument is null-checked; fail fast here instead of later.
    Ensure.NotNull(tableIndex, "tableIndex");

    _indexCommitter = indexCommitter;
    _publisher = publisher;
    _replicationCheckpoint = replicationCheckpoint;
    _writerCheckpoint = writerCheckpoint;
    _commitCount = commitCount;
    _tableIndex = tableIndex;
}
/// <summary>
/// Creates a storage scavenger with the given behavioral flags.
/// </summary>
public StorageScavenger(TFChunkDb db, ITableIndex tableIndex, IReadIndex readIndex, ITFChunkScavengerLogManager logManager, bool alwaysKeepScavenged, bool mergeChunks, bool unsafeIgnoreHardDeletes)
{
    Ensure.NotNull(db, "db");
    Ensure.NotNull(logManager, "logManager");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(readIndex, "readIndex");

    _db = db;
    _tableIndex = tableIndex;
    _readIndex = readIndex;
    _logManager = logManager;
    // Behavioral switches for the scavenge pass.
    _alwaysKeepScavenged = alwaysKeepScavenged;
    _mergeChunks = mergeChunks;
    _unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes;
}
/// <summary>
/// Builds the in-memory read-index stack (reader pool, backend, reader,
/// writer, committer) over fake transaction-file and table-index
/// implementations, then seeds it with test events.
/// </summary>
public virtual void TestFixtureSetUp()
{
    _publisher = new InMemoryBus("publisher");
    _tfReader = new FakeInMemoryTfReader(RecordOffset);
    _tableIndex = new FakeInMemoryTableIndex();

    // The pool always hands out the single fake reader instance.
    _readerPool = new ObjectPool<ITransactionFileReader>("ReadIndex readers pool", 5, 100, () => _tfReader);
    _indexBackend = new IndexBackend(_readerPool, 100000, 100000);
    _indexReader = new IndexReader(_indexBackend, _tableIndex, new StreamMetadata(maxCount: 100000), 100, false);
    _indexWriter = new IndexWriter(_indexBackend, _indexReader);
    _indexCommitter = new IndexCommitter(_publisher, _indexBackend, _indexReader, _tableIndex, false);

    WriteEvents();
}
/// <summary>
/// Creates a storage scavenger over the given database and indexes.
/// </summary>
public StorageScavenger(TFChunkDb db, ITableIndex tableIndex, IHasher hasher, IReadIndex readIndex, bool alwaysKeepScavenged, bool mergeChunks)
{
    Ensure.NotNull(db, "db");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");
    Ensure.NotNull(readIndex, "readIndex");

    _db = db;
    _tableIndex = tableIndex;
    _hasher = hasher;
    _readIndex = readIndex;
    _alwaysKeepScavenged = alwaysKeepScavenged;
    _mergeChunks = mergeChunks;
}
/// <summary>
/// Initializes the index committer with its collaborators and the index checkpoint.
/// </summary>
/// <param name="bus">Bus used to publish commit-related messages.</param>
/// <param name="backend">Index backend the committer writes through.</param>
/// <param name="indexReader">Reader used to consult existing index state.</param>
/// <param name="tableIndex">Underlying table index.</param>
/// <param name="indexChk">Checkpoint tracking how far the index is built.</param>
/// <param name="additionalCommitChecks">Whether extra duplicate/consistency checks run on commit.</param>
public IndexCommitter(IPublisher bus, IIndexBackend backend, IIndexReader indexReader, ITableIndex tableIndex, ICheckpoint indexChk, bool additionalCommitChecks)
{
    // Validate reference arguments, matching the Ensure.* guard-clause
    // convention used by the other constructors in this codebase.
    Ensure.NotNull(bus, "bus");
    Ensure.NotNull(backend, "backend");
    Ensure.NotNull(indexReader, "indexReader");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(indexChk, "indexChk");

    _bus = bus;
    _backend = backend;
    _indexReader = indexReader;
    _tableIndex = tableIndex;
    _indexChk = indexChk;
    _additionalCommitChecks = additionalCommitChecks;
}
/// <summary>
/// Creates a typed table index for the given key CLR type.
/// </summary>
/// <param name="FieldName">Name of the indexed field.</param>
/// <param name="keytype">CLR type of the index key; must be one of the supported types.</param>
/// <param name="indexfile">Path of the backing index file.</param>
/// <param name="isunique">Whether the index enforces unique keys.</param>
/// <param name="keylen">Fixed key length in bytes.</param>
/// <returns>A new <see cref="ITableIndex"/> instance.</returns>
/// <exception cref="NotSupportedException">Thrown when the key type is not supported.</exception>
public static ITableIndex CreateIndex(string FieldName, Type keytype, string indexfile, bool isunique, int keylen)
{
    // TableIndexBase<TKey> needs a compile-time type argument, so dispatch
    // explicitly per supported key type rather than via reflection.
    ITableIndex index = null;
    if (keytype == typeof(string))
    {
        index = new TableIndexBase<string>(FieldName, indexfile, isunique, keylen);
    }
    else if (keytype == typeof(Int32))
    {
        index = new TableIndexBase<Int32>(FieldName, indexfile, isunique, keylen);
    }
    else if (keytype == typeof(long))
    {
        index = new TableIndexBase<long>(FieldName, indexfile, isunique, keylen);
    }
    else if (keytype == typeof(Int16))
    {
        index = new TableIndexBase<Int16>(FieldName, indexfile, isunique, keylen);
    }
    else if (keytype == typeof(byte))
    {
        index = new TableIndexBase<byte>(FieldName, indexfile, isunique, keylen);
    }
    else if (keytype == typeof(Guid))
    {
        index = new TableIndexBase<Guid>(FieldName, indexfile, isunique, keylen);
    }
    else if (keytype == typeof(float))
    {
        index = new TableIndexBase<float>(FieldName, indexfile, isunique, keylen);
    }
    else if (keytype == typeof(double))
    {
        index = new TableIndexBase<double>(FieldName, indexfile, isunique, keylen);
    }
    // TODO: add more here...
    else
    {
        // Fix: throw the specific NotSupportedException instead of the bare
        // Exception base type (still caught by existing catch (Exception) blocks).
        throw new NotSupportedException(keytype.FullName + " index key type not supported");
    }
    return index;
}
/// <summary>
/// Yields SQL Server-specific annotations (clustering, include columns,
/// online creation, fill factor) for the given table index.
/// </summary>
public override IEnumerable<IAnnotation> For(ITableIndex index)
{
    // Model validation ensures that these facets are the same on all mapped
    // indexes, so reading the first mapped index is sufficient.
    var sourceIndex = index.MappedIndexes.First();
    var table = index.Table;

    var clustered = sourceIndex.IsClustered(StoreObjectIdentifier.Table(table.Name, table.Schema));
    if (clustered.HasValue)
    {
        yield return new Annotation(SqlServerAnnotationNames.Clustered, clustered.Value);
    }

    var included = sourceIndex.GetIncludeProperties();
    if (included != null)
    {
        var includeColumns = (IReadOnlyList<string>)included
            .Select(propertyName => sourceIndex.DeclaringEntityType.FindProperty(propertyName)
                .GetColumnName(StoreObjectIdentifier.Table(table.Name, table.Schema)))
            .ToArray();
        yield return new Annotation(SqlServerAnnotationNames.Include, includeColumns);
    }

    var createdOnline = sourceIndex.IsCreatedOnline();
    if (createdOnline.HasValue)
    {
        yield return new Annotation(SqlServerAnnotationNames.CreatedOnline, createdOnline.Value);
    }

    var fill = sourceIndex.GetFillFactor();
    if (fill.HasValue)
    {
        yield return new Annotation(SqlServerAnnotationNames.FillFactor, fill.Value);
    }
}
/// <summary>
/// Builds the index committer service over fake collaborators, wires up
/// message capture handlers, and runs the Given/When test phases.
/// </summary>
public virtual void TestFixtureSetUp()
{
    IndexCommitter = new FakeIndexCommitter();
    ReplicationCheckpoint = new InMemoryCheckpoint();
    WriterCheckpoint = new InMemoryCheckpoint(0);
    TableIndex = new FakeTableIndex();
    TfChunkScavengerLogManager = new FakeTfChunkLogManager();

    // Fix: IndexCommitterService's constructor takes (replicationCheckpoint,
    // writerCheckpoint); the two checkpoints were previously passed swapped,
    // so the service tracked each checkpoint under the wrong role.
    Service = new IndexCommitterService(IndexCommitter, Publisher, ReplicationCheckpoint, WriterCheckpoint, CommitCount, TableIndex, new QueueStatsManager());
    Service.Init(0);

    // Capture outgoing messages so individual tests can assert on them.
    Publisher.Subscribe(new AdHocHandler<StorageMessage.CommitIndexed>(m => CommitReplicatedMgs.Enqueue(m)));
    Publisher.Subscribe(new AdHocHandler<ReplicationTrackingMessage.IndexedTo>(m => IndexWrittenMgs.Enqueue(m)));
    Publisher.Subscribe<ReplicationTrackingMessage.ReplicatedTo>(Service);

    Given();
    When();
}
/// <summary>
/// Adds a new index over the given column.
/// </summary>
/// <param name="columnName">Name of the column to index; must match a known item property.</param>
/// <returns>The newly created index, already registered in <c>Indexes</c>.</returns>
/// <exception cref="ArgumentException">Thrown when no property exists for the column.</exception>
public ITableIndex AddIndex(string columnName)
{
    PropertyDescriptor colProp = itemProps[columnName];
    if (colProp == null)
    {
        // Fix: include the offending parameter name in the exception.
        throw new ArgumentException("Column property not found.", nameof(columnName));
    }

    // The index key type must match the column's property type, which is only
    // known at runtime, so close the generic TableIndex<,> via reflection.
    Type indexType = typeof(TableIndex<,>);
    Type constructedType = indexType.MakeGenericType(colProp.PropertyType, typeof(T));
    ITableIndex index = (ITableIndex)Activator.CreateInstance(constructedType, columnName);
    Indexes.Add(columnName, index);
    return index;
}
/// <summary>
/// Yields MySQL-specific annotations (prefix lengths, full-text, parser,
/// spatial) for the given table index; design time only.
/// </summary>
public override IEnumerable<IAnnotation> For(ITableIndex index, bool designTime)
{
    if (!designTime)
    {
        yield break;
    }

    // Model validation ensures that these facets are the same on all mapped
    // indexes, so reading the first mapped index is sufficient.
    var sourceIndex = index.MappedIndexes.First();

    var prefixLengths = sourceIndex.PrefixLength();
    if (prefixLengths != null && prefixLengths.Length > 0)
    {
        yield return new Annotation(MySqlAnnotationNames.IndexPrefixLength, prefixLengths);
    }

    var fullText = sourceIndex.IsFullText();
    if (fullText.HasValue)
    {
        yield return new Annotation(MySqlAnnotationNames.FullTextIndex, fullText.Value);
    }

    var parser = sourceIndex.FullTextParser();
    if (!string.IsNullOrEmpty(parser))
    {
        yield return new Annotation(MySqlAnnotationNames.FullTextParser, parser);
    }

    var spatial = sourceIndex.IsSpatial();
    if (spatial.HasValue)
    {
        yield return new Annotation(MySqlAnnotationNames.SpatialIndex, spatial.Value);
    }
}
/// <summary>
/// Initializes the index committer service and its queue statistics collector.
/// </summary>
/// <param name="indexCommitter">Committer that writes prepared entries into the index.</param>
/// <param name="publisher">Bus for outgoing messages.</param>
/// <param name="replicationCheckpoint">Checkpoint of replicated data.</param>
/// <param name="writerCheckpoint">Checkpoint of written data.</param>
/// <param name="commitCount">Required number of commit acknowledgements; must be positive.</param>
/// <param name="tableIndex">Underlying table index.</param>
/// <param name="queueStatsManager">Factory for the service's queue stats collector.</param>
public IndexCommitterService(IIndexCommitter indexCommitter, IPublisher publisher, ICheckpoint replicationCheckpoint, ICheckpoint writerCheckpoint, int commitCount, ITableIndex tableIndex, QueueStatsManager queueStatsManager)
{
    Ensure.NotNull(indexCommitter, "indexCommitter");
    Ensure.NotNull(publisher, "publisher");
    Ensure.NotNull(replicationCheckpoint, "replicationCheckpoint");
    Ensure.NotNull(writerCheckpoint, "writerCheckpoint");
    Ensure.Positive(commitCount, "commitCount");
    // Fix: tableIndex was stored without validation while every other
    // reference argument is null-checked; fail fast here instead of later.
    Ensure.NotNull(tableIndex, "tableIndex");

    _indexCommitter = indexCommitter;
    _publisher = publisher;
    _replicationCheckpoint = replicationCheckpoint;
    _writerCheckpoint = writerCheckpoint;
    _commitCount = commitCount;
    _tableIndex = tableIndex;
    _queueStats = queueStatsManager.CreateQueueStatsCollector("Index Committer");
}
/// <summary>
/// Creates a chunk scavenger identified by <paramref name="scavengeId"/> and
/// running on <paramref name="nodeEndpoint"/>.
/// </summary>
public TFChunkScavenger(TFChunkDb db, IODispatcher ioDispatcher, ITableIndex tableIndex, IReadIndex readIndex, Guid scavengeId, string nodeEndpoint, long? maxChunkDataSize = null, bool unsafeIgnoreHardDeletes = false)
{
    Ensure.NotNull(db, "db");
    Ensure.NotNull(ioDispatcher, "ioDispatcher");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(nodeEndpoint, "nodeEndpoint");
    Ensure.NotNull(readIndex, "readIndex");

    _db = db;
    _ioDispatcher = ioDispatcher;
    _tableIndex = tableIndex;
    _scavengeId = scavengeId;
    _nodeEndpoint = nodeEndpoint;
    _readIndex = readIndex;
    // Default the data-size cap to the configured chunk size.
    _maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
    _unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes;
}
/// <summary>
/// Creates a chunk scavenger for the given database. The scavenge run is
/// identified by <paramref name="scavengeId"/> for logging/reporting.
/// </summary>
public TFChunkScavenger(TFChunkDb db, IODispatcher ioDispatcher, ITableIndex tableIndex, IReadIndex readIndex, Guid scavengeId, string nodeEndpoint, long? maxChunkDataSize = null, bool unsafeIgnoreHardDeletes = false)
{
    // Validate all reference dependencies before any assignment.
    Ensure.NotNull(db, "db");
    Ensure.NotNull(ioDispatcher, "ioDispatcher");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(nodeEndpoint, "nodeEndpoint");
    Ensure.NotNull(readIndex, "readIndex");

    _db = db;
    _ioDispatcher = ioDispatcher;
    _tableIndex = tableIndex;
    _readIndex = readIndex;
    _scavengeId = scavengeId;
    _nodeEndpoint = nodeEndpoint;
    _unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes;
    // Fall back to the database's configured chunk size when no cap is given.
    _maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
}
/// <summary>
/// Creates a new <see cref="CreateIndexOperation" /> from the specified index.
/// </summary>
/// <param name="index"> The index. </param>
/// <returns> The operation. </returns>
public static CreateIndexOperation CreateFrom(ITableIndex index)
{
    Check.NotNull(index, nameof(index));

    var operation = new CreateIndexOperation
    {
        Name = index.Name,
        Schema = index.Table.Schema,
        Table = index.Table.Name,
        IsUnique = index.IsUnique,
        Columns = index.Columns.Select(column => column.Name).ToArray(),
        Filter = index.Filter
    };
    // Carry over all provider-specific annotations from the model index.
    operation.AddAnnotations(index.GetAnnotations());

    return operation;
}
/// <summary>
/// Creates a chunk scavenger that hashes stream names with the given hasher.
/// </summary>
public TFChunkScavenger(TFChunkDb db, IODispatcher ioDispatcher, ITableIndex tableIndex, IHasher hasher, IReadIndex readIndex, Guid scavengeId, string nodeEndpoint, long? maxChunkDataSize = null)
{
    Ensure.NotNull(db, "db");
    Ensure.NotNull(ioDispatcher, "ioDispatcher");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");
    Ensure.NotNull(nodeEndpoint, "nodeEndpoint");
    Ensure.NotNull(readIndex, "readIndex");

    _db = db;
    _ioDispatcher = ioDispatcher;
    _tableIndex = tableIndex;
    _hasher = hasher;
    _scavengeId = scavengeId;
    _nodeEndpoint = nodeEndpoint;
    _readIndex = readIndex;
    // Default the data-size cap to the configured chunk size.
    _maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
}
/// <summary>
/// Wires up the full read-index stack for a stream-id type
/// <c>TStreamId</c>: backend, reader, writer, committer and all-reader.
/// </summary>
/// <param name="bus">Bus used by the committer to publish messages.</param>
/// <param name="readerPool">Pool of transaction-file readers shared by the backend.</param>
/// <param name="tableIndex">Underlying table index.</param>
/// <param name="streamIds">Lookup from stream names to stream ids.</param>
/// <param name="streamNamesProvider">Provides stream-name and system-stream services.</param>
/// <param name="emptyStreamName">Sentinel stream id meaning "no stream".</param>
/// <param name="streamIdValidator">Validator for incoming stream ids.</param>
/// <param name="sizer">Measures stream-id sizes.</param>
/// <param name="streamInfoCacheCapacity">Capacity used for both backend caches; must be non-negative.</param>
/// <param name="additionalCommitChecks">Whether the committer performs extra consistency checks.</param>
/// <param name="metastreamMaxCount">Max-count applied to all metastreams; must be positive.</param>
/// <param name="hashCollisionReadLimit">Cap on reads done to resolve hash collisions.</param>
/// <param name="skipIndexScanOnReads">Whether reads skip the index scan optimization.</param>
/// <param name="replicationCheckpoint">Checkpoint of replicated data.</param>
/// <param name="indexCheckpoint">Checkpoint tracking how far the index is built.</param>
public ReadIndex(IPublisher bus,
    ObjectPool<ITransactionFileReader> readerPool,
    ITableIndex<TStreamId> tableIndex,
    IStreamIdLookup<TStreamId> streamIds,
    IStreamNamesProvider<TStreamId> streamNamesProvider,
    TStreamId emptyStreamName,
    IValidator<TStreamId> streamIdValidator,
    ISizer<TStreamId> sizer,
    int streamInfoCacheCapacity,
    bool additionalCommitChecks,
    long metastreamMaxCount,
    int hashCollisionReadLimit,
    bool skipIndexScanOnReads,
    IReadOnlyCheckpoint replicationCheckpoint,
    ICheckpoint indexCheckpoint)
{
    Ensure.NotNull(bus, "bus");
    Ensure.NotNull(readerPool, "readerPool");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(streamIds, nameof(streamIds));
    Ensure.NotNull(streamNamesProvider, nameof(streamNamesProvider));
    Ensure.NotNull(streamIdValidator, nameof(streamIdValidator));
    Ensure.NotNull(sizer, nameof(sizer));
    Ensure.Nonnegative(streamInfoCacheCapacity, "streamInfoCacheCapacity");
    Ensure.Positive(metastreamMaxCount, "metastreamMaxCount");
    Ensure.NotNull(replicationCheckpoint, "replicationCheckpoint");
    Ensure.NotNull(indexCheckpoint, "indexCheckpoint");

    // All metastreams share one fixed metadata object.
    var metastreamMetadata = new StreamMetadata(maxCount: metastreamMaxCount);
    // The same capacity is used for both of the backend's caches.
    var indexBackend = new IndexBackend<TStreamId>(readerPool, streamInfoCacheCapacity, streamInfoCacheCapacity);

    _indexReader = new IndexReader<TStreamId>(indexBackend, tableIndex, streamNamesProvider, streamIdValidator, metastreamMetadata, hashCollisionReadLimit, skipIndexScanOnReads);

    _streamIds = streamIds;
    _streamNames = streamNamesProvider.StreamNames;
    var systemStreams = streamNamesProvider.SystemStreams;
    _indexWriter = new IndexWriter<TStreamId>(indexBackend, _indexReader, _streamIds, _streamNames, systemStreams, emptyStreamName, sizer);
    _indexCommitter = new IndexCommitter<TStreamId>(bus, indexBackend, _indexReader, tableIndex, _streamNames, systemStreams, indexCheckpoint, additionalCommitChecks);
    _allReader = new AllReader<TStreamId>(indexBackend, _indexCommitter, _streamNames);
}
/// <summary>
/// Creates a storage scavenger bound to a specific node endpoint with a
/// bounded scavenge-history retention.
/// </summary>
public StorageScavenger(TFChunkDb db, IODispatcher ioDispatcher, ITableIndex tableIndex, IHasher hasher, IReadIndex readIndex, bool alwaysKeepScavenged, string nodeEndpoint, bool mergeChunks, int scavengeHistoryMaxAge)
{
    Ensure.NotNull(db, "db");
    Ensure.NotNull(ioDispatcher, "ioDispatcher");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");
    Ensure.NotNull(readIndex, "readIndex");
    Ensure.NotNull(nodeEndpoint, "nodeEndpoint");

    _db = db;
    _ioDispatcher = ioDispatcher;
    _tableIndex = tableIndex;
    _hasher = hasher;
    _readIndex = readIndex;
    _nodeEndpoint = nodeEndpoint;
    _alwaysKeepScavenged = alwaysKeepScavenged;
    _mergeChunks = mergeChunks;
    _scavengeHistoryMaxAge = scavengeHistoryMaxAge;
}
/// <summary>
/// Add rights for the specified role: first the rights defined directly on
/// objects, then the rights inherited by child objects. The two passes are
/// deliberately separate so that explicitly defined rights are registered
/// before (and thus take priority over) inherited ones.
/// </summary>
/// <param name="objRight_roleIndex">Index of object rights keyed by role ID.</param>
/// <param name="obj_parentObjIndex">Index of objects keyed by parent object number.</param>
/// <param name="rightByObj">Accumulator of rights keyed by object number.</param>
/// <param name="roleID">ID of the role whose rights are being added.</param>
protected void AddRoleRight(ITableIndex objRight_roleIndex, ITableIndex obj_parentObjIndex, RightByObj rightByObj, int roleID)
{
    // explicitly defined rights have higher priority
    foreach (ObjRight objRight in objRight_roleIndex.SelectItems(roleID))
    {
        AddObjRight(rightByObj, objRight.ObjNum, new Right(objRight));
    }

    // add rights on child objects; one Right instance is shared by all
    // children of the same granted object
    foreach (ObjRight objRight in objRight_roleIndex.SelectItems(roleID))
    {
        Right right = new Right(objRight);
        foreach (Obj childObj in EnumerateChildObjects(obj_parentObjIndex, objRight.ObjNum))
        {
            AddObjRight(rightByObj, childObj.ObjNum, right);
        }
    }
}
/// <summary>
/// Gets an index by the column name, populating it if necessary.
/// </summary>
/// <param name="columnName">Name of the indexed column.</param>
/// <param name="index">The found index, or null when none is registered.</param>
/// <returns>true if an index is registered for the column; otherwise false.</returns>
public bool TryGetIndex(string columnName, out ITableIndex index)
{
    if (Indexes.TryGetValue(columnName, out index))
    {
        // NOTE(review): locks on the index object itself, which is reachable
        // by callers of this method — assumes no external code locks the same
        // instance; confirm before relying on this for mutual exclusion.
        lock (index)
        {
            // Lazily populate the index from the current items on first use.
            if (!index.IsReady)
            {
                index.AddRangeToIndex(Items);
            }
        }
        return true;
    }
    else
    {
        index = null;
        return false;
    }
}
/// <summary>
///     <para>
///         Creates a human-readable representation of the given metadata.
///     </para>
///     <para>
///         Warning: Do not rely on the format of the returned string.
///         It is designed for debugging only and may change arbitrarily between releases.
///     </para>
/// </summary>
/// <param name="index"> The metadata item. </param>
/// <param name="options"> Options for generating the string. </param>
/// <param name="indent"> The number of indent spaces to use before each new line. </param>
/// <returns> A human-readable representation. </returns>
public static string ToDebugString(
    [NotNull] this ITableIndex index,
    MetadataDebugStringOptions options,
    int indent = 0)
{
    var sb = new StringBuilder();
    sb.Append(new string(' ', indent));

    var singleLine = (options & MetadataDebugStringOptions.SingleLine) != 0;
    if (singleLine)
    {
        sb.Append("Index: ");
    }

    sb.Append(index.Name)
      .Append(" ")
      .Append(ColumnBase.Format(index.Columns));

    if (index.IsUnique)
    {
        sb.Append(" Unique");
    }

    if (!string.IsNullOrWhiteSpace(index.Filter))
    {
        sb.Append(" Filtered");
    }

    if (!singleLine && (options & MetadataDebugStringOptions.IncludeAnnotations) != 0)
    {
        sb.Append(index.AnnotationsToDebugString(indent + 2));
    }

    return sb.ToString();
}
/// <summary>
/// This API supports the Entity Framework Core infrastructure and is not intended to be used
/// directly from your code. This API may change or be removed in future releases.
/// </summary>
public override IEnumerable<IAnnotation> For(ITableIndex index)
{
    // Read facets from the first mapped index; model validation guarantees
    // they agree across all mapped indexes.
    var sourceIndex = index.MappedIndexes.First();

    var fullText = sourceIndex.IsFullText();
    if (fullText.HasValue)
    {
        yield return new Annotation(MySQLAnnotationNames.FullTextIndex, fullText.Value);
    }

    var spatial = sourceIndex.IsSpatial();
    if (spatial.HasValue)
    {
        yield return new Annotation(MySQLAnnotationNames.SpatialIndex, spatial.Value);
    }
}
/// <summary>
/// Wires up the read-index stack: backend, reader, writer, committer and
/// all-reader, sharing one backend over the pooled TF readers.
/// </summary>
public ReadIndex(IPublisher bus, ObjectPool<ITransactionFileReader> readerPool, ITableIndex tableIndex, int streamInfoCacheCapacity, bool additionalCommitChecks, int metastreamMaxCount, int hashCollisionReadLimit)
{
    Ensure.NotNull(bus, "bus");
    Ensure.NotNull(readerPool, "readerPool");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.Nonnegative(streamInfoCacheCapacity, "streamInfoCacheCapacity");
    Ensure.Positive(metastreamMaxCount, "metastreamMaxCount");

    // All metastreams share this single metadata instance.
    var metastreamMetadata = new StreamMetadata(maxCount: metastreamMaxCount);

    _indexBackend = new IndexBackend(readerPool, streamInfoCacheCapacity, streamInfoCacheCapacity);
    _indexReader = new IndexReader(_indexBackend, tableIndex, metastreamMetadata, hashCollisionReadLimit);
    _indexWriter = new IndexWriter(_indexBackend, _indexReader);
    _indexCommitter = new IndexCommitter(bus, _indexBackend, _indexReader, tableIndex, additionalCommitChecks);
    _allReader = new AllReader(_indexBackend, _indexCommitter);
}
/// <summary>
///     This is an internal API that supports the Entity Framework Core infrastructure and not subject to
///     the same compatibility standards as public APIs. It may be changed or removed without notice in
///     any release. You should only use it directly in your code with extreme caution and knowing that
///     doing so can result in application failures when updating to a new Entity Framework Core release.
/// </summary>
public override IEnumerable<IAnnotation> For(ITableIndex index)
{
    // Model validation ensures that these facets are the same on all mapped
    // indexes, so reading the first one is sufficient.
    var sourceIndex = index.MappedIndexes.First();
    var table = index.Table;

    var included = sourceIndex.GetIncludeProperties();
    if (included != null)
    {
        // Translate model property names into store column names.
        var includeColumns = (IReadOnlyList<string>)included
            .Select(propertyName => sourceIndex.DeclaringEntityType.FindProperty(propertyName)
                .GetColumnName(StoreObjectIdentifier.Table(table.Name, table.Schema)))
            .ToArray();
        yield return new Annotation(JetAnnotationNames.Include, includeColumns);
    }
}
/// <summary>
/// Builds the read-index component graph over a shared index backend.
/// </summary>
public ReadIndex(IPublisher bus, ObjectPool<ITransactionFileReader> readerPool, ITableIndex tableIndex, int streamInfoCacheCapacity, bool additionalCommitChecks, int metastreamMaxCount, int hashCollisionReadLimit)
{
    // Guard clauses for every reference/range argument.
    Ensure.NotNull(bus, "bus");
    Ensure.NotNull(readerPool, "readerPool");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.Nonnegative(streamInfoCacheCapacity, "streamInfoCacheCapacity");
    Ensure.Positive(metastreamMaxCount, "metastreamMaxCount");

    // Single metadata object applied to every metastream.
    var metastreamMetadata = new StreamMetadata(maxCount: metastreamMaxCount);
    _indexBackend = new IndexBackend(readerPool, streamInfoCacheCapacity, streamInfoCacheCapacity);

    // Reader first; writer/committer/all-reader depend on it.
    _indexReader = new IndexReader(_indexBackend, tableIndex, metastreamMetadata, hashCollisionReadLimit);
    _indexWriter = new IndexWriter(_indexBackend, _indexReader);
    _indexCommitter = new IndexCommitter(bus, _indexBackend, _indexReader, tableIndex, additionalCommitChecks);
    _allReader = new AllReader(_indexBackend, _indexCommitter);
}
/// <summary>
///     This is an internal API that supports the Entity Framework Core infrastructure and not subject to
///     the same compatibility standards as public APIs. It may be changed or removed without notice in
///     any release. You should only use it directly in your code with extreme caution and knowing that
///     doing so can result in application failures when updating to a new Entity Framework Core release.
/// </summary>
public override IEnumerable<IAnnotation> For(ITableIndex index, bool designTime)
{
    // These annotations are only emitted for design-time model processing.
    if (!designTime)
    {
        yield break;
    }

    // Model validation ensures that these facets are the same on all mapped indexes
    var modelIndex = index.MappedIndexes.First();
    var table = StoreObjectIdentifier.Table(index.Table.Name, index.Table.Schema);

    // Clustered flag, only when explicitly configured (pattern filters out null).
    if (modelIndex.IsClustered(table) is bool isClustered)
    {
        yield return new Annotation(SqlServerAnnotationNames.Clustered, isClustered);
    }

    // INCLUDE columns: translate model property names to store column names.
    if (modelIndex.GetIncludeProperties(table) is IReadOnlyList<string> includeProperties)
    {
        var includeColumns = includeProperties
            .Select(
                p => modelIndex.DeclaringEntityType.FindProperty(p) !
                    .GetColumnName(StoreObjectIdentifier.Table(table.Name, table.Schema)))
            .ToArray();
        yield return new Annotation(
            SqlServerAnnotationNames.Include,
            includeColumns);
    }

    // ONLINE index creation flag, only when explicitly configured.
    if (modelIndex.IsCreatedOnline(table) is bool isOnline)
    {
        yield return new Annotation(SqlServerAnnotationNames.CreatedOnline, isOnline);
    }

    // Fill factor, only when explicitly configured.
    if (modelIndex.GetFillFactor(table) is int fillFactor)
    {
        yield return new Annotation(SqlServerAnnotationNames.FillFactor, fillFactor);
    }
}
/// <summary>
/// Creates a multi-threaded chunk scavenger; the thread count is capped at
/// <c>MaxThreadCount</c> with a warning rather than an error.
/// </summary>
public TFChunkScavenger(TFChunkDb db, ITFChunkScavengerLog scavengerLog, ITableIndex tableIndex, IReadIndex readIndex, long? maxChunkDataSize = null, bool unsafeIgnoreHardDeletes = false, int threads = 1)
{
    Ensure.NotNull(db, "db");
    Ensure.NotNull(scavengerLog, "scavengerLog");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(readIndex, "readIndex");
    Ensure.Positive(threads, "threads");

    // Clamp (don't reject) oversized thread counts, but leave a trace in the log.
    if (threads > MaxThreadCount)
    {
        Log.Warn("{numThreads} scavenging threads not allowed. Max threads allowed for scavenging is {maxThreadCount}. Capping.",
            threads, MaxThreadCount);
        threads = MaxThreadCount;
    }

    _db = db;
    _scavengerLog = scavengerLog;
    _tableIndex = tableIndex;
    _readIndex = readIndex;
    _maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize;
    _unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes;
    _threads = threads;
}
/// <summary>
/// Builds the read-index stack over an index cache; a single IndexWriter
/// instance serves as both the writer and the committer.
/// </summary>
public ReadIndex(IPublisher bus, ObjectPool<ITransactionFileReader> readerPool, ITableIndex tableIndex, IHasher hasher, int streamInfoCacheCapacity, bool additionalCommitChecks, int metastreamMaxCount)
{
    Ensure.NotNull(bus, "bus");
    Ensure.NotNull(readerPool, "readerPool");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");
    Ensure.Nonnegative(streamInfoCacheCapacity, "streamInfoCacheCapacity");
    Ensure.Positive(metastreamMaxCount, "metastreamMaxCount");

    // Single metadata object applied to every metastream.
    var metastreamMetadata = new StreamMetadata(maxCount: metastreamMaxCount);

    _indexCache = new IndexCache(readerPool, streamInfoCacheCapacity, streamInfoCacheCapacity);
    _indexReader = new IndexReader(_indexCache, hasher, tableIndex, metastreamMetadata);

    // One IndexWriter instance fulfils both the writer and committer roles.
    var writer = new IndexWriter(bus, tableIndex, hasher, _indexCache, _indexReader, additionalCommitChecks);
    _indexWriter = writer;
    _indexCommitter = writer;

    _allReader = new AllReader(_indexCache);
}
/// <summary>
/// Creates a read index with a chaser factory and a fixed number of
/// pre-built transaction-file readers.
/// </summary>
public ReadIndex(IPublisher bus, Func<long, ITransactionFileChaser> chaserFactory, Func<ITransactionFileReader> readerFactory, int readerCount, ITableIndex tableIndex, IHasher hasher)
{
    Ensure.NotNull(bus, "bus");
    Ensure.NotNull(readerFactory, "readerFactory");
    Ensure.NotNull(chaserFactory, "chaserFactory");
    Ensure.Positive(readerCount, "readerCount");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");

    _bus = bus;
    _chaserFactory = chaserFactory;
    _tableIndex = tableIndex;
    _hasher = hasher;

    // Pre-build all readers up front so none are created on the read path.
    for (int readerIndex = 0; readerIndex < readerCount; ++readerIndex)
    {
        _readers.Push(readerFactory());
    }
}
/// <summary>
/// Enumerates child objects recursively, depth first. The
/// <paramref name="protectionSet"/> guards against cycles in the
/// parent/child relation and against yielding the same object twice.
/// </summary>
/// <param name="obj_parentObjIndex">Index of objects keyed by parent object number.</param>
/// <param name="parentObjNum">Object number whose descendants are enumerated.</param>
/// <param name="protectionSet">Object numbers already visited; created on the outermost call.</param>
protected IEnumerable<Obj> EnumerateChildObjects(ITableIndex obj_parentObjIndex, int parentObjNum, HashSet<int> protectionSet = null)
{
    if (protectionSet == null)
    {
        protectionSet = new HashSet<int> { parentObjNum };
    }

    foreach (Obj childObj in obj_parentObjIndex.SelectItems(parentObjNum))
    {
        if (protectionSet.Add(childObj.ObjNum))
        {
            yield return childObj;

            // Fix: pass the shared protectionSet into the recursive call.
            // Previously each recursion level created a fresh set, so a cycle
            // spanning more than one level caused duplicate results and
            // unbounded recursion.
            foreach (Obj grandchildObj in EnumerateChildObjects(obj_parentObjIndex, childObj.ObjNum, protectionSet))
            {
                yield return grandchildObj;
            }
        }
    }
}
/// <summary>
/// Creates a read index with <paramref name="readerCount"/> pre-built
/// sequential and random-access readers.
/// </summary>
public ReadIndex(IPublisher bus, int readerCount, Func<ITransactionFileSequentialReader> seqReaderFactory, Func<ITransactionFileReader> readerFactory, ITableIndex tableIndex, IHasher hasher)
{
    Ensure.NotNull(bus, "bus");
    Ensure.Positive(readerCount, "readerCount");
    Ensure.NotNull(seqReaderFactory, "seqReaderFactory");
    Ensure.NotNull(readerFactory, "readerFactory");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");

    _bus = bus;
    _tableIndex = tableIndex;
    _hasher = hasher;

    // Fill both reader stacks eagerly, one of each kind per iteration.
    for (int readerIndex = 0; readerIndex < readerCount; ++readerIndex)
    {
        _seqReaders.Push(seqReaderFactory());
        _readers.Push(readerFactory());
    }
}
/// <summary>
/// Wraps a table index as a data source scoped to the given transaction.
/// </summary>
public SystemIndexSetDataSource(SystemTransaction transaction, ITableIndex index)
{
    this.index = index;
    this.transaction = transaction;
}
/// <summary>
/// Returns the annotations applied when renaming the given index; this base
/// implementation contributes none.
/// </summary>
/// <inheritdoc />
public virtual IEnumerable<IAnnotation> ForRename(ITableIndex index)
{
    return Enumerable.Empty<IAnnotation>();
}
/// <summary>
/// Parse the filter collection and get an execution plan.
/// </summary>
/// <returns>The plan describing the start collection and per-column scans.</returns>
public static ExecutionPlan GetExecutionPlan(Query query)
{
    ExecutionPlan executionplan = new ExecutionPlan();

    // Prefer an index that matches the requested ordering; if none exists,
    // results must be sorted afterwards (RequireOrderBy).
    ITableIndex startindex = null;
    if (!string.IsNullOrEmpty(query.OrderByFieldName))
    {
        startindex = query.table.Indexs.Find(o => o.FieldName == query.OrderByFieldName);
        if (startindex == null)
        {
            executionplan.RequireOrderBy = true;
        }
    }

    // Fall back to the system index when no order-by index was found.
    if (startindex == null)
    {
        startindex = query.table.Indexs.Find(o => o.IsSystem);
    }

    if (!string.IsNullOrEmpty(query.OrderByFieldName) && !executionplan.RequireOrderBy)
    {
        // Narrow the start collection by any range filter on the order-by field.
        Range<byte[]> range = getRange(query.OrderByFieldName, query.items);
        if (range != null)
        {
            executionplan.startCollection = startindex.GetCollection(range.lower, range.upper, range.lowerOpen, range.upperOpen, query.Ascending);
        }
        else
        {
            executionplan.startCollection = startindex.AllItems(query.Ascending);
        }
    }
    else
    {
        executionplan.startCollection = startindex.AllItems(query.Ascending);
    }

    // Collect range filters for every other indexed field used in the query.
    foreach (var item in query.table.Indexs)
    {
        if (item.FieldName != startindex.FieldName)
        {
            Range<byte[]> indexrange = getRange(item.FieldName, query.items);
            if (indexrange != null)
            {
                executionplan.indexRanges.Add(item.FieldName, indexrange);
            }
        }
    }

    // Remaining (non-indexed) comparison filters become fixed-length column scans.
    foreach (var item in query.items)
    {
        var column = query.table.ObjectConverter.Fields.Find(o => o.FieldName == item.FieldOrProperty);
        if (column != null)
        {
            ColumnScan colplan = new ColumnScan();
            colplan.ColumnName = column.FieldName;
            colplan.relativeStartPosition = column.RelativePosition;
            colplan.length = column.Length;
            colplan.Evaluator = ColumnEvaluator.GetEvaluator(column.ClrType, item.Compare, item.Value, column.Length);
            executionplan.scanColumns.Add(colplan);
        }
        else
        {
            throw new Exception("filter field must be column with fixed len");
        }
    }

    // "IN (...)" filters become column scans with an in-list evaluator.
    foreach (var item in query.InItems)
    {
        var column = query.table.ObjectConverter.Fields.Find(o => o.FieldName == item.Key);
        if (column != null)
        {
            ColumnScan colplan = new ColumnScan();
            colplan.ColumnName = column.FieldName;
            colplan.relativeStartPosition = column.RelativePosition;
            colplan.length = column.Length;
            colplan.Evaluator = ColumnInEvaluator.GetInEvaluator(column.ClrType, item.Value, column.Length);
            executionplan.scanColumns.Add(colplan);
        }
        else
        {
            throw new Exception("filter field must be a column with fixed length");
        }
    }

    // Method-call filters: exactly one argument must be a field/property
    // access, which identifies the column the evaluator runs against.
    foreach (var item in query.calls)
    {
        MemberExpression memberaccess = null;
        foreach (var xitem in item.Arguments)
        {
            if (xitem.NodeType == ExpressionType.MemberAccess)
            {
                memberaccess = xitem as MemberExpression;
            }
        }
        if (memberaccess == null)
        {
            throw new Exception("Method call require use one of the Fields or Property as parameters");
        }

        string fieldname = memberaccess.Member.Name;
        var column = query.table.ObjectConverter.Fields.Find(o => o.FieldName == fieldname);
        if (column != null)
        {
            ColumnScan colplan = new ColumnScan();
            colplan.ColumnName = column.FieldName;
            colplan.relativeStartPosition = column.RelativePosition;
            colplan.length = column.Length;
            colplan.Evaluator = ColumnMethodCallEvaluator.GetMethodEvaluator(column.ClrType, column.Length, item);
            executionplan.scanColumns.Add(colplan);
        }
        else
        {
            throw new Exception("methed call parameter must be a column, add the field to colomn creating creating the store, otherwise use the fullscan option");
        }
    }

    return (executionplan);
}
/// <summary>
/// Decides whether <paramref name="prepare"/> survives deduplication:
/// only the record whose log position matches the readable event at
/// <paramref name="eventNumber"/> is kept.
/// </summary>
private bool KeepOnlyFirstEventOfDuplicate(ITableIndex tableIndex, PrepareLogRecord prepare, long eventNumber)
{
    var readResult = _readIndex.ReadEvent(prepare.EventStreamId, eventNumber);
    // If the event is unreadable there is nothing to compare against — keep.
    if (readResult.Result != ReadEventResult.Success)
        return true;
    // Keep only when this prepare is the record the read index resolves to.
    return readResult.Record.LogPosition == prepare.LogPosition;
}
/// <summary>
/// Decides whether <paramref name="prepare"/> survives deduplication:
/// only the record whose log position matches the readable event at
/// <paramref name="eventNumber"/> is kept.
/// </summary>
private bool KeepOnlyFirstEventOfDuplicate(ITableIndex tableIndex, PrepareLogRecord prepare, int eventNumber)
{
    var readResult = _readIndex.ReadEvent(prepare.EventStreamId, eventNumber);
    // If the event is unreadable there is nothing to compare against — keep.
    if (readResult.Result != ReadEventResult.Success)
        return true;
    // Keep only when this prepare is the record the read index resolves to.
    return readResult.Record.LogPosition == prepare.LogPosition;
}
/// <summary>
/// Initializes the index writer with its collaborators.
/// </summary>
/// <param name="bus">Bus used to publish messages.</param>
/// <param name="tableIndex">Underlying table index.</param>
/// <param name="hasher">Hasher for stream names.</param>
/// <param name="indexCache">Cache backing index lookups.</param>
/// <param name="indexReader">Reader used to consult existing index state.</param>
/// <param name="additionalCommitChecks">Whether extra duplicate/consistency checks run on commit.</param>
public IndexWriter(IPublisher bus, ITableIndex tableIndex, IHasher hasher, IIndexCache indexCache, IIndexReader indexReader, bool additionalCommitChecks)
{
    // Fix: the guard previously reported the wrong parameter name
    // ("indexBackend"), producing a misleading exception message.
    Ensure.NotNull(indexCache, "indexCache");
    Ensure.NotNull(indexReader, "indexReader");
    Ensure.NotNull(bus, "bus");
    Ensure.NotNull(tableIndex, "tableIndex");
    Ensure.NotNull(hasher, "hasher");

    _bus = bus;
    _tableIndex = tableIndex;
    _hasher = hasher;
    _indexCache = indexCache;
    _indexReader = indexReader;
    _additionalCommitChecks = additionalCommitChecks;
}