/// <summary>
/// Creates a table index rooted at <paramref name="directory"/>, seeding the awaiting
/// list with one empty mem table produced by <paramref name="memTableFactory"/>.
/// </summary>
/// <param name="directory">Directory holding the index files; must be non-null and non-empty.</param>
/// <param name="memTableFactory">Factory for fresh mem tables; invoked once here for the initial table.</param>
/// <param name="tfReaderFactory">Factory producing transaction-file reader leases.</param>
/// <param name="maxSizeForMemory">Entry count at which a mem table is considered full.</param>
/// <param name="maxTablesPerLevel">Tables allowed per level before merging; must be greater than 1.</param>
/// <param name="additionalReclaim">Enables additional reclaim of already-persisted mem tables.</param>
/// <param name="inMem">When true, the index is kept purely in memory.</param>
/// <exception cref="ArgumentOutOfRangeException">When <paramref name="maxTablesPerLevel"/> is not greater than 1.</exception>
public TableIndex(string directory, Func<IMemTable> memTableFactory, Func<TFReaderLease> tfReaderFactory,
	int maxSizeForMemory = 1000000, int maxTablesPerLevel = 4, bool additionalReclaim = false,
	bool inMem = false) {
	Ensure.NotNullOrEmpty(directory, nameof(directory));
	Ensure.NotNull(memTableFactory, nameof(memTableFactory));
	Ensure.NotNull(tfReaderFactory, nameof(tfReaderFactory));
	if (maxTablesPerLevel <= 1) {
		throw new ArgumentOutOfRangeException(nameof(maxTablesPerLevel));
	}

	_directory = directory;
	_memTableFactory = memTableFactory;
	_tfReaderFactory = tfReaderFactory;
	_fileNameProvider = new GuidFilenameProvider(directory);
	_maxSizeForMemory = maxSizeForMemory;
	_maxTablesPerLevel = maxTablesPerLevel;
	_additionalReclaim = additionalReclaim;
	_inMem = inMem;
	// (-1, -1) presumably means "no prepare/commit checkpoint yet" — confirm against TableItem.
	_awaitingMemTables = new List<TableItem> { new TableItem(_memTableFactory(), -1, -1) };
}
/// <summary>
/// Creates a table index rooted at <paramref name="directory"/> with a configurable
/// midpoint cache depth, seeding the awaiting list with one empty mem table.
/// </summary>
/// <param name="directory">Directory holding the index files; must be non-null and non-empty.</param>
/// <param name="memTableFactory">Factory for fresh mem tables; invoked once here for the initial table.</param>
/// <param name="tfReaderFactory">Factory producing transaction-file reader leases.</param>
/// <param name="maxSizeForMemory">Entry count at which a mem table is considered full.</param>
/// <param name="maxTablesPerLevel">Tables allowed per level before merging; must be greater than 1.</param>
/// <param name="additionalReclaim">Enables additional reclaim of already-persisted mem tables.</param>
/// <param name="inMem">When true, the index is kept purely in memory.</param>
/// <param name="indexCacheDepth">PTable cache depth; must be in the range [8, 28].</param>
/// <exception cref="ArgumentOutOfRangeException">
/// When <paramref name="maxTablesPerLevel"/> is not greater than 1,
/// or <paramref name="indexCacheDepth"/> is outside [8, 28].
/// </exception>
public TableIndex(string directory, Func<IMemTable> memTableFactory, Func<TFReaderLease> tfReaderFactory,
	int maxSizeForMemory = 1000000, int maxTablesPerLevel = 4, bool additionalReclaim = false,
	bool inMem = false, int indexCacheDepth = 16) {
	Ensure.NotNullOrEmpty(directory, nameof(directory));
	Ensure.NotNull(memTableFactory, nameof(memTableFactory));
	Ensure.NotNull(tfReaderFactory, nameof(tfReaderFactory));
	if (maxTablesPerLevel <= 1) {
		throw new ArgumentOutOfRangeException(nameof(maxTablesPerLevel));
	}
	if (indexCacheDepth > 28 || indexCacheDepth < 8) {
		throw new ArgumentOutOfRangeException(nameof(indexCacheDepth));
	}

	_directory = directory;
	_memTableFactory = memTableFactory;
	_tfReaderFactory = tfReaderFactory;
	_fileNameProvider = new GuidFilenameProvider(directory);
	_maxSizeForMemory = maxSizeForMemory;
	_maxTablesPerLevel = maxTablesPerLevel;
	_additionalReclaim = additionalReclaim;
	_inMem = inMem;
	_indexCacheDepth = indexCacheDepth;
	// (-1, -1) presumably means "no prepare/commit checkpoint yet" — confirm against TableItem.
	_awaitingMemTables = new List<TableItem> { new TableItem(_memTableFactory(), -1, -1) };
}
/// <summary>
/// Adds <paramref name="tableToAdd"/> at level 0 of a copy of the current map and cascades
/// merges: any level holding at least the per-level maximum is merged into the level above.
/// Returns the resulting map together with the tables made obsolete by the merges.
/// </summary>
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint,
	Func<IndexEntry, bool> recordExistsAt, IIndexFilenameProvider filenameProvider) {
	Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint");
	Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint");

	// Operate on a copy so the live map is untouched until the result is applied.
	var levels = CopyFrom(_map);
	CreateIfNeeded(0, levels);
	levels[0].Add(tableToAdd);

	var obsolete = new List<PTable>();
	// Visiting levels in ascending order lets a merge at level N immediately trigger
	// a follow-up merge at level N + 1 on the next iteration.
	for (var lvl = 0; lvl < levels.Count; lvl++) {
		if (levels[lvl].Count < _maxTablesPerLevel)
			continue;
		var targetFile = filenameProvider.GetFilenameNewTable();
		var merged = PTable.MergeTo(levels[lvl], targetFile, recordExistsAt);
		CreateIfNeeded(lvl + 1, levels);
		levels[lvl + 1].Add(merged);
		obsolete.AddRange(levels[lvl]);
		levels[lvl].Clear();
	}

	var newMap = new IndexMap(Version, levels, prepareCheckpoint, commitCheckpoint, _maxTablesPerLevel);
	return new MergeResult(newMap, obsolete);
}
/// <summary>
/// Creates a table index using a low/high hasher pair for stream-hash computation,
/// seeding the awaiting list with one empty mem table at level 0.
/// </summary>
/// <param name="directory">Directory holding the index files; must be non-null and non-empty.</param>
/// <param name="lowHasher">Hasher producing the low 32 bits of a stream hash (presumably; confirm with callers).</param>
/// <param name="highHasher">Hasher producing the high 32 bits of a stream hash (presumably; confirm with callers).</param>
/// <param name="memTableFactory">Factory for fresh mem tables; invoked once here for the initial table.</param>
/// <param name="tfReaderFactory">Factory producing transaction-file reader leases.</param>
/// <param name="ptableVersion">On-disk PTable format version to write.</param>
/// <param name="maxAutoMergeIndexLevel">Highest level included in automatic merges.</param>
/// <param name="pTableMaxReaderCount">Maximum reader count per PTable; must be positive.</param>
/// <param name="maxSizeForMemory">Entry count at which a mem table is considered full.</param>
/// <param name="maxTablesPerLevel">Tables allowed per level before merging; must be greater than 1.</param>
/// <param name="additionalReclaim">Enables additional reclaim of already-persisted mem tables.</param>
/// <param name="inMem">When true, the index is kept purely in memory.</param>
/// <param name="skipIndexVerify">Requests skipping index verification; ignored when verification is forced.</param>
/// <param name="indexCacheDepth">PTable cache depth; must be in the range [8, 28].</param>
/// <param name="initializationThreads">Threads used to initialize PTables; must be positive.</param>
/// <exception cref="ArgumentOutOfRangeException">
/// When <paramref name="maxTablesPerLevel"/> is not greater than 1,
/// or <paramref name="indexCacheDepth"/> is outside [8, 28].
/// </exception>
public TableIndex(string directory, IHasher lowHasher, IHasher highHasher,
	Func<IMemTable> memTableFactory, Func<TFReaderLease> tfReaderFactory, byte ptableVersion,
	int maxAutoMergeIndexLevel, int pTableMaxReaderCount, int maxSizeForMemory = 1000000,
	int maxTablesPerLevel = 4, bool additionalReclaim = false, bool inMem = false,
	bool skipIndexVerify = false, int indexCacheDepth = 16, int initializationThreads = 1) {
	Ensure.NotNullOrEmpty(directory, nameof(directory));
	Ensure.NotNull(memTableFactory, nameof(memTableFactory));
	Ensure.NotNull(lowHasher, nameof(lowHasher));
	Ensure.NotNull(highHasher, nameof(highHasher));
	Ensure.NotNull(tfReaderFactory, nameof(tfReaderFactory));
	Ensure.Positive(initializationThreads, nameof(initializationThreads));
	Ensure.Positive(pTableMaxReaderCount, nameof(pTableMaxReaderCount));
	if (maxTablesPerLevel <= 1) {
		throw new ArgumentOutOfRangeException(nameof(maxTablesPerLevel));
	}
	if (indexCacheDepth > 28 || indexCacheDepth < 8) {
		throw new ArgumentOutOfRangeException(nameof(indexCacheDepth));
	}

	_directory = directory;
	_memTableFactory = memTableFactory;
	_tfReaderFactory = tfReaderFactory;
	_fileNameProvider = new GuidFilenameProvider(directory);
	_maxSizeForMemory = maxSizeForMemory;
	_maxTablesPerLevel = maxTablesPerLevel;
	_additionalReclaim = additionalReclaim;
	_inMem = inMem;
	// A forced verify always wins over the caller's request to skip.
	_skipIndexVerify = !ShouldForceIndexVerify() && skipIndexVerify;
	_indexCacheDepth = indexCacheDepth;
	_initializationThreads = initializationThreads;
	_ptableVersion = ptableVersion;
	// (-1, -1, 0) presumably means "no checkpoints yet, level 0" — confirm against TableItem.
	_awaitingMemTables = new List<TableItem> { new TableItem(_memTableFactory(), -1, -1, 0) };

	_lowHasher = lowHasher;
	_highHasher = highHasher;
	_maxAutoMergeIndexLevel = maxAutoMergeIndexLevel;
	_pTableMaxReaderCount = pTableMaxReaderCount;
}
/// <summary>
/// Merges every table that sits above the automatic-merge levels into a single PTable,
/// placing the result one level above the automatic-merge ceiling. Returns the new map
/// plus the tables made obsolete; returns the current map unchanged when there is
/// nothing (or only one table) to merge.
/// </summary>
/// <param name="prepareCheckpoint">Prepare checkpoint recorded in the new map; must be non-negative.</param>
/// <param name="commitCheckpoint">Commit checkpoint recorded in the new map; must be non-negative.</param>
/// <param name="upgradeHash">Upgrades an entry's hash when migrating PTable versions.</param>
/// <param name="existsAt">Predicate deciding whether an index entry is still live.</param>
/// <param name="recordExistsAt">Resolves an entry to its stream and liveness.</param>
/// <param name="filenameProvider">Supplies the file name for the merged table.</param>
/// <param name="version">PTable version to write.</param>
public MergeResult AddPTableForManualMerge(long prepareCheckpoint, long commitCheckpoint,
	Func<string, ulong, ulong> upgradeHash, Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<string, bool>> recordExistsAt, IIndexFilenameProvider filenameProvider,
	byte version, int indexCacheDepth = 16, bool skipIndexVerify = false) {
	Ensure.Nonnegative(prepareCheckpoint, nameof(prepareCheckpoint));
	Ensure.Nonnegative(commitCheckpoint, nameof(commitCheckpoint));

	var tables = CopyFrom(_map);
	// BUG FIX: this was previously '<', which let tables.Count == _maxTableLevelsForAutomaticMerge
	// fall through — Skip() below would then produce an empty merge set, MergeTo would be asked
	// to merge nothing, and tables[_maxTableLevelsForAutomaticMerge] would be out of range.
	// '<=' matches the equivalent guard in TryManualMerge.
	if (tables.Count <= _maxTableLevelsForAutomaticMerge) {
		return new MergeResult(this, new List<PTable>());
	}

	var toDelete = new List<PTable>();
	// Everything above the automatic-merge levels is merged into one table.
	var tablesToMerge = tables.Skip(_maxTableLevelsForAutomaticMerge).SelectMany(a => a).ToList();
	if (tablesToMerge.Count == 1) {
		return new MergeResult(this, new List<PTable>());
	}

	var filename = filenameProvider.GetFilenameNewTable();
	PTable mergedTable = PTable.MergeTo(tablesToMerge, filename, upgradeHash, existsAt, recordExistsAt,
		version, ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount,
		indexCacheDepth, skipIndexVerify);

	// Remove all levels above the ceiling, then re-add the merged table just above it.
	for (int i = tables.Count - 1; i > _maxTableLevelsForAutomaticMerge; i--) {
		tables.RemoveAt(i);
	}
	tables[_maxTableLevelsForAutomaticMerge].Clear();
	AddTableToTables(tables, _maxTableLevelsForAutomaticMerge + 1, mergedTable);
	toDelete.AddRange(tablesToMerge);

	var indexMap = new IndexMap(Version, tables, prepareCheckpoint, commitCheckpoint,
		_maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount);
	return new MergeResult(indexMap, toDelete);
}
/// <summary>
/// Attempts to merge every table above the automatic-merge levels into one PTable placed
/// just above the automatic-merge ceiling. Returns the current map unchanged (merged = false)
/// when nothing, or only a single table, sits above the ceiling.
/// </summary>
public MergeResult TryManualMerge<TStreamId>(
	Func<TStreamId, ulong, ulong> upgradeHash,
	Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<TStreamId, bool>> recordExistsAt,
	IIndexFilenameProvider filenameProvider,
	byte version,
	int indexCacheDepth = 16,
	bool skipIndexVerify = false,
	bool useBloomFilter = true,
	int lruCacheSize = 1_000_000) {
	// Work on a copy so the live map stays untouched until the result is applied.
	var levels = CopyFrom(_map);

	// Nothing above the automatic-merge ceiling: no manual merge possible.
	if (levels.Count <= _maxTableLevelsForAutomaticMerge)
		return new MergeResult(this, new List<PTable>(), false, false);

	// Every table above the ceiling is a candidate for one combined merge.
	var candidates = levels.Skip(_maxTableLevelsForAutomaticMerge).SelectMany(x => x).ToList();
	if (candidates.Count == 1)
		return new MergeResult(this, new List<PTable>(), false, false);

	var outputFile = filenameProvider.GetFilenameNewTable();
	var combined = PTable.MergeTo(candidates, outputFile, upgradeHash, existsAt, recordExistsAt,
		version, ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount,
		indexCacheDepth, skipIndexVerify, useBloomFilter, lruCacheSize);

	// Drop all levels above the ceiling, then re-add the combined table just above it.
	for (var i = levels.Count - 1; i > _maxTableLevelsForAutomaticMerge; i--)
		levels.RemoveAt(i);
	levels[_maxTableLevelsForAutomaticMerge].Clear();
	AddTableToTables(levels, _maxTableLevelsForAutomaticMerge + 1, combined);

	var obsolete = new List<PTable>(candidates);

	var newMap = new IndexMap(Version, levels, PrepareCheckpoint, CommitCheckpoint,
		_maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount);
	return new MergeResult(newMap, obsolete, true, false);
}
/// <summary>
/// Merges at most one over-full level (within the automatic-merge range) into the level
/// above it. When a second over-full level is found after a merge has already happened,
/// the method stops and reports that more merging is possible via the result's
/// CanMergeAny flag, leaving that level for a subsequent call.
/// </summary>
public MergeResult TryMergeOneLevel<TStreamId>(
	Func<TStreamId, ulong, ulong> upgradeHash,
	Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<TStreamId, bool>> recordExistsAt,
	IIndexFilenameProvider filenameProvider,
	byte version,
	int indexCacheDepth = 16,
	bool skipIndexVerify = false,
	bool useBloomFilter = true,
	int lruCacheSize = 1_000_000) {
	var levels = CopyFrom(_map);
	var merged = false;
	var moreToMerge = false;
	var obsolete = new List<PTable>();

	// Only levels within the automatic-merge range are considered.
	var levelLimit = Math.Min(levels.Count, _maxTableLevelsForAutomaticMerge);
	for (var lvl = 0; lvl < levelLimit; lvl++) {
		if (levels[lvl].Count < _maxTablesPerLevel)
			continue;
		if (merged) {
			// Already merged one level this call; report the rest for later.
			moreToMerge = true;
			break;
		}
		var targetFile = filenameProvider.GetFilenameNewTable();
		var combined = PTable.MergeTo(levels[lvl], targetFile, upgradeHash, existsAt, recordExistsAt,
			version, ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount,
			indexCacheDepth, skipIndexVerify, useBloomFilter, lruCacheSize);
		merged = true;
		AddTableToTables(levels, lvl + 1, combined);
		obsolete.AddRange(levels[lvl]);
		levels[lvl].Clear();
	}

	var newMap = new IndexMap(Version, levels, PrepareCheckpoint, CommitCheckpoint,
		_maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount);
	return new MergeResult(newMap, obsolete, merged, moreToMerge);
}
/// <summary>
/// Adds <paramref name="tableToAdd"/> to <paramref name="indexMap"/> and, while the
/// resulting map reports that merging is possible, merges one level at a time,
/// accumulating every table made obsolete along the way.
/// </summary>
/// <remarks>
/// BUG FIX: <paramref name="prepareCheckpoint"/> and <paramref name="commitCheckpoint"/>
/// were typed <c>int</c> while checkpoints are <c>long</c> everywhere else in this file —
/// transaction-file positions beyond int.MaxValue would not have been representable.
/// Widening to <c>long</c> is source-compatible for existing callers.
/// </remarks>
public static MergeResult AddAndMergePTable(
	this IndexMap indexMap,
	PTable tableToAdd,
	long prepareCheckpoint,
	long commitCheckpoint,
	Func<string, ulong, ulong> upgradeHash,
	Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<string, bool>> recordExistsAt,
	IIndexFilenameProvider filenameProvider,
	byte version,
	int indexCacheDepth = 16,
	bool skipIndexVerify = false,
	bool useBloomFilter = true,
	int lruCacheSize = 1_000_000) {
	var addResult = indexMap.AddPTable(tableToAdd, prepareCheckpoint, commitCheckpoint);
	if (!addResult.CanMergeAny) {
		return new MergeResult(addResult.NewMap, new List<PTable>(), false, false);
	}

	var toDelete = new List<PTable>();
	MergeResult mergeResult;
	IndexMap curMap = addResult.NewMap;
	// Merge one level per iteration until the map reports no further merges.
	do {
		mergeResult = curMap.TryMergeOneLevel(
			upgradeHash, existsAt, recordExistsAt,
			filenameProvider, version,
			indexCacheDepth, skipIndexVerify,
			useBloomFilter, lruCacheSize);
		curMap = mergeResult.MergedMap;
		toDelete.AddRange(mergeResult.ToDelete);
	} while (mergeResult.CanMergeAny);

	return new MergeResult(curMap, toDelete, true, false);
}
/// <summary>
/// Creates a table index using a low/high hasher pair and an explicit PTable version,
/// seeding the awaiting list with one empty mem table.
/// </summary>
/// <param name="directory">Directory holding the index files; must be non-null and non-empty.</param>
/// <param name="lowHasher">Hasher producing the low part of a stream hash (presumably; confirm with callers).</param>
/// <param name="highHasher">Hasher producing the high part of a stream hash (presumably; confirm with callers).</param>
/// <param name="memTableFactory">Factory for fresh mem tables; invoked once here for the initial table.</param>
/// <param name="tfReaderFactory">Factory producing transaction-file reader leases.</param>
/// <param name="ptableVersion">On-disk PTable format version to write.</param>
/// <param name="maxSizeForMemory">Entry count at which a mem table is considered full.</param>
/// <param name="maxTablesPerLevel">Tables allowed per level before merging; must be greater than 1.</param>
/// <param name="additionalReclaim">Enables additional reclaim of already-persisted mem tables.</param>
/// <param name="inMem">When true, the index is kept purely in memory.</param>
/// <param name="indexCacheDepth">PTable cache depth; must be in the range [8, 28].</param>
/// <exception cref="ArgumentOutOfRangeException">
/// When <paramref name="maxTablesPerLevel"/> is not greater than 1,
/// or <paramref name="indexCacheDepth"/> is outside [8, 28].
/// </exception>
public TableIndex(string directory, IHasher lowHasher, IHasher highHasher,
	Func<IMemTable> memTableFactory, Func<TFReaderLease> tfReaderFactory, byte ptableVersion,
	int maxSizeForMemory = 1000000, int maxTablesPerLevel = 4, bool additionalReclaim = false,
	bool inMem = false, int indexCacheDepth = 16) {
	Ensure.NotNullOrEmpty(directory, nameof(directory));
	Ensure.NotNull(memTableFactory, nameof(memTableFactory));
	Ensure.NotNull(lowHasher, nameof(lowHasher));
	Ensure.NotNull(highHasher, nameof(highHasher));
	Ensure.NotNull(tfReaderFactory, nameof(tfReaderFactory));
	if (maxTablesPerLevel <= 1) {
		throw new ArgumentOutOfRangeException(nameof(maxTablesPerLevel));
	}
	if (indexCacheDepth > 28 || indexCacheDepth < 8) {
		throw new ArgumentOutOfRangeException(nameof(indexCacheDepth));
	}

	_directory = directory;
	_memTableFactory = memTableFactory;
	_tfReaderFactory = tfReaderFactory;
	_fileNameProvider = new GuidFilenameProvider(directory);
	_maxSizeForMemory = maxSizeForMemory;
	_maxTablesPerLevel = maxTablesPerLevel;
	_additionalReclaim = additionalReclaim;
	_inMem = inMem;
	_indexCacheDepth = indexCacheDepth;
	_ptableVersion = ptableVersion;
	// (-1, -1) presumably means "no prepare/commit checkpoint yet" — confirm against TableItem.
	_awaitingMemTables = new List<TableItem> { new TableItem(_memTableFactory(), -1, -1) };

	_lowHasher = lowHasher;
	_highHasher = highHasher;
}
/// <summary>
/// Creates a minimal table index rooted at <paramref name="directory"/>, seeding the
/// awaiting list with one empty mem table.
/// </summary>
/// <param name="directory">Directory holding the index files; must be non-null and non-empty.</param>
/// <param name="memTableFactory">Factory for fresh mem tables; invoked once here for the initial table.</param>
/// <param name="maxSizeForMemory">Entry count at which a mem table is considered full.</param>
/// <param name="maxTablesPerLevel">Tables allowed per level before merging; must be greater than 1.</param>
/// <param name="additionalReclaim">Enables additional reclaim of already-persisted mem tables.</param>
/// <exception cref="ArgumentOutOfRangeException">When <paramref name="maxTablesPerLevel"/> is not greater than 1.</exception>
public TableIndex(string directory, Func<IMemTable> memTableFactory, int maxSizeForMemory = 1000000,
	int maxTablesPerLevel = 4, bool additionalReclaim = false) {
	Ensure.NotNullOrEmpty(directory, nameof(directory));
	Ensure.NotNull(memTableFactory, nameof(memTableFactory));
	if (maxTablesPerLevel <= 1) {
		throw new ArgumentOutOfRangeException(nameof(maxTablesPerLevel));
	}

	_directory = directory;
	_memTableFactory = memTableFactory;
	_fileNameProvider = new GuidFilenameProvider(directory);
	_maxSizeForMemory = maxSizeForMemory;
	_maxTablesPerLevel = maxTablesPerLevel;
	_additionalReclaim = additionalReclaim;
	// (-1, -1) presumably means "no prepare/commit checkpoint yet" — confirm against TableItem.
	_awaitingMemTables = new List<TableItem> { new TableItem(_memTableFactory(), -1, -1) };
}
/// <summary>
/// Scavenges the PTable identified by <paramref name="toScavenge"/> in a copy of the
/// current map, replacing it with its scavenged version and returning a success result
/// with the space saved. Returns a failed result when PTable.Scavenged yields null;
/// throws when the id is not present in the map at all.
/// </summary>
/// <exception cref="ArgumentException">When no table in the map has the given id.</exception>
public ScavengeResult Scavenge<TStreamId>(Guid toScavenge, CancellationToken ct,
	Func<TStreamId, ulong, ulong> upgradeHash, Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<TStreamId, bool>> recordExistsAt, IIndexFilenameProvider filenameProvider,
	byte version, int indexCacheDepth = 16, bool skipIndexVerify = false,
	bool useBloomFilter = true, int lruCacheSize = 1_000_000) {
	// Operate on a copy so the live map is untouched until the result is applied.
	var mapCopy = CopyFrom(_map);
	for (var level = 0; level < mapCopy.Count; level++) {
		var tablesAtLevel = mapCopy[level];
		for (var position = 0; position < tablesAtLevel.Count; position++) {
			var candidate = tablesAtLevel[position];
			if (candidate.Id != toScavenge)
				continue;

			var targetFile = filenameProvider.GetFilenameNewTable();
			var replacement = PTable.Scavenged(candidate, targetFile, upgradeHash, existsAt,
				recordExistsAt, version, out var spaceSaved,
				ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount,
				indexCacheDepth, skipIndexVerify, useBloomFilter, lruCacheSize, ct);
			if (replacement == null)
				return ScavengeResult.Failed(candidate, level, position);

			tablesAtLevel[position] = replacement;
			var newMap = new IndexMap(Version, mapCopy, PrepareCheckpoint, CommitCheckpoint,
				_maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount);
			return ScavengeResult.Success(newMap, candidate, replacement, spaceSaved, level, position);
		}
	}

	throw new ArgumentException("Unable to find table in map.", nameof(toScavenge));
}
/// <summary>
/// Adds <paramref name="tableToAdd"/> at level 0 of a copy of the current map and cascades
/// merges through the automatic-merge levels: every level holding at least the per-level
/// maximum is merged into the level above. Returns the resulting map and the tables made
/// obsolete by the merges.
/// </summary>
public MergeResult AddPTableForAutomaticMerge(PTable tableToAdd, long prepareCheckpoint,
	long commitCheckpoint, Func<string, ulong, ulong> upgradeHash, Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<string, bool>> recordExistsAt, IIndexFilenameProvider filenameProvider,
	byte version, int indexCacheDepth = 16, bool skipIndexVerify = false) {
	Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint");
	Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint");

	// Operate on a copy so the live map is untouched until the result is applied.
	var levels = CopyFrom(_map);
	AddTableToTables(levels, 0, tableToAdd);

	var obsolete = new List<PTable>();
	// Only levels within the automatic-merge range take part; ascending order lets a
	// merge at level N immediately trigger a follow-up merge at level N + 1.
	var levelLimit = Math.Min(levels.Count, _maxTableLevelsForAutomaticMerge);
	for (var lvl = 0; lvl < levelLimit; lvl++) {
		if (levels[lvl].Count < _maxTablesPerLevel)
			continue;
		var targetFile = filenameProvider.GetFilenameNewTable();
		var combined = PTable.MergeTo(levels[lvl], targetFile, upgradeHash, existsAt, recordExistsAt,
			version, ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount,
			indexCacheDepth, skipIndexVerify);
		AddTableToTables(levels, lvl + 1, combined);
		obsolete.AddRange(levels[lvl]);
		levels[lvl].Clear();
	}

	var newMap = new IndexMap(Version, levels, prepareCheckpoint, commitCheckpoint,
		_maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount);
	return new MergeResult(newMap, obsolete);
}
/// <summary>
/// Routes an added PTable either to the automatic-merge path (for levels below the
/// automatic-merge ceiling) or to the manual-merge path. NOTE(review): the sibling
/// overload treats level == _maxTableLevelsForAutomaticMerge as automatic; here it is
/// routed to manual — confirm which boundary callers expect.
/// </summary>
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint,
	Func<string, ulong, ulong> upgradeHash, Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<string, bool>> recordExistsAt, IIndexFilenameProvider filenameProvider,
	byte version, int level, int indexCacheDepth = 16, bool skipIndexVerify = false) {
	var isAutomatic = level < _maxTableLevelsForAutomaticMerge;
	if (!isAutomatic) {
		// A manual merge never adds new entries — it only combines existing files —
		// so the index's prepare/commit checkpoints are carried over unchanged.
		return AddPTableForManualMerge(PrepareCheckpoint, CommitCheckpoint,
			upgradeHash, existsAt, recordExistsAt, filenameProvider, version,
			indexCacheDepth, skipIndexVerify);
	}
	return AddPTableForAutomaticMerge(tableToAdd, prepareCheckpoint, commitCheckpoint,
		upgradeHash, existsAt, recordExistsAt, filenameProvider, version,
		indexCacheDepth, skipIndexVerify);
}
/// <summary>
/// Routes an added PTable either to the automatic-merge path (for levels at or below the
/// automatic-merge ceiling) or to the manual-merge path (levels above it).
/// </summary>
/// <remarks>
/// A previously commented-out branch handled _maxTableLevelsForAutomaticMerge == 0 by
/// comparing tableToAdd against the first table of the map (manual merge passes the first
/// table at the maximum automerge level, so identity told the paths apart); even with auto
/// merge disabled, the automatic path is still responsible for writing out newly persisted
/// mem tables. That dead code is removed; the level comparison below covers the live cases.
/// </remarks>
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint,
	Func<string, ulong, ulong> upgradeHash, Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<string, bool>> recordExistsAt, IIndexFilenameProvider filenameProvider,
	byte version, int level, int indexCacheDepth = 16, bool skipIndexVerify = false) {
	bool isManual = level > _maxTableLevelsForAutomaticMerge;
	if (isManual) {
		// A manual merge never adds new entries — it only combines existing files —
		// so the index's prepare/commit checkpoints are carried over unchanged.
		return AddPTableForManualMerge(PrepareCheckpoint, CommitCheckpoint,
			upgradeHash, existsAt, recordExistsAt, filenameProvider, version,
			indexCacheDepth, skipIndexVerify);
	}

	return AddPTableForAutomaticMerge(tableToAdd, prepareCheckpoint, commitCheckpoint,
		upgradeHash, existsAt, recordExistsAt, filenameProvider, version,
		indexCacheDepth, skipIndexVerify);
}
/// <summary>
/// Scavenges the PTable identified by <paramref name="toScavenge"/> in a copy of the
/// current map, replacing it with its scavenged version and returning a success result
/// with the space saved. Returns a failed result when PTable.Scavenged yields null;
/// throws when the id is not present in the map at all.
/// </summary>
/// <exception cref="ArgumentException">When no table in the map has the given id.</exception>
public ScavengeResult Scavenge(Guid toScavenge, CancellationToken ct,
	Func<string, ulong, ulong> upgradeHash, Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<string, bool>> recordExistsAt, IIndexFilenameProvider filenameProvider,
	byte version, int indexCacheDepth = 16, bool skipIndexVerify = false) {
	// Operate on a copy so the live map is untouched until the result is applied.
	var mapCopy = CopyFrom(_map);
	for (var level = 0; level < mapCopy.Count; level++) {
		var tablesAtLevel = mapCopy[level];
		for (var position = 0; position < tablesAtLevel.Count; position++) {
			var candidate = tablesAtLevel[position];
			if (candidate.Id != toScavenge)
				continue;

			var targetFile = filenameProvider.GetFilenameNewTable();
			var replacement = PTable.Scavenged(candidate, targetFile, upgradeHash, existsAt,
				recordExistsAt, version, out var spaceSaved,
				indexCacheDepth, skipIndexVerify, ct);
			if (replacement == null)
				return ScavengeResult.Failed(candidate, level, position);

			tablesAtLevel[position] = replacement;
			var newMap = new IndexMap(Version, mapCopy, PrepareCheckpoint, CommitCheckpoint,
				_maxTablesPerLevel);
			return ScavengeResult.Success(newMap, candidate, replacement, spaceSaved, level, position);
		}
	}

	throw new ArgumentException("Unable to find table in map.", nameof(toScavenge));
}
/// <summary>
/// Adds <paramref name="tableToAdd"/> at level 0 of a copy of the current map, then
/// walks the levels upward: whenever a level holds at least the per-level maximum, its
/// tables are merged into a single table placed on the next level. Returns the new map
/// together with the tables made obsolete by the merges.
/// </summary>
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint,
	Func<IndexEntry, bool> recordExistsAt, IIndexFilenameProvider filenameProvider) {
	Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint");
	Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint");

	// Never mutate the live map; all work happens on a copy.
	var copy = CopyFrom(_map);
	CreateIfNeeded(0, copy);
	copy[0].Add(tableToAdd);

	var removed = new List<PTable>();
	var current = 0;
	// A merge at the current level can overflow the next one, which the following
	// iteration picks up, so merges cascade naturally.
	while (current < copy.Count) {
		if (copy[current].Count >= _maxTablesPerLevel) {
			var mergedFile = filenameProvider.GetFilenameNewTable();
			var mergedTable = PTable.MergeTo(copy[current], mergedFile, recordExistsAt);
			CreateIfNeeded(current + 1, copy);
			copy[current + 1].Add(mergedTable);
			removed.AddRange(copy[current]);
			copy[current].Clear();
		}
		current++;
	}

	var resultMap = new IndexMap(Version, copy, prepareCheckpoint, commitCheckpoint, _maxTablesPerLevel);
	return new MergeResult(resultMap, removed);
}