/// <summary>
/// Replaces the PTable identified by <paramref name="toScavenge"/> with a scavenged
/// copy and returns a new map reflecting the swap.
/// </summary>
/// <param name="toScavenge">Id of the table to scavenge; must exist in the map.</param>
/// <param name="ct">Cancellation token forwarded to <c>PTable.Scavenged</c>.</param>
/// <returns>
/// <c>ScavengeResult.Success</c> with the new map and space saved, or
/// <c>ScavengeResult.Failed</c> when <c>PTable.Scavenged</c> yields null.
/// </returns>
/// <exception cref="ArgumentException">No table with the given id is present.</exception>
public ScavengeResult Scavenge(Guid toScavenge, CancellationToken ct, Func<string, ulong, ulong> upgradeHash,
	Func<IndexEntry, bool> existsAt, Func<IndexEntry, Tuple<string, bool>> recordExistsAt,
	IIndexFilenameProvider filenameProvider, byte version, int indexCacheDepth = 16,
	bool skipIndexVerify = false) {
	var copiedMap = CopyFrom(_map);
	for (int level = 0; level < copiedMap.Count; level++) {
		var tablesAtLevel = copiedMap[level];
		for (int position = 0; position < tablesAtLevel.Count; position++) {
			var candidate = tablesAtLevel[position];
			if (candidate.Id != toScavenge)
				continue;

			var newTableName = filenameProvider.GetFilenameNewTable();
			long spaceSaved;
			var newTable = PTable.Scavenged(candidate, newTableName, upgradeHash, existsAt,
				recordExistsAt, version, out spaceSaved, ESConsts.PTableInitialReaderCount,
				_pTableMaxReaderCount, indexCacheDepth, skipIndexVerify, ct);

			// Null means scavenging produced no replacement table (presumably cancelled
			// or not worthwhile — semantics live in PTable.Scavenged).
			if (newTable == null)
				return ScavengeResult.Failed(candidate, level, position);

			tablesAtLevel[position] = newTable;
			var newMap = new IndexMap(Version, copiedMap, PrepareCheckpoint, CommitCheckpoint,
				_maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount);
			return ScavengeResult.Success(newMap, candidate, newTable, spaceSaved, level, position);
		}
	}

	throw new ArgumentException("Unable to find table in map.", nameof(toScavenge));
}
/// <summary>
/// Adds a table at level 0 and cascades merges upward: any level (below the
/// automatic-merge ceiling) that reaches <c>_maxTablesPerLevel</c> is merged
/// into a single table at the next level.
/// </summary>
/// <returns>A <c>MergeResult</c> holding the new map and the tables made obsolete by merging.</returns>
public MergeResult AddPTableForAutomaticMerge(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint,
	Func<string, ulong, ulong> upgradeHash, Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<string, bool>> recordExistsAt, IIndexFilenameProvider filenameProvider,
	byte version, int indexCacheDepth = 16, bool skipIndexVerify = false) {
	Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint");
	Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint");

	var newTables = CopyFrom(_map);
	AddTableToTables(newTables, 0, tableToAdd);

	var obsolete = new List<PTable>();
	// Never auto-merge at or above the configured ceiling.
	var levelsToInspect = Math.Min(newTables.Count, _maxTableLevelsForAutomaticMerge);
	for (int level = 0; level < levelsToInspect; level++) {
		if (newTables[level].Count < _maxTablesPerLevel)
			continue;

		var mergedFileName = filenameProvider.GetFilenameNewTable();
		var merged = PTable.MergeTo(newTables[level], mergedFileName, upgradeHash, existsAt,
			recordExistsAt, version, indexCacheDepth, skipIndexVerify);
		AddTableToTables(newTables, level + 1, merged);
		obsolete.AddRange(newTables[level]);
		newTables[level].Clear();
	}

	var mergedMap = new IndexMap(Version, newTables, prepareCheckpoint, commitCheckpoint,
		_maxTablesPerLevel, _maxTableLevelsForAutomaticMerge);
	return new MergeResult(mergedMap, obsolete);
}
/// <summary>
/// Manually merges every table at or above the automatic-merge ceiling into a
/// single table placed one level above that ceiling.
/// </summary>
/// <returns>
/// A <c>MergeResult</c> with the new map and the merged-away tables, or the current
/// map unchanged when there is nothing at or above the ceiling to merge.
/// </returns>
public MergeResult AddPTableForManualMerge(long prepareCheckpoint, long commitCheckpoint,
	Func<string, ulong, ulong> upgradeHash, Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<string, bool>> recordExistsAt, IIndexFilenameProvider filenameProvider,
	byte version, int indexCacheDepth = 16, bool skipIndexVerify = false) {
	Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint");
	Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint");

	var tables = CopyFrom(_map);
	// FIX: was `<`. When Count == _maxTableLevelsForAutomaticMerge there are no tables
	// at or above the manual-merge boundary: Skip(...) would yield an empty merge list
	// and tables[_maxTableLevelsForAutomaticMerge].Clear() below would throw
	// ArgumentOutOfRangeException (valid indexes are 0..Count-1).
	if (tables.Count <= _maxTableLevelsForAutomaticMerge)
		return new MergeResult(this, new List<PTable>());

	var toDelete = new List<PTable>();
	// Everything at or above the ceiling participates in the manual merge.
	var tablesToMerge = tables.Skip(_maxTableLevelsForAutomaticMerge).SelectMany(a => a).ToList();
	var filename = filenameProvider.GetFilenameNewTable();
	PTable mergedTable = PTable.MergeTo(tablesToMerge, filename, upgradeHash, existsAt, recordExistsAt,
		version, indexCacheDepth, skipIndexVerify);

	// Drop all levels strictly above the ceiling, empty the ceiling level itself,
	// then park the merged table one level above the ceiling.
	for (int i = tables.Count - 1; i > _maxTableLevelsForAutomaticMerge; i--) {
		tables.RemoveAt(i);
	}

	tables[_maxTableLevelsForAutomaticMerge].Clear();
	AddTableToTables(tables, _maxTableLevelsForAutomaticMerge + 1, mergedTable);
	toDelete.AddRange(tablesToMerge);

	var indexMap = new IndexMap(Version, tables, prepareCheckpoint, commitCheckpoint,
		_maxTablesPerLevel, _maxTableLevelsForAutomaticMerge);
	return new MergeResult(indexMap, toDelete);
}
/// <summary>
/// Adds a table at level 0 and cascades merges upward through every level that
/// reaches <c>_maxTablesPerLevel</c> (versioned overload with hash upgrade support).
/// </summary>
/// <returns>A <c>MergeResult</c> holding the new map and the tables made obsolete by merging.</returns>
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint,
	Func<string, ulong, ulong> upgradeHash, Func<IndexEntry, bool> existsAt,
	Func<IndexEntry, Tuple<string, bool>> recordExistsAt, IIndexFilenameProvider filenameProvider,
	byte version, int indexCacheDepth = 16) {
	Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint");
	Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint");

	var newTables = CopyFrom(_map);
	CreateIfNeeded(0, newTables);
	newTables[0].Add(tableToAdd);

	var obsolete = new List<PTable>();
	for (int level = 0; level < newTables.Count; level++) {
		if (newTables[level].Count < _maxTablesPerLevel)
			continue;

		var mergedFileName = filenameProvider.GetFilenameNewTable();
		PTable merged = PTable.MergeTo(newTables[level], mergedFileName, upgradeHash, existsAt,
			recordExistsAt, version, indexCacheDepth);
		CreateIfNeeded(level + 1, newTables);
		newTables[level + 1].Add(merged);
		obsolete.AddRange(newTables[level]);
		newTables[level].Clear();
	}

	var mergedMap = new IndexMap(Version, newTables, prepareCheckpoint, commitCheckpoint, _maxTablesPerLevel);
	return new MergeResult(mergedMap, obsolete);
}
/// <summary>
/// Background worker that drains <c>_awaitingMemTables</c>: converts each queued
/// memtable to a PTable (or reuses an already-converted one), folds it into the
/// index map, persists the map, and removes the processed item from the queue.
/// Stops when only one item remains (presumably the still-active memtable —
/// NOTE(review): confirm against the enqueueing code).
/// </summary>
private void ReadOffQueue() {
	try {
		while (true) {
			TableItem tableItem;
			lock (_awaitingTablesLock) {
				Log.Trace("Awaiting tables queue size is: {awaitingMemTables}.", _awaitingMemTables.Count);
				if (_awaitingMemTables.Count == 1) {
					return;
				}

				// Process from the tail (oldest enqueued item sits at the end).
				tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
			}

			PTable ptable;
			var memtable = tableItem.Table as IMemTable;
			if (memtable != null) {
				memtable.MarkForConversion();
				ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable(),
					_indexCacheDepth, _skipIndexVerify);
			} else {
				ptable = (PTable)tableItem.Table;
			}

			var indexmapFile = Path.Combine(_directory, IndexMapFilename);
			MergeResult mergeResult;
			using (var reader = _tfReaderFactory()) {
				mergeResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint,
					tableItem.CommitCheckpoint,
					(streamId, currentHash) => UpgradeHash(streamId, currentHash),
					entry => reader.ExistsAt(entry.Position),
					entry => ReadEntry(reader, entry.Position),
					_fileNameProvider, _ptableVersion, tableItem.Level,
					_indexCacheDepth, _skipIndexVerify);
			}

			_indexMap = mergeResult.MergedMap;
			_indexMap.SaveToFile(indexmapFile);

			lock (_awaitingTablesLock) {
				var memTables = _awaitingMemTables.ToList();
				var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
				memTables.Remove(corrTable);

				// parallel thread could already switch table,
				// so if we have another PTable instance with same ID,
				// we need to kill that instance as we added ours already
				if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable) {
					((PTable)corrTable.Table).MarkForDestruction();
				}

				Log.Trace("There are now {awaitingMemTables} awaiting tables.", memTables.Count);
				_awaitingMemTables = memTables;
			}

			mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
		}
	} catch (FileBeingDeletedException exc) {
		// FIX: this log message was split across a physical line break, which is not
		// valid inside a regular C# string literal; rejoined into a single-line literal.
		Log.ErrorException(exc,
			"Could not acquire chunk in TableIndex.ReadOffQueue. It is OK if node is shutting down.");
	} catch (Exception exc) {
		Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
		throw;
	} finally {
		// Always signal completion so waiters on _backgroundRunningEvent are released,
		// even when the loop exits via exception.
		lock (_awaitingTablesLock) {
			_backgroundRunning = false;
			_backgroundRunningEvent.Set();
		}
	}
}
/// <summary>
/// Adds a table at level 0 and cascades merges upward through every level that
/// reaches <c>_maxTablesPerLevel</c> (simple overload without hash upgrading).
/// </summary>
/// <returns>A <c>MergeResult</c> holding the new map and the tables made obsolete by merging.</returns>
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint,
	Func<IndexEntry, bool> recordExistsAt, IIndexFilenameProvider filenameProvider) {
	Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint");
	Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint");

	var newTables = CopyFrom(_map);
	CreateIfNeeded(0, newTables);
	newTables[0].Add(tableToAdd);

	var obsolete = new List<PTable>();
	for (int level = 0; level < newTables.Count; level++) {
		if (newTables[level].Count < _maxTablesPerLevel)
			continue;

		var mergedFileName = filenameProvider.GetFilenameNewTable();
		PTable merged = PTable.MergeTo(newTables[level], mergedFileName, recordExistsAt);
		CreateIfNeeded(level + 1, newTables);
		newTables[level + 1].Add(merged);
		obsolete.AddRange(newTables[level]);
		newTables[level].Clear();
	}

	var mergedMap = new IndexMap(Version, newTables, prepareCheckpoint, commitCheckpoint, _maxTablesPerLevel);
	return new MergeResult(mergedMap, obsolete);
}