public override void SetUp() {
    base.SetUp();
    // Start each test from a freshly persisted, empty index map.
    var indexMapPath = GetFilePathFor("indexfile");
    _filename = indexMapPath;
    _map = IndexMap.FromFile(indexMapPath, x => false);
    _map.SaveToFile(indexMapPath);
}
public override void TestFixtureSetUp() {
    base.TestFixtureSetUp();
    // Persist a fresh index map once for the whole fixture, using the
    // PTable version under test.
    var indexMapPath = GetFilePathFor("indexfile");
    _filename = indexMapPath;
    _map = IndexMap.FromFile(indexMapPath, _ptableVersion);
    _map.SaveToFile(indexMapPath);
}
private void ScavengeInternal(IIndexScavengerLog log, CancellationToken ct) {
    // Scavenges every PTable currently in the index map, one table at a time.
    // Each outcome (success, skip, cancel, failure) is reported to the log;
    // cancellation and unexpected errors are reported and then rethrown.
    foreach (var pTable in _indexMap.InOrder().ToList()) {
        var sw = Stopwatch.StartNew();
        try {
            ct.ThrowIfCancellationRequested();
            using (var reader = _tfReaderFactory()) {
                var indexMapPath = Path.Combine(_directory, IndexMapFilename);
                var result = _indexMap.Scavenge(pTable.Id, ct,
                    (streamId, currentHash) => UpgradeHash(streamId, currentHash),
                    entry => reader.ExistsAt(entry.Position),
                    entry => ReadEntry(reader, entry.Position),
                    _fileNameProvider, _ptableVersion, _indexCacheDepth, _skipIndexVerify);

                if (result.IsSuccess) {
                    // Swap in the scavenged map, persist it, then retire the old table.
                    _indexMap = result.ScavengedMap;
                    _indexMap.SaveToFile(indexMapPath);
                    result.OldTable.MarkForDestruction();

                    var entriesRemoved = result.OldTable.Count - result.NewTable.Count;
                    log.IndexTableScavenged(result.Level, result.Index, sw.Elapsed,
                        entriesRemoved, result.NewTable.Count, result.SpaceSaved);
                } else {
                    log.IndexTableNotScavenged(result.Level, result.Index, sw.Elapsed, pTable.Count, "");
                }
            }
        } catch (OperationCanceledException) {
            log.IndexTableNotScavenged(-1, -1, sw.Elapsed, pTable.Count, "Scavenge cancelled");
            throw;
        } catch (Exception ex) {
            log.IndexTableNotScavenged(-1, -1, sw.Elapsed, pTable.Count, ex.Message);
            throw;
        }
    }
}
private void ReadOffQueue() {
    // Background worker: drains _awaitingMemTables, converting each queued memtable
    // to a PTable and merging it into the index map until only the live memtable
    // (queue size 1) remains.
    try {
        while (true) {
            TableItem tableItem;
            lock (_awaitingTablesLock) {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                if (_awaitingMemTables.Count == 1) {
                    // Only the live memtable is left: nothing to merge, stop the worker.
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null) {
                // Queued item is still a memtable: persist it to a new PTable file first.
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable(), _indexCacheDepth);
            } else {
                ptable = (PTable)tableItem.Table;
            }

            var indexmapFile = Path.Combine(_directory, IndexMapFilename);
            MergeResult mergeResult;
            using (var reader = _tfReaderFactory()) {
                mergeResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint,
                    (streamId, currentHash) => UpgradeHash(streamId, currentHash),
                    entry => reader.ExistsAt(entry.Position),
                    entry => ReadEntry(reader, entry.Position),
                    _fileNameProvider, _ptableVersion, _indexCacheDepth);
            }
            _indexMap = mergeResult.MergedMap;
            _indexMap.SaveToFile(indexmapFile);

            lock (_awaitingTablesLock) {
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable) {
                    ((PTable)corrTable.Table).MarkForDestruction();
                }
                Log.Trace("There are now {0} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    } catch (FileBeingDeletedException exc) {
        // BUG FIX: this message literal previously contained a raw line break, which is
        // not valid inside a non-verbatim C# string literal (compile error). Joined into
        // a single line, matching the wording used by the other ReadOffQueue variant.
        Log.ErrorException(exc, "Could not acquire chunk in TableIndex.ReadOffQueue. It is OK if node is shutting down.");
    } catch (Exception exc) {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    }
}
// Background worker loop: converts queued memtables into PTables and merges them into
// the index map. Before each switch the current indexmap file is backed up and the
// directory is flagged "unsafe", so a crash mid-switch should only require a partial
// (not full) index rebuild.
private void ReadOffQueue() {
    try {
        while (true) {
            TableItem tableItem; //ISearchTable table;
            lock (_awaitingTablesLock) {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                if (_awaitingMemTables.Count == 1) {
                    // Only the live memtable is left: nothing to merge, stop the worker.
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }
            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null) {
                // Queued item is still a memtable: persist it to a new PTable file first.
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable());
            } else {
                ptable = (PTable)tableItem.Table;
            }
            // backup current version of IndexMap in case following switch will be left in unsafe state
            // this will allow to rebuild just part of index
            var backupFile = Path.Combine(_directory, IndexMapBackupFilename);
            var indexmapFile = Path.Combine(_directory, IndexMapFilename);
            // Best effort only (EatException): a failed backup must not abort the merge.
            Helper.EatException(() => {
                if (File.Exists(backupFile)) {
                    File.Delete(backupFile);
                }
                if (File.Exists(indexmapFile)) {
                    // same as File.Copy(indexmapFile, backupFile); but with forced flush
                    var indexmapContent = File.ReadAllBytes(indexmapFile);
                    using (var f = File.Create(backupFile)) {
                        f.Write(indexmapContent, 0, indexmapContent.Length);
                        f.FlushToDisk();
                    }
                }
            });
            // Mark the directory unsafe for the duration of the switch; cleared again
            // below once the new indexmap has been saved.
            EnterUnsafeState(_directory);
            MergeResult mergeResult;
            using (var reader = _tfReaderFactory()) {
                mergeResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint,
                    entry => reader.ExistsAt(entry.Position), _fileNameProvider);
            }
            _indexMap = mergeResult.MergedMap;
            _indexMap.SaveToFile(indexmapFile);
            LeaveUnsafeState(_directory);
            lock (_awaitingTablesLock) {
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable) {
                    ((PTable)corrTable.Table).MarkForDestruction();
                }
                Log.Trace("There are now {0} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
            // We'll keep indexmap.backup in case of crash. In case of crash we hope that all necessary
            // PTables for previous version of IndexMap are still there, so we can rebuild
            // from last step, not to do full rebuild.
            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    } catch (FileBeingDeletedException exc) {
        // Expected while the node is shutting down and TF chunks are being removed.
        Log.ErrorException(exc, "Couldn't acquire chunk in TableIndex.ReadOffQueue. It is ok if node is shutting down.");
    } catch (Exception exc) {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    }
}
// Background worker loop: optionally performs a pending manual merge, then drains
// the awaiting-tables queue, adding each new PTable to the index map and merging
// index levels until no more merges are possible. The running flag/event are always
// reset in the finally block, even if an exception escapes the loop.
private void ReadOffQueue() {
    try {
        while (true) {
            var indexmapFile = Path.Combine(_directory, IndexMapFilename);
            if (_isManualMergePending) {
                Log.Debug("Performing manual index merge.");
                // Clear the flag first; a merge requested while we run will be picked
                // up on the next loop iteration.
                _isManualMergePending = false;
                using (var reader = _tfReaderFactory()) {
                    var manualMergeResult = _indexMap.TryManualMerge(
                        (streamId, currentHash) => UpgradeHash(streamId, currentHash),
                        entry => reader.ExistsAt(entry.Position),
                        entry => ReadEntry(reader, entry.Position),
                        _fileNameProvider, _ptableVersion, _indexCacheDepth, _skipIndexVerify);
                    if (manualMergeResult.HasMergedAny) {
                        // Persist the merged map before destroying the source tables.
                        _indexMap = manualMergeResult.MergedMap;
                        _indexMap.SaveToFile(indexmapFile);
                        manualMergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
                    }
                    Log.Debug("Manual index merge completed: {numMergedPTables} PTable(s) merged.",
                        manualMergeResult.ToDelete.Count);
                }
            }
            TableItem tableItem; //ISearchTable table;
            lock (_awaitingTablesLock) {
                Log.Debug("Awaiting tables queue size is: {awaitingMemTables}.", _awaitingMemTables.Count);
                if (_awaitingMemTables.Count == 1) {
                    // Only the live memtable remains; flags are reset in finally.
                    return;
                }
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }
            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null) {
                // Queued item is still a memtable: persist it to a new PTable file first.
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable(),
                    ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount, _indexCacheDepth, _skipIndexVerify);
            } else {
                ptable = (PTable)tableItem.Table;
            }
            // Add first, then merge levels one at a time, persisting the map after
            // every successful step so a crash loses at most one step of work.
            var addResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint);
            _indexMap = addResult.NewMap;
            _indexMap.SaveToFile(indexmapFile);
            if (addResult.CanMergeAny) {
                using (var reader = _tfReaderFactory()) {
                    MergeResult mergeResult;
                    do {
                        mergeResult = _indexMap.TryMergeOneLevel(
                            (streamId, currentHash) => UpgradeHash(streamId, currentHash),
                            entry => reader.ExistsAt(entry.Position),
                            entry => ReadEntry(reader, entry.Position),
                            _fileNameProvider, _ptableVersion, _indexCacheDepth, _skipIndexVerify);
                        if (mergeResult.HasMergedAny) {
                            _indexMap = mergeResult.MergedMap;
                            _indexMap.SaveToFile(indexmapFile);
                            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
                        }
                    } while (mergeResult.CanMergeAny);
                }
            }
            lock (_awaitingTablesLock) {
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable) {
                    ((PTable)corrTable.Table).MarkForDestruction();
                }
                Log.Debug("There are now {awaitingMemTables} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
        }
    } catch (FileBeingDeletedException exc) {
        // Expected while the node is shutting down and TF chunks are being removed.
        Log.Error(exc, "Could not acquire chunk in TableIndex.ReadOffQueue. It is OK if node is shutting down.");
    } catch (Exception exc) {
        Log.Error(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    } finally {
        // Always clear the running flag and wake waiters, even on failure, so the
        // background worker can be restarted.
        lock (_awaitingTablesLock) {
            _backgroundRunning = false;
            _backgroundRunningEvent.Set();
        }
    }
}
private void ReadOffQueue() {
    // Drains the awaiting-tables queue in the background: converts memtables to
    // PTables, merges each one into the index map and persists the result.
    try {
        while (true) {
            TableItem item;
            lock (_awaitingTablesLock) {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                if (_awaitingMemTables.Count == 1) {
                    // Only the live memtable remains — worker goes idle.
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                item = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable newTable;
            var asMemTable = item.Table as IMemTable;
            if (asMemTable == null) {
                newTable = (PTable)item.Table;
            } else {
                // Queued item is still a memtable: persist it to a new PTable file.
                asMemTable.MarkForConversion();
                newTable = PTable.FromMemtable(asMemTable, _fileNameProvider.GetFilenameNewTable(), _indexCacheDepth);
            }

            var indexMapPath = Path.Combine(_directory, IndexMapFilename);
            MergeResult merge;
            using (var reader = _tfReaderFactory()) {
                merge = _indexMap.AddPTable(newTable, item.PrepareCheckpoint, item.CommitCheckpoint,
                    (streamId, currentHash) => UpgradeHash(streamId, currentHash),
                    entry => reader.ExistsAt(entry.Position),
                    entry => ReadEntry(reader, entry.Position),
                    _fileNameProvider, _ptableVersion, _indexCacheDepth);
            }
            _indexMap = merge.MergedMap;
            _indexMap.SaveToFile(indexMapPath);

            lock (_awaitingTablesLock) {
                var pending = _awaitingMemTables.ToList();
                var queued = pending.First(x => x.Table.Id == newTable.Id);
                pending.Remove(queued);
                // A parallel thread may have switched the table already; if the queue
                // held a different PTable instance with the same id, destroy that one —
                // ours is the instance that was merged in.
                var duplicate = queued.Table as PTable;
                if (duplicate != null && !ReferenceEquals(duplicate, newTable))
                    duplicate.MarkForDestruction();
                Log.Trace("There are now {0} awaiting tables.", pending.Count);
                _awaitingMemTables = pending;
            }
            merge.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    } catch (FileBeingDeletedException exc) {
        // Expected while the node is shutting down and TF chunks are being removed.
        Log.ErrorException(exc, "Could not acquire chunk in TableIndex.ReadOffQueue. It is OK if node is shutting down.");
    } catch (Exception exc) {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    }
}
private void ReadOffQueue() {
    // Background merge loop for the awaiting-tables queue. Before every indexmap
    // switch the current file is backed up and the directory flagged "unsafe", so a
    // crash mid-switch should only cost a partial index rebuild.
    try {
        while (true) {
            TableItem workItem;
            lock (_awaitingTablesLock) {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                if (_awaitingMemTables.Count == 1) {
                    // Only the live memtable is left — worker goes idle.
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                workItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable newTable;
            var asMemTable = workItem.Table as IMemTable;
            if (asMemTable == null) {
                newTable = (PTable)workItem.Table;
            } else {
                // Queued item is still a memtable: persist it to a new PTable file.
                asMemTable.MarkForConversion();
                newTable = PTable.FromMemtable(asMemTable, _fileNameProvider.GetFilenameNewTable());
            }

            // Back up the current indexmap (best effort — EatException) so a crash
            // during the switch below leaves enough state for a partial rebuild.
            var backupPath = Path.Combine(_directory, IndexMapBackupFilename);
            var indexMapPath = Path.Combine(_directory, IndexMapFilename);
            Helper.EatException(() => {
                if (File.Exists(backupPath))
                    File.Delete(backupPath);
                if (File.Exists(indexMapPath)) {
                    // Like File.Copy, but flushed all the way to disk.
                    var bytes = File.ReadAllBytes(indexMapPath);
                    using (var backup = File.Create(backupPath)) {
                        backup.Write(bytes, 0, bytes.Length);
                        backup.FlushToDisk();
                    }
                }
            });

            EnterUnsafeState(_directory);

            MergeResult merge;
            using (var reader = _tfReaderFactory()) {
                merge = _indexMap.AddPTable(newTable, workItem.PrepareCheckpoint, workItem.CommitCheckpoint,
                    entry => reader.ExistsAt(entry.Position), _fileNameProvider);
            }
            _indexMap = merge.MergedMap;
            _indexMap.SaveToFile(indexMapPath);

            LeaveUnsafeState(_directory);

            lock (_awaitingTablesLock) {
                var pending = _awaitingMemTables.ToList();
                var queued = pending.First(x => x.Table.Id == newTable.Id);
                pending.Remove(queued);
                // A parallel thread may already have switched the table; if the queue
                // held a different PTable instance with the same id, destroy that one —
                // ours is the instance that was merged in.
                var duplicate = queued.Table as PTable;
                if (duplicate != null && !ReferenceEquals(duplicate, newTable))
                    duplicate.MarkForDestruction();
                Log.Trace("There are now {0} awaiting tables.", pending.Count);
                _awaitingMemTables = pending;
            }

            // indexmap.backup is kept on purpose: after a crash we hope the PTables of
            // the previous IndexMap are still around, allowing a rebuild from the last
            // step rather than a full rebuild.
            merge.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    } catch (FileBeingDeletedException exc) {
        // Expected while the node is shutting down and TF chunks are being removed.
        Log.ErrorException(exc, "Couldn't acquire chunk in TableIndex.ReadOffQueue. It is ok if node is shutting down.");
    } catch (Exception exc) {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    }
}
private void ReadOffQueue() {
    // Background worker: merges queued tables into the index map. The running event
    // is reset for the duration of the loop and always signalled again in finally,
    // whether the loop exits normally or via an exception.
    _backgroundRunningEvent.Reset();
    try {
        while (true) {
            TableItem workItem;
            lock (_awaitingTablesLock) {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                if (_awaitingMemTables.Count == 1) {
                    // Only the live memtable is left; the event is set in finally.
                    _backgroundRunning = false;
                    return;
                }
                workItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable newTable;
            var asMemTable = workItem.Table as IMemTable;
            if (asMemTable == null) {
                newTable = (PTable)workItem.Table;
            } else {
                // Queued item is still a memtable: persist it to a new PTable file.
                asMemTable.MarkForConversion();
                newTable = PTable.FromMemtable(asMemTable, _fileNameProvider.GetFilenameNewTable());
            }

            // Flag the directory unsafe while the indexmap file is being switched.
            _indexMap.EnterUnsafeState(_directory);
            var merge = _indexMap.AddFile(newTable, workItem.PrepareCheckpoint, workItem.CommitCheckpoint, _fileNameProvider);
            _indexMap = merge.MergedMap;
            _indexMap.SaveToFile(Path.Combine(_directory, IndexMapFilename));
            _indexMap.LeaveUnsafeState(_directory);

            lock (_awaitingTablesLock) {
                // Small lock that is very unlikely to ever be contended.
                var pending = _awaitingMemTables.ToList();
                var queued = pending.First(x => x.Table.Id == newTable.Id);
                pending.Remove(queued);
                // A parallel thread may already have switched the table; if the queue
                // held a different PTable instance with the same id, destroy that one —
                // ours is the instance that was merged in.
                var duplicate = queued.Table as PTable;
                if (duplicate != null && !ReferenceEquals(duplicate, newTable))
                    duplicate.MarkForDestruction();
                Log.Trace("There are now {0} awaiting tables.", pending.Count);
                _awaitingMemTables = pending;
            }
            merge.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    } finally {
        _backgroundRunning = false;
        _backgroundRunningEvent.Set();
    }
}
private void ReadOffQueue() {
    // Background worker: converts queued memtables to PTables and merges them into
    // the index map until only the live memtable remains in the queue.
    try {
        while (true) {
            TableItem tableItem;
            lock (_awaitingTablesLock) {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                if (_awaitingMemTables.Count == 1) {
                    // Only the live memtable is left: stop the worker and wake waiters.
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }
            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null) {
                // Queued item is still a memtable: persist it to a new PTable file first.
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable());
            } else {
                ptable = (PTable)tableItem.Table;
            }
            // Flag the directory unsafe while the indexmap file is being switched.
            _indexMap.EnterUnsafeState(_directory);
            var mergeResult = _indexMap.AddFile(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, _fileNameProvider);
            _indexMap = mergeResult.MergedMap;
            _indexMap.SaveToFile(Path.Combine(_directory, IndexMapFilename));
            _indexMap.LeaveUnsafeState(_directory);
            lock (_awaitingTablesLock) {
                //oh well at least its only a small lock that is very unlikely to ever be hit
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable) {
                    ((PTable)corrTable.Table).MarkForDestruction();
                }
                Log.Trace("There are now {0} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    } catch (Exception exc) {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        // BUG FIX: previously an exception escaped with _backgroundRunning still true
        // and _backgroundRunningEvent never signalled, permanently wedging background
        // processing and hanging any thread waiting on the event. Reset both under the
        // same lock the exit path uses before rethrowing, so work can be rescheduled
        // (the later ReadOffQueue variant in this file performs the same cleanup).
        lock (_awaitingTablesLock) {
            _backgroundRunning = false;
            _backgroundRunningEvent.Set();
        }
        throw;
    }
}