// Fixture setup: adds the same single-entry ptable six times (five at checkpoints
// (0,0), one at (7,11)) into a map with maxTablesPerLevel: 4, saves the resulting
// map to _filename, then disposes every live table so only on-disk state remains.
public override void SetUp()
{
    base.SetUp();
    _filename = GetFilePathFor("indexfile");
    _tablename = GetTempFilePath();
    _mergeFile = GetFilePathFor("outfile");
    _map = IndexMap.FromFile(_filename, maxTablesPerLevel: 4);

    var memtable = new HashListMemTable(maxSize: 10);
    memtable.Add(0, 2, 123);
    var table = PTable.FromMemtable(memtable, _tablename);

    _result = _map.AddPTable(table, 0, 0, _ => true, new FakeFilenameProvider(_mergeFile));
    _result = _result.MergedMap.AddPTable(table, 0, 0, _ => true, new FakeFilenameProvider(_mergeFile));
    _result = _result.MergedMap.AddPTable(table, 0, 0, _ => true, new FakeFilenameProvider(_mergeFile));
    // The fourth add's result is kept in `merged` so its map's tables can be
    // disposed separately in the teardown section below.
    var merged = _result.MergedMap.AddPTable(table, 0, 0, _ => true, new FakeFilenameProvider(_mergeFile));
    _result = merged.MergedMap.AddPTable(table, 0, 0, _ => true, new FakeFilenameProvider(_mergeFile));
    _result = _result.MergedMap.AddPTable(table, 7, 11, _ => true, new FakeFilenameProvider(_mergeFile));
    _result.MergedMap.SaveToFile(_filename);

    // Release every PTable handle: the seed table, the intermediate `merged`
    // map's tables and its superseded tables, then the final map's tables.
    table.Dispose();
    merged.MergedMap.InOrder().ToList().ForEach(x => x.Dispose());
    merged.ToDelete.ForEach(x => x.Dispose());
    _result.MergedMap.InOrder().ToList().ForEach(x => x.Dispose());
    _result.ToDelete.ForEach(x => x.Dispose());
}
// Fixture setup: adds four single-entry ptables at checkpoints (1,2), (3,4),
// (4,5) and (0,1); the first three use GUID-named files, the last one writes
// to _mergeFile. Superseded tables are marked for destruction immediately.
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();
    _mergeFile = GetTempFilePath();
    _filename = GetTempFilePath();
    _map = IndexMap.FromFile(_filename, maxTablesPerLevel: 4);

    var memtable = new HashListMemTable(maxSize: 10);
    memtable.Add(0, 1, 0);

    var map = _map;
    foreach (var chk in new[] { new[] { 1, 2 }, new[] { 3, 4 }, new[] { 4, 5 } })
    {
        _result = map.AddPTable(PTable.FromMemtable(memtable, GetTempFilePath()),
                                chk[0], chk[1], _ => true, new GuidFilenameProvider(PathName));
        _result.ToDelete.ForEach(x => x.MarkForDestruction());
        map = _result.MergedMap;
    }
    _result = map.AddPTable(PTable.FromMemtable(memtable, GetTempFilePath()),
                            0, 1, _ => true, new FakeFilenameProvider(_mergeFile));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
}
// Fixture setup: adds four single-entry ptables through the AddPTable overload
// that also takes a hash-upgrade function, an existence filter, a record reader
// and an explicit ptable version (_ptableVersion).
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();
    _mergeFile = GetTempFilePath();
    _filename = GetTempFilePath();
    _map = IndexMap.FromFile(_filename, maxTablesPerLevel: 4);

    var memtable = new HashListMemTable(_ptableVersion, maxSize: 10);
    memtable.Add(0, 1, 0);

    // First three adds use GUID-named files, the last is routed to _mergeFile.
    // Each superseded table is marked for destruction right after the add.
    _result = _map.AddPTable(PTable.FromMemtable(memtable, GetTempFilePath()), 1, 2, (streamId, hash) => hash, _ => true, _ => new System.Tuple<string, bool>("", true), new GuidFilenameProvider(PathName), _ptableVersion);
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddPTable(PTable.FromMemtable(memtable, GetTempFilePath()), 3, 4, (streamId, hash) => hash, _ => true, _ => new System.Tuple<string, bool>("", true), new GuidFilenameProvider(PathName), _ptableVersion);
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddPTable(PTable.FromMemtable(memtable, GetTempFilePath()), 4, 5, (streamId, hash) => hash, _ => true, _ => new System.Tuple<string, bool>("", true), new GuidFilenameProvider(PathName), _ptableVersion);
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddPTable(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 1, (streamId, hash) => hash, _ => true, _ => new System.Tuple<string, bool>("", true), new FakeFilenameProvider(_mergeFile), _ptableVersion);
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
}
// Loads the map (collision predicate always negative), then flags the file's
// directory as unsafe and immediately clears the flag again.
public void Setup()
{
    _map = IndexMap.FromFile(_file, x => false);

    var containingDir = new FileInfo(_file).DirectoryName;
    _map.EnterUnsafeState(containingDir);
    _map.LeaveUnsafeState(containingDir);
}
// Creates a map backed by a temp file, then enters and immediately leaves the
// unsafe state on the fixture's working directory.
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();

    _map = IndexMap.FromFile(GetTempFilePath(), x => false);
    _map.EnterUnsafeState(PathName);
    _map.LeaveUnsafeState(PathName);
}
// Persists a freshly-created (empty) index map straight back to its own file.
public override void SetUp()
{
    base.SetUp();

    _filename = GetFilePathFor("indexfile");
    _map = IndexMap.FromFile(_filename, x => false);
    _map.SaveToFile(_filename);
}
// Persists a freshly-created (empty) index map of the configured ptable version.
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();

    _filename = GetFilePathFor("indexfile");
    _map = IndexMap.FromFile(_filename, _ptableVersion);
    _map.SaveToFile(_filename);
}
// Loads (or rebuilds) the on-disk IndexMap and publishes its checkpoints.
// The map is considered corrupt when its CommitCheckpoint is at or beyond the
// chaser checkpoint (the map would then reference unwritten TF data); in that
// case the map file is deleted and an empty map is created in its place.
public void Initialize(long chaserCheckpoint)
{
    Ensure.Nonnegative(chaserCheckpoint, "chaserCheckpoint");

    //NOT THREAD SAFE (assumes one thread)
    if (_initialized)
        throw new IOException("TableIndex is already initialized.");
    _initialized = true;

    // Pure in-memory mode: start from an empty map, nothing on disk to reconcile.
    if (_inMem)
    {
        _indexMap = IndexMap.CreateEmpty(_maxTablesPerLevel);
        _prepareCheckpoint = _indexMap.PrepareCheckpoint;
        _commitCheckpoint = _indexMap.CommitCheckpoint;
        return;
    }

    CreateIfDoesNotExist(_directory);
    var indexmapFile = Path.Combine(_directory, IndexMapFilename);

    // if TableIndex's CommitCheckpoint is >= amount of written TFChunk data,
    // we'll have to remove some of PTables as they point to non-existent data
    // this can happen (very unlikely, though) on master crash
    try
    {
        _indexMap = IndexMap.FromFile(indexmapFile, _maxTablesPerLevel);
        if (_indexMap.CommitCheckpoint >= chaserCheckpoint)
        {
            _indexMap.Dispose(TimeSpan.FromMilliseconds(5000));
            // FIX: the old message blamed "WriterCheckpoint", but the value compared
            // against is the chaser checkpoint; also include both values for diagnosis.
            throw new CorruptIndexException(
                string.Format("IndexMap's CommitCheckpoint ({0}) is greater than ChaserCheckpoint ({1}).",
                              _indexMap.CommitCheckpoint, chaserCheckpoint));
        }
    }
    catch (CorruptIndexException exc)
    {
        Log.ErrorException(exc, "ReadIndex is corrupted...");
        LogIndexMapContent(indexmapFile);
        DumpAndCopyIndex();
        // FIX: clear any read-only attribute first — File.Delete throws
        // UnauthorizedAccessException on read-only files (same treatment as the
        // cleanup loop below already applies).
        File.SetAttributes(indexmapFile, FileAttributes.Normal);
        File.Delete(indexmapFile);
        _indexMap = IndexMap.FromFile(indexmapFile, _maxTablesPerLevel);
    }
    _prepareCheckpoint = _indexMap.PrepareCheckpoint;
    _commitCheckpoint = _indexMap.CommitCheckpoint;

    // clean up all other remaining files: anything in the index directory that the
    // map does not reference is leftover garbage from interrupted merges.
    var indexFiles = _indexMap.InOrder().Select(x => Path.GetFileName(x.Filename))
                              .Union(new[] { IndexMapFilename });
    var toDeleteFiles = Directory.EnumerateFiles(_directory).Select(Path.GetFileName)
                                 .Except(indexFiles, StringComparer.OrdinalIgnoreCase);
    foreach (var filePath in toDeleteFiles)
    {
        var file = Path.Combine(_directory, filePath);
        File.SetAttributes(file, FileAttributes.Normal);
        File.Delete(file);
    }
}
// Prepares an empty index map plus a one-entry ptable for the tests to combine.
public override void SetUp()
{
    base.SetUp();

    _indexMapFileName = GetFilePathFor("index.map");
    _ptableFileName = GetFilePathFor("ptable");
    _emptyIndexMap = IndexMap.FromFile(_indexMapFileName, _ptableVersion);

    var seed = new HashListMemTable(_ptableVersion, maxSize: 10);
    seed.Add(0, 1, 2);
    _ptable = PTable.FromMemtable(seed, _ptableFileName, _ptableVersion);
}
// Prepares an empty index map plus a one-entry ptable for the tests to combine.
public override void SetUp()
{
    base.SetUp();

    _indexMapFileName = Path.Combine(PathName, "index.map");
    _ptableFileName = Path.Combine(PathName, "ptable");
    _emptyIndexMap = IndexMap.FromFile(_indexMapFileName, x => false);

    var seed = new HashListMemTable();
    seed.Add(0, 1, 2);
    _ptable = PTable.FromMemtable(seed, _ptableFileName);
}
// Fixture setup: adds one single-entry ptable at checkpoints (7,11) through the
// versioned AddPTable overload, then marks the source table for destruction;
// _result is left for the fixture's assertions.
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();
    _filename = GetTempFilePath();
    _tablename = GetTempFilePath();
    _mergeFile = GetFilePathFor("mergefile");
    _map = IndexMap.FromFile(_filename, _ptableVersion);

    var memtable = new HashListMemTable(_ptableVersion, maxSize: 10);
    memtable.Add(0, 1, 0);
    var table = PTable.FromMemtable(memtable, _tablename);

    // (streamId, hash) => hash: identity hash "upgrade"; _ => true: keep all
    // records; the Tuple-returning lambda supplies a dummy record reader.
    _result = _map.AddPTable(table, 7, 11, (streamId, hash) => hash, _ => true, _ => new System.Tuple<string, bool>("", true), new FakeFilenameProvider(_mergeFile), _ptableVersion);
    table.MarkForDestruction();
}
// Adds one single-entry ptable at checkpoints (7,11) and marks it for
// destruction; _result is left for the fixture's assertions.
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();

    _filename = GetTempFilePath();
    _tablename = GetTempFilePath();
    _mergeFile = Path.Combine(PathName, "mergefile");
    _map = IndexMap.FromFile(_filename, x => false);

    var seed = new HashListMemTable();
    seed.Add(0, 1, 0);
    var ptable = PTable.FromMemtable(seed, _tablename);
    _result = _map.AddFile(ptable, 7, 11, new FakeFilenameProvider(_mergeFile));
    ptable.MarkForDestruction();
}
// Adds one single-entry ptable at checkpoints (7,11) and marks it for
// destruction; _result is left for the fixture's assertions.
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();

    _filename = GetTempFilePath();
    _tablename = GetTempFilePath();
    _mergeFile = GetFilePathFor("mergefile");
    _map = IndexMap.FromFile(_filename);

    var seed = new HashListMemTable(maxSize: 10);
    seed.Add(0, 1, 0);
    var ptable = PTable.FromMemtable(seed, _tablename);
    _result = _map.AddPTable(ptable, 7, 11, _ => true, new FakeFilenameProvider(_mergeFile));
    ptable.MarkForDestruction();
}
// Builds a map containing one single-entry ptable, saves it to _filename, then
// disposes all in-memory tables so only the on-disk state remains.
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();

    _filename = GetFilePathFor("indexfile");
    _tablename = GetTempFilePath();
    _mergeFile = GetFilePathFor("outputfile");
    _map = IndexMap.FromFile(_filename, x => false);

    var seed = new HashListMemTable(maxSize: 10);
    seed.Add(0, 2, 7);
    var ptable = PTable.FromMemtable(seed, _tablename);
    _result = _map.AddFile(ptable, 7, 11, new FakeFilenameProvider(_mergeFile));
    _result.MergedMap.SaveToFile(_filename);

    // Release every PTable handle held by this fixture.
    _result.ToDelete.ForEach(x => x.Dispose());
    _result.MergedMap.InOrder().ToList().ForEach(x => x.Dispose());
    ptable.Dispose();
}
// Fixture setup: builds a map containing one single-entry ptable (via the
// versioned AddPTable overload), saves it to _filename, then disposes all
// in-memory tables so only the on-disk state remains.
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();
    _filename = GetFilePathFor("indexfile");
    _tablename = GetTempFilePath();
    _mergeFile = GetFilePathFor("outputfile");
    _map = IndexMap.FromFile(_filename, _ptableVersion);

    var memtable = new HashListMemTable(_ptableVersion, maxSize: 10);
    memtable.Add(0, 2, 7);
    var table = PTable.FromMemtable(memtable, _tablename);

    // Identity hash upgrade, keep-all filter, dummy record reader.
    _result = _map.AddPTable(table, 7, 11, (streamId, hash) => hash, _ => true, _ => new Tuple<string, bool>("", true), new FakeFilenameProvider(_mergeFile), _ptableVersion);
    _result.MergedMap.SaveToFile(_filename);

    // Release every PTable handle held by this fixture.
    _result.ToDelete.ForEach(x => x.Dispose());
    _result.MergedMap.InOrder().ToList().ForEach(x => x.Dispose());
    table.Dispose();
}
// Fixture setup: performs 16 adds at checkpoints (0,0) with GUID-named merge
// files, then one final add at (1,2) whose cascading merges are routed to the
// two fixed names _finalmergefile / _finalmergefile2. The exact number of adds
// is load-bearing: it determines how many merge levels the final add cascades
// through (maxTablesPerLevel is left at its default here).
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();
    _filename = GetTempFilePath();
    _finalmergefile = GetTempFilePath();
    _finalmergefile2 = GetTempFilePath();
    _map = IndexMap.FromFile(_filename, x => false);

    var memtable = new HashListMemTable();
    memtable.Add(0, 1, 0);

    // 16 adds at (0,0); each superseded table is destroyed immediately.
    _result = _map.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 0, 0, new GuidFilenameProvider(PathName));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());

    // Final add at (1,2) — its merges go to the two fixed filenames.
    _result = _result.MergedMap.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 1, 2, new FakeFilenameProvider(_finalmergefile, _finalmergefile2));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
}
// Adds four single-entry ptables into a map limited to 2 tables per level; the
// first three use GUID-named files, the fourth writes its two cascading merge
// outputs to "<_mergeFile>.firstmerge" and _mergeFile.
public override void TestFixtureSetUp()
{
    base.TestFixtureSetUp();
    _mergeFile = GetTempFilePath();
    _filename = GetTempFilePath();
    _map = IndexMap.FromFile(_filename, x => false, maxTablesPerLevel: 2);

    var memtable = new HashListMemTable();
    memtable.Add(0, 1, 0);

    var map = _map;
    foreach (var chk in new[] { new[] { 10, 20 }, new[] { 20, 30 }, new[] { 30, 40 } })
    {
        _result = map.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()),
                              chk[0], chk[1], new GuidFilenameProvider(PathName));
        _result.ToDelete.ForEach(x => x.MarkForDestruction());
        map = _result.MergedMap;
    }
    _result = map.AddFile(PTable.FromMemtable(memtable, GetTempFilePath()), 50, 60,
                          new FakeFilenameProvider(_mergeFile + ".firstmerge", _mergeFile));
    _result.ToDelete.ForEach(x => x.MarkForDestruction());
}
// Adds a ptable at level 0 of a copy of the current map and cascades merges:
// whenever a level holds _maxTablesPerLevel or more tables, they are all merged
// into one table at the next level. Returns the new map together with the list
// of superseded tables the caller is responsible for deleting.
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint, Func<IndexEntry, bool> recordExistsAt, IIndexFilenameProvider filenameProvider)
{
    Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint");
    Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint");

    // Operate on a copy so the existing map is never mutated.
    var tables = CopyFrom(_map);
    CreateIfNeeded(0, tables);
    tables[0].Add(tableToAdd);

    var toDelete = new List<PTable>();
    // A merge at `level` can fill `level + 1`; since tables.Count grows when a
    // new level is created, the same loop visits the next level and cascades.
    for (int level = 0; level < tables.Count; level++)
    {
        if (tables[level].Count >= _maxTablesPerLevel)
        {
            var filename = filenameProvider.GetFilenameNewTable();
            // recordExistsAt presumably filters out entries whose records are
            // gone (callers pass reader.ExistsAt) — confirm against PTable.MergeTo.
            PTable table = PTable.MergeTo(tables[level], filename, recordExistsAt);
            CreateIfNeeded(level + 1, tables);
            tables[level + 1].Add(table);
            // Every source table of the merge is now superseded.
            toDelete.AddRange(tables[level]);
            tables[level].Clear();
        }
    }
    var indexMap = new IndexMap(Version, tables, prepareCheckpoint, commitCheckpoint, _maxTablesPerLevel);
    return new MergeResult(indexMap, toDelete);
}
/// <summary>
/// Captures the outcome of adding/merging a ptable: the resulting map and the
/// superseded tables the caller must dispose or delete.
/// </summary>
public MergeResult(IndexMap mergedMap, List<PTable> toDelete)
{
    MergedMap = mergedMap;
    ToDelete = toDelete;
}
public void Setup()
{
    // The path does not exist on disk, so FromFile yields a fresh map.
    var missingPath = "shitbird";
    _map = IndexMap.FromFile(missingPath, x => false);
}
public void Setup()
{
    // The path does not exist on disk, so FromFile yields a fresh map.
    var missingPath = "shitbird";
    _map = IndexMap.FromFile(missingPath);
}
// Background worker: drains the queue of awaiting memtables one at a time,
// converting each to a ptable, backing up the index map, merging the ptable in
// while flagged "unsafe", saving the new map, and retiring superseded tables.
// Exits when only the active (head) memtable remains in the queue.
private void ReadOffQueue()
{
    try
    {
        while (true)
        {
            TableItem tableItem;
            //ISearchTable table;
            lock (_awaitingTablesLock)
            {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                // Count == 1 means only the live memtable is left — nothing to flush.
                if (_awaitingMemTables.Count == 1)
                {
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                // Process the oldest enqueued item (queue is ordered newest-first).
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null)
            {
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable());
            }
            else
            {
                ptable = (PTable)tableItem.Table;
            }

            // backup current version of IndexMap in case following switch will be left in unsafe state
            // this will allow to rebuild just part of index
            var backupFile = Path.Combine(_directory, IndexMapBackupFilename);
            var indexmapFile = Path.Combine(_directory, IndexMapFilename);
            // Best-effort: a failed backup must not abort the merge itself.
            Helper.EatException(() =>
            {
                if (File.Exists(backupFile))
                {
                    File.Delete(backupFile);
                }
                if (File.Exists(indexmapFile))
                {
                    // same as File.Copy(indexmapFile, backupFile); but with forced flush
                    var indexmapContent = File.ReadAllBytes(indexmapFile);
                    using (var f = File.Create(backupFile))
                    {
                        f.Write(indexmapContent, 0, indexmapContent.Length);
                        f.FlushToDisk();
                    }
                }
            });

            // The unsafe window covers the merge + map save; a crash inside it is
            // detected at startup and triggers a partial rebuild from the backup.
            EnterUnsafeState(_directory);
            var mergeResult = _indexMap.AddFile(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, _fileNameProvider);
            _indexMap = mergeResult.MergedMap;
            _indexMap.SaveToFile(indexmapFile);
            LeaveUnsafeState(_directory);

            lock (_awaitingTablesLock)
            {
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable)
                {
                    ((PTable)corrTable.Table).MarkForDestruction();
                }
                Log.Trace("There are now {0} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
            // We'll keep indexmap.backup in case of crash. In case of crash we hope that all necessary
            // PTables for previous version of IndexMap are still there, so we can rebuild
            // from last step, not to do full rebuild.
            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    }
    catch (Exception exc)
    {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    }
}
public void Setup()
{
    // The path does not exist on disk, so FromFile yields a fresh map.
    var missingPath = "thisfiledoesnotexist";
    _map = IndexMap.FromFile(missingPath);
}
/// <summary>
/// Result of adding a table: the new map and whether any level can still merge.
/// </summary>
public AddResult(IndexMap newMap, bool canMergeAny)
{
    NewMap = newMap;
    CanMergeAny = canMergeAny;
}
// Loads (or rebuilds) the on-disk IndexMap, honoring forced verification, and
// publishes its prepare/commit checkpoints. A map whose CommitCheckpoint is at
// or beyond the chaser checkpoint is treated as corrupt, deleted, and replaced
// by a freshly-created empty map.
public void Initialize(long chaserCheckpoint)
{
    Ensure.Nonnegative(chaserCheckpoint, "chaserCheckpoint");

    //NOT THREAD SAFE (assumes one thread)
    if (_initialized)
    {
        throw new IOException("TableIndex is already initialized.");
    }
    _initialized = true;

    // In-memory mode: start from an empty map, nothing on disk to reconcile.
    if (_inMem)
    {
        _indexMap = IndexMap.CreateEmpty(_maxTablesPerLevel);
        _prepareCheckpoint = _indexMap.PrepareCheckpoint;
        _commitCheckpoint = _indexMap.CommitCheckpoint;
        return;
    }

    if (ShouldForceIndexVerify())
    {
        Log.Debug("Forcing verification of index files...");
    }

    CreateIfDoesNotExist(_directory);
    var indexmapFile = Path.Combine(_directory, IndexMapFilename);

    // if TableIndex's CommitCheckpoint is >= amount of written TFChunk data,
    // we'll have to remove some of PTables as they point to non-existent data
    // this can happen (very unlikely, though) on master crash
    try
    {
        _indexMap = IndexMap.FromFile(indexmapFile, maxTablesPerLevel: _maxTablesPerLevel, cacheDepth: _indexCacheDepth, skipIndexVerify: _skipIndexVerify, threads: _initializationThreads);
        if (_indexMap.CommitCheckpoint >= chaserCheckpoint)
        {
            _indexMap.Dispose(TimeSpan.FromMilliseconds(5000));
            throw new CorruptIndexException(String.Format("IndexMap's CommitCheckpoint ({0}) is greater than ChaserCheckpoint ({1}).", _indexMap.CommitCheckpoint, chaserCheckpoint));
        }
        //verification should be completed by now
        DeleteForceIndexVerifyFile();
    }
    catch (CorruptIndexException exc)
    {
        Log.ErrorException(exc, "ReadIndex is corrupted...");
        LogIndexMapContent(indexmapFile);
        DumpAndCopyIndex();
        // Clear read-only attribute so the delete cannot fail, then rebuild empty.
        File.SetAttributes(indexmapFile, FileAttributes.Normal);
        File.Delete(indexmapFile);
        DeleteForceIndexVerifyFile();
        _indexMap = IndexMap.FromFile(indexmapFile, maxTablesPerLevel: _maxTablesPerLevel, cacheDepth: _indexCacheDepth, skipIndexVerify: _skipIndexVerify, threads: _initializationThreads);
    }
    _prepareCheckpoint = _indexMap.PrepareCheckpoint;
    _commitCheckpoint = _indexMap.CommitCheckpoint;

    // clean up all other remaining files: anything the map doesn't reference is
    // leftover from interrupted merges and can be removed.
    var indexFiles = _indexMap.InOrder().Select(x => Path.GetFileName(x.Filename))
                              .Union(new[] { IndexMapFilename });
    var toDeleteFiles = Directory.EnumerateFiles(_directory).Select(Path.GetFileName)
                                 .Except(indexFiles, StringComparer.OrdinalIgnoreCase);
    foreach (var filePath in toDeleteFiles)
    {
        var file = Path.Combine(_directory, filePath);
        File.SetAttributes(file, FileAttributes.Normal);
        File.Delete(file);
    }
}
// Loads the map and flags its directory as unsafe — deliberately left unsafe
// so the test can observe that state.
public void Setup()
{
    _map = IndexMap.FromFile(_file, x => false);

    var containingDir = new FileInfo(_file).DirectoryName;
    _map.EnterUnsafeState(containingDir);
}
// Background worker: drains the queue of awaiting memtables, converting each to
// a ptable and merging it into the index map (with hash upgrade and record
// existence checks done through a TF reader), saving the map after each merge.
// Exits when only the active (head) memtable remains.
private void ReadOffQueue()
{
    try
    {
        while (true)
        {
            TableItem tableItem;
            //ISearchTable table;
            lock (_awaitingTablesLock)
            {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                // Count == 1 means only the live memtable is left — nothing to flush.
                if (_awaitingMemTables.Count == 1)
                {
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null)
            {
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable(), _indexCacheDepth);
            }
            else
                ptable = (PTable)tableItem.Table;

            var indexmapFile = Path.Combine(_directory, IndexMapFilename);
            MergeResult mergeResult;
            // The reader is only needed for the duration of the merge.
            using (var reader = _tfReaderFactory())
            {
                mergeResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, (streamId, currentHash) => UpgradeHash(streamId, currentHash), entry => reader.ExistsAt(entry.Position), entry => ReadEntry(reader, entry.Position), _fileNameProvider, _ptableVersion, _indexCacheDepth);
            }
            _indexMap = mergeResult.MergedMap;
            _indexMap.SaveToFile(indexmapFile);

            lock (_awaitingTablesLock)
            {
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable)
                    ((PTable)corrTable.Table).MarkForDestruction();
                Log.Trace("There are now {0} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    }
    catch (FileBeingDeletedException exc)
    {
        // Expected during shutdown: a chunk we wanted to read is being removed.
        Log.ErrorException(exc, "Could not acquire chunk in TableIndex.ReadOffQueue. It is OK if node is shutting down.");
    }
    catch (Exception exc)
    {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    }
}
// Background worker: drains the queue of awaiting memtables, converting each to
// a ptable (honoring _skipIndexVerify) and merging it into the index map with
// hash upgrade and record existence checks via a TF reader. Exits when only the
// active (head) memtable remains.
private void ReadOffQueue()
{
    try
    {
        while (true)
        {
            TableItem tableItem;
            //ISearchTable table;
            lock (_awaitingTablesLock)
            {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                // Count == 1 means only the live memtable is left — nothing to flush.
                if (_awaitingMemTables.Count == 1)
                {
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null)
            {
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable(), _indexCacheDepth, _skipIndexVerify);
            }
            else
            {
                ptable = (PTable)tableItem.Table;
            }

            var indexmapFile = Path.Combine(_directory, IndexMapFilename);
            MergeResult mergeResult;
            // The reader is only needed for the duration of the merge.
            using (var reader = _tfReaderFactory())
            {
                mergeResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, (streamId, currentHash) => UpgradeHash(streamId, currentHash), entry => reader.ExistsAt(entry.Position), entry => ReadEntry(reader, entry.Position), _fileNameProvider, _ptableVersion, _indexCacheDepth, _skipIndexVerify);
            }
            _indexMap = mergeResult.MergedMap;
            _indexMap.SaveToFile(indexmapFile);

            lock (_awaitingTablesLock)
            {
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable)
                {
                    ((PTable)corrTable.Table).MarkForDestruction();
                }
                Log.Trace("There are now {0} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    }
    catch (FileBeingDeletedException exc)
    {
        // Expected during shutdown: a chunk we wanted to read is being removed.
        Log.ErrorException(exc, "Could not acquire chunk in TableIndex.ReadOffQueue. It is OK if node is shutting down.");
    }
    catch (Exception exc)
    {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    }
}
public void Setup()
{
    // The path does not exist on disk, so FromFile yields a fresh map.
    var missingPath = "thisfiledoesnotexist";
    _map = IndexMap.FromFile(missingPath, _ptableVersion);
}
// Background worker: drains the queue of awaiting memtables, converting each to
// a ptable and merging it into the index map inside an unsafe-state window
// (entered before the merge, left after the map is saved). Exits when only the
// active (head) memtable remains.
private void ReadOffQueue()
{
    try
    {
        while (true)
        {
            TableItem tableItem;
            //ISearchTable table;
            lock (_awaitingTablesLock)
            {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                // Count == 1 means only the live memtable is left — nothing to flush.
                if (_awaitingMemTables.Count == 1)
                {
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null)
            {
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable());
            }
            else
            {
                ptable = (PTable)tableItem.Table;
            }

            // A crash between Enter and Leave is detected at startup as corruption.
            _indexMap.EnterUnsafeState(_directory);
            var mergeResult = _indexMap.AddFile(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, _fileNameProvider);
            _indexMap = mergeResult.MergedMap;
            _indexMap.SaveToFile(Path.Combine(_directory, IndexMapFilename));
            _indexMap.LeaveUnsafeState(_directory);

            lock (_awaitingTablesLock)
            {
                //oh well at least its only a small lock that is very unlikely to ever be hit
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable)
                {
                    ((PTable)corrTable.Table).MarkForDestruction();
                }
                Log.Trace("There are now {0} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    }
    catch (Exception exc)
    {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    }
}
// Background worker variant: resets the "running" event up front and relies on
// a finally block to clear the running flag and signal the event on every exit
// path (normal return or exception).
private void ReadOffQueue()
{
    _backgroundRunningEvent.Reset();
    try
    {
        while (true)
        {
            TableItem tableItem;
            //ISearchTable table;
            lock (_awaitingTablesLock)
            {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                // Count == 1 means only the live memtable is left — nothing to flush.
                // The event is NOT set here; the finally block below handles that.
                if (_awaitingMemTables.Count == 1)
                {
                    _backgroundRunning = false;
                    return;
                }
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null)
            {
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable());
            }
            else
                ptable = (PTable) tableItem.Table;

            // A crash between Enter and Leave is detected at startup as corruption.
            _indexMap.EnterUnsafeState(_directory);
            var mergeResult = _indexMap.AddFile(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, _fileNameProvider);
            _indexMap = mergeResult.MergedMap;
            _indexMap.SaveToFile(Path.Combine(_directory, IndexMapFilename));
            _indexMap.LeaveUnsafeState(_directory);

            lock (_awaitingTablesLock)
            {
                //oh well at least its only a small lock that is very unlikely to ever be hit
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable)
                    ((PTable)corrTable.Table).MarkForDestruction();
                Log.Trace("There are now {0} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    }
    finally
    {
        // NOTE(review): this write is outside _awaitingTablesLock — confirm callers
        // tolerate the unsynchronized flag update.
        _backgroundRunning = false;
        _backgroundRunningEvent.Set();
    }
}
/// <summary>
/// Pairs the map produced by a merge with the tables it superseded, which the
/// caller is expected to dispose or delete.
/// </summary>
public MergeResult(IndexMap mergedMap, List<PTable> toDelete)
{
    this.MergedMap = mergedMap;
    this.ToDelete = toDelete;
}
// Loads the on-disk IndexMap, falling back to the indexmap backup and finally
// to an empty map when corruption is detected (unsafe-state flag set, or the
// map's CommitCheckpoint at/beyond the writer checkpoint). Publishes the
// resulting prepare/commit checkpoints and deletes unreferenced files.
public void Initialize(long writerCheckpoint)
{
    Ensure.Nonnegative(writerCheckpoint, "writerCheckpoint");

    //NOT THREAD SAFE (assumes one thread)
    if (_initialized)
    {
        throw new IOException("TableIndex is already initialized.");
    }
    _initialized = true;

    CreateIfDoesNotExist(_directory);
    var indexmapFile = Path.Combine(_directory, IndexMapFilename);
    var backupFile = Path.Combine(_directory, IndexMapBackupFilename);

    // if TableIndex's CommitCheckpoint is >= amount of written TFChunk data,
    // we'll have to remove some of PTables as they point to non-existent data
    // this can happen (very unlikely, though) on master crash
    try
    {
        // Unsafe flag present means a previous merge was interrupted mid-switch.
        if (IsCorrupt(_directory))
        {
            throw new CorruptIndexException("IndexMap is in unsafe state.");
        }
        _indexMap = IndexMap.FromFile(indexmapFile, IsHashCollision, _maxTablesPerLevel);
        if (_indexMap.CommitCheckpoint >= writerCheckpoint)
        {
            _indexMap.Dispose(TimeSpan.FromMilliseconds(5000));
            throw new CorruptIndexException("IndexMap's CommitCheckpoint is greater than WriterCheckpoint.");
        }
    }
    catch (CorruptIndexException exc)
    {
        Log.ErrorException(exc, "ReadIndex is corrupted...");
        LogIndexMapContent(indexmapFile);
        DumpAndCopyIndex();
        File.Delete(indexmapFile);

        // Fallback 1: try the backup copy written before the last switch.
        bool createEmptyIndexMap = true;
        if (File.Exists(backupFile))
        {
            File.Copy(backupFile, indexmapFile);
            try
            {
                _indexMap = IndexMap.FromFile(indexmapFile, IsHashCollision, _maxTablesPerLevel);
                if (_indexMap.CommitCheckpoint >= writerCheckpoint)
                {
                    _indexMap.Dispose(TimeSpan.FromMilliseconds(5000));
                    throw new CorruptIndexException("Back-up IndexMap's CommitCheckpoint is still greater than WriterCheckpoint.");
                }
                createEmptyIndexMap = false;
                Log.Info("Using back-up index map...");
            }
            catch (CorruptIndexException ex)
            {
                Log.ErrorException(ex, "Backup IndexMap is also corrupted...");
                LogIndexMapContent(backupFile);
                File.Delete(indexmapFile);
                File.Delete(backupFile);
            }
        }
        // Fallback 2: start from a fresh, empty map.
        if (createEmptyIndexMap)
        {
            _indexMap = IndexMap.FromFile(indexmapFile, IsHashCollision, _maxTablesPerLevel);
        }
        // Recovery is complete, so the unsafe flag can be cleared.
        if (IsCorrupt(_directory))
        {
            LeaveUnsafeState(_directory);
        }
    }
    _prepareCheckpoint = _indexMap.PrepareCheckpoint;
    _commitCheckpoint = _indexMap.CommitCheckpoint;

    // clean up all other remaining files: keep only the map's ptables, the map
    // file itself and its backup.
    var indexFiles = _indexMap.InOrder().Select(x => Path.GetFileName(x.Filename))
                              .Union(new[] { IndexMapFilename, IndexMapBackupFilename });
    var toDeleteFiles = Directory.EnumerateFiles(_directory).Select(Path.GetFileName)
                                 .Except(indexFiles, StringComparer.OrdinalIgnoreCase);
    foreach (var filePath in toDeleteFiles)
    {
        var file = Path.Combine(_directory, filePath);
        File.SetAttributes(file, FileAttributes.Normal);
        File.Delete(file);
    }
}
// Loads the on-disk IndexMap; when it is corrupt (unsafe-state flag set), every
// ptable is destroyed, the whole index directory is wiped, and an empty map is
// created from scratch. Publishes the map's prepare/commit checkpoints.
public void Initialize()
{
    //NOT THREAD SAFE (assumes one thread)
    if (_initialized)
        throw new IOException("TableIndex is already initialized.");
    _initialized = true;

    CreateIfDoesNotExist(_directory);
    try
    {
        _indexMap = IndexMap.FromFile(Path.Combine(_directory, IndexMapFilename), IsHashCollision, _maxTablesPerLevel);
        if (_indexMap.IsCorrupt(_directory))
        {
            // Release every ptable first so their files can actually be deleted.
            foreach (var ptable in _indexMap.InOrder())
            {
                ptable.MarkForDestruction();
            }
            foreach (var ptable in _indexMap.InOrder())
            {
                ptable.WaitForDestroy(5000);
            }
            throw new CorruptIndexException("IndexMap is in unsafe state.");
        }
    }
    catch (CorruptIndexException exc)
    {
        Log.ErrorException(exc, "ReadIndex was corrupted. Rebuilding from scratch...");
        foreach (var filePath in Directory.EnumerateFiles(_directory))
        {
            // FIX: clear any read-only attribute first — File.Delete throws
            // UnauthorizedAccessException on read-only files, which would abort
            // the rebuild (other Initialize variants already do this).
            File.SetAttributes(filePath, FileAttributes.Normal);
            File.Delete(filePath);
        }
        _indexMap = IndexMap.FromFile(Path.Combine(_directory, IndexMapFilename), IsHashCollision, _maxTablesPerLevel);
    }
    _prepareCheckpoint = _indexMap.PrepareCheckpoint;
    _commitCheckpoint = _indexMap.CommitCheckpoint;
}
// Loads the on-disk IndexMap and publishes its checkpoints. Note the asymmetry:
// prepare is stored in a field while commit is written through a checkpoint.
public void Initialize()
{
    //NOT THREAD SAFE (assumes one thread)
    CreateIfDoesNotExist(_directory);

    var indexMapPath = Path.Combine(_directory, IndexMapFilename);
    _indexMap = IndexMap.FromFile(indexMapPath, IsHashCollision, _maxTablesPerLevel);

    _prepareCheckpoint = _indexMap.PrepareCheckpoint;
    _commitCheckpoint.Write(_indexMap.CommitCheckpoint);
}
// Background worker: drains the queue of awaiting memtables, backing up the
// index map before each switch, merging the new ptable in (record existence
// checked via a TF reader) inside an unsafe-state window, then retiring
// superseded tables. Exits when only the active (head) memtable remains.
private void ReadOffQueue()
{
    try
    {
        while (true)
        {
            TableItem tableItem;
            //ISearchTable table;
            lock (_awaitingTablesLock)
            {
                Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count);
                // Count == 1 means only the live memtable is left — nothing to flush.
                if (_awaitingMemTables.Count == 1)
                {
                    _backgroundRunning = false;
                    _backgroundRunningEvent.Set();
                    return;
                }
                tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1];
            }

            PTable ptable;
            var memtable = tableItem.Table as IMemTable;
            if (memtable != null)
            {
                memtable.MarkForConversion();
                ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable());
            }
            else
                ptable = (PTable) tableItem.Table;

            // backup current version of IndexMap in case following switch will be left in unsafe state
            // this will allow to rebuild just part of index
            var backupFile = Path.Combine(_directory, IndexMapBackupFilename);
            var indexmapFile = Path.Combine(_directory, IndexMapFilename);
            // Best-effort: a failed backup must not abort the merge itself.
            Helper.EatException(() =>
            {
                if (File.Exists(backupFile))
                    File.Delete(backupFile);
                if (File.Exists(indexmapFile))
                {
                    // same as File.Copy(indexmapFile, backupFile); but with forced flush
                    var indexmapContent = File.ReadAllBytes(indexmapFile);
                    using (var f = File.Create(backupFile))
                    {
                        f.Write(indexmapContent, 0, indexmapContent.Length);
                        f.FlushToDisk();
                    }
                }
            });

            // A crash between Enter and Leave is detected at startup as corruption.
            EnterUnsafeState(_directory);
            MergeResult mergeResult;
            using (var reader = _tfReaderFactory())
            {
                mergeResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, entry => reader.ExistsAt(entry.Position), _fileNameProvider);
            }
            _indexMap = mergeResult.MergedMap;
            _indexMap.SaveToFile(indexmapFile);
            LeaveUnsafeState(_directory);

            lock (_awaitingTablesLock)
            {
                var memTables = _awaitingMemTables.ToList();
                var corrTable = memTables.First(x => x.Table.Id == ptable.Id);
                memTables.Remove(corrTable);
                // parallel thread could already switch table,
                // so if we have another PTable instance with same ID,
                // we need to kill that instance as we added ours already
                if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable)
                    ((PTable)corrTable.Table).MarkForDestruction();
                Log.Trace("There are now {0} awaiting tables.", memTables.Count);
                _awaitingMemTables = memTables;
            }
            // We'll keep indexmap.backup in case of crash. In case of crash we hope that all necessary
            // PTables for previous version of IndexMap are still there, so we can rebuild
            // from last step, not to do full rebuild.
            mergeResult.ToDelete.ForEach(x => x.MarkForDestruction());
        }
    }
    catch (FileBeingDeletedException exc)
    {
        // Expected during shutdown: a chunk we wanted to read is being removed.
        Log.ErrorException(exc, "Couldn't acquire chunk in TableIndex.ReadOffQueue. It is ok if node is shutting down.");
    }
    catch (Exception exc)
    {
        Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue");
        throw;
    }
}
// Loads the on-disk IndexMap; when it is corrupt (unsafe-state flag set), the
// whole index directory is wiped and an empty map is created from scratch.
// Publishes the map's prepare/commit checkpoints.
public void Initialize()
{
    //NOT THREAD SAFE (assumes one thread)
    CreateIfDoesNotExist(_directory);
    try
    {
        _indexMap = IndexMap.FromFile(Path.Combine(_directory, IndexMapFilename), IsHashCollision, _maxTablesPerLevel);
        if (_indexMap.IsCorrupt(_directory))
            throw new CorruptIndexException("IndexMap is in unsafe state.");
    }
    catch (CorruptIndexException exc)
    {
        Log.ErrorException(exc, "ReadIndex was corrupted. Rebuilding from scratch...");
        foreach (var filePath in Directory.EnumerateFiles(_directory))
        {
            // FIX: clear any read-only attribute first — File.Delete throws
            // UnauthorizedAccessException on read-only files, which would abort
            // the rebuild (other Initialize variants already do this).
            File.SetAttributes(filePath, FileAttributes.Normal);
            File.Delete(filePath);
        }
        _indexMap = IndexMap.FromFile(Path.Combine(_directory, IndexMapFilename), IsHashCollision, _maxTablesPerLevel);
    }
    _prepareCheckpoint = _indexMap.PrepareCheckpoint;
    _commitCheckpoint = _indexMap.CommitCheckpoint;
}