private void ValidateCache(PTable.Midpoint[] cache, int count, int depth) { if (count == 0 || depth == 0) { Assert.IsNull(cache); return; } if (count == 1) { Assert.IsNotNull(cache); Assert.AreEqual(2, cache.Length); Assert.AreEqual(0, cache[0].ItemIndex); Assert.AreEqual(0, cache[1].ItemIndex); return; } Assert.IsNotNull(cache); Assert.AreEqual(Math.Min(count, 1<<depth), cache.Length); Assert.AreEqual(0, cache[0].ItemIndex); for (int i = 1; i < cache.Length; ++i) { Assert.IsTrue(cache[i - 1].Key.GreaterEqualsThan(cache[i].Key), "Expected {0} to be >= {1}", cache[i - 1].Key, cache[i].Key); Assert.Less(cache[i-1].ItemIndex, cache[i].ItemIndex); } Assert.AreEqual(count-1, cache[cache.Length-1].ItemIndex); }
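A minimal sketch of the shape those assertions expect, using a hypothetical SampleMidpoint stand-in rather than the real PTable.Midpoint: the first midpoint pins item 0, the last pins item count - 1, keys never increase (PTable entries are sorted by key descending), and item indexes strictly increase.

// Hypothetical stand-in for PTable.Midpoint, used only to illustrate the expected cache shape.
readonly struct SampleMidpoint {
    public readonly ulong Key;       // hash key of the entry at the cached position
    public readonly long ItemIndex;  // position of that entry within the PTable

    public SampleMidpoint(ulong key, long itemIndex) {
        Key = key;
        ItemIndex = itemIndex;
    }

    // For count = 7 and depth = 2 the cache would hold Math.Min(7, 1 << 2) = 4 midpoints:
    // keys are non-increasing, item indexes are strictly increasing, and the two ends
    // cover item 0 and item count - 1 = 6.
    public static SampleMidpoint[] Example() => new[] {
        new SampleMidpoint(0x0105, 0),
        new SampleMidpoint(0x0103, 2),
        new SampleMidpoint(0x0102, 4),
        new SampleMidpoint(0x0101, 6)
    };
}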
public void Setup() { var mtable = new HashListMemTable(maxSize: 10); mtable.Add(0x0101, 0x0001, 0x0001); mtable.Add(0x0105, 0x0001, 0x0002); _table = PTable.FromMemtable(mtable, Filename); _table.MarkForDestruction(); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); var table = new HashListMemTable(maxSize: 2000); table.Add(0x0101, 0x0001, 0x0001); _ptable = PTable.FromMemtable(table, Filename, cacheDepth: 0); }
public void when_ptable_file_is_deleted() { _ptable.MarkForDestruction(); _ptable = null; File.Delete(_ptableFileName); Assert.Throws<CorruptIndexException>(() => IndexMap.FromFile(_indexMapFileName, 2)); }
public void Setup() { _filename = Path.GetRandomFileName(); var table = new HashListMemTable(); AddItemsForScenario(table); PTable = PTable.FromMemtable(table, _filename, cacheDepth: _midpointCacheDepth); }
public void Setup() { _filename = Path.GetRandomFileName(); var mtable = new HashListMemTable(); mtable.Add(0x0101, 0x0001, 0x0001); mtable.Add(0x0105, 0x0001, 0x0002); _table = PTable.FromMemtable(mtable, _filename); _table.MarkForDestruction(); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); _ptableCount = (long)(uint.MaxValue + 10000000L); _size = _ptableCount * (long)indexEntrySize + PTableHeader.Size + PTable.MD5Size; Console.WriteLine("Creating PTable at {0}. Size of PTable: {1}", Filename, _size); CreatePTableFile(Filename, _size, indexEntrySize); _ptable = PTable.FromFile(Filename, _ptableVersion, 22); }
public override void SetUp() { base.SetUp(); var table = new HashListMemTable(maxSize: 50); AddItemsForScenario(table); PTable = PTable.FromMemtable(table, Filename, cacheDepth: _midpointCacheDepth); }
public override void SetUp() { base.SetUp(); _indexMapFileName = GetFilePathFor("index.map"); _ptableFileName = GetFilePathFor("ptable"); _emptyIndexMap = IndexMap.FromFile(_indexMapFileName, _ptableVersion); var memTable = new HashListMemTable(_ptableVersion, maxSize: 10); memTable.Add(0, 1, 2); _ptable = PTable.FromMemtable(memTable, _ptableFileName, _ptableVersion); }
public override void SetUp() { base.SetUp(); _indexMapFileName = Path.Combine(PathName, "index.map"); _ptableFileName = Path.Combine(PathName, "ptable"); _emptyIndexMap = IndexMap.FromFile(_indexMapFileName, x => false); var memTable = new HashListMemTable(); memTable.Add(0, 1, 2); _ptable = PTable.FromMemtable(memTable, _ptableFileName); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); var table = new HashListMemTable(); table.Add(0x0101, 0x0001, 0x0001); table.Add(0x0105, 0x0001, 0x0002); table.Add(0x0102, 0x0001, 0x0003); table.Add(0x0102, 0x0002, 0x0004); table.Add(0x0103, 0x0001, 0xFFF1); table.Add(0x0103, 0x0003, 0xFFF3); table.Add(0x0103, 0x0005, 0xFFF5); _ptable = PTable.FromMemtable(table, Filename, cacheDepth: 0); }
public override void SetUp() { base.SetUp(); _indexMapFileName = GetFilePathFor("index.map"); _ptableFileName = GetFilePathFor("ptable"); var indexMap = IndexMap.FromFile(_indexMapFileName, maxTablesPerLevel: 2); var memtable = new HashListMemTable(_ptableVersion, maxSize: 10); memtable.Add(0,0,0); memtable.Add(1,1,100); _ptable = PTable.FromMemtable(memtable, _ptableFileName); indexMap = indexMap.AddPTable(_ptable, 0, 0, (streamId, hash) => hash, _ => true, _ => new Tuple<string, bool>("", true), new GuidFilenameProvider(PathName), _ptableVersion).MergedMap; indexMap.SaveToFile(_indexMapFileName); }
public override void SetUp() { base.SetUp(); _indexMapFileName = Path.Combine(PathName, "index.map"); _ptableFileName = Path.Combine(PathName, "ptable"); var indexMap = IndexMap.FromFile(_indexMapFileName, x => false, maxTablesPerLevel: 2); var memtable = new HashListMemTable(maxSize: 2000); memtable.Add(0,0,0); memtable.Add(1,1,100); _ptable = PTable.FromMemtable(memtable, _ptableFileName); indexMap = indexMap.AddFile(_ptable, 0, 0, new GuidFilenameProvider(PathName)).MergedMap; indexMap.SaveToFile(_indexMapFileName); }
public override void SetUp() { base.SetUp(); _indexMapFileName = GetFilePathFor("index.map"); _ptableFileName = GetFilePathFor("ptable"); var indexMap = IndexMap.FromFile(_indexMapFileName, maxTablesPerLevel: 2); var memtable = new HashListMemTable(maxSize: 10); memtable.Add(0,0,0); memtable.Add(1,1,100); _ptable = PTable.FromMemtable(memtable, _ptableFileName); indexMap = indexMap.AddPTable(_ptable, 0, 0, _ => true, new GuidFilenameProvider(PathName)).MergedMap; indexMap.SaveToFile(_indexMapFileName); }
private static void InsertTableToTables(List <List <PTable> > tables, int level, int position, PTable table) { while (level >= tables.Count) { tables.Add(new List <PTable>()); } var innerTables = tables[level] ?? (tables[level] = new List <PTable>()); while (position >= innerTables.Count) { innerTables.Add(null); } innerTables[position] = table; }
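A rough sketch of the structure InsertTableToTables maintains, with strings standing in for PTable instances (hypothetical, not EventStore code): the outer list is indexed by merge level, and missing positions are padded with nulls so the parallel loader can later verify that every level/position slot was actually filled.

using System.Collections.Generic;

var tables = new List<List<string>>();
Insert(tables, level: 1, position: 2, table: "ptable-c");
// tables[0] is now an empty level; tables[1] is [null, null, "ptable-c"] - positions 0 and 1
// stay null until their own entries are loaded, possibly from another thread.

// Same padding logic as InsertTableToTables, with a string standing in for PTable.
static void Insert(List<List<string>> tables, int level, int position, string table) {
    while (level >= tables.Count)
        tables.Add(new List<string>());
    var inner = tables[level] ?? (tables[level] = new List<string>());
    while (position >= inner.Count)
        inner.Add(null);
    inner[position] = table;
}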
public void Setup() { for (int i = 0; i < 2; i++) { _files.Add(Path.GetRandomFileName()); var table = new HashListMemTable(); for (int j = 0; j < 10; j++) { table.Add((UInt32)j + 1, i + 1, i * j); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(Path.GetRandomFileName()); _newtable = PTable.MergeTo(_tables, _files[2], x => false); }
public MergeResult AddPTableForManualMerge(long prepareCheckpoint, long commitCheckpoint, Func <string, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <string, bool> > recordExistsAt, IIndexFilenameProvider filenameProvider, byte version, int indexCacheDepth = 16, bool skipIndexVerify = false) { Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint"); Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint"); var tables = CopyFrom(_map); if (tables.Count <= _maxTableLevelsForAutomaticMerge) { return(new MergeResult(this, new List <PTable>())); } var toDelete = new List <PTable>(); var tablesToMerge = tables.Skip(_maxTableLevelsForAutomaticMerge).SelectMany(a => a).ToList(); if (tablesToMerge.Count == 1) { return(new MergeResult(this, new List <PTable>())); } var filename = filenameProvider.GetFilenameNewTable(); PTable mergedTable = PTable.MergeTo(tablesToMerge, filename, upgradeHash, existsAt, recordExistsAt, version, ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount, indexCacheDepth, skipIndexVerify); for (int i = tables.Count - 1; i > _maxTableLevelsForAutomaticMerge; i--) { tables.RemoveAt(i); } tables[_maxTableLevelsForAutomaticMerge].Clear(); AddTableToTables(tables, _maxTableLevelsForAutomaticMerge + 1, mergedTable); toDelete.AddRange(tablesToMerge); var indexMap = new IndexMap(Version, tables, prepareCheckpoint, commitCheckpoint, _maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount); return(new MergeResult(indexMap, toDelete)); }
public void Setup() { for (int i = 0; i < 4; i++) { _files.Add(Path.GetRandomFileName()); var table = new HashListMemTable(); for (int j = 0; j < 10; j++) { table.Add(0, 0, 1000000 - i*1000 - j); table.Add(0, 0, i*1000 + j); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(Path.GetRandomFileName()); _newtable = PTable.MergeTo(_tables, _files[4], x => false); }
public void setup() { _filename = Path.GetRandomFileName(); _copiedfilename = Path.GetRandomFileName(); var mtable = new HashListMemTable(); mtable.Add(0x0101, 0x0001, 0x0001); mtable.Add(0x0105, 0x0001, 0x0002); _table = PTable.FromMemtable(mtable, _filename); File.Copy(_filename, _copiedfilename); _table.MarkForDestruction(); using(var f = new FileStream(_copiedfilename, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite)) { f.Seek(130, SeekOrigin.Begin); f.WriteByte(0x22); } _table = PTable.FromFile(_copiedfilename); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); for (int i = 0; i < 4; i++) { _files.Add(GetTempFilePath()); var table = new HashListMemTable(maxSize: 30); for (int j = 0; j < 10; j++) { table.Add((uint)i, j, i*10 + j); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(GetTempFilePath()); _newtable = PTable.MergeTo(_tables, _files[4], x => x.Position % 2 == 0); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); for (int i = 0; i < 4; i++) { _files.Add(GetTempFilePath()); var table = new HashListMemTable(_ptableVersion, maxSize: 30); for (int j = 0; j < 10; j++) { table.Add((ulong)(0x010100000000 << i), j, i*10 + j); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(GetTempFilePath()); _newtable = PTable.MergeTo(_tables, _files[4], (streamId, hash) => hash, x => x.Position % 2 == 0, x => new Tuple<string, bool>("", x.Position % 2 == 0), _ptableVersion); }
public MergeResult TryMergeOneLevel <TStreamId>( Func <TStreamId, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <TStreamId, bool> > recordExistsAt, IIndexFilenameProvider filenameProvider, byte version, int indexCacheDepth = 16, bool skipIndexVerify = false, bool useBloomFilter = true, int lruCacheSize = 1_000_000) { var tables = CopyFrom(_map); var hasMergedAny = false; var canMergeAny = false; var toDelete = new List <PTable>(); var maxTableLevelsToMerge = Math.Min(tables.Count, _maxTableLevelsForAutomaticMerge); for (int level = 0; level < maxTableLevelsToMerge; level++) { if (tables[level].Count >= _maxTablesPerLevel) { if (hasMergedAny) { canMergeAny = true; break; } var filename = filenameProvider.GetFilenameNewTable(); PTable mergedTable = PTable.MergeTo(tables[level], filename, upgradeHash, existsAt, recordExistsAt, version, ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount, indexCacheDepth, skipIndexVerify, useBloomFilter, lruCacheSize); hasMergedAny = true; AddTableToTables(tables, level + 1, mergedTable); toDelete.AddRange(tables[level]); tables[level].Clear(); } } var indexMap = new IndexMap(Version, tables, PrepareCheckpoint, CommitCheckpoint, _maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount); return(new MergeResult(indexMap, toDelete, hasMergedAny, canMergeAny)); }
public void Setup() { _filename = GetTempFilePath(); _copiedfilename = GetTempFilePath(); var mtable = new HashListMemTable(_ptableVersion, maxSize: 10); mtable.Add(0x010100000000, 0x0001, 0x0001); mtable.Add(0x010500000000, 0x0001, 0x0002); _table = PTable.FromMemtable(mtable, _filename); _table.Dispose(); File.Copy(_filename, _copiedfilename); using (var f = new FileStream(_copiedfilename, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite)) { f.Seek(22, SeekOrigin.Begin); f.WriteByte(0x22); } }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); for (int i = 0; i < 2; i++) { _files.Add(GetTempFilePath()); var table = new HashListMemTable(maxSize: 20); for (int j = 0; j < 10; j++) { table.Add((UInt32)j + 1, i + 1, i * j); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(GetTempFilePath()); _newtable = PTable.MergeTo(_tables, _files[2], x => true); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); for (int i = 0; i < 2; i++) { _files.Add(GetTempFilePath()); var table = new HashListMemTable(_ptableVersion, maxSize: 20); for (int j = 0; j < 10; j++) { table.Add((ulong)(0x010100000000 << (j + 1)), i + 1, i*j); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(GetTempFilePath()); _newtable = PTable.MergeTo(_tables, _files[2], (streamId, hash) => hash, x => true, x => new System.Tuple<string, bool>("", true), _ptableVersion); }
public MergeResult TryManualMerge <TStreamId>( Func <TStreamId, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <TStreamId, bool> > recordExistsAt, IIndexFilenameProvider filenameProvider, byte version, int indexCacheDepth = 16, bool skipIndexVerify = false, bool useBloomFilter = true, int lruCacheSize = 1_000_000) { var tables = CopyFrom(_map); if (tables.Count <= _maxTableLevelsForAutomaticMerge) { return(new MergeResult(this, new List <PTable>(), false, false)); } var toDelete = new List <PTable>(); var tablesToMerge = tables.Skip(_maxTableLevelsForAutomaticMerge).SelectMany(a => a).ToList(); if (tablesToMerge.Count == 1) { return(new MergeResult(this, new List <PTable>(), false, false)); } var filename = filenameProvider.GetFilenameNewTable(); PTable mergedTable = PTable.MergeTo(tablesToMerge, filename, upgradeHash, existsAt, recordExistsAt, version, ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount, indexCacheDepth, skipIndexVerify, useBloomFilter, lruCacheSize); for (int i = tables.Count - 1; i > _maxTableLevelsForAutomaticMerge; i--) { tables.RemoveAt(i); } tables[_maxTableLevelsForAutomaticMerge].Clear(); AddTableToTables(tables, _maxTableLevelsForAutomaticMerge + 1, mergedTable); toDelete.AddRange(tablesToMerge); var indexMap = new IndexMap(Version, tables, PrepareCheckpoint, CommitCheckpoint, _maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount); return(new MergeResult(indexMap, toDelete, true, false)); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); for (int i = 0; i < 4; i++) { _files.Add(GetTempFilePath()); var table = new HashListMemTable(maxSize: 50); for (int j = 0; j < 10; j++) { table.Add(0, 0, 1000000 - i*1000 - j); table.Add(0, 0, i*1000 + j); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(GetTempFilePath()); _newtable = PTable.MergeTo(_tables, _files[4], x => false); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); _files.Add(GetTempFilePath()); var table = new HashListMemTable(PTableVersions.Index32Bit, maxSize: 20); table.Add(0x010100000000, 0, 0x0101); table.Add(0x010200000000, 0, 0x0102); table.Add(0x010300000000, 0, 0x0103); table.Add(0x010400000000, 0, 0x0104); _tables.Add(PTable.FromMemtable(table, GetTempFilePath())); table = new HashListMemTable(PTableVersions.Index32Bit, maxSize: 20); table.Add(0x010500000000, 0, 0x0105); table.Add(0x010600000000, 0, 0x0106); table.Add(0x010700000000, 0, 0x0107); table.Add(0x010800000000, 0, 0x0108); _tables.Add(PTable.FromMemtable(table, GetTempFilePath())); _newtable = PTable.MergeTo(_tables, GetTempFilePath(), (streamId, hash) => hash + 1, x => true, x => new Tuple<string, bool>(x.Stream.ToString(), true), PTableVersions.Index32Bit); }
private void ReclaimMemoryIfNeeded(List <TableItem> awaitingMemTables) { var toPutOnDisk = awaitingMemTables.OfType <IMemTable>().Count() - MaxMemoryTables; for (var i = awaitingMemTables.Count - 1; i >= 1 && toPutOnDisk > 0; i--) { var memtable = awaitingMemTables[i].Table as IMemTable; if (memtable == null || !memtable.MarkForConversion()) { continue; } Log.Debug("Putting awaiting file as PTable instead of MemTable [{id}].", memtable.Id); var ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable(), ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount, _indexCacheDepth, _skipIndexVerify); var swapped = false; lock (_awaitingTablesLock) { for (var j = _awaitingMemTables.Count - 1; j >= 1; j--) { var tableItem = _awaitingMemTables[j]; if (!(tableItem.Table is IMemTable) || tableItem.Table.Id != ptable.Id) { continue; } swapped = true; _awaitingMemTables[j] = new TableItem(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, tableItem.Level); break; } } if (!swapped) { ptable.MarkForDestruction(); } toPutOnDisk--; } }
public override void SetUp() { base.SetUp(); _filename = GetTempFilePath(); _copiedfilename = GetTempFilePath(); var mtable = new HashListMemTable(maxSize: 10); mtable.Add(0x0101, 0x0001, 0x0001); mtable.Add(0x0105, 0x0001, 0x0002); _table = PTable.FromMemtable(mtable, _filename); _table.Dispose(); File.Copy(_filename, _copiedfilename); using (var f = new FileStream(_copiedfilename, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite)) { f.Seek(130, SeekOrigin.Begin); f.WriteByte(0x22); } _table = PTable.FromFile(_copiedfilename); }
public ScavengeResult Scavenge <TStreamId>(Guid toScavenge, CancellationToken ct, Func <TStreamId, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <TStreamId, bool> > recordExistsAt, IIndexFilenameProvider filenameProvider, byte version, int indexCacheDepth = 16, bool skipIndexVerify = false, bool useBloomFilter = true, int lruCacheSize = 1_000_000) { var scavengedMap = CopyFrom(_map); for (int level = 0; level < scavengedMap.Count; level++) { for (int i = 0; i < scavengedMap[level].Count; i++) { if (scavengedMap[level][i].Id == toScavenge) { long spaceSaved; var filename = filenameProvider.GetFilenameNewTable(); var oldTable = scavengedMap[level][i]; PTable scavenged = PTable.Scavenged(oldTable, filename, upgradeHash, existsAt, recordExistsAt, version, out spaceSaved, ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount, indexCacheDepth, skipIndexVerify, useBloomFilter, lruCacheSize, ct); if (scavenged == null) { return(ScavengeResult.Failed(oldTable, level, i)); } scavengedMap[level][i] = scavenged; var indexMap = new IndexMap(Version, scavengedMap, PrepareCheckpoint, CommitCheckpoint, _maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount); return(ScavengeResult.Success(indexMap, oldTable, scavenged, spaceSaved, level, i)); } } } throw new ArgumentException("Unable to find table in map.", nameof(toScavenge)); }
public MergeResult AddPTableForAutomaticMerge(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint, Func <string, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <string, bool> > recordExistsAt, IIndexFilenameProvider filenameProvider, byte version, int indexCacheDepth = 16, bool skipIndexVerify = false) { Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint"); Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint"); var tables = CopyFrom(_map); AddTableToTables(tables, 0, tableToAdd); var toDelete = new List <PTable>(); var maxTableLevelsToMerge = Math.Min(tables.Count, _maxTableLevelsForAutomaticMerge); for (int level = 0; level < maxTableLevelsToMerge; level++) { if (tables[level].Count >= _maxTablesPerLevel) { var filename = filenameProvider.GetFilenameNewTable(); PTable mergedTable = PTable.MergeTo(tables[level], filename, upgradeHash, existsAt, recordExistsAt, version, ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount, indexCacheDepth, skipIndexVerify); AddTableToTables(tables, level + 1, mergedTable); toDelete.AddRange(tables[level]); tables[level].Clear(); } } var indexMap = new IndexMap(Version, tables, prepareCheckpoint, commitCheckpoint, _maxTablesPerLevel, _maxTableLevelsForAutomaticMerge, _pTableMaxReaderCount); return(new MergeResult(indexMap, toDelete)); }
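A hypothetical walk-through of the cascading behaviour in AddPTableForAutomaticMerge, tracking only the table count per level (assuming _maxTablesPerLevel = 4 and enough automatic-merge levels that the loop is not capped): the table that fills level 0 triggers a merge into level 1, which may itself fill up and cascade into level 2.

using System.Collections.Generic;

const int maxTablesPerLevel = 4;
var counts = new List<int> { 3, 3 };    // 3 tables at level 0, 3 tables at level 1

counts[0]++;                            // AddTableToTables(tables, 0, tableToAdd)
for (int level = 0; level < counts.Count; level++) {
    if (counts[level] >= maxTablesPerLevel) {
        if (level + 1 == counts.Count)
            counts.Add(0);              // the outer list grows on demand
        counts[level + 1]++;            // the merged PTable lands one level up
        counts[level] = 0;              // the merged sources go onto the toDelete list
    }
}
// counts is now [0, 0, 1]: level 0 filled up and merged into level 1, which in turn
// filled up and merged into a single level-2 table.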
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint, Func <string, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <string, bool> > recordExistsAt, IIndexFilenameProvider filenameProvider, byte version, int level, int indexCacheDepth = 16, bool skipIndexVerify = false) { if (level < _maxTableLevelsForAutomaticMerge) { return(AddPTableForAutomaticMerge(tableToAdd, prepareCheckpoint, commitCheckpoint, upgradeHash, existsAt, recordExistsAt, filenameProvider, version, indexCacheDepth, skipIndexVerify)); } //For manual merge, we are never adding any extra entries, just merging existing files, so the index p/c checkpoint won't change return(AddPTableForManualMerge(PrepareCheckpoint, CommitCheckpoint, upgradeHash, existsAt, recordExistsAt, filenameProvider, version, indexCacheDepth, skipIndexVerify)); }
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint, Func <string, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <string, bool> > recordExistsAt, IIndexFilenameProvider filenameProvider, byte version, int level, int indexCacheDepth = 16, bool skipIndexVerify = false) { bool isManual; //if (_maxTableLevelsForAutomaticMerge == 0) //{ // //when we are not auto merging at all, a manual merge will only be triggered if // //there are entries in the index map. the table it passes is always the first table // //at the maximum automerge level, so we only need to hit the first table and see if it // //matches, otherwise it must be an add of a memtable. // //although we are not automatically merging, the automerge process is also responsible // //for writing out the memtable that just got persisted so we want to call auto merge still // isManual = _map.Count != 0 && _map[0].FirstOrDefault() == tableToAdd; //} //else //{ isManual = level > _maxTableLevelsForAutomaticMerge; //} if (isManual) { //For manual merge, we are never adding any extra entries, just merging existing files, so the index p/c checkpoint won't change return(AddPTableForManualMerge(PrepareCheckpoint, CommitCheckpoint, upgradeHash, existsAt, recordExistsAt, filenameProvider, version, indexCacheDepth, skipIndexVerify)); } return(AddPTableForAutomaticMerge(tableToAdd, prepareCheckpoint, commitCheckpoint, upgradeHash, existsAt, recordExistsAt, filenameProvider, version, indexCacheDepth, skipIndexVerify)); }
public ScavengeResult Scavenge(Guid toScavenge, CancellationToken ct, Func <string, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <string, bool> > recordExistsAt, IIndexFilenameProvider filenameProvider, byte version, int indexCacheDepth = 16, bool skipIndexVerify = false) { var scavengedMap = CopyFrom(_map); for (int level = 0; level < scavengedMap.Count; level++) { for (int i = 0; i < scavengedMap[level].Count; i++) { if (scavengedMap[level][i].Id == toScavenge) { long spaceSaved; var filename = filenameProvider.GetFilenameNewTable(); var oldTable = scavengedMap[level][i]; PTable scavenged = PTable.Scavenged(oldTable, filename, upgradeHash, existsAt, recordExistsAt, version, out spaceSaved, indexCacheDepth, skipIndexVerify, ct); if (scavenged == null) { return(ScavengeResult.Failed(oldTable, level, i)); } scavengedMap[level][i] = scavenged; var indexMap = new IndexMap(Version, scavengedMap, PrepareCheckpoint, CommitCheckpoint, _maxTablesPerLevel); return(ScavengeResult.Success(indexMap, oldTable, scavenged, spaceSaved, level, i)); } } } throw new ArgumentException("Unable to find table in map.", nameof(toScavenge)); }
private void ReclaimMemoryIfNeeded(List <TableItem> awaitingMemTables) { var toPutOnDisk = awaitingMemTables.OfType <IMemTable>().Count() - MaxMemoryTables; for (var i = awaitingMemTables.Count - 1; i >= 1 && toPutOnDisk > 0; i--) { var memtable = awaitingMemTables[i].Table as IMemTable; if (memtable == null || !memtable.MarkForConversion()) { continue; } Log.Trace("Putting awaiting file as PTable instead of MemTable [{0}].", memtable.Id); var ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable()); bool swapped = false; lock (_awaitingTablesLock) { for (int j = _awaitingMemTables.Count - 1; j >= 1; j--) { var tableItem = _awaitingMemTables[j]; if (tableItem.Table is IMemTable && tableItem.Table.Id == ptable.Id) { swapped = true; _awaitingMemTables[j] = new TableItem(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint); break; } } } if (!swapped) { ptable.MarkForDestruction(); } toPutOnDisk--; } }
public MergeResult AddPTable(PTable tableToAdd, long prepareCheckpoint, long commitCheckpoint, Func <string, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <string, bool> > recordExistsAt, IIndexFilenameProvider filenameProvider, byte version, int indexCacheDepth = 16, bool skipIndexVerify = false) { Ensure.Nonnegative(prepareCheckpoint, "prepareCheckpoint"); Ensure.Nonnegative(commitCheckpoint, "commitCheckpoint"); var tables = CopyFrom(_map); CreateIfNeeded(0, tables); tables[0].Add(tableToAdd); var toDelete = new List <PTable>(); for (int level = 0; level < tables.Count; level++) { if (tables[level].Count >= _maxTablesPerLevel) { var filename = filenameProvider.GetFilenameNewTable(); PTable table = PTable.MergeTo(tables[level], filename, upgradeHash, existsAt, recordExistsAt, version, indexCacheDepth, skipIndexVerify); CreateIfNeeded(level + 1, tables); tables[level + 1].Add(table); toDelete.AddRange(tables[level]); tables[level].Clear(); } } var indexMap = new IndexMap(Version, tables, prepareCheckpoint, commitCheckpoint, _maxTablesPerLevel); return(new MergeResult(indexMap, toDelete)); }
public void Setup() { for (int i = 0; i < 4; i++) { _files.Add(Path.GetRandomFileName()); var table = new HashListMemTable(); for (int j = 0; j < 10; j++) { table.Add((UInt32)j%8, i, i * j * 100 + i + j); // 0 collides with 8, 1 collides with 9 } if (i == 3) { table.Add(0, int.MaxValue, 45); table.Add(1, int.MaxValue, 45); table.Add(2, int.MaxValue, 45); table.Add(3, int.MaxValue, 45); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(Path.GetRandomFileName()); _newtable = PTable.MergeTo(_tables, _files[4], x => x.Stream <= 1); }
public void Setup() { for (int i = 0; i < 2; i++) { _files.Add(Path.GetRandomFileName()); var table = new HashListMemTable(maxSize: 2000); for (int j = 0; j < 10; j++) { table.Add((UInt32)j + 1, i + 1, i * j); } if (i == 1) { table.Add(1, int.MaxValue, 45); table.Add(2, int.MaxValue, 45); table.Add(3, int.MaxValue, 45); table.Add(4, int.MaxValue, 45); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(Path.GetRandomFileName()); _newtable = PTable.MergeTo(_tables, _files[2], x => false); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); for (int i = 0; i < 2; i++) { _files.Add(GetTempFilePath()); var table = new HashListMemTable(maxSize: 30); for (int j = 0; j < 10; j++) { table.Add((UInt32)j % 8, i, i * j * 100 + i + j); // 0 collides with 8, 1 collides with 9 } if (i == 1) { table.Add(0, int.MaxValue, 45); table.Add(1, int.MaxValue, 45); table.Add(2, int.MaxValue, 45); table.Add(3, int.MaxValue, 45); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(GetTempFilePath()); _newtable = PTable.MergeTo(_tables, _files[2], x => x.Stream <= 1); }
public override void TestFixtureSetUp() { base.TestFixtureSetUp(); for (int i = 0; i < 4; i++) { _files.Add(GetTempFilePath()); var table = new HashListMemTable(maxSize: 20); for (int j = 0; j < 10; j++) { table.Add((UInt32)j + 1, i + 1, i * j); } if (i == 3) { table.Add(1, int.MaxValue, 45); table.Add(2, int.MaxValue, 45); table.Add(3, int.MaxValue, 45); table.Add(4, int.MaxValue, 45); } _tables.Add(PTable.FromMemtable(table, _files[i])); } _files.Add(GetTempFilePath()); _newtable = PTable.MergeTo(_tables, _files[4], x => false); }
public static IndexMap FromFile(string filename, Func <IndexEntry, bool> isHashCollision, int maxTablesPerLevel = 4, bool loadPTables = true) { var tables = new List <List <PTable> >(); int version; long prepareCheckpoint = -1; long commitCheckpoint = -1; if (!File.Exists(filename)) { return(new IndexMap(IndexMapVersion, tables, prepareCheckpoint, commitCheckpoint, isHashCollision, maxTablesPerLevel)); } using (var f = File.OpenRead(filename)) using (var reader = new StreamReader(f)) { // calculate real MD5 hash except first 32 bytes which are string representation of stored hash f.Position = 32; var realHash = MD5Hash.GetHashFor(f); f.Position = 0; // read stored MD5 hash and convert it from string to byte array string text; if ((text = reader.ReadLine()) == null) { throw new CorruptIndexException("IndexMap file is empty."); } if (text.Length != 32 || !text.All(x => char.IsDigit(x) || (x >= 'A' && x <= 'F'))) { throw new CorruptIndexException("Corrupted MD5 hash."); } // check expected and real hashes are the same var expectedHash = new byte[16]; for (int i = 0; i < 16; ++i) { expectedHash[i] = Convert.ToByte(text.Substring(i * 2, 2), 16); } if (expectedHash.Length != realHash.Length) { throw new InvalidOperationException("Invalid length of expected and real hash."); } for (int i = 0; i < realHash.Length; ++i) { if (expectedHash[i] != realHash[i]) { throw new CorruptIndexException("Expected and real hash are different."); } } // at this point we can assume the format is ok, so actually no need to check errors. if ((text = reader.ReadLine()) == null) { throw new CorruptIndexException("Corrupted version."); } version = int.Parse(text); // read and check prepare/commit checkpoint if ((text = reader.ReadLine()) == null) { throw new CorruptIndexException("Corrupted commit checkpoint."); } try { var checkpoints = text.Split('/'); if (!long.TryParse(checkpoints[0], out prepareCheckpoint) || prepareCheckpoint < -1) { throw new CorruptIndexException("Invalid prepare checkpoint."); } if (!long.TryParse(checkpoints[1], out commitCheckpoint) || commitCheckpoint < -1) { throw new CorruptIndexException("Invalid commit checkpoint."); } } catch (Exception exc) { throw new CorruptIndexException("Corrupted prepare/commit checkpoints pair.", exc); } // all next lines are PTables sorted by levels while ((text = reader.ReadLine()) != null) { if (prepareCheckpoint < 0 || commitCheckpoint < 0) { throw new CorruptIndexException("Negative prepare/commit checkpoint in non-empty IndexMap."); } if (!loadPTables) { break; } PTable ptable = null; var pieces = text.Split(','); try { var level = int.Parse(pieces[0]); var position = int.Parse(pieces[1]); var file = pieces[2]; var path = Path.GetDirectoryName(filename); var ptablePath = Path.Combine(path, file); ptable = PTable.FromFile(ptablePath); ptable.VerifyFileHash(); CreateIfNeeded(level, tables); tables[level].Insert(position, ptable); } catch (Exception exc) { // if PTable file path was correct, but data is corrupted, we still need to dispose opened streams if (ptable != null) { ptable.Dispose(); } // also dispose all previously loaded correct PTables for (int i = 0; i < tables.Count; ++i) { for (int j = 0; j < tables[i].Count; ++j) { tables[i][j].Dispose(); } } throw new CorruptIndexException("Error while loading IndexMap.", exc); } } } return(new IndexMap(version, tables, prepareCheckpoint, commitCheckpoint, isHashCollision, maxTablesPerLevel)); }
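Taken together, the parsing above implies a simple line-oriented layout for the indexmap file: the first line is the 32-character uppercase hex MD5 of everything after the file's first 32 bytes, the second line is the indexmap version, the third is prepareCheckpoint/commitCheckpoint, and each remaining line is level,position,ptable-filename. An illustrative, made-up example (the hash and filenames below are placeholders and would not actually verify):

8D969EEF6ECAD3C29A3A629280E686CF
2
12345678/12345600
0,0,0c273520-26d7-4bd1-8e53-1b4c4525bd34
0,1,6b2e4d4f-9d4a-4a8e-9a6e-cb1a4a1c2d3e
1,0,a3c5e5d6-7f8a-4b9c-8d1e-2f3a4b5c6d7e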
public void when_ptable_data_is_corrupted() { _ptable.Dispose(); _ptable = null; using (var fs = File.Open(_ptableFileName, FileMode.Open)) { fs.Position = new Random().Next(PTableHeader.Size, (int)fs.Length); var b = (byte)fs.ReadByte(); b ^= 1; fs.Position -= 1; fs.WriteByte(b); } Assert.Throws<CorruptIndexException>(() => IndexMap.FromFile(_indexMapFileName, 2)); }
public EnumerablePTable(PTable table, IEnumerator <IndexEntry> enumerator) { Table = table; _enumerator = enumerator; }
private void ReadOffQueue() { try { while (true) { var indexmapFile = Path.Combine(_directory, IndexMapFilename); if (_isManualMergePending) { Log.Debug("Performing manual index merge."); _isManualMergePending = false; using (var reader = _tfReaderFactory()) { var manualMergeResult = _indexMap.TryManualMerge( (streamId, currentHash) => UpgradeHash(streamId, currentHash), entry => reader.ExistsAt(entry.Position), entry => ReadEntry(reader, entry.Position), _fileNameProvider, _ptableVersion, _indexCacheDepth, _skipIndexVerify); if (manualMergeResult.HasMergedAny) { _indexMap = manualMergeResult.MergedMap; _indexMap.SaveToFile(indexmapFile); manualMergeResult.ToDelete.ForEach(x => x.MarkForDestruction()); } Log.Debug("Manual index merge completed: {numMergedPTables} PTable(s) merged.", manualMergeResult.ToDelete.Count); } } TableItem tableItem; //ISearchTable table; lock (_awaitingTablesLock) { Log.Debug("Awaiting tables queue size is: {awaitingMemTables}.", _awaitingMemTables.Count); if (_awaitingMemTables.Count == 1) { return; } tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1]; } PTable ptable; var memtable = tableItem.Table as IMemTable; if (memtable != null) { memtable.MarkForConversion(); ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable(), ESConsts.PTableInitialReaderCount, _pTableMaxReaderCount, _indexCacheDepth, _skipIndexVerify); } else { ptable = (PTable)tableItem.Table; } var addResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint); _indexMap = addResult.NewMap; _indexMap.SaveToFile(indexmapFile); if (addResult.CanMergeAny) { using (var reader = _tfReaderFactory()) { MergeResult mergeResult; do { mergeResult = _indexMap.TryMergeOneLevel( (streamId, currentHash) => UpgradeHash(streamId, currentHash), entry => reader.ExistsAt(entry.Position), entry => ReadEntry(reader, entry.Position), _fileNameProvider, _ptableVersion, _indexCacheDepth, _skipIndexVerify); if (mergeResult.HasMergedAny) { _indexMap = mergeResult.MergedMap; _indexMap.SaveToFile(indexmapFile); mergeResult.ToDelete.ForEach(x => x.MarkForDestruction()); } } while (mergeResult.CanMergeAny); } } lock (_awaitingTablesLock) { var memTables = _awaitingMemTables.ToList(); var corrTable = memTables.First(x => x.Table.Id == ptable.Id); memTables.Remove(corrTable); // parallel thread could already switch table, // so if we have another PTable instance with same ID, // we need to kill that instance as we added ours already if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable) { ((PTable)corrTable.Table).MarkForDestruction(); } Log.Debug("There are now {awaitingMemTables} awaiting tables.", memTables.Count); _awaitingMemTables = memTables; } } } catch (FileBeingDeletedException exc) { Log.Error(exc, "Could not acquire chunk in TableIndex.ReadOffQueue. It is OK if node is shutting down."); } catch (Exception exc) { Log.Error(exc, "Error in TableIndex.ReadOffQueue"); throw; } finally { lock (_awaitingTablesLock) { _backgroundRunning = false; _backgroundRunningEvent.Set(); } } }
private static void AddTableToTables(List <List <PTable> > tables, int level, PTable table) { while (level >= tables.Count) { tables.Add(new List <PTable>()); } var innerTables = tables[level] ?? (tables[level] = new List <PTable>()); innerTables.Add(table); }
private void ReadOffQueue() { try { while (true) { TableItem tableItem; //ISearchTable table; lock (_awaitingTablesLock) { Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count); if (_awaitingMemTables.Count == 1) { _backgroundRunning = false; _backgroundRunningEvent.Set(); return; } tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1]; } PTable ptable; var memtable = tableItem.Table as IMemTable; if (memtable != null) { memtable.MarkForConversion(); ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable()); } else { ptable = (PTable)tableItem.Table; } _indexMap.EnterUnsafeState(_directory); var mergeResult = _indexMap.AddFile(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, _fileNameProvider); _indexMap = mergeResult.MergedMap; _indexMap.SaveToFile(Path.Combine(_directory, IndexMapFilename)); _indexMap.LeaveUnsafeState(_directory); lock (_awaitingTablesLock) { //oh well at least its only a small lock that is very unlikely to ever be hit var memTables = _awaitingMemTables.ToList(); var corrTable = memTables.First(x => x.Table.Id == ptable.Id); memTables.Remove(corrTable); // parallel thread could already switch table, // so if we have another PTable instance with same ID, // we need to kill that instance as we added ours already if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable) { ((PTable)corrTable.Table).MarkForDestruction(); } Log.Trace("There are now {0} awaiting tables.", memTables.Count); _awaitingMemTables = memTables; } mergeResult.ToDelete.ForEach(x => x.MarkForDestruction()); } } catch (Exception exc) { Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue"); throw; } }
private static List <List <PTable> > LoadPTables(StreamReader reader, string indexmapFilename, TFPos checkpoints, int cacheDepth, bool skipIndexVerify, int threads) { var tables = new List <List <PTable> >(); try { try { Parallel.ForEach(GetAllLines(reader).Reverse(), // Reverse so we load the highest levels (biggest files) first - ensures we use concurrency in the most efficient way. new ParallelOptions { MaxDegreeOfParallelism = threads }, indexMapEntry => { if (checkpoints.PreparePosition < 0 || checkpoints.CommitPosition < 0) { throw new CorruptIndexException( string.Format("Negative prepare/commit checkpoint in non-empty IndexMap: {0}.", checkpoints)); } PTable ptable = null; var pieces = indexMapEntry.Split(','); try { var level = int.Parse(pieces[0]); var position = int.Parse(pieces[1]); var file = pieces[2]; var path = Path.GetDirectoryName(indexmapFilename); var ptablePath = Path.Combine(path, file); ptable = PTable.FromFile(ptablePath, cacheDepth, skipIndexVerify); lock (tables) { InsertTableToTables(tables, level, position, ptable); } } catch (Exception) { // if PTable file path was correct, but data is corrupted, we still need to dispose opened streams if (ptable != null) { ptable.Dispose(); } throw; } }); // Verify map is correct for (int i = 0; i < tables.Count; ++i) { for (int j = 0; j < tables[i].Count; ++j) { if (tables[i][j] == null) { throw new CorruptIndexException($"indexmap is missing contiguous level,position {i},{j}"); } } } } catch (AggregateException aggEx) { // We only care that *something* has gone wrong, throw the first exception throw aggEx.InnerException; } } catch (Exception exc) { // also dispose all previously loaded correct PTables for (int i = 0; i < tables.Count; ++i) { for (int j = 0; j < tables[i].Count; ++j) { if (tables[i][j] != null) { tables[i][j].Dispose(); } } } throw new CorruptIndexException("Error while loading IndexMap.", exc); } return(tables); }
private void ReadOffQueue() { try { while (true) { TableItem tableItem; //ISearchTable table; lock (_awaitingTablesLock) { Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count); if (_awaitingMemTables.Count == 1) { _backgroundRunning = false; _backgroundRunningEvent.Set(); return; } tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1]; } PTable ptable; var memtable = tableItem.Table as IMemTable; if (memtable != null) { memtable.MarkForConversion(); ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable(), _indexCacheDepth); } else { ptable = (PTable)tableItem.Table; } var indexmapFile = Path.Combine(_directory, IndexMapFilename); MergeResult mergeResult; using (var reader = _tfReaderFactory()) { mergeResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, (streamId, currentHash) => UpgradeHash(streamId, currentHash), entry => reader.ExistsAt(entry.Position), entry => ReadEntry(reader, entry.Position), _fileNameProvider, _ptableVersion, _indexCacheDepth); } _indexMap = mergeResult.MergedMap; _indexMap.SaveToFile(indexmapFile); lock (_awaitingTablesLock) { var memTables = _awaitingMemTables.ToList(); var corrTable = memTables.First(x => x.Table.Id == ptable.Id); memTables.Remove(corrTable); // parallel thread could already switch table, // so if we have another PTable instance with same ID, // we need to kill that instance as we added ours already if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable) { ((PTable)corrTable.Table).MarkForDestruction(); } Log.Trace("There are now {0} awaiting tables.", memTables.Count); _awaitingMemTables = memTables; } mergeResult.ToDelete.ForEach(x => x.MarkForDestruction()); } } catch (FileBeingDeletedException exc) { Log.ErrorException(exc, "Could not acquire chunk in TableIndex.ReadOffQueue. It is OK if node is shutting down."); } catch (Exception exc) { Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue"); throw; } }
public static PTable Scavenged(PTable table, string outputFile, Func <string, ulong, ulong> upgradeHash, Func <IndexEntry, bool> existsAt, Func <IndexEntry, Tuple <string, bool> > readRecord, byte version, out long spaceSaved, int cacheDepth = 16, bool skipIndexVerify = false, CancellationToken ct = default(CancellationToken)) { Ensure.NotNull(table, "table"); Ensure.NotNullOrEmpty(outputFile, "outputFile"); Ensure.Nonnegative(cacheDepth, "cacheDepth"); var indexEntrySize = GetIndexEntrySize(version); var numIndexEntries = table.Count; var fileSizeUpToIndexEntries = GetFileSizeUpToIndexEntries(numIndexEntries, version); Log.Trace("PTables scavenge started with {numIndexEntries} entries.", numIndexEntries); var watch = Stopwatch.StartNew(); long keptCount = 0L; long droppedCount; try { using (var f = new FileStream(outputFile, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.None, DefaultSequentialBufferSize, FileOptions.SequentialScan)) { f.SetLength(fileSizeUpToIndexEntries); f.Seek(0, SeekOrigin.Begin); using (var md5 = MD5.Create()) using (var cs = new CryptoStream(f, md5, CryptoStreamMode.Write)) using (var bs = new BufferedStream(cs, DefaultSequentialBufferSize)) { // WRITE HEADER var headerBytes = new PTableHeader(version).AsByteArray(); cs.Write(headerBytes, 0, headerBytes.Length); // WRITE SCAVENGED INDEX ENTRIES var buffer = new byte[indexEntrySize]; using (var enumerator = new EnumerableTable(version, table, upgradeHash, existsAt, readRecord)) { while (enumerator.MoveNext()) { ct.ThrowIfCancellationRequested(); if (existsAt(enumerator.Current)) { AppendRecordTo(bs, buffer, version, enumerator.Current, indexEntrySize); keptCount++; } } } // We calculate this as the EnumerableTable can silently drop entries too. droppedCount = numIndexEntries - keptCount; var forceKeep = version > table.Version; if (droppedCount == 0 && !forceKeep) { Log.Trace( "PTable scavenge finished in {elapsed}. No entries removed so not keeping scavenged table.", watch.Elapsed); try { bs.Close(); File.Delete(outputFile); } catch (Exception ex) { Log.ErrorException(ex, "Unable to delete unwanted scavenged PTable: {outputFile}", outputFile); } spaceSaved = 0; return(null); } if (droppedCount == 0 && forceKeep) { Log.Trace("Keeping scavenged index even though it isn't smaller; version upgraded."); } //CALCULATE AND WRITE MIDPOINTS if (version >= PTableVersions.IndexV4) { var requiredMidpointCount = GetRequiredMidpointCount(keptCount, version, cacheDepth); var midpoints = ComputeMidpoints(bs, f, version, indexEntrySize, keptCount, requiredMidpointCount, new List <Midpoint>(), ct); WriteMidpointsTo(bs, f, version, indexEntrySize, buffer, keptCount, keptCount, requiredMidpointCount, midpoints); } bs.Flush(); cs.FlushFinalBlock(); f.FlushToDisk(); f.SetLength(f.Position + MD5Size); // WRITE MD5 var hash = md5.Hash; f.Write(hash, 0, hash.Length); f.FlushToDisk(); } } Log.Trace("PTable scavenge finished in {elapsed} ({droppedCount} entries removed, {keptCount} remaining).", watch.Elapsed, droppedCount, keptCount); var scavengedTable = new PTable(outputFile, Guid.NewGuid(), depth: cacheDepth, skipIndexVerify: skipIndexVerify); spaceSaved = table._size - scavengedTable._size; return(scavengedTable); } catch (Exception) { try { File.Delete(outputFile); } catch (Exception ex) { Log.ErrorException(ex, "Unable to delete unwanted scavenged PTable: {outputFile}", outputFile); } throw; } }
private void ReadOffQueue() { try { while (true) { TableItem tableItem; //ISearchTable table; lock (_awaitingTablesLock) { Log.Trace("Awaiting tables queue size is: {0}.", _awaitingMemTables.Count); if (_awaitingMemTables.Count == 1) { _backgroundRunning = false; _backgroundRunningEvent.Set(); return; } tableItem = _awaitingMemTables[_awaitingMemTables.Count - 1]; } PTable ptable; var memtable = tableItem.Table as IMemTable; if (memtable != null) { memtable.MarkForConversion(); ptable = PTable.FromMemtable(memtable, _fileNameProvider.GetFilenameNewTable()); } else { ptable = (PTable)tableItem.Table; } // backup current version of IndexMap in case following switch will be left in unsafe state // this will allow to rebuild just part of index var backupFile = Path.Combine(_directory, IndexMapBackupFilename); var indexmapFile = Path.Combine(_directory, IndexMapFilename); Helper.EatException(() => { if (File.Exists(backupFile)) { File.Delete(backupFile); } if (File.Exists(indexmapFile)) { // same as File.Copy(indexmapFile, backupFile); but with forced flush var indexmapContent = File.ReadAllBytes(indexmapFile); using (var f = File.Create(backupFile)) { f.Write(indexmapContent, 0, indexmapContent.Length); f.FlushToDisk(); } } }); EnterUnsafeState(_directory); MergeResult mergeResult; using (var reader = _tfReaderFactory()) { mergeResult = _indexMap.AddPTable(ptable, tableItem.PrepareCheckpoint, tableItem.CommitCheckpoint, entry => reader.ExistsAt(entry.Position), _fileNameProvider); } _indexMap = mergeResult.MergedMap; _indexMap.SaveToFile(indexmapFile); LeaveUnsafeState(_directory); lock (_awaitingTablesLock) { var memTables = _awaitingMemTables.ToList(); var corrTable = memTables.First(x => x.Table.Id == ptable.Id); memTables.Remove(corrTable); // parallel thread could already switch table, // so if we have another PTable instance with same ID, // we need to kill that instance as we added ours already if (!ReferenceEquals(corrTable.Table, ptable) && corrTable.Table is PTable) { ((PTable)corrTable.Table).MarkForDestruction(); } Log.Trace("There are now {0} awaiting tables.", memTables.Count); _awaitingMemTables = memTables; } // We'll keep indexmap.backup in case of crash. In case of crash we hope that all necessary // PTables for previous version of IndexMap are still there, so we can rebuild // from last step, not to do full rebuild. mergeResult.ToDelete.ForEach(x => x.MarkForDestruction()); } } catch (FileBeingDeletedException exc) { Log.ErrorException(exc, "Couldn't acquire chunk in TableIndex.ReadOffQueue. It is ok if node is shutting down."); } catch (Exception exc) { Log.ErrorException(exc, "Error in TableIndex.ReadOffQueue"); throw; } }