public void TestBlockIndex()
{
    using var key = Key.Create(SignatureAlgorithm.Ed25519);
    var blockTree = new BlockTree(Array.Empty<byte>(), key);

    // Build three verified blocks chained off the root.
    var verifiedBlocks = new List<Block>();
    for (var i = 0; i < 3; i++)
    {
        Assert.True(
            blockTree.TryAdd(blockTree.Root, Encoding.UTF8.GetBytes($"Hello #{i}"), key, out var newBlock));
        verifiedBlocks.Add(newBlock!);
    }

    // Index all blocks, including the root.
    var blockIndex = new BlockIndex();
    foreach (var verifiedBlock in verifiedBlocks)
    {
        blockIndex.Add(verifiedBlock);
    }
    blockIndex.Add(blockTree.Root);

    // The indexed blocks should be sufficient to reconstruct a tree.
    var blocks = blockIndex.GetAllBlocks();
    var blockTree2 = new BlockTree(blocks);
}
/// <summary>
/// Opens the scratch space backed by <paramref name="source"/> and rebuilds
/// the in-memory block index from whatever the wheel discovers on disk.
/// </summary>
/// <param name="source">Backing file storage handed to the wheel.</param>
/// <param name="cancel">Stops the wheel's background thread when signalled.</param>
public Scratch(IFileSource source, CancellationToken cancel)
{
    // The index must exist before the wheel: its Remove method is handed
    // to the wheel as the deletion callback below.
    _index = new BlockIndex();

    // When a (realm,hash,address) is about to be deleted, remove it from
    // the index as well.
    _wheel = new FileWheel(source, _index.Remove);

    // Discovered anything on disk ? Register them with the index.
    foreach (var (realm, hash, address) in _wheel.EnumerateBlocks())
    {
        _index.Add(realm, hash, address);
    }

    // Start processing scheduled writes only after the on-disk blocks
    // have been registered.
    _wheel.StartBackgroundThread(cancel);
}
/// <summary>
/// Loads the block chain index from the server, or regenerates it if it could not be located
/// </summary>
/// <returns>A value indicating whether the genesis block could be located</returns>
private bool LoadIndex()
{
    Logger.Info("Loading block index... ");
    var genesisFoundLocal = false;
    var indexPath = Path.Combine(this._directory, "index.json");

    if (!File.Exists(indexPath))
    {
        Logger.Warn($"No index found at {indexPath}!");
        if (!Directory.Exists(this._directory))
        {
            // TODO: Get block chain; bootstrap local storage
        }
        else
        {
            Logger.Warn($"Rebuilding index from blocks located in: {this._directory}...");
            var index = this.RebuildIndexFromBlockFiles();
            genesisFoundLocal = this.VerifyIndex(index);

            Console.Write("\tSaving... ");
            index.SaveToFile(this._directory);
            Console.WriteLine("DONE");
        }
    }
    else
    {
        Logger.Info($"Reading index found at {indexPath}...");
        // Read the whole file in one call; the previous StreamReader block
        // also called the redundant sr.Close() inside its using scope.
        var indexString = File.ReadAllText(indexPath, System.Text.Encoding.UTF8);
        var index = JsonConvert.DeserializeObject<BlockIndex>(indexString);
        if (index != null)
        {
            Logger.Info("...Read of index file complete.");
            genesisFoundLocal = this.VerifyIndex(index);
        }
        else
        {
            // TODO: Could not open index
            Console.WriteLine("FAILED");
        }
    }

    return genesisFoundLocal;
}

/// <summary>
/// Builds a fresh index by reading the header of every *.block file
/// in the local storage directory.
/// </summary>
/// <returns>The rebuilt (unsorted, unverified) index.</returns>
private BlockIndex RebuildIndexFromBlockFiles()
{
    var index = new BlockIndex();
    // NOTE(review): index.Add is invoked from parallel worker threads —
    // assumes BlockIndex.Add is thread-safe; confirm.
    Parallel.ForEach(
        Directory.GetFiles(this._directory, "*.block"),
        blockFile =>
        {
            Logger.Debug($"Reading block header from {blockFile}");
            try
            {
                Debug.Assert(blockFile != null, "blockFile != null");
                var header = BlockHeader.ReadFromFile(blockFile);
                index.Add(new BlockIndexEntry(blockFile, header));
            }
            catch (Exception ex)
            {
                Logger.Error($"Problem reading block header from {blockFile}: {ex.Message}", ex);
                // Rethrown with `throw;` to preserve the stack trace; a single
                // unreadable block file aborts the rebuild (surfaces to the
                // caller as an AggregateException from Parallel.ForEach).
                throw;
            }
        });
    return index;
}

/// <summary>
/// Sorts the index and walks the chain to ensure no holes.
/// Shared by the rebuild path and the read-from-file path, which
/// previously duplicated this logic.
/// </summary>
/// <param name="index">The index to sort and verify in place.</param>
/// <returns>True when verification passed and the genesis block was found.</returns>
private bool VerifyIndex(BlockIndex index)
{
    index.Sort(new BlockIndexEntryComparer());

    // Walk the list to ensure no holes.
    Logger.Info("Verifying the integrity of the block chain...");
    bool genesisFound, continuous;
    if (!index.Verify(out genesisFound, out continuous))
    {
        if (!genesisFound)
        {
            Console.WriteLine("GENESIS BLOCK NOT FOUND");
        }

        if (!continuous)
        {
            Console.WriteLine("BREAK IN CHAIN");
        }

        return false;
    }

    Logger.Info("...Verification of the block chain complete.");
    return genesisFound;
}
public void return_added()
{
    // Arrange: one address and a hash that lands in bucket 0 of realm 0.
    var address = new BlockAddress(1, 0);
    var hash = HashInBucket(0, 0);

    // Act + Assert: a fresh entry is accepted and can be read back.
    Assert.True(_index.Add(0, hash, address));
    Assert.Equal(address, _index.Get(0, hash));
}
/// <summary>
/// This implements the <c>in</c> command.
/// </summary>
/// <param name="table">The left-hand table of the <c>IN</c> comparison.</param>
/// <param name="other">The right-hand table whose column is searched for matches.</param>
/// <param name="column1">Offset of the compared column within <paramref name="table"/>.</param>
/// <param name="column2">Offset of the compared column within <paramref name="other"/>.</param>
/// <returns>
/// Returns the rows selected from <paramref name="table"/>.
/// </returns>
public static IEnumerable<int> SelectRowsIn(this ITable table, ITable other, int column1, int column2) {
    // First pick the the smallest and largest table. We only want to iterate
    // through the smallest table.
    // NOTE: This optimisation can't be performed for the 'not_in' command.
    ITable smallTable;
    ITable largeTable;
    int smallColumn;
    int largeColumn;
    if (table.RowCount < other.RowCount) {
        smallTable = table;
        largeTable = other;
        smallColumn = column1;
        largeColumn = column2;
    } else {
        smallTable = other;
        largeTable = table;
        smallColumn = column2;
        largeColumn = column1;
    }

    // Iterate through the small table's column. If we can find identical
    // cells in the large table's column, then we should include the row in our
    // final result.
    var resultRows = new BlockIndex<int>();
    var op = SqlExpressionType.Equal;

    foreach (var row in smallTable) {
        var cell = row.GetValue(smallColumn);
        var selectedSet = largeTable.SelectRows(largeColumn, op, cell).ToList();

        // We've found cells that are IN both columns,
        if (selectedSet.Count > 0) {
            // If the large table is what our result table will be based on, append
            // the rows selected to our result set. Otherwise add the index of
            // our small table. This only works because we are performing an
            // EQUALS operation.
            if (largeTable == table) {
                // Only allow unique rows into the table set.
                // NOTE(review): the scan stops at the first duplicate insert,
                // matching the original index-based loop — confirm this early
                // exit is intended rather than a latent bug.
                foreach (var selectedRow in selectedSet) {
                    if (!resultRows.UniqueInsertSort(selectedRow))
                        break;
                }
            } else {
                // Don't bother adding in sorted order because it's not important.
                resultRows.Add(row.RowId.RowNumber);
            }
        }
    }

    return resultRows.ToList();
}
/// <summary>
/// Implements the <c>IN</c> command: selects the rows of one table whose
/// cells in the given column also appear in the other table's column.
/// </summary>
public static IList<long> In(this ITable table, ITable table2, int column1, int column2) {
    // Drive the scan from the smaller of the two tables; we only want to
    // iterate through the smallest table.
    // NOTE: This optimisation can't be performed for the 'not_in' command.
    bool firstIsSmaller = table.RowCount < table2.RowCount;
    ITable smallTable = firstIsSmaller ? table : table2;
    ITable largeTable = firstIsSmaller ? table2 : table;
    int smallColumn = firstIsSmaller ? column1 : column2;
    int largeColumn = firstIsSmaller ? column2 : column1;

    // Walk the small table's column, collecting rows whose cell also
    // occurs in the large table's column.
    var resultRows = new BlockIndex<long>();
    var op = Operator.Equal;
    var rows = smallTable.GetRowEnumerator();
    while (rows.MoveNext()) {
        var smallRowIndex = rows.Current;
        var cell = smallTable.GetValue(smallColumn, smallRowIndex);
        var selectedSet = largeTable.SelectRows(largeColumn, op, cell);

        // No match in the large table: nothing to record for this row.
        if (!selectedSet.Any())
            continue;

        // If the large table is what our result table will be based on, append
        // the rows selected to our result set. Otherwise add the index of
        // our small table. This only works because we are performing an
        // EQUALS operation.
        if (largeTable == table) {
            // Only allow unique rows into the table set.
            foreach (var set in selectedSet) {
                if (!resultRows.UniqueInsertSort((int)set))
                    break;
            }
        } else {
            // Don't bother adding in sorted order because it's not important.
            resultRows.Add((int)smallRowIndex);
        }
    }

    return resultRows.Cast<long>().ToList();
}
/// <summary>
/// Write a block to the scratch space. It will be possible to read it
/// back from the scratch space using the (realm, hash) that was
/// provided to create it.
/// </summary>
/// <param name="realm">Realm the block belongs to.</param>
/// <param name="hash">Hash of the block data; part of the lookup key.</param>
/// <param name="length">Size, in bytes, of the block being written.</param>
/// <param name="writer">Callback that produces the block's bytes.</param>
/// <remarks>
/// <paramref name="hash"/> should be the hash of the data passed in,
/// according to <see cref="BlockHasher"/>.
///
/// <paramref name="writer"/> will be called at some point in the future,
/// but not necessarily while <see cref="Write"/> is running.
/// </remarks>
public void Write(uint realm, Hash hash, int length, WithSpan.ReadWrite writer)
{
    // Reserve space on the wheel; the actual write is deferred (see remarks).
    var addr = _wheel.ScheduleWrite(realm, hash, length, writer);

    // Register the address right away so (realm, hash) lookups resolve
    // as soon as this method returns.
    _index.Add(realm, hash, addr);
}