Example #1
 private Func <TPeer, CancellationToken, Task> CreateEnqueuing(
     IList <HashDigest <SHA256> > hashDigests,
     BlockFetcher blockFetcher,
     TimeSpan singleSessionTimeout,
     CancellationToken cancellationToken,
     AsyncProducerConsumerQueue <Tuple <Block <TAction>, TPeer> > queue
     ) =>
 async(peer, ct) =>
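The listing above is cut off right after the lambda header. Judging from Example #6 below, where the delegate returned by CreateEnqueuing is handed to pool.SpawnAsync, the factory presumably returns an async per-peer job that fetches the demanded hashes and pushes each (block, peer) pair into queue. A minimal sketch of that shape, condensed and assumed rather than copied from the original source:

    // Assumed shape only: a per-peer job compatible with pool.SpawnAsync, which
    // fetches the demanded block hashes and enqueues each (block, peer) pair.
    async (peer, ct) =>
    {
        await foreach (Block<TAction> block in
            blockFetcher(peer, hashDigests, ct).WithCancellation(ct))
        {
            await queue.EnqueueAsync(Tuple.Create(block, peer), cancellationToken);
        }
    };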
Example #2
        private void Save(ConcurrentDictionary <Task, Task> tasks, BlockFetcher fetcher, BulkImport <TIndexed> bulk)
        {
            this.logger.LogTrace("()");

            WaitFinished(tasks);
            ThrowIfException();
            fetcher.SaveCheckpoint();

            this.logger.LogTrace("(-)");
        }
Example #3
 /// <summary>
 /// Downloads blocks from <paramref name="peers"/> in parallel,
 /// using the given <paramref name="blockFetcher"/> function.
 /// </summary>
 /// <param name="peers">A list of peers to download blocks.</param>
 /// <param name="blockFetcher">A function to take demands and a peer, and then
 /// download corresponding blocks.</param>
 /// <param name="millisecondsSingleSessionTimeout">A maximum time in milliseconds to wait
 /// each single call of <paramref name="blockFetcher"/>.  If a call is timed out unsatisfied
 /// demands are automatically retried to fetch from other peers.  10 seconds by default.
 /// </param>
 /// <param name="cancellationToken">A cancellation token to observe while waiting
 /// for the task to complete.</param>
 /// <returns>An async enumerable that yields pairs of a fetched block and its source
 /// peer.  It terminates when all demands are satisfied.</returns>
 public IAsyncEnumerable <Tuple <Block <TAction>, TPeer> > Complete(
     IReadOnlyList <TPeer> peers,
     BlockFetcher blockFetcher,
     int millisecondsSingleSessionTimeout = 10000,
     CancellationToken cancellationToken  = default
     ) =>
 Complete(
     peers,
     blockFetcher,
     TimeSpan.FromMilliseconds(millisecondsSingleSessionTimeout),
     cancellationToken
     );
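This overload simply converts the millisecond timeout into a TimeSpan and forwards to the TimeSpan overload shown in Examples #5 and #6. A minimal consumption sketch, where completer, peers, and fetcher are hypothetical names standing in for an instance exposing Complete, a peer list, and a BlockFetcher delegate:

    // Hypothetical names (completer, peers, fetcher) used purely for illustration.
    IAsyncEnumerable<Tuple<Block<TAction>, TPeer>> downloads =
        completer.Complete(peers, fetcher, millisecondsSingleSessionTimeout: 15000);

    await foreach (Tuple<Block<TAction>, TPeer> pair in downloads)
    {
        Block<TAction> block = pair.Item1;  // the fetched block
        TPeer source = pair.Item2;          // the peer that served it
    }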
Example #4
        public int Run(ChainBase chain = null)
        {
            ListenerTrace.Info("Start initial indexing");
            int totalProcessed = 0;

            using (var node = _Conf.Indexer.ConnectToNode(false))
            {
                ListenerTrace.Info("Handshaking...");
                node.VersionHandshake();
                ListenerTrace.Info("Handshaked");
                chain = chain ?? node.GetChain();
                ListenerTrace.Info("Current chain at height " + chain.Height);
                var blockRepository = new NodeBlocksRepository(node);

                var    blobLock = GetInitBlob();
                string lease    = null;
                try
                {
                    blobLock.UploadText("Enqueuing");
                    lease = blobLock.AcquireLease(null, null);
                }
                catch (StorageException)
                {
                }
                if (lease != null)
                {
                    ListenerTrace.Info("Queueing index jobs");
                    EnqueueJobs(blockRepository, chain, blobLock, lease);
                }
                ListenerTrace.Info("Dequeuing index jobs");

                while (true)
                {
                    var msg = _Conf.Topics
                              .InitialIndexing
                              .ReceiveAsync(TimeSpan.FromMilliseconds(1000))
                              .Result;

                    var ns          = _Conf.Topics.InitialIndexing.GetNamespace();
                    var description = ns.GetQueue(_Conf.Topics.InitialIndexing.Queue);

                    Console.WriteLine("Work remaining in the queue : " + description.MessageCountDetails.ActiveMessageCount);
                    if (msg == null)
                    {
                        var state = blobLock.DownloadText();
                        if (state == "Enqueuing" || description.MessageCountDetails.ActiveMessageCount != 0)
                        {
                            ListenerTrace.Info("Additional work will be enqueued...");
                            continue;
                        }
                        else
                        {
                            var locator = new BlockLocator();
                            locator.FromBytes(Encoders.Hex.DecodeData(state));
                            UpdateCheckpoints(locator);
                            break;
                        }
                    }

                    using (msg.Message)
                    {
                        var range = msg.Body;
                        using (var sched = new CustomThreadPoolTaskScheduler(50, 100, range.ToString()))
                        {
                            ListenerTrace.Info("Processing " + range.ToString());
                            totalProcessed++;
                            var          task    = _IndexTasks[range.Target];
                            BlockFetcher fetcher = new BlockFetcher(task.Item1, blockRepository, chain)
                            {
                                FromHeight = range.From,
                                ToHeight   = range.From + range.Count - 1
                            };
                            try
                            {
                                task.Item2.SaveProgression = false;
                                task.Item2.EnsureIsSetup   = totalProcessed == 0;
                                var index = Task.Factory.StartNew(() =>
                                {
                                    task.Item2.Index(fetcher, sched);
                                }, TaskCreationOptions.LongRunning);
                                while (!index.Wait(TimeSpan.FromMinutes(4)))
                                {
                                    msg.Message.RenewLock();
                                    ListenerTrace.Info("Lock renewed");
                                }
                            }
                            catch (AggregateException aex)
                            {
                                ExceptionDispatchInfo.Capture(aex.InnerException).Throw();
                                throw;
                            }

                            range.Processed = true;
                            msg.Message.Complete();
                        }
                    }
                }
            }
            ListenerTrace.Info("Initial indexing terminated");
            return(totalProcessed);
        }
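ToHeight above is computed as From + Count - 1, which indicates that both bounds are inclusive: a message covering Count blocks starting at height From spans heights From through From + Count - 1. With hypothetical figures:

    // Hypothetical figures, only to illustrate the inclusive-bounds arithmetic above.
    int from  = 1000;
    int count = 100;
    int fromHeight = from;              // 1000
    int toHeight   = from + count - 1;  // 1099, i.e. heights 1000..1099 = 100 blocks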
Example #5
        /// <summary>
        /// Downloads blocks from <paramref name="peers"/> in parallel,
        /// using the given <paramref name="blockFetcher"/> function.
        /// </summary>
        /// <param name="peers">A list of peers to download blocks.</param>
        /// <param name="blockFetcher">A function to take demands and a peer, and then
        /// download corresponding blocks.</param>
        /// <param name="singleSessionTimeout">A maximum time to wait each single call of
        /// <paramref name="blockFetcher"/>.  If a call is timed out unsatisfied demands
        /// are automatically retried to fetch from other peers.</param>
        /// <param name="cancellationToken">A cancellation token to observe while waiting
        /// for the task to complete.</param>
        /// <returns>An async enumerable that yields pairs of a fetched block and its source
        /// peer.  It terminates when all demands are satisfied.</returns>
        public async IAsyncEnumerable <Tuple <Block <TAction>, TPeer> > Complete(
            IReadOnlyList <TPeer> peers,
            BlockFetcher blockFetcher,
            TimeSpan singleSessionTimeout,
            [EnumeratorCancellation] CancellationToken cancellationToken = default
            )
        {
            if (!peers.Any())
            {
                throw new ArgumentException("The list of peers must not be empty.", nameof(peers));
            }

            var pool       = new PeerPool(peers);
            var queue      = new AsyncProducerConsumerQueue <Tuple <Block <TAction>, TPeer> >();
            var completion =
                new ConcurrentDictionary <HashDigest <SHA256>, bool>(_satisfiedBlocks);

            await foreach (var hashes in EnumerateChunks(cancellationToken))
            {
                cancellationToken.ThrowIfCancellationRequested();
                IList <HashDigest <SHA256> > hashDigests =
                    hashes is IList <HashDigest <SHA256> > l ? l : hashes.ToList();

                foreach (HashDigest <SHA256> hash in hashDigests)
                {
                    completion.TryAdd(hash, false);
                }

                cancellationToken.ThrowIfCancellationRequested();
                await pool.SpawnAsync(
                    async (peer, ct) =>
                {
                    ct.ThrowIfCancellationRequested();
                    var demands = new HashSet <HashDigest <SHA256> >(hashDigests);
                    try
                    {
                        _logger.Debug(
                            "Request blocks {BlockHashes} to {Peer}...",
                            hashDigests,
                            peer
                            );
                        var timeout = new CancellationTokenSource(singleSessionTimeout);
                        CancellationToken timeoutToken = timeout.Token;
                        timeoutToken.Register(() =>
                                              _logger.Debug("Timed out to wait a response from {Peer}.", peer)
                                              );
                        ct.Register(() => timeout.Cancel());

                        try
                        {
                            ConfiguredCancelableAsyncEnumerable <Block <TAction> > blocks =
                                blockFetcher(peer, hashDigests, timeoutToken)
                                .WithCancellation(timeoutToken);
                            await foreach (Block <TAction> block in blocks)
                            {
                                _logger.Debug(
                                    "Downloaded a block #{BlockIndex} {BlockHash} " +
                                    "from {Peer}.",
                                    block.Index,
                                    block.Hash,
                                    peer
                                    );

                                if (Satisfy(block))
                                {
                                    await queue.EnqueueAsync(
                                        Tuple.Create(block, peer),
                                        cancellationToken
                                        );
                                }

                                demands.Remove(block.Hash);
                            }
                        }
                        catch (OperationCanceledException e)
                        {
                            if (ct.IsCancellationRequested)
                            {
                                _logger.Error(
                                    e,
                                    "A blockFetcher job (peer: {Peer}) is cancelled.",
                                    peer
                                    );
                                throw;
                            }

                            _logger.Debug(
                                e,
                                "Timed out to wait a response from {Peer}.",
                                peer
                                );
                        }
                    }
                    finally
                    {
                        if (demands.Any())
                        {
                            _logger.Verbose(
                                "Fetched blocks from {Peer}, but there are still " +
                                "unsatisfied demands ({UnsatisfiedDemandsNumber}) so " +
                                "enqueue them again: {UnsatisfiedDemands}.",
                                peer,
                                demands.Count,
                                demands
                                );
                            Demand(demands, retry: true);
                        }
                        else
                        {
                            _logger.Verbose("Fetched blocks from {Peer}.", peer);
                        }
                    }
                },
                    cancellationToken : cancellationToken
                    );
            }

            while (!completion.All(kv => kv.Value))
            {
                Tuple <Block <TAction>, TPeer> pair;
                try
                {
                    pair = await queue.DequeueAsync(cancellationToken);
                }
                catch (InvalidOperationException)
                {
                    break;
                }

                yield return(pair);

                _logger.Verbose(
                    "Completed a block {BlockIndex} {BlockHash} from {Peer}.",
                    pair.Item1.Index,
                    pair.Item1.Hash,
                    pair.Item2
                    );
                completion[pair.Item1.Hash] = true;
            }

            _logger.Verbose("Completed all blocks ({Number}).", completion.Count);
        }
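The BlockFetcher delegate's exact declaration is not part of these listings, but the call site above (blockFetcher(peer, hashDigests, timeoutToken) followed by WithCancellation) implies it takes a peer, the demanded hashes, and a cancellation token, and returns an IAsyncEnumerable of blocks. A sketch of an implementation under that assumption, with GetBlockAsync standing in for whatever transport call a real caller would use:

    // Sketch only: the delegate's shape is inferred from the call site above, and
    // GetBlockAsync is a hypothetical transport call, not an API from the library.
    async IAsyncEnumerable<Block<TAction>> FetchFromPeerAsync(
        TPeer peer,
        IEnumerable<HashDigest<SHA256>> blockHashes,
        [EnumeratorCancellation] CancellationToken cancellationToken)
    {
        foreach (HashDigest<SHA256> hash in blockHashes)
        {
            cancellationToken.ThrowIfCancellationRequested();
            yield return await GetBlockAsync(peer, hash, cancellationToken);
        }
    }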
Example #6
        /// <summary>
        /// Downloads blocks from <paramref name="peers"/> in parallel,
        /// using the given <paramref name="blockFetcher"/> function.
        /// </summary>
        /// <param name="peers">A list of peers to download blocks.</param>
        /// <param name="blockFetcher">A function to take demands and a peer, and then
        /// download corresponding blocks.</param>
        /// <param name="singleSessionTimeout">A maximum time to wait each single call of
        /// <paramref name="blockFetcher"/>.  If a call is timed out unsatisfied demands
        /// are automatically retried to fetch from other peers.</param>
        /// <param name="cancellationToken">A cancellation token to observe while waiting
        /// for the task to complete.</param>
        /// <returns>An async enumerable that yields pairs of a fetched block and its source
        /// peer.  It terminates when all demands are satisfied.</returns>
        public async IAsyncEnumerable <Tuple <Block <TAction>, TPeer> > Complete(
            IReadOnlyList <TPeer> peers,
            BlockFetcher blockFetcher,
            TimeSpan singleSessionTimeout,
            [EnumeratorCancellation] CancellationToken cancellationToken = default
            )
        {
            if (!peers.Any())
            {
                throw new ArgumentException("The list of peers must not be empty.", nameof(peers));
            }

            var pool  = new PeerPool(peers);
            var queue = new AsyncProducerConsumerQueue <Tuple <Block <TAction>, TPeer> >();

            Task producer = Task.Run(async() =>
            {
                try
                {
                    await foreach (var hashes in EnumerateChunks(cancellationToken))
                    {
                        cancellationToken.ThrowIfCancellationRequested();
                        IList <HashDigest <SHA256> > hashDigests =
                            hashes is IList <HashDigest <SHA256> > l ? l : hashes.ToList();

                        cancellationToken.ThrowIfCancellationRequested();
                        await pool.SpawnAsync(
                            CreateEnqueuing(
                                hashDigests,
                                blockFetcher,
                                singleSessionTimeout,
                                cancellationToken,
                                queue
                                ),
                            cancellationToken: cancellationToken
                            );
                    }

                    await pool.WaitAll(cancellationToken);
                }
                finally
                {
                    queue.CompleteAdding();
                }
            });

            while (await queue.OutputAvailableAsync(cancellationToken))
            {
                Tuple <Block <TAction>, TPeer> pair;
                try
                {
                    pair = await queue.DequeueAsync(cancellationToken);
                }
                catch (InvalidOperationException)
                {
                    break;
                }

                yield return(pair);

                _logger.Verbose(
                    "Completed a block {BlockIndex} {BlockHash} from {Peer}.",
                    pair.Item1.Index,
                    pair.Item1.Hash,
                    pair.Item2
                    );
            }

            await producer;
        }
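Unlike Example #5, this variant drives the per-peer jobs from a background producer task and drains an AsyncProducerConsumerQueue until CompleteAdding is called, so the consumer loop ends as soon as no more output will arrive. Because the iterator's token carries [EnumeratorCancellation], a caller can also attach its own token at enumeration time; a minimal sketch, again with hypothetical names:

    // Hypothetical names; WithCancellation routes cts.Token into the iterator's
    // [EnumeratorCancellation] parameter when enumeration begins.
    using var cts = new CancellationTokenSource(TimeSpan.FromMinutes(5));
    await foreach (var pair in completer
        .Complete(peers, fetcher, TimeSpan.FromSeconds(10))
        .WithCancellation(cts.Token))
    {
        // pair.Item1 is the fetched block, pair.Item2 the peer that served it.
    }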
Example #7
        public void Index(BlockFetcher blockFetcher, TaskScheduler scheduler, Network network)
        {
            this.logger.LogTrace("()");

            ConcurrentDictionary <Task, Task> tasks = new ConcurrentDictionary <Task, Task>();

            try
            {
                SetThrottling();
                if (EnsureIsSetup)
                {
                    EnsureSetup().Wait();
                }

                BulkImport <TIndexed> bulk = new BulkImport <TIndexed>(PartitionSize);
                if (!SkipToEnd)
                {
                    try
                    {
                        foreach (var block in blockFetcher)
                        {
                            ThrowIfException();
                            if (blockFetcher.NeedSave)
                            {
                                if (SaveProgression)
                                {
                                    EnqueueTasks(tasks, bulk, true, scheduler);
                                    Save(tasks, blockFetcher, bulk);
                                }
                            }
                            ProcessBlock(block, bulk, network);
                            if (bulk.HasFullPartition)
                            {
                                EnqueueTasks(tasks, bulk, false, scheduler);
                            }
                        }
                        EnqueueTasks(tasks, bulk, true, scheduler);
                    }
                    catch (OperationCanceledException ex)
                    {
                        if (ex.CancellationToken != blockFetcher.CancellationToken)
                        {
                            throw;
                        }
                    }
                }
                else
                {
                    this.logger.LogTrace("Skipping to end");
                    blockFetcher.SkipToEnd();
                }

                if (SaveProgression)
                {
                    Save(tasks, blockFetcher, bulk);
                }
                WaitFinished(tasks);
                ThrowIfException();
            }
            catch (AggregateException aex)
            {
                ExceptionDispatchInfo.Capture(aex.InnerException).Throw();
                throw;
            }

            this.logger.LogTrace("(-)");
        }
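The catch block above (like the one in Example #4) uses a common idiom for surfacing the first inner exception of an AggregateException without losing its original stack trace: ExceptionDispatchInfo.Capture(...).Throw() rethrows it in place, and the trailing throw; is never reached but guarantees the catch cannot fall through. Reduced to its essentials:

    // ExceptionDispatchInfo lives in System.Runtime.ExceptionServices.
    try
    {
        someTask.Wait();  // hypothetical task whose faults arrive wrapped in AggregateException
    }
    catch (AggregateException aex)
    {
        // Rethrow the first inner exception, preserving its original stack trace.
        ExceptionDispatchInfo.Capture(aex.InnerException).Throw();
        throw;  // unreachable; Throw() above does not return
    }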
Example #8
 private void Save(ConcurrentDictionary <Task, Task> tasks, BlockFetcher fetcher, BulkImport <TIndexed> bulk)
 {
     WaitFinished(tasks);
     ThrowIfException();
     fetcher.SaveCheckpoint();
 }