internal async ReusableTask TryHashPendingFilesAsync ()
{
    // If we cannot handle peer messages then we should not try to async hash.
    // This adds a little bit of a double meaning to the property (for now).
    // Any mode which doesn't allow processing peer messages also does not allow
    // partial hashing.
    if (hashingPendingFiles || !Manager.HasMetadata || !CanHandleMessages)
        return;

    // FIXME: Handle errors from DiskManager and also handle cancellation if the Mode is replaced.
    hashingPendingFiles = true;
    try {
        foreach (TorrentFile file in Manager.Torrent.Files) {
            // If the start piece *and* end piece have been hashed, then every piece in between must've been hashed!
            if (file.Priority != Priority.DoNotDownload && (Manager.UnhashedPieces[file.StartPieceIndex] || Manager.UnhashedPieces[file.EndPieceIndex])) {
                for (int index = file.StartPieceIndex; index <= file.EndPieceIndex; index++) {
                    if (Manager.UnhashedPieces[index]) {
                        byte[] hash = await DiskManager.GetHashAsync (Manager.Torrent, index);
                        Cancellation.Token.ThrowIfCancellationRequested ();

                        bool hashPassed = hash != null && Manager.Torrent.Pieces.IsValid (hash, index);
                        Manager.OnPieceHashed (index, hashPassed, 1, 1);

                        if (hashPassed)
                            Manager.finishedPieces.Enqueue (new HaveMessage (index));
                    }
                }
            }
        }
    } finally {
        hashingPendingFiles = false;
    }
}
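Both TryHashPendingFilesAsync variants and the hash checks below lean on DiskManager.GetHashAsync plus Pieces.IsValid to validate a single piece. As a rough, self-contained sketch of what that validation amounts to for a v1 torrent (one SHA1 digest per piece), the helper below hashes one fixed-size piece of a single file and compares it against an expected 20-byte digest. The method name, signature, and single-file layout are assumptions for illustration only, not MonoTorrent's API.

using System.IO;
using System.Linq;
using System.Security.Cryptography;
using System.Threading.Tasks;

static class PieceHashSketch
{
    // Hypothetical helper: read piece 'index' of a single-file download and
    // compare its SHA1 against the expected 20-byte digest.
    public static async Task<bool> VerifyPieceAsync (string path, int pieceLength, int index, byte[] expectedSha1)
    {
        var buffer = new byte[pieceLength];
        using var stream = File.OpenRead (path);
        stream.Seek ((long) index * pieceLength, SeekOrigin.Begin);

        // The final piece of a file may be shorter than 'pieceLength'.
        int read, total = 0;
        while (total < buffer.Length && (read = await stream.ReadAsync (buffer, total, buffer.Length - total)) > 0)
            total += read;

        using var sha1 = SHA1.Create ();
        byte[] actual = sha1.ComputeHash (buffer, 0, total);
        return actual.SequenceEqual (expectedSha1);
    }
}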
public async Task WaitForHashingToComplete()
{
    if (!Manager.HasMetadata) {
        throw new TorrentException("A hash check cannot be performed if TorrentManager.HasMetadata is false.");
    }

    Manager.HashFails = 0;

    // Delete any existing fast resume data. We will need to recreate it after hashing completes.
    await Manager.MaybeDeleteFastResumeAsync();

    bool atLeastOneDoNotDownload = Manager.Files.Any(t => t.Priority == Priority.DoNotDownload);
    if (await DiskManager.CheckAnyFilesExistAsync(Manager)) {
        int piecesHashed = 0;
        Cancellation.Token.ThrowIfCancellationRequested();

        // bep52: Properly support this
        using var hashBuffer = MemoryPool.Default.Rent(Manager.InfoHashes.GetMaxByteCount(), out Memory<byte> hashMemory);
        var hashes = new PieceHash(hashMemory);

        for (int index = 0; index < Manager.Torrent!.PieceCount; index++) {
            if (atLeastOneDoNotDownload && !Manager.Files.Any(f => index >= f.StartPieceIndex && index <= f.EndPieceIndex && f.Priority != Priority.DoNotDownload)) {
                // If a file is marked 'do not download' ensure we update the TorrentFiles
                // so they also report that the piece is not available/downloaded.
                Manager.OnPieceHashed(index, false, piecesHashed, Manager.PartialProgressSelector.TrueCount);
                // Then mark this piece as being unhashed so we don't try to download it.
                Manager.UnhashedPieces[index] = true;
                continue;
            }

            await PausedCompletionSource.Task;
            Cancellation.Token.ThrowIfCancellationRequested();

            var successful = await DiskManager.GetHashAsync(Manager, index, hashes);

            if (Cancellation.Token.IsCancellationRequested) {
                await DiskManager.CloseFilesAsync(Manager);
                Cancellation.Token.ThrowIfCancellationRequested();
            }

            bool hashPassed = successful && Manager.PieceHashes.IsValid(hashes, index);
            Manager.OnPieceHashed(index, hashPassed, ++piecesHashed, Manager.PartialProgressSelector.TrueCount);
        }
    } else {
        await PausedCompletionSource.Task;
        for (int i = 0; i < Manager.Torrent!.PieceCount; i++) {
            Manager.OnPieceHashed(i, false, i + 1, Manager.Torrent.PieceCount);
        }
    }
}
public async Task WaitForHashingToComplete()
{
    if (!Manager.HasMetadata) {
        throw new TorrentException("A hash check cannot be performed if TorrentManager.HasMetadata is false.");
    }

    // Ensure the partial progress selector is up to date before we start hashing.
    UpdatePartialProgress();

    int piecesHashed = 0;
    Manager.HashFails = 0;

    // Delete any existing fast resume data. We will need to recreate it after hashing completes.
    await Manager.MaybeDeleteFastResumeAsync();

    if (await DiskManager.CheckAnyFilesExistAsync(Manager)) {
        Cancellation.Token.ThrowIfCancellationRequested();
        for (int index = 0; index < Manager.Torrent.Pieces.Count; index++) {
            if (!Manager.Files.Any(f => index >= f.StartPieceIndex && index <= f.EndPieceIndex && f.Priority != Priority.DoNotDownload)) {
                // If a file is marked 'do not download' ensure we update the TorrentFiles
                // so they also report that the piece is not available/downloaded.
                Manager.OnPieceHashed(index, false, piecesHashed, Manager.PartialProgressSelector.TrueCount);
                // Then mark this piece as being unhashed so we don't try to download it.
                Manager.UnhashedPieces[index] = true;
                continue;
            }

            await PausedCompletionSource.Task;
            Cancellation.Token.ThrowIfCancellationRequested();

            byte[] hash = await DiskManager.GetHashAsync(Manager, index);

            if (Cancellation.Token.IsCancellationRequested) {
                await DiskManager.CloseFilesAsync(Manager);
                Cancellation.Token.ThrowIfCancellationRequested();
            }

            bool hashPassed = hash != null && Manager.Torrent.Pieces.IsValid(hash, index);
            Manager.OnPieceHashed(index, hashPassed, ++piecesHashed, Manager.PartialProgressSelector.TrueCount);
        }
    } else {
        await PausedCompletionSource.Task;
        for (int i = 0; i < Manager.Torrent.Pieces.Count; i++) {
            Manager.OnPieceHashed(i, false, ++piecesHashed, Manager.Torrent.Pieces.Count);
        }
    }
}
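Every WaitForHashingToComplete variant awaits PausedCompletionSource.Task before hashing the next piece, so that await acts as a pause gate: it completes immediately while the manager is running and holds the loop while it is paused. The class itself is not shown here, so the snippet below is only a minimal sketch of that general TaskCompletionSource pattern under the assumption of a simple Pause/Resume pair; it is not MonoTorrent's PausedCompletionSource and it performs no synchronisation.

using System.Threading.Tasks;

// Minimal pause-gate sketch: awaiting WaitWhilePausedAsync() returns immediately
// while running and only completes after Resume() while paused.
class PauseGate
{
    // Completed while running; replaced with an incomplete source while paused.
    TaskCompletionSource<object> tcs = CreateCompleted ();

    static TaskCompletionSource<object> CreateCompleted ()
    {
        var source = new TaskCompletionSource<object> (TaskCreationOptions.RunContinuationsAsynchronously);
        source.SetResult (null);
        return source;
    }

    public void Pause ()
        => tcs = new TaskCompletionSource<object> (TaskCreationOptions.RunContinuationsAsynchronously);

    public void Resume ()
        => tcs.TrySetResult (null);

    public Task WaitWhilePausedAsync ()
        => tcs.Task;
}

Swapping in a fresh, incomplete source on Pause() leaves callers that already passed the await untouched; only the next trip through the hashing loop waits.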
async void WritePieceAsync (PieceMessage message, Piece piece)
{
    long offset = (long) message.PieceIndex * Manager.Torrent.PieceLength + message.StartOffset;

    try {
        await DiskManager.WriteAsync (Manager.Torrent, offset, message.Data, message.RequestLength);
        if (Cancellation.IsCancellationRequested)
            return;
    } catch (Exception ex) {
        Manager.TrySetError (Reason.WriteFailure, ex);
        return;
    } finally {
        ClientEngine.BufferPool.Return (message.Data);
    }

    piece.TotalWritten++;

    // If we haven't written all the blocks of this piece to disk yet, there's no point in hash checking.
    if (!piece.AllBlocksWritten)
        return;

    // Hashcheck the piece as we now have all the blocks.
    byte[] hash;
    try {
        hash = await DiskManager.GetHashAsync (Manager.Torrent, piece.Index);
        if (Cancellation.IsCancellationRequested)
            return;
    } catch (Exception ex) {
        Manager.TrySetError (Reason.ReadFailure, ex);
        return;
    }

    bool result = hash != null && Manager.Torrent.Pieces.IsValid (hash, piece.Index);
    Manager.OnPieceHashed (piece.Index, result, 1, 1);
    Manager.PieceManager.PendingHashCheckPieces[piece.Index] = false;
    if (!result)
        Manager.HashFails++;

    for (int i = 0; i < piece.Blocks.Length; i++)
        if (piece.Blocks[i].RequestedOff != null)
            peers.Add ((PeerId) piece.Blocks[i].RequestedOff);

    foreach (PeerId peer in peers) {
        peer.Peer.HashedPiece (result);
        if (peer.Peer.TotalHashFails == 5)
            ConnectionManager.CleanupSocket (Manager, peer);
    }
    peers.Clear ();

    // If the piece was successfully hashed, enqueue a new "have" message to be sent out
    if (result)
        Manager.finishedPieces.Enqueue (new HaveMessage (piece.Index));
}
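WritePieceAsync maps a (piece index, block offset) pair onto a flat byte offset with (long) PieceIndex * PieceLength + StartOffset, and it defers the hash check until piece.AllBlocksWritten is true. The sketch below shows that bookkeeping in isolation; the 16 KiB block size and the class shape are illustrative assumptions, not the library's Piece type.

using System;

// Sketch of the piece/block bookkeeping WritePieceAsync relies on.
class PieceProgress
{
    public const int BlockSize = 16 * 1024;   // typical request/block size (assumed)
    readonly bool[] written;

    public int PieceIndex { get; }
    public long PieceLength { get; }

    public PieceProgress (int pieceIndex, long pieceLength)
    {
        PieceIndex = pieceIndex;
        PieceLength = pieceLength;
        written = new bool[(pieceLength + BlockSize - 1) / BlockSize];
    }

    // Flat offset of a block within the whole torrent, as computed in WritePieceAsync.
    public long TorrentOffset (int blockStartOffset)
        => (long) PieceIndex * PieceLength + blockStartOffset;

    public void MarkWritten (int blockStartOffset)
        => written[blockStartOffset / BlockSize] = true;

    // Only hash check once every block of the piece has been flushed to disk.
    public bool AllBlocksWritten
        => Array.TrueForAll (written, b => b);
}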
public async Task WaitForHashingToComplete()
{
    if (!Manager.HasMetadata) {
        throw new TorrentException("A hash check cannot be performed if TorrentManager.HasMetadata is false.");
    }

    Manager.HashFails = 0;

    if (await DiskManager.CheckAnyFilesExistAsync(Manager.Torrent)) {
        Cancellation.Token.ThrowIfCancellationRequested();
        for (int index = 0; index < Manager.Torrent.Pieces.Count; index++) {
            if (!Manager.Torrent.Files.Any(f => index >= f.StartPieceIndex && index <= f.EndPieceIndex && f.Priority != Priority.DoNotDownload)) {
                // If a file is marked 'do not download' ensure we update the TorrentFiles
                // so they also report that the piece is not available/downloaded.
                Manager.OnPieceHashed(index, false);
                // Then mark this piece as being unhashed so we don't try to download it.
                Manager.UnhashedPieces[index] = true;
                continue;
            }

            await PausedCompletionSource.Task;
            Cancellation.Token.ThrowIfCancellationRequested();

            var hash = await DiskManager.GetHashAsync(Manager.Torrent, index);

            if (Cancellation.Token.IsCancellationRequested) {
                await DiskManager.CloseFilesAsync(Manager.Torrent);
                Cancellation.Token.ThrowIfCancellationRequested();
            }

            var hashPassed = hash != null && Manager.Torrent.Pieces.IsValid(hash, index);
            Manager.OnPieceHashed(index, hashPassed);
        }
    } else {
        await PausedCompletionSource.Task;
        for (int i = 0; i < Manager.Torrent.Pieces.Count; i++) {
            Manager.OnPieceHashed(i, false);
        }
    }
}
public async Task WaitForHashingToComplete()
{
    if (!Manager.HasMetadata) {
        throw new TorrentException("A hash check cannot be performed if TorrentManager.HasMetadata is false.");
    }

    Manager.HashFails = 0;

    if (await DiskManager.CheckAnyFilesExistAsync(Manager.Torrent)) {
        Cancellation.Token.ThrowIfCancellationRequested();
        for (int index = 0; index < Manager.Torrent.Pieces.Count; index++) {
            // If no wanted file overlaps this piece, mark it as missing in the bitfield and skip hashing it.
            if (!Manager.Torrent.Files.Any(f => index >= f.StartPieceIndex && index <= f.EndPieceIndex && f.Priority != Priority.DoNotDownload)) {
                Manager.Bitfield[index] = false;
                continue;
            }

            await PausedCompletionSource.Task;
            Cancellation.Token.ThrowIfCancellationRequested();

            var hash = await DiskManager.GetHashAsync(Manager.Torrent, index);

            if (Cancellation.Token.IsCancellationRequested) {
                await DiskManager.CloseFilesAsync(Manager.Torrent);
                Cancellation.Token.ThrowIfCancellationRequested();
            }

            var hashPassed = hash != null && Manager.Torrent.Pieces.IsValid(hash, index);
            Manager.OnPieceHashed(index, hashPassed);
        }
    } else {
        await PausedCompletionSource.Task;
        for (int i = 0; i < Manager.Torrent.Pieces.Count; i++) {
            Manager.OnPieceHashed(i, false);
        }
    }
}
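The variants above decide whether a piece is worth hashing by checking whether any file overlapping it is not marked DoNotDownload. A standalone version of that range check, using hypothetical types rather than MonoTorrent's TorrentFile, could look like this.

using System.Collections.Generic;
using System.Linq;

// Hypothetical minimal file description used only for this sketch.
record FileRange (int StartPieceIndex, int EndPieceIndex, bool Wanted);

static class PieceSelection
{
    // A piece only needs hashing/downloading if at least one file that overlaps it
    // is wanted, mirroring the 'f.Priority != Priority.DoNotDownload' checks above.
    public static bool IsPieceWanted (IEnumerable<FileRange> files, int pieceIndex)
        => files.Any (f => pieceIndex >= f.StartPieceIndex
                        && pieceIndex <= f.EndPieceIndex
                        && f.Wanted);
}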
internal async Task TryHashPendingFilesAsync()
{
    if (hashingPendingFiles || !Manager.HasMetadata) {
        return;
    }

    // FIXME: Handle errors from DiskManager and also handle cancellation if the Mode is replaced.
    hashingPendingFiles = true;
    try {
        foreach (var file in Manager.Torrent.Files) {
            // If the start piece *and* end piece have been hashed, then every piece in between must've been hashed!
            if (file.Priority != Priority.DoNotDownload && (Manager.UnhashedPieces[file.StartPieceIndex] || Manager.UnhashedPieces[file.EndPieceIndex])) {
                for (int index = file.StartPieceIndex; index <= file.EndPieceIndex; index++) {
                    if (Manager.UnhashedPieces[index]) {
                        var hash = await DiskManager.GetHashAsync(Manager.Torrent, index);
                        Cancellation.Token.ThrowIfCancellationRequested();

                        var hashPassed = hash != null && Manager.Torrent.Pieces.IsValid(hash, index);
                        Manager.OnPieceHashed(index, hashPassed);

                        if (hashPassed) {
                            Manager.finishedPieces.Enqueue(new HaveMessage(index));
                        }
                    }
                }
            }
        }
    } finally {
        hashingPendingFiles = false;
    }
}