/// <summary>Read the whole block and verify that the reader reports CHECKSUM_OK back to the datanode.</summary>
public virtual void TestBlockVerification()
{
    RemoteBlockReader2 reader = (RemoteBlockReader2)Org.Mockito.Mockito.Spy(
        util.GetBlockReader(testBlock, 0, FileSizeK * 1024));
    util.ReadAndCheckEOS(reader, FileSizeK * 1024, true);
    Org.Mockito.Mockito.Verify(reader).SendReadResult(DataTransferProtos.Status.ChecksumOk);
    reader.Close();
}
/// <summary>Request only half the block, read all of what was requested, and verify CHECKSUM_OK is still sent.</summary>
public virtual void TestCompletePartialRead()
{
    // Ask for half the file
    RemoteBlockReader2 reader = (RemoteBlockReader2)Org.Mockito.Mockito.Spy(
        util.GetBlockReader(testBlock, 0, FileSizeK * 1024 / 2));
    // And read half the file
    util.ReadAndCheckEOS(reader, FileSizeK * 1024 / 2, true);
    Org.Mockito.Mockito.Verify(reader).SendReadResult(DataTransferProtos.Status.ChecksumOk);
    reader.Close();
}
/// <summary>Request the whole block but read only half of it; no CHECKSUM_OK should be sent.</summary>
public virtual void TestIncompleteRead()
{
    RemoteBlockReader2 reader = (RemoteBlockReader2)Org.Mockito.Mockito.Spy(
        util.GetBlockReader(testBlock, 0, FileSizeK * 1024));
    util.ReadAndCheckEOS(reader, FileSizeK / 2 * 1024, false);
    // We asked the blockreader for the whole file, and only read
    // half of it, so no CHECKSUM_OK
    Org.Mockito.Mockito.Verify(reader, Org.Mockito.Mockito.Never()).SendReadResult(
        DataTransferProtos.Status.ChecksumOk);
    reader.Close();
}
/// <summary>
/// Selects the legacy or the newer remote block reader implementation
/// based on the client configuration.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private BlockReader GetRemoteBlockReader(Peer peer)
{
    if (conf.useLegacyBlockReader)
    {
        return RemoteBlockReader.NewBlockReader(fileName, block, token, startOffset, length,
            conf.ioBufferSize, verifyChecksum, clientName, peer, datanode,
            clientContext.GetPeerCache(), cachingStrategy);
    }
    else
    {
        return RemoteBlockReader2.NewBlockReader(fileName, block, token, startOffset, length,
            verifyChecksum, clientName, peer, datanode, clientContext.GetPeerCache(),
            cachingStrategy);
    }
}
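// Minimal usage sketch (not part of the original source): the legacy/new choice
// above is driven by client configuration. The configuration key name and the
// Configuration API usage below are assumptions for illustration, not something
// established by this file.
public virtual void ExampleToggleLegacyBlockReader()
{
    Org.Apache.Hadoop.Conf.Configuration clientConf = new Org.Apache.Hadoop.Conf.Configuration();
    // Assumed key: when true, GetRemoteBlockReader above takes the legacy
    // RemoteBlockReader branch; when false (the default), it uses RemoteBlockReader2.
    clientConf.SetBoolean("dfs.client.use.legacy.blockreader", true);
    // Any DFSClient created from clientConf would then build legacy readers
    // for remote block reads.
}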
/// <summary>
/// When the reader reaches end of the read, it sends a status response
/// (e.g. CHECKSUM_OK) to the DN.
/// </summary>
/// <remarks>
/// When the reader reaches end of the read, it sends a status response
/// (e.g. CHECKSUM_OK) to the DN. Failure to do so could lead to the DN
/// closing our connection (which we will re-open), but won't affect
/// data correctness.
/// </remarks>
internal virtual void SendReadResult(Peer peer, DataTransferProtos.Status statusCode)
{
    System.Diagnostics.Debug.Assert(!sentStatusCode, "already sent status code to " + peer);
    try
    {
        RemoteBlockReader2.WriteReadResult(peer.GetOutputStream(), statusCode);
        sentStatusCode = true;
    }
    catch (IOException e)
    {
        // It's ok not to be able to send this. But something is probably wrong.
        Log.Info("Could not send read status (" + statusCode + ") to datanode "
            + peer.GetRemoteAddressString() + ": " + e.Message);
    }
}
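// Illustrative sketch (not in the original source) of what the wire write inside
// RemoteBlockReader2.WriteReadResult amounts to: the client status is sent back to
// the datanode as a length-delimited ClientReadStatusProto. The builder/method
// names below are assumptions about the converted protobuf API, not taken from
// this file.
internal static void WriteReadResultSketch(Peer peer, DataTransferProtos.Status statusCode)
{
    var @out = peer.GetOutputStream();
    DataTransferProtos.ClientReadStatusProto.NewBuilder()
        .SetStatus(statusCode)    // e.g. DataTransferProtos.Status.ChecksumOk
        .Build()
        .WriteDelimitedTo(@out);  // varint length-prefixed protobuf frame
    @out.Flush();
}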
/// <summary>
/// Read at offsets and lengths that do not line up with chunk boundaries and
/// verify that CHECKSUM_OK is still sent once the requested range is fully read.
/// </summary>
public virtual void TestUnalignedReads()
{
    int[] startOffsets = new int[] { 0, 3, 129 };
    int[] lengths = new int[] { 30, 300, 512, 513, 1025 };
    foreach (int startOffset in startOffsets)
    {
        foreach (int length in lengths)
        {
            DFSClient.Log.Info("Testing startOffset = " + startOffset + " and " + " len=" + length);
            RemoteBlockReader2 reader = (RemoteBlockReader2)Org.Mockito.Mockito.Spy(
                util.GetBlockReader(testBlock, startOffset, length));
            util.ReadAndCheckEOS(reader, length, true);
            Org.Mockito.Mockito.Verify(reader).SendReadResult(DataTransferProtos.Status.ChecksumOk);
            reader.Close();
        }
    }
}
/// <summary>Create a new BlockReader specifically to satisfy a read.</summary>
/// <remarks>
/// Create a new BlockReader specifically to satisfy a read.
/// This method also sends the OP_READ_BLOCK request.
/// </remarks>
/// <param name="file">File location</param>
/// <param name="block">The block object</param>
/// <param name="blockToken">The block token for security</param>
/// <param name="startOffset">The read offset, relative to block head</param>
/// <param name="len">The number of bytes to read</param>
/// <param name="bufferSize">The IO buffer size (not the client buffer size)</param>
/// <param name="verifyChecksum">Whether to verify checksum</param>
/// <param name="clientName">Client name</param>
/// <returns>New BlockReader instance, or null on error.</returns>
/// <exception cref="System.IO.IOException"/>
public static Org.Apache.Hadoop.Hdfs.RemoteBlockReader NewBlockReader(string file,
    ExtendedBlock block, Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> blockToken,
    long startOffset, long len, int bufferSize, bool verifyChecksum, string clientName,
    Peer peer, DatanodeID datanodeID, PeerCache peerCache, CachingStrategy cachingStrategy)
{
    // in and out will be closed when sock is closed (by the caller)
    DataOutputStream @out = new DataOutputStream(new BufferedOutputStream(peer.GetOutputStream()));
    new Sender(@out).ReadBlock(block, blockToken, clientName, startOffset, len, verifyChecksum,
        cachingStrategy);
    //
    // Get bytes in block, set streams
    //
    DataInputStream @in = new DataInputStream(new BufferedInputStream(peer.GetInputStream(),
        bufferSize));
    DataTransferProtos.BlockOpResponseProto status = DataTransferProtos.BlockOpResponseProto
        .ParseFrom(PBHelper.VintPrefixed(@in));
    RemoteBlockReader2.CheckSuccess(status, peer, block, file);
    DataTransferProtos.ReadOpChecksumInfoProto checksumInfo = status.GetReadOpChecksumInfo();
    DataChecksum checksum = DataTransferProtoUtil.FromProto(checksumInfo.GetChecksum());
    //Warning when we get CHECKSUM_NULL?
    // Read the first chunk offset.
    long firstChunkOffset = checksumInfo.GetChunkOffset();
    if (firstChunkOffset < 0 || firstChunkOffset > startOffset ||
        firstChunkOffset <= (startOffset - checksum.GetBytesPerChecksum()))
    {
        throw new IOException("BlockReader: error in first chunk offset (" + firstChunkOffset
            + ") startOffset is " + startOffset + " for file " + file);
    }
    return new Org.Apache.Hadoop.Hdfs.RemoteBlockReader(file, block.GetBlockPoolId(),
        block.GetBlockId(), @in, checksum, verifyChecksum, startOffset, firstChunkOffset,
        len, peer, datanodeID, peerCache);
}
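// A minimal caller-side sketch (not part of the original source): how the factory
// above might be used once a Peer connection to a datanode exists. The path
// "/example/file", the client name, the buffer sizes, and the assumption that the
// peer/block/token/cache arguments are set up elsewhere are all illustrative only.
/// <exception cref="System.IO.IOException"/>
private static void ExampleReadFirstKilobyte(Peer peer, ExtendedBlock block,
    Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> blockToken,
    DatanodeID datanodeID, PeerCache peerCache, CachingStrategy cachingStrategy)
{
    // Sends OP_READ_BLOCK for the first 1024 bytes of the block and parses the
    // checksum header from the datanode's response.
    BlockReader reader = RemoteBlockReader.NewBlockReader("/example/file", block, blockToken,
        0, 1024, 4096, true, "example-client", peer, datanodeID, peerCache, cachingStrategy);
    try
    {
        byte[] buf = new byte[1024];
        int nRead = 0;
        while (nRead < buf.Length)
        {
            int n = reader.Read(buf, nRead, buf.Length - nRead);
            if (n <= 0)
            {
                // End of the requested range; having read it all allows the reader
                // to report CHECKSUM_OK back to the datanode (see SendReadResult).
                break;
            }
            nRead += n;
        }
    }
    finally
    {
        reader.Close();
    }
}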