Example No. 1
 public Builder(CachingStrategy prev)
 {
     this.dropBehind = prev.dropBehind;
     this.readahead  = prev.readahead;
 }
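Example No. 1 shows only the Builder's copy constructor, which seeds a new builder with the dropBehind and readahead values of an existing CachingStrategy. The sketch below illustrates how such a copy-based builder is typically driven; the nested CachingStrategy.Builder type and the SetReadahead/SetDropBehind/Build method names mirror the upstream Java Builder API and are assumptions about this C# port, and prevStrategy is a placeholder.

 // Hypothetical usage sketch (method names assumed from the Java Builder API):
 // start from an existing strategy and override only the fields of interest.
 CachingStrategy tuned = new CachingStrategy.Builder(prevStrategy)
                         .SetReadahead(4 * 1024 * 1024)   // request 4 MB of readahead
                         .SetDropBehind(true)             // drop the page cache behind reads
                         .Build();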
Example No. 2
        /// <summary>Scan a block.</summary>
        /// <param name="cblock">The block to scan.</param>
        /// <param name="bytesPerSec">The bytes per second to scan at.</param>
        /// <returns>
        /// The length of the block that was scanned, or
        /// -1 if the block could not be scanned.
        /// </returns>
        private long ScanBlock(ExtendedBlock cblock, long bytesPerSec)
        {
            // 'cblock' has a valid blockId and block pool id, but we don't yet know the
            // genstamp the block is supposed to have.  Ask the FsDatasetImpl for this
            // information.
            ExtendedBlock block = null;

            try
            {
                Block b = volume.GetDataset().GetStoredBlock(cblock.GetBlockPoolId(),
                                                             cblock.GetBlockId());
                if (b == null)
                {
                    Log.Info("FileNotFound while finding block {} on volume {}", cblock,
                             volume.GetBasePath());
                }
                else
                {
                    block = new ExtendedBlock(cblock.GetBlockPoolId(), b);
                }
            }
            catch (FileNotFoundException)
            {
                Log.Info("FileNotFoundException while finding block {} on volume {}", cblock,
                         volume.GetBasePath());
            }
            catch (IOException)
            {
                Log.Warn("I/O error while finding block {} on volume {}", cblock,
                         volume.GetBasePath());
            }
            if (block == null)
            {
                return -1; // block not found.
            }
            BlockSender blockSender = null;

            try
            {
                blockSender = new BlockSender(block, 0, -1, false, true, true, datanode, null,
                                              CachingStrategy.NewDropBehind());
                throttler.SetBandwidth(bytesPerSec);
                long bytesRead = blockSender.SendBlock(nullStream, null, throttler);
                resultHandler.Handle(block, null);
                return bytesRead;
            }
            catch (IOException e)
            {
                resultHandler.Handle(block, e);
            }
            finally
            {
                IOUtils.Cleanup(null, blockSender);
            }
            return -1;
        }
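The return contract documented above (-1 when a block cannot be scanned) suggests a caller that simply skips failed blocks and accumulates the bytes scanned otherwise. A minimal hypothetical sketch, assuming a blocksToScan collection and a bytesPerSec limit supplied by the surrounding scanner:

        // Hypothetical caller loop (blocksToScan and bytesPerSec are assumed to come
        // from the enclosing scanner): skip blocks reported as unscannable (-1).
        long totalScanned = 0;
        foreach (ExtendedBlock cb in blocksToScan)
        {
            long n = ScanBlock(cb, bytesPerSec);
            if (n >= 0)
            {
                totalScanned += n;   // count only blocks that were actually streamed
            }
        }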
Example No. 3
        /// <summary>Try to access a block on a data node.</summary>
        /// <remarks>Try to access a block on a data node; if access fails, an exception is thrown.</remarks>
        /// <param name="datanode"/>
        /// <param name="lblock"/>
        /// <exception cref="System.IO.IOException"/>
        private void AccessBlock(DatanodeInfo datanode, LocatedBlock lblock)
        {
            IPEndPoint    targetAddr = null;
            ExtendedBlock block      = lblock.GetBlock();

            targetAddr = NetUtils.CreateSocketAddr(datanode.GetXferAddr());
            BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf))
                                      .SetInetSocketAddress(targetAddr)
                                      .SetBlock(block)
                                      .SetFileName(BlockReaderFactory.GetFileName(
                                           targetAddr, "test-blockpoolid", block.GetBlockId()))
                                      .SetBlockToken(lblock.GetBlockToken())
                                      .SetStartOffset(0).SetLength(-1).SetVerifyChecksum(true)
                                      .SetClientName("TestDataNodeVolumeFailure")
                                      .SetDatanodeInfo(datanode)
                                      .SetCachingStrategy(CachingStrategy.NewDefaultStrategy())
                                      .SetClientCacheContext(ClientContext.GetFromConf(conf))
                                      .SetConfiguration(conf)
                                      .SetRemotePeerFactory(new _RemotePeerFactory_422(this))
                                      .Build();

            blockReader.Close();
        }
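A hedged sketch of how a test might drive AccessBlock over every replica location of a located block; it assumes this port exposes LocatedBlock.GetLocations(), mirroring the Java API.

        // Hypothetical driver (GetLocations() assumed from the Java LocatedBlock API):
        // probe each replica location and let AccessBlock throw if a datanode cannot serve it.
        foreach (DatanodeInfo dn in lblock.GetLocations())
        {
            AccessBlock(dn, lblock);
        }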
Example No. 4
        public virtual void TestReplicationError()
        {
            // create a file with a replication factor of 1
            Path fileName = new Path("/test.txt");
            int  fileLen  = 1;

            DFSTestUtil.CreateFile(fs, fileName, 1, (short)1, 1L);
            DFSTestUtil.WaitReplication(fs, fileName, (short)1);
            // get the block that belongs to the created file
            LocatedBlocks blocks = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(),
                                                                     fileName.ToString(), 0, (long)fileLen);

            NUnit.Framework.Assert.AreEqual("Should only find 1 block",
                                            blocks.LocatedBlockCount(), 1);
            LocatedBlock block = blocks.Get(0);

            // bring up a second datanode
            cluster.StartDataNodes(conf, 1, true, null, null);
            cluster.WaitActive();
            int      sndNode  = 1;
            DataNode datanode = cluster.GetDataNodes()[sndNode];
            // replicate the block to the second datanode
            IPEndPoint target = datanode.GetXferAddress();
            Socket     s      = Sharpen.Extensions.CreateSocket(target.Address, target.Port);
            // write the header.
            DataOutputStream @out     = new DataOutputStream(s.GetOutputStream());
            DataChecksum     checksum = DataChecksum.NewDataChecksum(DataChecksum.Type.Crc32, 512);

            new Sender(@out).WriteBlock(block.GetBlock(), StorageType.Default,
                                        BlockTokenSecretManager.DummyToken, string.Empty,
                                        new DatanodeInfo[0], new StorageType[0], null,
                                        BlockConstructionStage.PipelineSetupCreate, 1, 0L, 0L, 0L,
                                        checksum, CachingStrategy.NewDefaultStrategy(), false, false, null);
            @out.Flush();
            // close the connection before sending the content of the block
            @out.Close();
            // the temporary block & meta files should be deleted
            string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
            FilePath storageDir = cluster.GetInstanceStorageDir(sndNode, 0);
            FilePath dir1       = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            storageDir = cluster.GetInstanceStorageDir(sndNode, 1);
            FilePath dir2 = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            while (dir1.ListFiles().Length != 0 || dir2.ListFiles().Length != 0)
            {
                Sharpen.Thread.Sleep(100);
            }
            // then increase the file's replication factor
            fs.SetReplication(fileName, (short)2);
            // replication should succeed
            DFSTestUtil.WaitReplication(fs, fileName, (short)1);
            // clean up the file
            fs.Delete(fileName, false);
        }
Example No. 5
        /// <summary>Constructor</summary>
        /// <param name="block">Block that is being read</param>
        /// <param name="startOffset">starting offset to read from</param>
        /// <param name="length">length of data to read</param>
        /// <param name="corruptChecksumOk">if true, corrupt checksum is okay</param>
        /// <param name="verifyChecksum">verify checksum while reading the data</param>
        /// <param name="sendChecksum">send checksum to client.</param>
        /// <param name="datanode">datanode from which the block is being read</param>
        /// <param name="clientTraceFmt">format string used to print client trace logs</param>
        /// <exception cref="System.IO.IOException"/>
        internal BlockSender(ExtendedBlock block, long startOffset, long length,
                             bool corruptChecksumOk, bool verifyChecksum, bool sendChecksum,
                             DataNode datanode, string clientTraceFmt, CachingStrategy cachingStrategy)
        {
            try
            {
                this.block             = block;
                this.corruptChecksumOk = corruptChecksumOk;
                this.verifyChecksum    = verifyChecksum;
                this.clientTraceFmt    = clientTraceFmt;

                /*
                 * If the client asked for the cache to be dropped behind all reads,
                 * we honor that.  Otherwise, we use the DataNode defaults.
                 * When using DataNode defaults, we use a heuristic where we only
                 * drop the cache for large reads.
                 */
                if (cachingStrategy.GetDropBehind() == null)
                {
                    this.dropCacheBehindAllReads   = false;
                    this.dropCacheBehindLargeReads = datanode.GetDnConf().dropCacheBehindReads;
                }
                else
                {
                    this.dropCacheBehindAllReads = this.dropCacheBehindLargeReads =
                        cachingStrategy.GetDropBehind();
                }

                /*
                 * Similarly, if readahead was explicitly requested, we always do it.
                 * Otherwise, we read ahead based on the DataNode settings, and only
                 * when the reads are large.
                 */
                if (cachingStrategy.GetReadahead() == null)
                {
                    this.alwaysReadahead = false;
                    this.readaheadLength = datanode.GetDnConf().readaheadLength;
                }
                else
                {
                    this.alwaysReadahead = true;
                    this.readaheadLength = cachingStrategy.GetReadahead();
                }
                this.datanode = datanode;
                if (verifyChecksum)
                {
                    // To simplify implementation, callers may not specify verification
                    // without sending.
                    Preconditions.CheckArgument(sendChecksum,
                                                "If verifying checksum, currently must also send it.");
                }
                Replica replica;
                long    replicaVisibleLength;
                lock (datanode.data)
                {
                    replica = GetReplica(block, datanode);
                    replicaVisibleLength = replica.GetVisibleLength();
                }
                // if there is a write in progress
                ChunkChecksum chunkChecksum = null;
                if (replica is ReplicaBeingWritten)
                {
                    ReplicaBeingWritten rbw = (ReplicaBeingWritten)replica;
                    WaitForMinLength(rbw, startOffset + length);
                    chunkChecksum = rbw.GetLastChecksumAndDataLen();
                }
                if (replica.GetGenerationStamp() < block.GetGenerationStamp())
                {
                    throw new IOException("Replica gen stamp < block genstamp, block=" + block + ", replica="
                                          + replica);
                }
                else if (replica.GetGenerationStamp() > block.GetGenerationStamp())
                {
                    if (DataNode.Log.IsDebugEnabled())
                    {
                        DataNode.Log.Debug("Bumping up the client provided block's genstamp to latest "
                                           + replica.GetGenerationStamp() + " for block " + block);
                    }
                    block.SetGenerationStamp(replica.GetGenerationStamp());
                }
                if (replicaVisibleLength < 0)
                {
                    throw new IOException("Replica is not readable, block=" + block + ", replica=" +
                                          replica);
                }
                if (DataNode.Log.IsDebugEnabled())
                {
                    DataNode.Log.Debug("block=" + block + ", replica=" + replica);
                }
                // transferToFully() fails on 32 bit platforms for block sizes >= 2GB,
                // use normal transfer in those cases
                this.transferToAllowed = datanode.GetDnConf().transferToAllowed &&
                                         (!is32Bit || length <= int.MaxValue);
                // Obtain a reference before reading data
                this.volumeRef = datanode.data.GetVolume(block).ObtainReference();

                /*
                 * (corruptChecksumOK, meta_file_exist): operation
                 * True,   True: will verify checksum
                 * True,  False: No verify, e.g., need to read data from a corrupted file
                 * False,  True: will verify checksum
                 * False, False: throws IOException file not found
                 */
                DataChecksum csum = null;
                if (verifyChecksum || sendChecksum)
                {
                    LengthInputStream metaIn = null;
                    bool keepMetaInOpen      = false;
                    try
                    {
                        metaIn = datanode.data.GetMetaDataInputStream(block);
                        if (!corruptChecksumOk || metaIn != null)
                        {
                            if (metaIn == null)
                            {
                                //need checksum but meta-data not found
                                throw new FileNotFoundException("Meta-data not found for " + block);
                            }
                            // The meta file will contain only the header if the NULL checksum
                            // type was used, or if the replica was written to transient storage.
                            // Checksum verification is not performed for replicas on transient
                            // storage.  The header is important for determining the checksum
                            // type later when lazy persistence copies the block to non-transient
                            // storage and computes the checksum.
                            if (metaIn.GetLength() > BlockMetadataHeader.GetHeaderSize())
                            {
                                checksumIn = new DataInputStream(
                                    new BufferedInputStream(metaIn, HdfsConstants.IoFileBufferSize));
                                csum           = BlockMetadataHeader.ReadDataChecksum(checksumIn, block);
                                keepMetaInOpen = true;
                            }
                        }
                        else
                        {
                            Log.Warn("Could not find metadata file for " + block);
                        }
                    }
                    finally
                    {
                        if (!keepMetaInOpen)
                        {
                            IOUtils.CloseStream(metaIn);
                        }
                    }
                }
                if (csum == null)
                {
                    // The number of bytes per checksum here determines the alignment
                    // of reads: we always start reading at a checksum chunk boundary,
                    // even if the checksum type is NULL. So, choosing too big of a value
                    // would risk sending too much unnecessary data. 512 (1 disk sector)
                    // is likely to result in minimal extra IO.
                    csum = DataChecksum.NewDataChecksum(DataChecksum.Type.Null, 512);
                }

                /*
                 * If chunkSize is very large, then the metadata file is mostly
                 * corrupted. For now just truncate bytesPerchecksum to blockLength.
                 */
                int size = csum.GetBytesPerChecksum();
                if (size > 10 * 1024 * 1024 && size > replicaVisibleLength)
                {
                    csum = DataChecksum.NewDataChecksum(csum.GetChecksumType(),
                                                        Math.Max((int)replicaVisibleLength, 10 * 1024 * 1024));
                    size = csum.GetBytesPerChecksum();
                }
                chunkSize    = size;
                checksum     = csum;
                checksumSize = checksum.GetChecksumSize();
                length       = length < 0 ? replicaVisibleLength : length;
                // end is either last byte on disk or the length for which we have a
                // checksum
                long end = chunkChecksum != null ? chunkChecksum.GetDataLength()
                                                 : replica.GetBytesOnDisk();

                if (startOffset < 0 || startOffset > end || (length + startOffset) > end)
                {
                    string msg = " Offset " + startOffset + " and length " + length + " don't match block "
                                 + block + " ( blockLen " + end + " )";
                    Log.Warn(datanode.GetDNRegistrationForBP(block.GetBlockPoolId()) + ":sendBlock() : "
                             + msg);
                    throw new IOException(msg);
                }
                // Ensure the read offset is positioned at the beginning of a chunk
                offset = startOffset - (startOffset % chunkSize);
                if (length >= 0)
                {
                    // Ensure endOffset points to end of chunk.
                    long tmpLen = startOffset + length;
                    if (tmpLen % chunkSize != 0)
                    {
                        tmpLen += (chunkSize - tmpLen % chunkSize);
                    }
                    if (tmpLen < end)
                    {
                        // will use on-disk checksum here since the end is a stable chunk
                        end = tmpLen;
                    }
                    else
                    {
                        if (chunkChecksum != null)
                        {
                            // last chunk is changing. flag that we need to use in-memory checksum
                            this.lastChunkChecksum = chunkChecksum;
                        }
                    }
                }
                endOffset = end;
                // seek to the right offsets
                if (offset > 0 && checksumIn != null)
                {
                    long checksumSkip = (offset / chunkSize) * checksumSize;
                    // note blockInStream is seeked when created below
                    if (checksumSkip > 0)
                    {
                        // Should we use seek() for checksum file as well?
                        IOUtils.SkipFully(checksumIn, checksumSkip);
                    }
                }
                seqno = 0;
                if (DataNode.Log.IsDebugEnabled())
                {
                    DataNode.Log.Debug("replica=" + replica);
                }
                blockIn = datanode.data.GetBlockInputStream(block, offset);
                // seek to offset
                if (blockIn is FileInputStream)
                {
                    blockInFd = ((FileInputStream)blockIn).GetFD();
                }
                else
                {
                    blockInFd = null;
                }
            }
            catch (IOException)
            {
                IOUtils.CloseStream(this);
                IOUtils.CloseStream(blockIn);
                throw;
            }
        }
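Tying the constructor parameters documented above back to the call pattern already shown in Example No. 2, a condensed sketch of the usual construct-then-send-then-cleanup sequence follows; dataOut and throttler are placeholders assumed to be supplied by the caller.

        // Sketch of the construct/send pattern from Example No. 2: stream a replica using
        // the DataNode's drop-behind caching strategy. 'dataOut' and 'throttler' are
        // placeholders for the caller's output stream and DataTransferThrottler.
        BlockSender sender = null;
        try
        {
            sender = new BlockSender(block, 0, -1, false, true, true, datanode, null,
                                     CachingStrategy.NewDropBehind());
            long bytesSent = sender.SendBlock(dataOut, null, throttler);  // bytes actually sent
        }
        finally
        {
            IOUtils.Cleanup(null, sender);  // always release the underlying block/meta streams
        }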