Example #1
 /// <exception cref="System.IO.IOException"/>
 internal virtual void WriteBlock(ExtendedBlock block, BlockConstructionStage stage,
                                  long newGS, DataChecksum checksum)
 {
     sender.WriteBlock(block, StorageType.Default, BlockTokenSecretManager.DummyToken,
                       "cl", new DatanodeInfo[1], new StorageType[1], null, stage, 0,
                       block.GetNumBytes(), block.GetNumBytes(), newGS, checksum,
                       CachingStrategy.NewDefaultStrategy(), false, false, null);
 }
        /// <exception cref="System.IO.IOException"/>
        public static void CheckMetaInfo(ExtendedBlock b, DataNode dn)
        {
            Block metainfo = DataNodeTestUtils.GetFSDataset(dn)
                             .GetStoredBlock(b.GetBlockPoolId(), b.GetBlockId());

            NUnit.Framework.Assert.AreEqual(b.GetBlockId(), metainfo.GetBlockId());
            NUnit.Framework.Assert.AreEqual(b.GetNumBytes(), metainfo.GetNumBytes());
        }
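
The CheckMetaInfo helper above is typically driven from a LocatedBlock, as in the block-synchronization test further down this page. A minimal usage sketch, assuming a running cluster and a locatedblock obtained the same way as in those later examples (every call below appears elsewhere on this page):

        // Sketch only: `cluster` and `locatedblock` are assumed to exist as in the later tests.
        ExtendedBlock lastblock = locatedblock.GetBlock();
        DataNode      dn        = cluster.GetDataNode(locatedblock.GetLocations()[0].GetIpcPort());
        CheckMetaInfo(lastblock, dn);  // asserts the stored block id and length match the client's view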
Example #3
        /// <summary>Writes a file, verifies it, and closes it.</summary>
        /// <remarks>
        /// Writes a file, verifies it, and closes it. Then the lengths of the blocks
        /// are deliberately corrupted and a BlockReport is forced.
        /// The modified block lengths must be ignored.
        /// </remarks>
        /// <exception cref="System.IO.IOException">on an error</exception>
        public virtual void BlockReport_01()
        {
            string        MethodName = GenericTestUtils.GetMethodName();
            Path          filePath   = new Path("/" + MethodName + ".dat");
            AList <Block> blocks     = PrepareForRide(filePath, MethodName, FileSize);

            if (Log.IsDebugEnabled())
            {
                Log.Debug("Number of blocks allocated " + blocks.Count);
            }
            long[] oldLengths = new long[blocks.Count];
            int    tempLen;

            for (int i = 0; i < blocks.Count; i++)
            {
                Block b = blocks[i];
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Block " + b.GetBlockName() + " before\t" + "Size " + b.GetNumBytes());
                }
                oldLengths[i] = b.GetNumBytes();
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Setting new length");
                }
                tempLen = rand.Next(BlockSize);
                b.Set(b.GetBlockId(), tempLen, b.GetGenerationStamp());
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Block " + b.GetBlockName() + " after\t " + "Size " + b.GetNumBytes());
                }
            }
            // all blocks belong to the same file, hence same BP
            DataNode             dn     = cluster.GetDataNodes()[DnN0];
            string               poolId = cluster.GetNamesystem().GetBlockPoolId();
            DatanodeRegistration dnR    = dn.GetDNRegistrationForBP(poolId);

            StorageBlockReport[] reports = GetBlockReports(dn, poolId, false, false);
            SendBlockReports(dnR, poolId, reports);
            IList <LocatedBlock> blocksAfterReport = DFSTestUtil.GetAllBlocks(fs.Open(filePath));

            if (Log.IsDebugEnabled())
            {
                Log.Debug("After mods: Number of blocks allocated " + blocksAfterReport.Count);
            }
            for (int i_1 = 0; i_1 < blocksAfterReport.Count; i_1++)
            {
                ExtendedBlock b = blocksAfterReport[i_1].GetBlock();
                NUnit.Framework.Assert.AreEqual("Length of " + i_1 + "th block is incorrect", oldLengths
                                                [i_1], b.GetNumBytes());
            }
        }
Example #4
        /// <summary>Convert an ExtendedBlock to a Json map.</summary>
        private static IDictionary <string, object> ToJsonMap(ExtendedBlock extendedblock)
        {
            if (extendedblock == null)
            {
                return(null);
            }
            IDictionary <string, object> m = new SortedDictionary <string, object>();

            m["blockPoolId"]     = extendedblock.GetBlockPoolId();
            m["blockId"]         = extendedblock.GetBlockId();
            m["numBytes"]        = extendedblock.GetNumBytes();
            m["generationStamp"] = extendedblock.GetGenerationStamp();
            return(m);
        }
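
For reference, a minimal sketch of what the map produced by ToJsonMap above contains. The ExtendedBlock constructor and the four getters are the same ones used elsewhere on this page; the concrete pool id, block id, length, and generation stamp are made-up illustration values:

            // Hypothetical values; only the constructor/getters and the four keys come from the code above.
            ExtendedBlock blk = new ExtendedBlock("BP-1", 1073741825L, 134217728L, 1001L);
            IDictionary <string, object> json = ToJsonMap(blk);
            // json["blockPoolId"]     == "BP-1"
            // json["blockId"]         == 1073741825L
            // json["numBytes"]        == 134217728L
            // json["generationStamp"] == 1001L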
        /// <summary>TC7: Corrupted replicas are present.</summary>
        /// <exception cref="System.IO.IOException">an exception might be thrown</exception>
        /// <exception cref="System.Exception"/>
        private void TestTC7(bool appendToNewBlock)
        {
            short repl = 2;
            Path  p    = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file with replication factor of 2. Write half block of data. Close file.
            int len1 = (int)(BlockSize / 2);

            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, repl, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            DFSTestUtil.WaitReplication(fs, p, repl);
            //b. Log into one datanode that has one replica of this block.
            //   Find the block file on this datanode and truncate it to zero size.
            LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(p.ToString()
                                                                                 , 0L, len1);

            NUnit.Framework.Assert.AreEqual(1, locatedblocks.LocatedBlockCount());
            LocatedBlock  lb  = locatedblocks.Get(0);
            ExtendedBlock blk = lb.GetBlock();

            NUnit.Framework.Assert.AreEqual(len1, lb.GetBlockSize());
            DatanodeInfo[] datanodeinfos = lb.GetLocations();
            NUnit.Framework.Assert.AreEqual(repl, datanodeinfos.Length);
            DataNode dn = cluster.GetDataNode(datanodeinfos[0].GetIpcPort());
            FilePath f  = DataNodeTestUtils.GetBlockFile(dn, blk.GetBlockPoolId(),
                                                         blk.GetLocalBlock());
            RandomAccessFile raf = new RandomAccessFile(f, "rw");

            AppendTestUtil.Log.Info("dn=" + dn + ", blk=" + blk + " (length=" + blk.GetNumBytes() + ")");
            NUnit.Framework.Assert.AreEqual(len1, raf.Length());
            raf.SetLength(0);
            raf.Close();
            //c. Open file in "append mode".  Append a new block worth of data. Close file.
            int len2 = (int)BlockSize;

            {
                FSDataOutputStream @out = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append
                                                                                     , CreateFlag.NewBlock), 4096, null) : fs.Append(p);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //d. Reopen file and read two blocks worth of data.
            AppendTestUtil.Check(fs, p, len1 + len2);
        }
Example #6
        /// <exception cref="System.IO.IOException"/>
        private void WriteZeroLengthPacket(ExtendedBlock block, string description)
        {
            PacketHeader hdr = new PacketHeader(8,                    // size of packet
                                                block.GetNumBytes(),  // OffsetInBlock
                                                100,                  // sequencenumber
                                                true,                 // lastPacketInBlock
                                                0,                    // chunk length
                                                false);               // sync block

            hdr.Write(sendOut);
            sendOut.WriteInt(0);  // zero checksum

            //ok finally write a block with 0 len
            SendResponse(DataTransferProtos.Status.Success, string.Empty, null, recvOut);
            new PipelineAck(100, new int[] { PipelineAck.CombineHeader(PipelineAck.ECN.Disabled
                                                                       , DataTransferProtos.Status.Success) }).Write(recvOut);
            SendRecvData(description, false);
        }
        public virtual void TestBlockSynchronization()
        {
            int           OrgFileSize = 3000;
            Configuration conf        = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
            cluster.WaitActive();
            //create a file
            DistributedFileSystem dfs = cluster.GetFileSystem();
            string filestr            = "/foo";
            Path   filepath           = new Path(filestr);

            DFSTestUtil.CreateFile(dfs, filepath, OrgFileSize, ReplicationNum, 0L);
            NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
            DFSTestUtil.WaitReplication(dfs, filepath, ReplicationNum);
            //get block info for the last block
            LocatedBlock locatedblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs
                                                                                      .GetNamenode(), filestr);

            DatanodeInfo[] datanodeinfos = locatedblock.GetLocations();
            NUnit.Framework.Assert.AreEqual(ReplicationNum, datanodeinfos.Length);
            //connect to data nodes
            DataNode[] datanodes = new DataNode[ReplicationNum];
            for (int i = 0; i < ReplicationNum; i++)
            {
                datanodes[i] = cluster.GetDataNode(datanodeinfos[i].GetIpcPort());
                NUnit.Framework.Assert.IsTrue(datanodes[i] != null);
            }
            //verify Block Info
            ExtendedBlock lastblock = locatedblock.GetBlock();

            DataNode.Log.Info("newblocks=" + lastblock);
            for (int i_1 = 0; i_1 < ReplicationNum; i_1++)
            {
                CheckMetaInfo(lastblock, datanodes[i_1]);
            }
            DataNode.Log.Info("dfs.dfs.clientName=" + dfs.dfs.clientName);
            cluster.GetNameNodeRpc().Append(filestr, dfs.dfs.clientName,
                                            new EnumSetWritable <CreateFlag>(EnumSet.Of(CreateFlag.Append)));
            // expire lease to trigger block recovery.
            WaitLeaseRecovery(cluster);
            Block[] updatedmetainfo = new Block[ReplicationNum];
            long    oldSize         = lastblock.GetNumBytes();

            lastblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(),
                                                                      filestr).GetBlock();
            long currentGS = lastblock.GetGenerationStamp();

            for (int i_2 = 0; i_2 < ReplicationNum; i_2++)
            {
                updatedmetainfo[i_2] = DataNodeTestUtils.GetFSDataset(datanodes[i_2])
                                       .GetStoredBlock(lastblock.GetBlockPoolId(), lastblock.GetBlockId());
                NUnit.Framework.Assert.AreEqual(lastblock.GetBlockId(), updatedmetainfo[i_2].GetBlockId());
                NUnit.Framework.Assert.AreEqual(oldSize, updatedmetainfo[i_2].GetNumBytes());
                NUnit.Framework.Assert.AreEqual(currentGS, updatedmetainfo[i_2].GetGenerationStamp());
            }
            // verify that lease recovery does not occur when namenode is in safemode
            System.Console.Out.WriteLine("Testing that lease recovery cannot happen during safemode."
                                         );
            filestr  = "/foo.safemode";
            filepath = new Path(filestr);
            dfs.Create(filepath, (short)1);
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                 false);
            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
            DFSTestUtil.WaitReplication(dfs, filepath, (short)1);
            WaitLeaseRecovery(cluster);
            // verify that we still cannot recover the lease
            LeaseManager lm = NameNodeAdapter.GetLeaseManager(cluster.GetNamesystem());

            NUnit.Framework.Assert.IsTrue("Found " + lm.CountLease() + " lease, expected 1",
                                          lm.CountLease() == 1);
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                 false);
        }
        public virtual void TestUpdatePipelineAfterDelete()
        {
            Configuration  conf    = new HdfsConfiguration();
            Path           file    = new Path("/test-file");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            try
            {
                FileSystem        fs       = cluster.GetFileSystem();
                NamenodeProtocols namenode = cluster.GetNameNodeRpc();
                DFSOutputStream   @out     = null;
                try
                {
                    // Create a file and make sure a block is allocated for it.
                    @out = (DFSOutputStream)(fs.Create(file).GetWrappedStream());
                    @out.Write(1);
                    @out.Hflush();
                    // Create a snapshot that includes the file.
                    SnapshotTestHelper.CreateSnapshot((DistributedFileSystem)fs, new Path("/"), "s1");
                    // Grab the block info of this file for later use.
                    FSDataInputStream @in      = null;
                    ExtendedBlock     oldBlock = null;
                    try
                    {
                        @in      = fs.Open(file);
                        oldBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                    }
                    finally
                    {
                        IOUtils.CloseStream(@in);
                    }
                    // Allocate a new block ID/gen stamp so we can simulate pipeline
                    // recovery.
                    string       clientName      = ((DistributedFileSystem)fs).GetClient().GetClientName();
                    LocatedBlock newLocatedBlock = namenode.UpdateBlockForPipeline(oldBlock, clientName
                                                                                   );
                    ExtendedBlock newBlock = new ExtendedBlock(oldBlock.GetBlockPoolId(), oldBlock.GetBlockId(),
                                                               oldBlock.GetNumBytes(), newLocatedBlock.GetBlock().GetGenerationStamp());
                    // Delete the file from the present FS. It will still exist in the
                    // previously-created snapshot. This will log an OP_DELETE for the
                    // file in question.
                    fs.Delete(file, true);
                    // Simulate a pipeline recovery, wherein a new block is allocated
                    // for the existing block, resulting in an OP_UPDATE_BLOCKS being
                    // logged for the file in question.
                    try
                    {
                        namenode.UpdatePipeline(clientName, oldBlock, newBlock,
                                                newLocatedBlock.GetLocations(), newLocatedBlock.GetStorageIDs());
                    }
                    catch (IOException ioe)
                    {
                        // normal
                        GenericTestUtils.AssertExceptionContains("does not exist or it is not under construction"
                                                                 , ioe);
                    }
                    // Make sure the NN can restart with the edit logs as we have them now.
                    cluster.RestartNameNode(true);
                }
                finally
                {
                    IOUtils.CloseStream(@out);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #9
 /* Fill up a cluster with <code>numNodes</code> datanodes
  * whose used space is <code>size</code>.
  */
 /// <exception cref="System.IO.IOException"/>
 /// <exception cref="System.Exception"/>
 /// <exception cref="Sharpen.TimeoutException"/>
 private static ExtendedBlock[][] GenerateBlocks(TestBalancerWithMultipleNameNodes.Suite s,
                                                 long size)
 {
     ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.Length][];
     for (int n = 0; n < s.clients.Length; n++)
     {
         long fileLen = size / s.replication;
         CreateFile(s, n, fileLen);
         IList <LocatedBlock> locatedBlocks = s.clients[n].GetBlockLocations(FileName, 0, fileLen
                                                                             ).GetLocatedBlocks();
         int numOfBlocks = locatedBlocks.Count;
         blocks[n] = new ExtendedBlock[numOfBlocks];
         for (int i = 0; i < numOfBlocks; i++)
         {
             ExtendedBlock b = locatedBlocks[i].GetBlock();
              blocks[n][i] = new ExtendedBlock(b.GetBlockPoolId(), b.GetBlockId(),
                                               b.GetNumBytes(), b.GetGenerationStamp());
         }
     }
     return(blocks);
 }
        public virtual void TestUpdateReplicaUnderRecovery()
        {
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
                cluster.WaitActive();
                string bpid = cluster.GetNamesystem().GetBlockPoolId();
                //create a file
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string filestr            = "/foo";
                Path   filepath           = new Path(filestr);
                DFSTestUtil.CreateFile(dfs, filepath, 1024L, (short)3, 0L);
                //get block info
                LocatedBlock locatedblock = GetLastLocatedBlock(DFSClientAdapter.GetDFSClient(dfs
                                                                                              ).GetNamenode(), filestr);
                DatanodeInfo[] datanodeinfo = locatedblock.GetLocations();
                NUnit.Framework.Assert.IsTrue(datanodeinfo.Length > 0);
                //get DataNode and FSDataset objects
                DataNode datanode = cluster.GetDataNode(datanodeinfo[0].GetIpcPort());
                NUnit.Framework.Assert.IsTrue(datanode != null);
                //initReplicaRecovery
                ExtendedBlock         b          = locatedblock.GetBlock();
                long                  recoveryid = b.GetGenerationStamp() + 1;
                long                  newlength  = b.GetNumBytes() - 1;
                FsDatasetSpi <object> fsdataset  = DataNodeTestUtils.GetFSDataset(datanode);
                ReplicaRecoveryInfo   rri        = fsdataset.InitReplicaRecovery(
                    new BlockRecoveryCommand.RecoveringBlock(b, null, recoveryid));
                //check replica
                ReplicaInfo replica = FsDatasetTestUtil.FetchReplicaInfo(fsdataset, bpid, b.GetBlockId());
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.ReplicaState.Rur, replica.GetState());
                //check meta data before update
                FsDatasetImpl.CheckReplicaFiles(replica);
                {
                    //case "THIS IS NOT SUPPOSED TO HAPPEN"
                    //with (block length) != (stored replica's on disk length).
                    //create a block with same id and gs but different length.
                    ExtendedBlock tmp = new ExtendedBlock(b.GetBlockPoolId(), rri.GetBlockId(),
                                                          rri.GetNumBytes() - 1, rri.GetGenerationStamp());
                    try
                    {
                        //update should fail
                        fsdataset.UpdateReplicaUnderRecovery(tmp, recoveryid, tmp.GetBlockId(), newlength
                                                             );
                        NUnit.Framework.Assert.Fail();
                    }
                    catch (IOException ioe)
                    {
                        System.Console.Out.WriteLine("GOOD: getting " + ioe);
                    }
                }
                //update
                string storageID = fsdataset.UpdateReplicaUnderRecovery(
                    new ExtendedBlock(b.GetBlockPoolId(), rri), recoveryid, rri.GetBlockId(), newlength);
                NUnit.Framework.Assert.IsTrue(storageID != null);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        /// <summary>The following test first creates a file.</summary>
        /// <remarks>
        /// The following test first creates a file.
        /// It verifies the block information from a datanode.
        /// Then, it updates the block with new information and verifies again.
        /// </remarks>
        /// <param name="useDnHostname">whether DNs should connect to other DNs by hostname</param>
        /// <exception cref="System.Exception"/>
        private void CheckBlockMetaDataInfo(bool useDnHostname)
        {
            MiniDFSCluster cluster = null;

            conf.SetBoolean(DFSConfigKeys.DfsDatanodeUseDnHostname, useDnHostname);
            if (useDnHostname)
            {
                // Since the mini cluster only listens on the loopback we have to
                // ensure the hostname used to access DNs maps to the loopback. We
                // do this by telling the DN to advertise localhost as its hostname
                // instead of the default hostname.
                conf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, "localhost");
            }
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3)
                              .CheckDataNodeHostConfig(true).Build();
                cluster.WaitActive();
                //create a file
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string filestr            = "/foo";
                Path   filepath           = new Path(filestr);
                DFSTestUtil.CreateFile(dfs, filepath, 1024L, (short)3, 0L);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
                //get block info
                LocatedBlock locatedblock = GetLastLocatedBlock(DFSClientAdapter.GetDFSClient(dfs
                                                                                              ).GetNamenode(), filestr);
                DatanodeInfo[] datanodeinfo = locatedblock.GetLocations();
                NUnit.Framework.Assert.IsTrue(datanodeinfo.Length > 0);
                //connect to a data node
                DataNode datanode         = cluster.GetDataNode(datanodeinfo[0].GetIpcPort());
                InterDatanodeProtocol idp = DataNodeTestUtils.CreateInterDatanodeProtocolProxy(datanode
                                                                                               , datanodeinfo[0], conf, useDnHostname);
                // Stop the block scanners.
                datanode.GetBlockScanner().RemoveAllVolumeScanners();
                //verify BlockMetaDataInfo
                ExtendedBlock b = locatedblock.GetBlock();
                InterDatanodeProtocol.Log.Info("b=" + b + ", " + b.GetType());
                CheckMetaInfo(b, datanode);
                long recoveryId = b.GetGenerationStamp() + 1;
                idp.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock(
                                            b, locatedblock.GetLocations(), recoveryId));
                //verify updateBlock
                ExtendedBlock newblock = new ExtendedBlock(b.GetBlockPoolId(), b.GetBlockId(),
                                                           b.GetNumBytes() / 2, b.GetGenerationStamp() + 1);
                idp.UpdateReplicaUnderRecovery(b, recoveryId, b.GetBlockId(), newblock.GetNumBytes());
                CheckMetaInfo(newblock, datanode);
                // Verify correct null response trying to init recovery for a missing block
                ExtendedBlock badBlock = new ExtendedBlock("fake-pool", b.GetBlockId(), 0, 0);
                NUnit.Framework.Assert.IsNull(idp.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock(
                                                  badBlock, locatedblock.GetLocations(), recoveryId)));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }