public virtual void TestBlockIdGeneration()
        {
            // Verify that the NameNode allocates block IDs sequentially for a
            // multi-block file.
            Configuration conf = new HdfsConfiguration();
            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitActive();
                FileSystem fs = cluster.GetFileSystem();
                // Create a file that is 10 blocks long.
                Path path = new Path("testBlockIdGeneration.dat");
                DFSTestUtil.CreateFile(fs, path, IoSize, BlockSize * 10, BlockSize, Replication, Seed);
                IList<LocatedBlock> allocated = DFSTestUtil.GetAllBlocks(fs, path);
                Log.Info("Block0 id is " + allocated[0].GetBlock().GetBlockId());
                long expectedId = allocated[0].GetBlock().GetBlockId() + 1;
                // Each subsequent block must carry the previous block's ID plus one.
                for (int idx = 1; idx < allocated.Count; ++idx)
                {
                    long actualId = allocated[idx].GetBlock().GetBlockId();
                    Log.Info("Block" + idx + " id is " + actualId);
                    Assert.AssertThat(actualId, CoreMatchers.Is(expectedId));
                    ++expectedId;
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
// Example #2
        /// <summary>Test write a file, verifies and closes it.</summary>
        /// <remarks>
        /// Test write a file, verifies and closes it. Then the length of the blocks
        /// are messed up and BlockReport is forced.
        /// The modification of blocks' length has to be ignored
        /// </remarks>
        /// <exception cref="System.IO.IOException">on an error</exception>
        public virtual void BlockReport_01()
        {
            string methodName = GenericTestUtils.GetMethodName();
            Path filePath = new Path("/" + methodName + ".dat");
            AList<Block> blocks = PrepareForRide(filePath, methodName, FileSize);

            if (Log.IsDebugEnabled())
            {
                Log.Debug("Number of blocks allocated " + blocks.Count);
            }
            // Remember each block's true length, then corrupt the length in place.
            long[] oldLengths = new long[blocks.Count];
            for (int i = 0; i < blocks.Count; i++)
            {
                Block b = blocks[i];
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Block " + b.GetBlockName() + " before\t" + "Size " + b.GetNumBytes());
                }
                oldLengths[i] = b.GetNumBytes();
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Setting new length");
                }
                int bogusLen = rand.Next(BlockSize);
                b.Set(b.GetBlockId(), bogusLen, b.GetGenerationStamp());
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Block " + b.GetBlockName() + " after\t " + "Size " + b.GetNumBytes());
                }
            }
            // all blocks belong to the same file, hence same BP
            DataNode dn = cluster.GetDataNodes()[DnN0];
            string poolId = cluster.GetNamesystem().GetBlockPoolId();
            DatanodeRegistration dnR = dn.GetDNRegistrationForBP(poolId);
            StorageBlockReport[] reports = GetBlockReports(dn, poolId, false, false);
            SendBlockReports(dnR, poolId, reports);
            IList<LocatedBlock> blocksAfterReport = DFSTestUtil.GetAllBlocks(fs.Open(filePath));

            if (Log.IsDebugEnabled())
            {
                Log.Debug("After mods: Number of blocks allocated " + blocksAfterReport.Count);
            }
            // The NameNode must have ignored the bogus lengths in the block report.
            for (int j = 0; j < blocksAfterReport.Count; j++)
            {
                ExtendedBlock b = blocksAfterReport[j].GetBlock();
                NUnit.Framework.Assert.AreEqual("Length of " + j + "th block is incorrect",
                                                oldLengths[j], b.GetNumBytes());
            }
        }
// Example #3
        /// <summary>Test to verify the race between finalizeBlock and Lease recovery</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRaceBetweenReplicaRecoveryAndFinalizeBlock()
        {
            TearDown();
            // Stop the Mocked DN started in startup()
            Configuration conf = new HdfsConfiguration();

            // Short xceiver stop timeout so the recovery side can interrupt the
            // writer quickly and the race actually plays out within the test window.
            conf.Set(DFSConfigKeys.DfsDatanodeXceiverStopTimeoutMillisKey, "1000");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitClusterUp();
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path path = new Path("/test");
                FSDataOutputStream @out = fs.Create(path);
                @out.WriteBytes("data");
                // Hsync guarantees the block exists (as RBW) on the DataNode before
                // recovery is started against it.
                @out.Hsync();
                IList <LocatedBlock> blocks             = DFSTestUtil.GetAllBlocks(fs.Open(path));
                LocatedBlock         block              = blocks[0];
                DataNode             dataNode           = cluster.GetDataNodes()[0];
                AtomicBoolean        recoveryInitResult = new AtomicBoolean(true);
                // _Thread_612 is an anonymous-class helper defined elsewhere in this
                // file; presumably it initiates replica recovery for 'block' on the
                // DataNode and records the outcome in recoveryInitResult — TODO confirm.
                Sharpen.Thread       recoveryThread     = new _Thread_612(block, dataNode, recoveryInitResult
                                                                          );
                recoveryThread.Start();
                try
                {
                    // Closing the stream finalizes the block, racing with the
                    // concurrently running replica recovery above.
                    @out.Close();
                }
                catch (IOException e)
                {
                    // If recovery wins the race, the close is expected to fail.
                    NUnit.Framework.Assert.IsTrue("Writing should fail", e.Message.Contains("are bad. Aborting..."
                                                                                            ));
                }
                finally
                {
                    recoveryThread.Join();
                }
                NUnit.Framework.Assert.IsTrue("Recovery should be initiated successfully", recoveryInitResult
                                              .Get());
                // Completing the recovery with a bumped generation stamp must still
                // succeed after the finalize/recovery race.
                dataNode.UpdateReplicaUnderRecovery(block.GetBlock(), block.GetBlock().GetGenerationStamp
                                                        () + 1, block.GetBlock().GetBlockId(), block.GetBlockSize());
            }
            finally
            {
                if (null != cluster)
                {
                    cluster.Shutdown();
                    cluster = null;
                }
            }
        }
        /// <summary>
        /// Verify that when the sequential block ID counter is rewound, the
        /// NameNode skips already-allocated IDs instead of handing out colliding
        /// ones: the second file's IDs continue right after the first file's.
        /// </summary>
        public virtual void TestTriggerBlockIdCollision()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitActive();
                FileSystem   fs         = cluster.GetFileSystem();
                FSNamesystem fsn        = cluster.GetNamesystem();
                int          blockCount = 10;
                // Create a file with a few blocks to rev up the global block ID
                // counter.
                Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
                DFSTestUtil.CreateFile(fs, path1, IoSize, BlockSize * blockCount, BlockSize, Replication
                                       , Seed);
                IList <LocatedBlock> blocks1 = DFSTestUtil.GetAllBlocks(fs, path1);
                // Rewind the block ID counter in the name system object. This will result
                // in block ID collisions when we try to allocate new blocks.
                SequentialBlockIdGenerator blockIdGenerator = fsn.GetBlockIdManager().GetBlockIdGenerator
                                                                  ();
                blockIdGenerator.SetCurrentValue(blockIdGenerator.GetCurrentValue() - 5);
                // Trigger collisions by creating a new file.
                Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
                DFSTestUtil.CreateFile(fs, path2, IoSize, BlockSize * blockCount, BlockSize, Replication
                                       , Seed);
                IList <LocatedBlock> blocks2 = DFSTestUtil.GetAllBlocks(fs, path2);
                Assert.AssertThat(blocks2.Count, CoreMatchers.Is(blockCount));
                // Make sure that file2 block IDs start immediately after file1's
                // last block. Index with blockCount - 1 (was a hard-coded 9) so the
                // check stays correct if blockCount is ever changed.
                Assert.AssertThat(blocks2[0].GetBlock().GetBlockId(),
                                  CoreMatchers.Is(blocks1[blockCount - 1].GetBlock().GetBlockId() + 1));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
// Example #5
        /// <summary>
        /// Exercises an encrypted append that must add a replacement DataNode to
        /// the write pipeline, which triggers an encrypted block transfer.
        /// </summary>
        public virtual void TestEncryptedAppendRequiringBlockTransfer()
        {
            MiniDFSCluster cluster = null;
            try
            {
                Configuration conf = new Configuration();
                SetEncryptionConfigKeys(conf);
                // start up 4 DNs
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
                FileSystem fs = GetFileSystem(conf);
                // Create a file with replication 3, so its block is on 3 / 4 DNs.
                WriteTestDataToFile(fs);
                NUnit.Framework.Assert.AreEqual(PlainText, DFSTestUtil.ReadFile(fs, TestPath));
                // Shut down one of the DNs holding a block replica.
                FSDataInputStream inStream = fs.Open(TestPath);
                IList<LocatedBlock> located = DFSTestUtil.GetAllBlocks(inStream);
                inStream.Close();
                NUnit.Framework.Assert.AreEqual(1, located.Count);
                NUnit.Framework.Assert.AreEqual(3, located[0].GetLocations().Length);
                DataNode victim = cluster.GetDataNode(located[0].GetLocations()[0].GetIpcPort());
                victim.Shutdown();
                // Reopen the file for append, which will need to add another DN to the
                // pipeline and in doing so trigger a block transfer.
                WriteTestDataToFile(fs);
                NUnit.Framework.Assert.AreEqual(PlainText + PlainText, DFSTestUtil.ReadFile(fs, TestPath));
                fs.Close();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
// Example #6
        /// <summary>
        /// Verifies NamenodeProtocols.UpdateBlockForPipeline (which hands out a new
        /// generation stamp and access token) against: a finalized replica, a
        /// non-existent block, an RBW replica, and wrong/null lease holders.
        /// </summary>
        public virtual void TestGetNewStamp()
        {
            int            numDataNodes = 1;
            Configuration  conf         = new HdfsConfiguration();
            MiniDFSCluster cluster      = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes
                                                                                        ).Build();

            try
            {
                cluster.WaitActive();
                FileSystem        fileSys  = cluster.GetFileSystem();
                NamenodeProtocols namenode = cluster.GetNameNodeRpc();
                /* Test writing to finalized replicas */
                Path file = new Path("dataprotocol.dat");
                DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                // get the first blockid for the file
                ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                // test getNewStampAndToken on a finalized block
                try
                {
                    namenode.UpdateBlockForPipeline(firstBlock, string.Empty);
                    NUnit.Framework.Assert.Fail("Can not get a new GS from a finalized block");
                }
                catch (IOException e)
                {
                    // Expected: only under-construction blocks may get a new stamp.
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("is not under Construction"));
                }
                // test getNewStampAndToken on a non-existent block
                try
                {
                    long          newBlockId = firstBlock.GetBlockId() + 1;
                    ExtendedBlock newBlock   = new ExtendedBlock(firstBlock.GetBlockPoolId(), newBlockId
                                                                 , 0, firstBlock.GetGenerationStamp());
                    namenode.UpdateBlockForPipeline(newBlock, string.Empty);
                    NUnit.Framework.Assert.Fail("Cannot get a new GS from a non-existent block");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("does not exist"));
                }
                /* Test RBW replicas */
                // change first block to a RBW
                DFSOutputStream @out = null;
                try
                {
                    // Append + hflush leaves the file's last block in RBW state.
                    @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                    @out.Write(1);
                    @out.Hflush();
                    FSDataInputStream @in = null;
                    try
                    {
                        // Re-fetch the block: the append may have changed its
                        // generation stamp.
                        @in        = fileSys.Open(file);
                        firstBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                    }
                    finally
                    {
                        IOUtils.CloseStream(@in);
                    }
                    // test non-lease holder
                    DFSClient dfs = ((DistributedFileSystem)fileSys).dfs;
                    try
                    {
                        namenode.UpdateBlockForPipeline(firstBlock, "test" + dfs.clientName);
                        NUnit.Framework.Assert.Fail("Cannot get a new GS for a non lease holder");
                    }
                    catch (LeaseExpiredException e)
                    {
                        NUnit.Framework.Assert.IsTrue(e.Message.StartsWith("Lease mismatch"));
                    }
                    // test null lease holder
                    try
                    {
                        namenode.UpdateBlockForPipeline(firstBlock, null);
                        NUnit.Framework.Assert.Fail("Cannot get a new GS for a null lease holder");
                    }
                    catch (LeaseExpiredException e)
                    {
                        NUnit.Framework.Assert.IsTrue(e.Message.StartsWith("Lease mismatch"));
                    }
                    // test getNewStampAndToken on a rbw block
                    // The actual lease holder on an RBW block must succeed.
                    namenode.UpdateBlockForPipeline(firstBlock, dfs.clientName);
                }
                finally
                {
                    IOUtils.CloseStream(@out);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        /// <summary>
        /// Regression test: an OP_UPDATE_BLOCKS edit logged for a file that was
        /// deleted (but is still referenced by a snapshot) must not prevent the
        /// NameNode from replaying its edit log on restart.
        /// </summary>
        public virtual void TestUpdatePipelineAfterDelete()
        {
            Configuration  conf    = new HdfsConfiguration();
            Path           file    = new Path("/test-file");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            try
            {
                FileSystem        fs       = cluster.GetFileSystem();
                NamenodeProtocols namenode = cluster.GetNameNodeRpc();
                DFSOutputStream   @out     = null;
                try
                {
                    // Create a file and make sure a block is allocated for it.
                    @out = (DFSOutputStream)(fs.Create(file).GetWrappedStream());
                    @out.Write(1);
                    @out.Hflush();
                    // Create a snapshot that includes the file.
                    SnapshotTestHelper.CreateSnapshot((DistributedFileSystem)fs, new Path("/"), "s1");
                    // Grab the block info of this file for later use.
                    FSDataInputStream @in      = null;
                    ExtendedBlock     oldBlock = null;
                    try
                    {
                        @in      = fs.Open(file);
                        oldBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                    }
                    finally
                    {
                        IOUtils.CloseStream(@in);
                    }
                    // Allocate a new block ID/gen stamp so we can simulate pipeline
                    // recovery.
                    string       clientName      = ((DistributedFileSystem)fs).GetClient().GetClientName();
                    LocatedBlock newLocatedBlock = namenode.UpdateBlockForPipeline(oldBlock, clientName
                                                                                   );
                    // Same block ID/length as the old block but with the freshly
                    // issued generation stamp.
                    ExtendedBlock newBlock = new ExtendedBlock(oldBlock.GetBlockPoolId(), oldBlock.GetBlockId
                                                                   (), oldBlock.GetNumBytes(), newLocatedBlock.GetBlock().GetGenerationStamp());
                    // Delete the file from the present FS. It will still exist the
                    // previously-created snapshot. This will log an OP_DELETE for the
                    // file in question.
                    fs.Delete(file, true);
                    // Simulate a pipeline recovery, wherein a new block is allocated
                    // for the existing block, resulting in an OP_UPDATE_BLOCKS being
                    // logged for the file in question.
                    try
                    {
                        namenode.UpdatePipeline(clientName, oldBlock, newBlock, newLocatedBlock.GetLocations
                                                    (), newLocatedBlock.GetStorageIDs());
                    }
                    catch (IOException ioe)
                    {
                        // normal
                        GenericTestUtils.AssertExceptionContains("does not exist or it is not under construction"
                                                                 , ioe);
                    }
                    // Make sure the NN can restart with the edit logs as we have them now.
                    cluster.RestartNameNode(true);
                }
                finally
                {
                    IOUtils.CloseStream(@out);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
// Example #8
        /// <summary>
        /// Drives the DataNode write protocol (via the TestWrite helper) through
        /// every BlockConstructionStage against finalized replicas, brand-new
        /// blocks and RBW (replica-being-written) replicas, checking which
        /// stage/replica combinations are accepted or rejected.
        /// </summary>
        public virtual void TestOpWrite()
        {
            int            numDataNodes = 1;
            // Offset added to an existing block ID so the "new block" IDs cannot
            // clash with IDs the NameNode allocates during the test.
            long           BlockIdFudge = 128;
            Configuration  conf         = new HdfsConfiguration();
            MiniDFSCluster cluster      = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes
                                                                                        ).Build();

            try
            {
                cluster.WaitActive();
                string poolId = cluster.GetNamesystem().GetBlockPoolId();
                datanode = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0], poolId
                                                                    );
                dnAddr = NetUtils.CreateSocketAddr(datanode.GetXferAddr());
                FileSystem fileSys = cluster.GetFileSystem();
                /* Test writing to finalized replicas */
                Path file = new Path("dataprotocol.dat");
                DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                // get the first blockid for the file
                ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                // test PIPELINE_SETUP_CREATE on a finalized block
                TestWrite(firstBlock, BlockConstructionStage.PipelineSetupCreate, 0L, "Cannot create an existing block"
                          , true);
                // test PIPELINE_DATA_STREAMING on a finalized block
                TestWrite(firstBlock, BlockConstructionStage.DataStreaming, 0L, "Unexpected stage"
                          , true);
                // test PIPELINE_SETUP_STREAMING_RECOVERY on an existing block
                long newGS = firstBlock.GetGenerationStamp() + 1;
                TestWrite(firstBlock, BlockConstructionStage.PipelineSetupStreamingRecovery, newGS
                          , "Cannot recover data streaming to a finalized replica", true);
                // test PIPELINE_SETUP_APPEND on an existing block
                newGS = firstBlock.GetGenerationStamp() + 1;
                TestWrite(firstBlock, BlockConstructionStage.PipelineSetupAppend, newGS, "Append to a finalized replica"
                          , false);
                // Keep the local copy in sync with the stamp the append installed.
                firstBlock.SetGenerationStamp(newGS);
                // test PIPELINE_SETUP_APPEND_RECOVERY on an existing block
                file = new Path("dataprotocol1.dat");
                DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                newGS      = firstBlock.GetGenerationStamp() + 1;
                TestWrite(firstBlock, BlockConstructionStage.PipelineSetupAppendRecovery, newGS,
                          "Recover appending to a finalized replica", false);
                // test PIPELINE_CLOSE_RECOVERY on an existing block
                file = new Path("dataprotocol2.dat");
                DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                newGS      = firstBlock.GetGenerationStamp() + 1;
                TestWrite(firstBlock, BlockConstructionStage.PipelineCloseRecovery, newGS, "Recover failed close to a finalized replica"
                          , false);
                firstBlock.SetGenerationStamp(newGS);
                // Test writing to a new block. Don't choose the next sequential
                // block ID to avoid conflicting with IDs chosen by the NN.
                long          newBlockId = firstBlock.GetBlockId() + BlockIdFudge;
                ExtendedBlock newBlock   = new ExtendedBlock(firstBlock.GetBlockPoolId(), newBlockId
                                                             , 0, firstBlock.GetGenerationStamp());
                // test PIPELINE_SETUP_CREATE on a new block
                TestWrite(newBlock, BlockConstructionStage.PipelineSetupCreate, 0L, "Create a new block"
                          , false);
                // test PIPELINE_SETUP_STREAMING_RECOVERY on a new block
                newGS = newBlock.GetGenerationStamp() + 1;
                newBlock.SetBlockId(newBlock.GetBlockId() + 1);
                TestWrite(newBlock, BlockConstructionStage.PipelineSetupStreamingRecovery, newGS,
                          "Recover a new block", true);
                // test PIPELINE_SETUP_APPEND on a new block
                newGS = newBlock.GetGenerationStamp() + 1;
                TestWrite(newBlock, BlockConstructionStage.PipelineSetupAppend, newGS, "Cannot append to a new block"
                          , true);
                // test PIPELINE_SETUP_APPEND_RECOVERY on a new block
                newBlock.SetBlockId(newBlock.GetBlockId() + 1);
                newGS = newBlock.GetGenerationStamp() + 1;
                TestWrite(newBlock, BlockConstructionStage.PipelineSetupAppendRecovery, newGS, "Cannot append to a new block"
                          , true);
                /* Test writing to RBW replicas */
                Path file1 = new Path("dataprotocol1.dat");
                DFSTestUtil.CreateFile(fileSys, file1, 1L, (short)numDataNodes, 0L);
                @out = (DFSOutputStream)(fileSys.Append(file1).GetWrappedStream()
                                                         );
                @out.Write(1);
                // Hflush leaves the last block of file1 in RBW state on the DN.
                @out.Hflush();
                FSDataInputStream @in = fileSys.Open(file1);
                firstBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                firstBlock.SetNumBytes(2L);
                try
                {
                    // test PIPELINE_SETUP_CREATE on a RBW block
                    TestWrite(firstBlock, BlockConstructionStage.PipelineSetupCreate, 0L, "Cannot create a RBW block"
                              , true);
                    // test PIPELINE_SETUP_APPEND on an existing block
                    newGS = firstBlock.GetGenerationStamp() + 1;
                    TestWrite(firstBlock, BlockConstructionStage.PipelineSetupAppend, newGS, "Cannot append to a RBW replica"
                              , true);
                    // test PIPELINE_SETUP_APPEND on an existing block
                    TestWrite(firstBlock, BlockConstructionStage.PipelineSetupAppendRecovery, newGS,
                              "Recover append to a RBW replica", false);
                    firstBlock.SetGenerationStamp(newGS);
                    // test PIPELINE_SETUP_STREAMING_RECOVERY on a RBW block
                    file = new Path("dataprotocol2.dat");
                    DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                    // NOTE(review): @out and @in are reassigned below without closing
                    // the streams opened above for file1; the finally block only
                    // closes the latest references, so the earlier handles leak.
                    // Confirm this is acceptable/intentional for the test.
                    @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                    @out.Write(1);
                    @out.Hflush();
                    @in        = fileSys.Open(file);
                    firstBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                    firstBlock.SetNumBytes(2L);
                    newGS = firstBlock.GetGenerationStamp() + 1;
                    TestWrite(firstBlock, BlockConstructionStage.PipelineSetupStreamingRecovery, newGS
                              , "Recover a RBW replica", false);
                }
                finally
                {
                    IOUtils.CloseStream(@in);
                    IOUtils.CloseStream(@out);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        public virtual void TestRead()
        {
            MiniDFSCluster cluster      = null;
            int            numDataNodes = 2;
            Configuration  conf         = GetConf(numDataNodes);

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes).Build();
                cluster.WaitActive();
                NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
                NameNode                nn      = cluster.GetNameNode();
                NamenodeProtocols       nnProto = nn.GetRpcServer();
                BlockManager            bm      = nn.GetNamesystem().GetBlockManager();
                BlockTokenSecretManager sm      = bm.GetBlockTokenSecretManager();
                // set a short token lifetime (1 second) initially
                SecurityTestUtil.SetBlockTokenLifetime(sm, 1000L);
                Path       fileToRead = new Path(FileToRead);
                FileSystem fs         = cluster.GetFileSystem();
                CreateFile(fs, fileToRead);

                /*
                 * setup for testing expiration handling of cached tokens
                 */
                // read using blockSeekTo(). Acquired tokens are cached in in1
                FSDataInputStream in1 = fs.Open(fileToRead);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
                // read using blockSeekTo(). Acquired tokens are cached in in2
                FSDataInputStream in2 = fs.Open(fileToRead);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
                // read using fetchBlockByteRange(). Acquired tokens are cached in in3
                FSDataInputStream in3 = fs.Open(fileToRead);
                NUnit.Framework.Assert.IsTrue(CheckFile2(in3));

                /*
                 * testing READ interface on DN using a BlockReader
                 */
                DFSClient client = null;
                try
                {
                    client = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf
                                           );
                }
                finally
                {
                    if (client != null)
                    {
                        client.Close();
                    }
                }
                IList <LocatedBlock> locatedBlocks = nnProto.GetBlockLocations(FileToRead, 0, FileSize
                                                                               ).GetLocatedBlocks();
                LocatedBlock lblock = locatedBlocks[0];
                // first block
                Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> myToken = lblock.GetBlockToken
                                                                                            ();
                // verify token is not expired
                NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(myToken));
                // read with valid token, should succeed
                TryRead(conf, lblock, true);

                /*
                 * wait till myToken and all cached tokens in in1, in2 and in3 expire
                 */
                while (!SecurityTestUtil.IsBlockTokenExpired(myToken))
                {
                    try
                    {
                        Sharpen.Thread.Sleep(10);
                    }
                    catch (Exception)
                    {
                    }
                }

                /*
                 * continue testing READ interface on DN using a BlockReader
                 */
                // verify token is expired
                NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(myToken));
                // read should fail
                TryRead(conf, lblock, false);
                // use a valid new token
                lblock.SetBlockToken(sm.GenerateToken(lblock.GetBlock(), EnumSet.Of(BlockTokenSecretManager.AccessMode
                                                                                    .Read)));
                // read should succeed
                TryRead(conf, lblock, true);
                // use a token with wrong blockID
                ExtendedBlock wrongBlock = new ExtendedBlock(lblock.GetBlock().GetBlockPoolId(),
                                                             lblock.GetBlock().GetBlockId() + 1);
                lblock.SetBlockToken(sm.GenerateToken(wrongBlock, EnumSet.Of(BlockTokenSecretManager.AccessMode
                                                                             .Read)));
                // read should fail
                TryRead(conf, lblock, false);
                // use a token with wrong access modes
                lblock.SetBlockToken(sm.GenerateToken(lblock.GetBlock(), EnumSet.Of(BlockTokenSecretManager.AccessMode
                                                                                    .Write, BlockTokenSecretManager.AccessMode.Copy, BlockTokenSecretManager.AccessMode
                                                                                    .Replace)));
                // read should fail
                TryRead(conf, lblock, false);
                // set a long token lifetime for future tokens
                SecurityTestUtil.SetBlockTokenLifetime(sm, 600 * 1000L);

                /*
                 * testing that when cached tokens are expired, DFSClient will re-fetch
                 * tokens transparently for READ.
                 */
                // confirm all tokens cached in in1 are expired by now
                IList <LocatedBlock> lblocks = DFSTestUtil.GetAllBlocks(in1);
                foreach (LocatedBlock blk in lblocks)
                {
                    NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(blk.GetBlockToken
                                                                                           ()));
                }
                // verify blockSeekTo() is able to re-fetch token transparently
                in1.Seek(0);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
                // confirm all tokens cached in in2 are expired by now
                IList <LocatedBlock> lblocks2 = DFSTestUtil.GetAllBlocks(in2);
                foreach (LocatedBlock blk_1 in lblocks2)
                {
                    NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(blk_1.GetBlockToken
                                                                                           ()));
                }
                // verify blockSeekTo() is able to re-fetch token transparently (testing
                // via another interface method)
                NUnit.Framework.Assert.IsTrue(in2.SeekToNewSource(0));
                NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
                // confirm all tokens cached in in3 are expired by now
                IList <LocatedBlock> lblocks3 = DFSTestUtil.GetAllBlocks(in3);
                foreach (LocatedBlock blk_2 in lblocks3)
                {
                    NUnit.Framework.Assert.IsTrue(SecurityTestUtil.IsBlockTokenExpired(blk_2.GetBlockToken
                                                                                           ()));
                }
                // verify fetchBlockByteRange() is able to re-fetch token transparently
                NUnit.Framework.Assert.IsTrue(CheckFile2(in3));

                /*
                 * testing that after datanodes are restarted on the same ports, cached
                 * tokens should still work and there is no need to fetch new tokens from
                 * namenode. This test should run while namenode is down (to make sure no
                 * new tokens can be fetched from namenode).
                 */
                // restart datanodes on the same ports that they currently use
                NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(true));
                cluster.WaitActive();
                NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
                cluster.ShutdownNameNode(0);
                // confirm tokens cached in in1 are still valid
                lblocks = DFSTestUtil.GetAllBlocks(in1);
                foreach (LocatedBlock blk_3 in lblocks)
                {
                    NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(blk_3.GetBlockToken
                                                                                            ()));
                }
                // verify blockSeekTo() still works (forced to use cached tokens)
                in1.Seek(0);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
                // confirm tokens cached in in2 are still valid
                lblocks2 = DFSTestUtil.GetAllBlocks(in2);
                foreach (LocatedBlock blk_4 in lblocks2)
                {
                    NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(blk_4.GetBlockToken
                                                                                            ()));
                }
                // verify blockSeekTo() still works (forced to use cached tokens)
                in2.SeekToNewSource(0);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
                // confirm tokens cached in in3 are still valid
                lblocks3 = DFSTestUtil.GetAllBlocks(in3);
                foreach (LocatedBlock blk_5 in lblocks3)
                {
                    NUnit.Framework.Assert.IsFalse(SecurityTestUtil.IsBlockTokenExpired(blk_5.GetBlockToken
                                                                                            ()));
                }
                // verify fetchBlockByteRange() still works (forced to use cached tokens)
                NUnit.Framework.Assert.IsTrue(CheckFile2(in3));

                /*
                 * testing that when namenode is restarted, cached tokens should still
                 * work and there is no need to fetch new tokens from namenode. Like the
                 * previous test, this test should also run while namenode is down. The
                 * setup for this test depends on the previous test.
                 */
                // restart the namenode and then shut it down for test
                cluster.RestartNameNode(0);
                cluster.ShutdownNameNode(0);
                // verify blockSeekTo() still works (forced to use cached tokens)
                in1.Seek(0);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
                // verify again blockSeekTo() still works (forced to use cached tokens)
                in2.SeekToNewSource(0);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
                // verify fetchBlockByteRange() still works (forced to use cached tokens)
                NUnit.Framework.Assert.IsTrue(CheckFile2(in3));

                /*
                 * testing that after both namenode and datanodes got restarted (namenode
                 * first, followed by datanodes), DFSClient can't access DN without
                 * re-fetching tokens and is able to re-fetch tokens transparently. The
                 * setup of this test depends on the previous test.
                 */
                // restore the cluster and restart the datanodes for test
                cluster.RestartNameNode(0);
                NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(true));
                cluster.WaitActive();
                NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
                // shutdown namenode so that DFSClient can't get new tokens from namenode
                cluster.ShutdownNameNode(0);
                // verify blockSeekTo() fails (cached tokens become invalid)
                in1.Seek(0);
                NUnit.Framework.Assert.IsFalse(CheckFile1(in1));
                // verify fetchBlockByteRange() fails (cached tokens become invalid)
                NUnit.Framework.Assert.IsFalse(CheckFile2(in3));
                // restart the namenode to allow DFSClient to re-fetch tokens
                cluster.RestartNameNode(0);
                // verify blockSeekTo() works again (by transparently re-fetching
                // tokens from namenode)
                in1.Seek(0);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
                in2.SeekToNewSource(0);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
                // verify fetchBlockByteRange() works again (by transparently
                // re-fetching tokens from namenode)
                NUnit.Framework.Assert.IsTrue(CheckFile2(in3));

                /*
                 * testing that when datanodes are restarted on different ports, DFSClient
                 * is able to re-fetch tokens transparently to connect to them
                 */
                // restart datanodes on newly assigned ports
                NUnit.Framework.Assert.IsTrue(cluster.RestartDataNodes(false));
                cluster.WaitActive();
                NUnit.Framework.Assert.AreEqual(numDataNodes, cluster.GetDataNodes().Count);
                // verify blockSeekTo() is able to re-fetch token transparently
                in1.Seek(0);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in1));
                // verify blockSeekTo() is able to re-fetch token transparently
                in2.SeekToNewSource(0);
                NUnit.Framework.Assert.IsTrue(CheckFile1(in2));
                // verify fetchBlockByteRange() is able to re-fetch token transparently
                NUnit.Framework.Assert.IsTrue(CheckFile2(in3));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }