Example #1
        /// <summary>
        /// Count the datanodes that hold copies of the blocks of a file
        /// and record the per-block replica counts in the map.
        /// </summary>
        /// <param name="map">Map from block ID to the accumulated replica locations for that block.</param>
        /// <param name="path">Path of the file to inspect.</param>
        /// <param name="size">Length of the file in bytes.</param>
        /// <returns>Total number of replica locations across all blocks of the file.</returns>
        /// <exception cref="System.IO.IOException"/>
        private int CountNNBlocks(IDictionary <string, TestDataNodeVolumeFailure.BlockLocs
                                               > map, string path, long size)
        {
            int total = 0;
            NamenodeProtocols    nn            = cluster.GetNameNodeRpc();
            IList <LocatedBlock> locatedBlocks = nn.GetBlockLocations(path, 0, size).GetLocatedBlocks
                                                     ();

            //System.out.println("Number of blocks: " + locatedBlocks.size());
            foreach (LocatedBlock lb in locatedBlocks)
            {
                string blockId = string.Empty + lb.GetBlock().GetBlockId();
                //System.out.print(blockId + ": ");
                DatanodeInfo[] dn_locs = lb.GetLocations();
                TestDataNodeVolumeFailure.BlockLocs bl;
                // The IDictionary indexer throws on a missing key, so probe with TryGetValue
                // and fall back to a fresh BlockLocs entry for blocks seen for the first time.
                if (!map.TryGetValue(blockId, out bl))
                {
                    bl = new TestDataNodeVolumeFailure.BlockLocs(this);
                }
                //System.out.print(dn_info.name+",");
                total       += dn_locs.Length;
                bl.num_locs += dn_locs.Length;
                map[blockId] = bl;
            }
            //System.out.println();
            return(total);
        }
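A minimal usage sketch for the helper above, assuming the enclosing TestDataNodeVolumeFailure fixture has already written a replicated test file (the "/test1.txt" path and the FileSize length constant are illustrative, not the fixture's actual names):

            // Hypothetical call site inside a test method of the same fixture.
            IDictionary<string, TestDataNodeVolumeFailure.BlockLocs> blockMap =
                new Dictionary<string, TestDataNodeVolumeFailure.BlockLocs>();
            int totalReplicas = CountNNBlocks(blockMap, "/test1.txt", FileSize);
            // With the fixture's replication factor and no failed volumes, every block
            // contributes that many locations to totalReplicas.
            NUnit.Framework.Assert.IsTrue("Expected at least one replica location", totalReplicas > 0);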
Example #2
        public virtual void TestRaceWhileNNStartup()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = WebHdfsTestUtil.CreateConf();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                NameNode          namenode  = cluster.GetNameNode();
                NamenodeProtocols rpcServer = namenode.GetRpcServer();
                Whitebox.SetInternalState(namenode, "rpcServer", null);
                Path       foo     = new Path("/foo");
                FileSystem webHdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem
                                                                          .Scheme);
                try
                {
                    webHdfs.Mkdirs(foo);
                    NUnit.Framework.Assert.Fail("Expected RetriableException");
                }
                catch (RetriableException e)
                {
                    GenericTestUtils.AssertExceptionContains("Namenode is in startup mode", e);
                }
                Whitebox.SetInternalState(namenode, "rpcServer", rpcServer);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #3
        public virtual void TestAddBlockRetryShouldReturnBlockWithLocations()
        {
            string            src         = "/testAddBlockRetryShouldReturnBlockWithLocations";
            NamenodeProtocols nameNodeRpc = cluster.GetNameNodeRpc();

            // create file
            nameNodeRpc.Create(src, FsPermission.GetFileDefault(), "clientName", new EnumSetWritable
                               <CreateFlag>(EnumSet.Of(CreateFlag.Create)), true, (short)3, 1024, null);
            // start first addBlock()
            Log.Info("Starting first addBlock for " + src);
            LocatedBlock lb1 = nameNodeRpc.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId
                                                    , null);

            NUnit.Framework.Assert.IsTrue("Block locations should be present", lb1.GetLocations
                                              ().Length > 0);
            cluster.RestartNameNode();
            nameNodeRpc = cluster.GetNameNodeRpc();
            LocatedBlock lb2 = nameNodeRpc.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId
                                                    , null);

            NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb1.GetBlock(), lb2.GetBlock
                                                ());
            NUnit.Framework.Assert.IsTrue("Wrong locations with retry", lb2.GetLocations().Length
                                          > 0);
        }
Example #4
        public virtual void TestOpenFilesWithRename()
        {
            Path path = new Path("/test");

            DoWriteAndAbort(fs, path);
            // check for zero sized blocks
            Path fileWithEmptyBlock = new Path("/test/test/test4");

            fs.Create(fileWithEmptyBlock);
            NamenodeProtocols nameNodeRpc = cluster.GetNameNodeRpc();
            string            clientName  = fs.GetClient().GetClientName();

            // create one empty block
            nameNodeRpc.AddBlock(fileWithEmptyBlock.ToString(), clientName, null, null, INodeId
                                 .GrandfatherInodeId, null);
            fs.CreateSnapshot(path, "s2");
            fs.Rename(new Path("/test/test"), new Path("/test/test-renamed"));
            fs.Delete(new Path("/test/test-renamed"), true);
            NameNode nameNode = cluster.GetNameNode();

            NameNodeAdapter.EnterSafeMode(nameNode, false);
            NameNodeAdapter.SaveNamespace(nameNode);
            NameNodeAdapter.LeaveSafeMode(nameNode);
            cluster.RestartNameNode(true);
        }
Example #5
        public virtual void TestGetBlockLocations()
        {
            NamenodeProtocols namenode = cluster.GetNameNodeRpc();
            Path               p       = new Path(BaseDir, "file2.dat");
            string             src     = p.ToString();
            FSDataOutputStream @out    = TestFileCreation.CreateFile(hdfs, p, 3);
            // write a half block
            int len = (int)(((uint)BlockSize) >> 1);

            WriteFile(p, @out, len);
            for (int i = 1; i < NumBlocks;)
            {
                // verify consistency
                LocatedBlocks        lb     = namenode.GetBlockLocations(src, 0, len);
                IList <LocatedBlock> blocks = lb.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(i, blocks.Count);
                Block b = blocks[blocks.Count - 1].GetBlock().GetLocalBlock();
                NUnit.Framework.Assert.IsTrue(b is BlockInfoContiguousUnderConstruction);
                if (++i < NumBlocks)
                {
                    // write one more block
                    WriteFile(p, @out, BlockSize);
                    len += BlockSize;
                }
            }
            // close file
            @out.Close();
        }
Example #6
        public virtual void TestMkdirRpcNonCanonicalPath()
        {
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();

            try
            {
                NamenodeProtocols nnrpc = cluster.GetNameNodeRpc();
                foreach (string pathStr in NonCanonicalPaths)
                {
                    try
                    {
                        nnrpc.Mkdirs(pathStr, new FsPermission((short)0x1ed), true);
                        NUnit.Framework.Assert.Fail("Did not fail when called with a non-canonicalized path: "
                                                    + pathStr);
                    }
                    catch (InvalidPathException)
                    {
                        // expected
                    }
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
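The NonCanonicalPaths array iterated above is declared elsewhere in the fixture. A hedged illustration of the kind of values it holds (these exact strings are assumptions, not the fixture's actual constants):

        // Hypothetical examples of non-canonicalized paths the NameNode RPC should reject.
        private static readonly string[] NonCanonicalPaths = new string[]
        {
            "//nonCanonical",          // doubled leading slash
            "/test/./nonCanonical",    // "." path component
            "/test/../nonCanonical",   // ".." path component
            "/test//nonCanonical"      // doubled internal slash
        };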
Example #7
 /// <exception cref="System.IO.IOException"/>
 /// <exception cref="System.Exception"/>
 internal static string GetDelegationToken(NamenodeProtocols nn, HttpServletRequest
                                           request, Configuration conf, UserGroupInformation ugi)
 {
     Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token = ugi.DoAs
                                                                                    (new _PrivilegedExceptionAction_39(nn, ugi));
     return(token == null ? null : token.EncodeToUrlString());
 }
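The Sharpen-generated anonymous class _PrivilegedExceptionAction_39 is not included in this listing. A hedged reconstruction of what such an action typically does, assuming the renewer defaults to the caller's short user name and that the PrivilegedExceptionAction interface and a GetDelegationToken(Text) overload are available as in the sharpened Hadoop sources:

 // Hypothetical reconstruction; the body is an assumption based on the call site above.
 private sealed class _PrivilegedExceptionAction_39 : PrivilegedExceptionAction<Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier>>
 {
     private readonly NamenodeProtocols nn;
     private readonly UserGroupInformation ugi;

     internal _PrivilegedExceptionAction_39(NamenodeProtocols nn, UserGroupInformation ugi)
     {
         this.nn  = nn;
         this.ugi = ugi;
     }

     public Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> Run()
     {
         // Fetch a delegation token as the remote user; the renewer name is assumed here.
         return nn.GetDelegationToken(new Text(ugi.GetShortUserName()));
     }
 }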
Example #8
 /// <summary>
 /// Test case that stops a writer after finalizing a block but
 /// before calling completeFile, recovers a file from another writer,
 /// starts writing from that writer, and then has the old lease holder
 /// call completeFile
 /// </summary>
 /// <exception cref="System.Exception"/>
 public virtual void TestCompleteOtherLeaseHoldersFile()
 {
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
     try
     {
         cluster.WaitActive();
         NamenodeProtocols preSpyNN = cluster.GetNameNodeRpc();
         NamenodeProtocols spyNN    = Org.Mockito.Mockito.Spy(preSpyNN);
         // Delay completeFile
         GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(Log);
         Org.Mockito.Mockito.DoAnswer(delayer).When(spyNN).Complete(Matchers.AnyString(),
                                                                    Matchers.AnyString(), (ExtendedBlock)Matchers.AnyObject(), Matchers.AnyLong());
         DFSClient client = new DFSClient(null, spyNN, conf, null);
         file1 = new Path("/testCompleteOtherLease");
         OutputStream stm = client.Create("/testCompleteOtherLease", true);
         // write 1/2 block
         AppendTestUtil.Write(stm, 0, 4096);
         AtomicReference <Exception> err = new AtomicReference <Exception>();
         Sharpen.Thread t = new _Thread_242(stm, err);
         t.Start();
         Log.Info("Waiting for close to get to latch...");
         delayer.WaitForCall();
         // At this point, the block is finalized on the DNs, but the file
         // has not been completed in the NN.
         // Lose the leases
         Log.Info("Killing lease checker");
         client.GetLeaseRenewer().InterruptAndJoin();
         FileSystem fs1 = cluster.GetFileSystem();
         FileSystem fs2 = AppendTestUtil.CreateHdfsWithDifferentUsername(fs1.GetConf());
         Log.Info("Recovering file");
         RecoverFile(fs2);
         Log.Info("Opening file for append from new fs");
         FSDataOutputStream appenderStream = fs2.Append(file1);
         Log.Info("Writing some data from new appender");
         AppendTestUtil.Write(appenderStream, 0, 4096);
         Log.Info("Telling old close to proceed.");
         delayer.Proceed();
         Log.Info("Waiting for close to finish.");
         t.Join();
         Log.Info("Close finished.");
         // We expect that close will get a "Lease mismatch"
         // error.
         Exception thrownByClose = err.Get();
         NUnit.Framework.Assert.IsNotNull(thrownByClose);
         NUnit.Framework.Assert.IsTrue(thrownByClose is IOException);
         if (!thrownByClose.Message.Contains("Lease mismatch"))
         {
             throw thrownByClose;
         }
         // The appender should be able to close properly
         appenderStream.Close();
     }
     finally
     {
         cluster.Shutdown();
     }
 }
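The anonymous _Thread_242 started above is not included in this listing. A hedged reconstruction of what it plausibly does, based on how the surrounding test uses it: it performs the delayed Close() on the original stream and records any failure in the shared err reference so the main thread can assert on the "Lease mismatch" error.

 // Hypothetical reconstruction; the body is an assumption based on how the test uses it.
 private sealed class _Thread_242 : Sharpen.Thread
 {
     private readonly OutputStream stm;
     private readonly AtomicReference<Exception> err;

     internal _Thread_242(OutputStream stm, AtomicReference<Exception> err)
     {
         this.stm = stm;
         this.err = err;
     }

     public override void Run()
     {
         try
         {
             // This Close() is what the DelayAnswer holds at the latch; it is expected
             // to fail once the other writer has recovered the lease.
             stm.Close();
         }
         catch (Exception t)
         {
             err.Set(t);
         }
     }
 }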
Example #9
        public virtual void TestEditLogRolling()
        {
            // start a cluster
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            AtomicReference <Exception> caughtErr = new AtomicReference <Exception>();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Build();
                cluster.WaitActive();
                fileSys = cluster.GetFileSystem();
                NamenodeProtocols        nn      = cluster.GetNameNode().GetRpcServer();
                FSImage                  fsimage = cluster.GetNamesystem().GetFSImage();
                Storage.StorageDirectory sd      = fsimage.GetStorage().GetStorageDir(0);
                StartTransactionWorkers(nn, caughtErr);
                long previousLogTxId = 1;
                for (int i = 0; i < NumRolls && caughtErr.Get() == null; i++)
                {
                    try
                    {
                        Sharpen.Thread.Sleep(20);
                    }
                    catch (Exception)
                    {
                    }
                    Log.Info("Starting roll " + i + ".");
                    CheckpointSignature sig = nn.RollEditLog();
                    long   nextLog          = sig.curSegmentTxId;
                    string logFileName      = NNStorage.GetFinalizedEditsFileName(previousLogTxId, nextLog
                                                                                  - 1);
                    previousLogTxId += VerifyEditLogs(cluster.GetNamesystem(), fsimage, logFileName,
                                                      previousLogTxId);
                    NUnit.Framework.Assert.AreEqual(previousLogTxId, nextLog);
                    FilePath expectedLog = NNStorage.GetInProgressEditsFile(sd, previousLogTxId);
                    NUnit.Framework.Assert.IsTrue("Expect " + expectedLog + " to exist", expectedLog.
                                                  Exists());
                }
            }
            finally
            {
                StopTransactionWorkers();
                if (caughtErr.Get() != null)
                {
                    throw new RuntimeException(caughtErr.Get());
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #10
 internal Transactions(NamenodeProtocols ns, AtomicReference <Exception> caught)
 {
     // An object that runs a bunch of transactions; the test creates NUM_THREADS
     // of these workers, and each thread continuously writes transactions.
     nn          = ns;
     this.caught = caught;
 }
Example #11
 public virtual void StartUpCluster()
 {
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(ReplFactor).Build();
     NUnit.Framework.Assert.IsNotNull("Failed Cluster Creation", cluster);
     cluster.WaitClusterUp();
     dfs = cluster.GetFileSystem();
     NUnit.Framework.Assert.IsNotNull("Failed to get FileSystem", dfs);
     nn = cluster.GetNameNodeRpc();
     NUnit.Framework.Assert.IsNotNull("Failed to get NameNode", nn);
 }
Example #12
 public virtual void Setup()
 {
     conf = new HdfsConfiguration();
     conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
     conf.SetBoolean(DFSConfigKeys.DfsNamenodeEnableRetryCacheKey, true);
     conf.SetBoolean(DFSConfigKeys.DfsNamenodeAclsEnabledKey, true);
     cluster = new MiniDFSCluster.Builder(conf).Build();
     cluster.WaitActive();
     nnRpc      = cluster.GetNameNode().GetRpcServer();
     filesystem = cluster.GetFileSystem();
 }
Example #13
        public virtual void TestRegistrationWithDifferentSoftwareVersionsDuringUpgrade()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsDatanodeMinSupportedNamenodeVersionKey, "1.0.0");
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                NamenodeProtocols rpcServer = cluster.GetNameNodeRpc();
                long        nnCTime         = cluster.GetNamesystem().GetFSImage().GetStorage().GetCTime();
                StorageInfo mockStorageInfo = Org.Mockito.Mockito.Mock <StorageInfo>();
                Org.Mockito.Mockito.DoReturn(nnCTime).When(mockStorageInfo).GetCTime();
                DatanodeRegistration mockDnReg = Org.Mockito.Mockito.Mock <DatanodeRegistration>();
                Org.Mockito.Mockito.DoReturn(HdfsConstants.DatanodeLayoutVersion).When(mockDnReg)
                .GetVersion();
                Org.Mockito.Mockito.DoReturn("fake-storage-id").When(mockDnReg).GetDatanodeUuid();
                Org.Mockito.Mockito.DoReturn(mockStorageInfo).When(mockDnReg).GetStorageInfo();
                // Should succeed when software versions are the same and CTimes are the
                // same.
                Org.Mockito.Mockito.DoReturn(VersionInfo.GetVersion()).When(mockDnReg).GetSoftwareVersion
                    ();
                Org.Mockito.Mockito.DoReturn("127.0.0.1").When(mockDnReg).GetIpAddr();
                Org.Mockito.Mockito.DoReturn(123).When(mockDnReg).GetXferPort();
                rpcServer.RegisterDatanode(mockDnReg);
                // Should succeed when software versions are the same and CTimes are
                // different.
                Org.Mockito.Mockito.DoReturn(nnCTime + 1).When(mockStorageInfo).GetCTime();
                rpcServer.RegisterDatanode(mockDnReg);
                // Should fail when software version of DN is different from NN and CTimes
                // are different.
                Org.Mockito.Mockito.DoReturn(VersionInfo.GetVersion() + ".1").When(mockDnReg).GetSoftwareVersion
                    ();
                try
                {
                    rpcServer.RegisterDatanode(mockDnReg);
                    NUnit.Framework.Assert.Fail("Should not have been able to register DN with different software"
                                                + " versions and CTimes");
                }
                catch (IncorrectVersionException ive)
                {
                    GenericTestUtils.AssertExceptionContains("does not match CTime of NN", ive);
                    Log.Info("Got expected exception", ive);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #14
        /// <exception cref="System.IO.IOException"/>
        private void CheckNameSpace(Configuration conf)
        {
            NameNode          namenode = new NameNode(conf);
            NamenodeProtocols nnRpc    = namenode.GetRpcServer();

            NUnit.Framework.Assert.IsTrue(nnRpc.GetFileInfo("/test").IsDir());
            nnRpc.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
            nnRpc.SaveNamespace();
            namenode.Stop();
            namenode.Join();
        }
Example #15
 private void StartTransactionWorkers(NamenodeProtocols namesystem, AtomicReference
                                      <Exception> caughtErr)
 {
     // Create threads and make them run transactions concurrently.
     for (int i = 0; i < NumThreads; i++)
     {
         TestEditLogRace.Transactions trans = new TestEditLogRace.Transactions(namesystem,
                                                                               caughtErr);
         new Sharpen.Thread(trans, "TransactionThread-" + i).Start();
         workers.AddItem(trans);
     }
 }
Example #16
        public virtual void TestRegistrationWithDifferentSoftwareVersions()
        {
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsDatanodeMinSupportedNamenodeVersionKey, "3.0.0");
            conf.Set(DFSConfigKeys.DfsNamenodeMinSupportedDatanodeVersionKey, "3.0.0");
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                NamenodeProtocols rpcServer = cluster.GetNameNodeRpc();
                long        nnCTime         = cluster.GetNamesystem().GetFSImage().GetStorage().GetCTime();
                StorageInfo mockStorageInfo = Org.Mockito.Mockito.Mock <StorageInfo>();
                Org.Mockito.Mockito.DoReturn(nnCTime).When(mockStorageInfo).GetCTime();
                DatanodeRegistration mockDnReg = Org.Mockito.Mockito.Mock <DatanodeRegistration>();
                Org.Mockito.Mockito.DoReturn(HdfsConstants.DatanodeLayoutVersion).When(mockDnReg)
                .GetVersion();
                Org.Mockito.Mockito.DoReturn("127.0.0.1").When(mockDnReg).GetIpAddr();
                Org.Mockito.Mockito.DoReturn(123).When(mockDnReg).GetXferPort();
                Org.Mockito.Mockito.DoReturn("fake-storage-id").When(mockDnReg).GetDatanodeUuid();
                Org.Mockito.Mockito.DoReturn(mockStorageInfo).When(mockDnReg).GetStorageInfo();
                // Should succeed when software versions are the same.
                Org.Mockito.Mockito.DoReturn("3.0.0").When(mockDnReg).GetSoftwareVersion();
                rpcServer.RegisterDatanode(mockDnReg);
                // Should succeed when software version of DN is above minimum required by NN.
                Org.Mockito.Mockito.DoReturn("4.0.0").When(mockDnReg).GetSoftwareVersion();
                rpcServer.RegisterDatanode(mockDnReg);
                // Should fail when software version of DN is below minimum required by NN.
                Org.Mockito.Mockito.DoReturn("2.0.0").When(mockDnReg).GetSoftwareVersion();
                try
                {
                    rpcServer.RegisterDatanode(mockDnReg);
                    NUnit.Framework.Assert.Fail("Should not have been able to register DN with too-low version."
                                                );
                }
                catch (IncorrectVersionException ive)
                {
                    GenericTestUtils.AssertExceptionContains("The reported DataNode version is too low"
                                                             , ive);
                    Log.Info("Got expected exception", ive);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #17
        public virtual void TestChangeStorageID()
        {
            string         DnIpAddr         = "127.0.0.1";
            string         DnHostname       = "localhost";
            int            DnXferPort       = 12345;
            int            DnInfoPort       = 12346;
            int            DnInfoSecurePort = 12347;
            int            DnIpcPort        = 12348;
            Configuration  conf             = new HdfsConfiguration();
            MiniDFSCluster cluster          = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                IPEndPoint        addr      = new IPEndPoint("localhost", cluster.GetNameNodePort());
                DFSClient         client    = new DFSClient(addr, conf);
                NamenodeProtocols rpcServer = cluster.GetNameNodeRpc();
                // register a datanode
                DatanodeID dnId = new DatanodeID(DnIpAddr, DnHostname, "fake-datanode-id", DnXferPort
                                                 , DnInfoPort, DnInfoSecurePort, DnIpcPort);
                long        nnCTime         = cluster.GetNamesystem().GetFSImage().GetStorage().GetCTime();
                StorageInfo mockStorageInfo = Org.Mockito.Mockito.Mock <StorageInfo>();
                Org.Mockito.Mockito.DoReturn(nnCTime).When(mockStorageInfo).GetCTime();
                Org.Mockito.Mockito.DoReturn(HdfsConstants.DatanodeLayoutVersion).When(mockStorageInfo
                                                                                       ).GetLayoutVersion();
                DatanodeRegistration dnReg = new DatanodeRegistration(dnId, mockStorageInfo, null
                                                                      , VersionInfo.GetVersion());
                rpcServer.RegisterDatanode(dnReg);
                DatanodeInfo[] report = client.DatanodeReport(HdfsConstants.DatanodeReportType.All
                                                              );
                NUnit.Framework.Assert.AreEqual("Expected a registered datanode", 1, report.Length
                                                );
                // register the same datanode again with a different storage ID
                dnId = new DatanodeID(DnIpAddr, DnHostname, "changed-fake-datanode-id", DnXferPort
                                      , DnInfoPort, DnInfoSecurePort, DnIpcPort);
                dnReg = new DatanodeRegistration(dnId, mockStorageInfo, null, VersionInfo.GetVersion
                                                     ());
                rpcServer.RegisterDatanode(dnReg);
                report = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
                NUnit.Framework.Assert.AreEqual("Datanode with changed storage ID not recognized"
                                                , 1, report.Length);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #18
        public static void SetUp()
        {
            // start a cluster
            Configuration conf = new HdfsConfiguration();

            // High value of replication interval
            // so that blocks remain under-replicated
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1000);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            conf.SetLong(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1L);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Build();
            cluster.WaitActive();
            fileSys = cluster.GetFileSystem();
            nnRpc   = cluster.GetNameNodeRpc();
        }
Example #19
        public virtual void Setup()
        {
            StaticMapping.ResetMap();
            Configuration conf = new HdfsConfiguration();

            string[] racks = new string[] { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
            string[] hosts = new string[] { "/host0", "/host1", "/host2", "/host3", "/host4" };
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, DefaultBlockSize / 2);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Racks(racks).Hosts(hosts
                                                                                          ).Build();
            cluster.WaitActive();
            nameNodeRpc = cluster.GetNameNodeRpc();
            namesystem  = cluster.GetNamesystem();
            perm        = new PermissionStatus("TestDefaultBlockPlacementPolicy", null, FsPermission
                                               .GetDefault());
        }
Example #20
        public virtual void TestRetryAddBlockWhileInChooseTarget()
        {
            string            src = "/testRetryAddBlockWhileInChooseTarget";
            FSNamesystem      ns  = cluster.GetNamesystem();
            NamenodeProtocols nn  = cluster.GetNameNodeRpc();

            // create file
            nn.Create(src, FsPermission.GetFileDefault(), "clientName", new EnumSetWritable <CreateFlag
                                                                                             >(EnumSet.Of(CreateFlag.Create)), true, (short)3, 1024, null);
            // start first addBlock()
            Log.Info("Starting first addBlock for " + src);
            LocatedBlock[]        onRetryBlock = new LocatedBlock[1];
            DatanodeStorageInfo[] targets      = ns.GetNewBlockTargets(src, INodeId.GrandfatherInodeId
                                                                       , "clientName", null, null, null, onRetryBlock);
            NUnit.Framework.Assert.IsNotNull("Targets must be generated", targets);
            // run second addBlock()
            Log.Info("Starting second addBlock for " + src);
            nn.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId, null);
            NUnit.Framework.Assert.IsTrue("Penultimate block must be complete", CheckFileProgress
                                              (src, false));
            LocatedBlocks lbs = nn.GetBlockLocations(src, 0, long.MaxValue);

            NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count
                                            );
            LocatedBlock lb2 = lbs.Get(0);

            NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb2.GetLocations
                                                ().Length);
            // continue first addBlock()
            LocatedBlock newBlock = ns.StoreAllocatedBlock(src, INodeId.GrandfatherInodeId, "clientName"
                                                           , null, targets);

            NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb2.GetBlock(), newBlock.
                                            GetBlock());
            // check locations
            lbs = nn.GetBlockLocations(src, 0, long.MaxValue);
            NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count
                                            );
            LocatedBlock lb1 = lbs.Get(0);

            NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb1.GetLocations
                                                ().Length);
            NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb1.GetBlock(), lb2.GetBlock
                                                ());
        }
Example #21
        public virtual void TestCompression()
        {
            Log.Info("Test compressing image.");
            Configuration conf = new Configuration();

            FileSystem.SetDefaultUri(conf, "hdfs://localhost:0");
            conf.Set(DFSConfigKeys.DfsNamenodeHttpAddressKey, "127.0.0.1:0");
            FilePath base_dir = new FilePath(PathUtils.GetTestDir(GetType()), "dfs/");

            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, new FilePath(base_dir, "name").GetPath
                         ());
            conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
            DFSTestUtil.FormatNameNode(conf);
            // create an uncompressed image
            Log.Info("Create an uncompressed fsimage");
            NameNode namenode = new NameNode(conf);

            namenode.GetNamesystem().Mkdirs("/test", new PermissionStatus("hairong", null, FsPermission
                                                                          .GetDefault()), true);
            NamenodeProtocols nnRpc = namenode.GetRpcServer();

            NUnit.Framework.Assert.IsTrue(nnRpc.GetFileInfo("/test").IsDir());
            nnRpc.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
            nnRpc.SaveNamespace();
            namenode.Stop();
            namenode.Join();
            // compress image using default codec
            Log.Info("Read an uncomressed image and store it compressed using default codec."
                     );
            conf.SetBoolean(DFSConfigKeys.DfsImageCompressKey, true);
            CheckNameSpace(conf);
            // read image compressed using the default and compress it using Gzip codec
            Log.Info("Read a compressed image and store it using a different codec.");
            conf.Set(DFSConfigKeys.DfsImageCompressionCodecKey, "org.apache.hadoop.io.compress.GzipCodec"
                     );
            CheckNameSpace(conf);
            // read an image compressed in Gzip and store it uncompressed
            Log.Info("Read a compressed image and store it as uncompressed.");
            conf.SetBoolean(DFSConfigKeys.DfsImageCompressKey, false);
            CheckNameSpace(conf);
            // read an uncompressed image and store it uncompressed
            Log.Info("Read an uncompressed image and store it as uncompressed.");
            CheckNameSpace(conf);
        }
Example #22
        /// <summary>Access each block of the file on the second DataNode until one of the accesses fails.</summary>
        /// <param name="path">Path of the file whose blocks are accessed.</param>
        /// <param name="size">Length of the file in bytes.</param>
        /// <exception cref="System.IO.IOException"/>
        private void TriggerFailure(string path, long size)
        {
            NamenodeProtocols    nn            = cluster.GetNameNodeRpc();
            IList <LocatedBlock> locatedBlocks = nn.GetBlockLocations(path, 0, size).GetLocatedBlocks
                                                     ();

            foreach (LocatedBlock lb in locatedBlocks)
            {
                DatanodeInfo  dinfo = lb.GetLocations()[1];
                ExtendedBlock b     = lb.GetBlock();
                try
                {
                    AccessBlock(dinfo, lb);
                }
                catch (IOException)
                {
                    System.Console.Out.WriteLine("Failure triggered, on block: " + b.GetBlockId() + "; corresponding volume should be removed by now"
                                                 );
                    break;
                }
            }
        }
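A minimal usage sketch for TriggerFailure, assuming the fixture exposes a FileSystem as fs and a FileSize length constant (both names are illustrative), and that a volume on the second DataNode has already been made inaccessible:

            // Hypothetical call site: write a small replicated file, then walk its blocks
            // on the second DataNode until one of the accesses fails.
            Path testPath = new Path("/test1.txt");
            DFSTestUtil.CreateFile(fs, testPath, FileSize, (short)2, 1L);
            TriggerFailure(testPath.ToString(), FileSize);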
Example #23
        public virtual void TestNNHealthCheck()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new Configuration();
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).NnTopology(MiniDFSNNTopology
                                                                                      .SimpleHATopology()).Build();
                NameNodeResourceChecker mockResourceChecker = Org.Mockito.Mockito.Mock <NameNodeResourceChecker
                                                                                        >();
                Org.Mockito.Mockito.DoReturn(true).When(mockResourceChecker).HasAvailableDiskSpace
                    ();
                cluster.GetNameNode(0).GetNamesystem().SetNNResourceChecker(mockResourceChecker);
                NamenodeProtocols rpc = cluster.GetNameNodeRpc(0);
                // Should not throw error, which indicates healthy.
                rpc.MonitorHealth();
                Org.Mockito.Mockito.DoReturn(false).When(mockResourceChecker).HasAvailableDiskSpace
                    ();
                try
                {
                    // Should throw error - NN is unhealthy.
                    rpc.MonitorHealth();
                    NUnit.Framework.Assert.Fail("Should not have succeeded in calling monitorHealth");
                }
                catch (HealthCheckFailedException hcfe)
                {
                    GenericTestUtils.AssertExceptionContains("The NameNode has no resources available"
                                                             , hcfe);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #24
        /// <summary>
        /// Make sure the WebHdfsFileSystem will retry based on RetriableException when
        /// rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRetryWhileNNStartup()
        {
            Configuration              conf      = DFSTestUtil.NewHAConfiguration(LogicalName);
            MiniDFSCluster             cluster   = null;
            IDictionary <string, bool> resultMap = new Dictionary <string, bool>();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NnTopology(topo).NumDataNodes(0).Build
                              ();
                HATestUtil.SetFailoverConfigurations(cluster, conf, LogicalName);
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                NameNode          namenode  = cluster.GetNameNode(0);
                NamenodeProtocols rpcServer = namenode.GetRpcServer();
                Whitebox.SetInternalState(namenode, "rpcServer", null);
                new _Thread_212(this, conf, resultMap).Start();
                Sharpen.Thread.Sleep(1000);
                Whitebox.SetInternalState(namenode, "rpcServer", rpcServer);
                lock (this)
                {
                    while (!resultMap.Contains("mkdirs"))
                    {
                        Sharpen.Runtime.Wait(this);
                    }
                    NUnit.Framework.Assert.IsTrue(resultMap["mkdirs"]);
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
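The anonymous _Thread_212 started above is not included in this listing. A hedged reconstruction of what it plausibly does, based on the waiting loop that follows: it creates a WebHDFS client (which keeps retrying on RetriableException while rpcServer is null), attempts a mkdirs, stores the outcome under the "mkdirs" key, and wakes the waiting test. The directory path and the Sharpen helper names are assumptions.

        // Hypothetical reconstruction; "_enclosing" stands for the test instance the main thread locks on.
        private sealed class _Thread_212 : Sharpen.Thread
        {
            private readonly object _enclosing;
            private readonly Configuration conf;
            private readonly IDictionary<string, bool> resultMap;

            internal _Thread_212(object enclosing, Configuration conf, IDictionary<string, bool> resultMap)
            {
                this._enclosing = enclosing;
                this.conf       = conf;
                this.resultMap  = resultMap;
            }

            public override void Run()
            {
                bool result = false;
                try
                {
                    FileSystem webHdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
                    result = webHdfs.Mkdirs(new Path("/dir"));   // illustrative path
                }
                catch (IOException)
                {
                    result = false;
                }
                lock (this._enclosing)
                {
                    resultMap["mkdirs"] = result;
                    Sharpen.Runtime.NotifyAll(this._enclosing);
                }
            }
        }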
Example #25
        /// <summary>
        /// Make sure a retry call does not hang because of the exception thrown in the
        /// first call.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpdatePipelineWithFailOver()
        {
            cluster.Shutdown();
            nnRpc      = null;
            filesystem = null;
            cluster    = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology
                                                                         ()).NumDataNodes(1).Build();
            cluster.WaitActive();
            NamenodeProtocols ns0      = cluster.GetNameNodeRpc(0);
            ExtendedBlock     oldBlock = new ExtendedBlock();
            ExtendedBlock     newBlock = new ExtendedBlock();

            DatanodeID[] newNodes    = new DatanodeID[2];
            string[]     newStorages = new string[2];
            NewCall();
            try
            {
                ns0.UpdatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
                NUnit.Framework.Assert.Fail("Expect StandbyException from the updatePipeline call"
                                            );
            }
            catch (StandbyException e)
            {
                // expected, since in the beginning both nn are in standby state
                GenericTestUtils.AssertExceptionContains(HAServiceProtocol.HAServiceState.Standby
                                                         .ToString(), e);
            }
            cluster.TransitionToActive(0);
            try
            {
                ns0.UpdatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
            }
            catch (IOException)
            {
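                // expected; the retried call must complete (fail or succeed) without hanging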
            }
        }
Example #26
        public virtual void TestGetNewStamp()
        {
            int            numDataNodes = 1;
            Configuration  conf         = new HdfsConfiguration();
            MiniDFSCluster cluster      = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes
                                                                                        ).Build();

            try
            {
                cluster.WaitActive();
                FileSystem        fileSys  = cluster.GetFileSystem();
                NamenodeProtocols namenode = cluster.GetNameNodeRpc();
                /* Test writing to finalized replicas */
                Path file = new Path("dataprotocol.dat");
                DFSTestUtil.CreateFile(fileSys, file, 1L, (short)numDataNodes, 0L);
                // get the first blockid for the file
                ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fileSys, file);
                // test getNewStampAndToken on a finalized block
                try
                {
                    namenode.UpdateBlockForPipeline(firstBlock, string.Empty);
                    NUnit.Framework.Assert.Fail("Can not get a new GS from a finalized block");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("is not under Construction"));
                }
                // test getNewStampAndToken on a non-existent block
                try
                {
                    long          newBlockId = firstBlock.GetBlockId() + 1;
                    ExtendedBlock newBlock   = new ExtendedBlock(firstBlock.GetBlockPoolId(), newBlockId
                                                                 , 0, firstBlock.GetGenerationStamp());
                    namenode.UpdateBlockForPipeline(newBlock, string.Empty);
                    NUnit.Framework.Assert.Fail("Cannot get a new GS from a non-existent block");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("does not exist"));
                }
                /* Test RBW replicas */
                // change first block to a RBW
                DFSOutputStream @out = null;
                try
                {
                    @out = (DFSOutputStream)(fileSys.Append(file).GetWrappedStream());
                    @out.Write(1);
                    @out.Hflush();
                    FSDataInputStream @in = null;
                    try
                    {
                        @in        = fileSys.Open(file);
                        firstBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                    }
                    finally
                    {
                        IOUtils.CloseStream(@in);
                    }
                    // test non-lease holder
                    DFSClient dfs = ((DistributedFileSystem)fileSys).dfs;
                    try
                    {
                        namenode.UpdateBlockForPipeline(firstBlock, "test" + dfs.clientName);
                        NUnit.Framework.Assert.Fail("Cannot get a new GS for a non lease holder");
                    }
                    catch (LeaseExpiredException e)
                    {
                        NUnit.Framework.Assert.IsTrue(e.Message.StartsWith("Lease mismatch"));
                    }
                    // test null lease holder
                    try
                    {
                        namenode.UpdateBlockForPipeline(firstBlock, null);
                        NUnit.Framework.Assert.Fail("Cannot get a new GS for a null lease holder");
                    }
                    catch (LeaseExpiredException e)
                    {
                        NUnit.Framework.Assert.IsTrue(e.Message.StartsWith("Lease mismatch"));
                    }
                    // test getNewStampAndToken on a rbw block
                    namenode.UpdateBlockForPipeline(firstBlock, dfs.clientName);
                }
                finally
                {
                    IOUtils.CloseStream(@out);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #27
        public virtual void TestBackupNodeTailsEdits()
        {
            Configuration conf = new HdfsConfiguration();

            HAUtil.SetAllowStandbyReads(conf, true);
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            BackupNode     backup  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                fileSys = cluster.GetFileSystem();
                backup  = StartBackupNode(conf, HdfsServerConstants.StartupOption.Backup, 1);
                BackupImage bnImage = (BackupImage)backup.GetFSImage();
                TestBNInSync(cluster, backup, 1);
                // Force a roll -- BN should roll with NN.
                NameNode          nn    = cluster.GetNameNode();
                NamenodeProtocols nnRpc = nn.GetRpcServer();
                nnRpc.RollEditLog();
                NUnit.Framework.Assert.AreEqual(bnImage.GetEditLog().GetCurSegmentTxId(), nn.GetFSImage
                                                    ().GetEditLog().GetCurSegmentTxId());
                // BN should stay in sync after roll
                TestBNInSync(cluster, backup, 2);
                long nnImageBefore = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
                // BN checkpoint
                backup.DoCheckpoint();
                // NN should have received a new image
                long nnImageAfter = nn.GetFSImage().GetStorage().GetMostRecentCheckpointTxId();
                NUnit.Framework.Assert.IsTrue("nn should have received new checkpoint. before: "
                                              + nnImageBefore + " after: " + nnImageAfter, nnImageAfter > nnImageBefore);
                // BN should stay in sync after checkpoint
                TestBNInSync(cluster, backup, 3);
                // Stop BN
                Storage.StorageDirectory sd = bnImage.GetStorage().GetStorageDir(0);
                backup.Stop();
                backup = null;
                // When shutting down the BN, it shouldn't finalize logs that are
                // still open on the NN
                FileJournalManager.EditLogFile editsLog = FSImageTestUtil.FindLatestEditsLog(sd);
                NUnit.Framework.Assert.AreEqual(editsLog.GetFirstTxId(), nn.GetFSImage().GetEditLog
                                                    ().GetCurSegmentTxId());
                NUnit.Framework.Assert.IsTrue("Should not have finalized " + editsLog, editsLog.IsInProgress
                                                  ());
                // do some edits
                NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(new Path("/edit-while-bn-down")));
                // start a new backup node
                backup = StartBackupNode(conf, HdfsServerConstants.StartupOption.Backup, 1);
                TestBNInSync(cluster, backup, 4);
                NUnit.Framework.Assert.IsNotNull(backup.GetNamesystem().GetFileInfo("/edit-while-bn-down"
                                                                                    , false));
            }
            finally
            {
                Log.Info("Shutting down...");
                if (backup != null)
                {
                    backup.Stop();
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            AssertStorageDirsMatch(cluster.GetNameNode(), backup);
        }
Example #28
        public virtual void TestSaveNamespace()
        {
            // start a cluster
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;
            FileSystem     fileSys = null;
            AtomicReference <Exception> caughtErr = new AtomicReference <Exception>();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Build();
                cluster.WaitActive();
                fileSys = cluster.GetFileSystem();
                FSNamesystem      namesystem = cluster.GetNamesystem();
                NamenodeProtocols nn         = cluster.GetNameNodeRpc();
                FSImage           fsimage    = namesystem.GetFSImage();
                FSEditLog         editLog    = fsimage.GetEditLog();
                StartTransactionWorkers(nn, caughtErr);
                for (int i = 0; i < NumSaveImage && caughtErr.Get() == null; i++)
                {
                    try
                    {
                        Sharpen.Thread.Sleep(20);
                    }
                    catch (Exception)
                    {
                    }
                    Log.Info("Save " + i + ": entering safe mode");
                    namesystem.EnterSafeMode(false);
                    // Verify edit logs before the save
                    // They should start with the first edit after the checkpoint
                    long logStartTxId = fsimage.GetStorage().GetMostRecentCheckpointTxId() + 1;
                    VerifyEditLogs(namesystem, fsimage, NNStorage.GetInProgressEditsFileName(logStartTxId
                                                                                             ), logStartTxId);
                    Log.Info("Save " + i + ": saving namespace");
                    namesystem.SaveNamespace();
                    Log.Info("Save " + i + ": leaving safemode");
                    long savedImageTxId = fsimage.GetStorage().GetMostRecentCheckpointTxId();
                    // Verify that edit logs post save got finalized and aren't corrupt
                    VerifyEditLogs(namesystem, fsimage, NNStorage.GetFinalizedEditsFileName(logStartTxId
                                                                                            , savedImageTxId), logStartTxId);
                    // The checkpoint id should be 1 less than the last written ID, since
                    // the log roll writes the "BEGIN" transaction to the new log.
                    NUnit.Framework.Assert.AreEqual(fsimage.GetStorage().GetMostRecentCheckpointTxId(
                                                        ), editLog.GetLastWrittenTxId() - 1);
                    namesystem.LeaveSafeMode();
                    Log.Info("Save " + i + ": complete");
                }
            }
            finally
            {
                StopTransactionWorkers();
                if (caughtErr.Get() != null)
                {
                    throw new RuntimeException(caughtErr.Get());
                }
                if (fileSys != null)
                {
                    fileSys.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #29
        // Comments for the fixture's fields (declared elsewhere): a root scratch
        // directory on the local filesystem; singleton master storage directories for a
        // Namenode and a Datanode; and checksums plus identifiers (namespaceId,
        // clusterId, blockpoolId, fsscTime) describing the contents of those directories
        // and of the blockpool's current, finalized, and rbw subdirectories.
        /// <summary>Initialize the data structures used by this class.</summary>
        /// <remarks>
        /// Initialize the data structures used by this class.
        /// IMPORTANT NOTE: This method must be called once before calling
        /// any other public method on this class.
        /// <p>
        /// Creates a singleton master populated storage
        /// directory for a Namenode (contains edits, fsimage,
        /// version, and time files) and a Datanode (contains version and
        /// block files).  This can be a lengthy operation.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public static void Initialize()
        {
            CreateEmptyDirs(new string[] { TestRootDir.ToString() });
            Configuration config = new HdfsConfiguration();

            config.Set(DFSConfigKeys.DfsNamenodeNameDirKey, namenodeStorage.ToString());
            config.Set(DFSConfigKeys.DfsNamenodeEditsDirKey, namenodeStorage.ToString());
            config.Set(DFSConfigKeys.DfsDatanodeDataDirKey, datanodeStorage.ToString());
            MiniDFSCluster cluster = null;
            string         bpid    = null;

            try
            {
                // format data-node
                CreateEmptyDirs(new string[] { datanodeStorage.ToString() });
                // format and start NameNode and start DataNode
                DFSTestUtil.FormatNameNode(config);
                cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).StartupOption(HdfsServerConstants.StartupOption
                                                                                           .Regular).Format(false).ManageDataDfsDirs(false).ManageNameDfsDirs(false).Build(
                    );
                NamenodeProtocols namenode = cluster.GetNameNodeRpc();
                namenodeStorageNamespaceID = namenode.VersionRequest().GetNamespaceID();
                namenodeStorageFsscTime    = namenode.VersionRequest().GetCTime();
                namenodeStorageClusterID   = namenode.VersionRequest().GetClusterID();
                namenodeStorageBlockPoolID = namenode.VersionRequest().GetBlockPoolID();
                FileSystem fs      = FileSystem.Get(config);
                Path       baseDir = new Path("/TestUpgrade");
                fs.Mkdirs(baseDir);
                // write some files
                int    bufferSize = 4096;
                byte[] buffer     = new byte[bufferSize];
                for (int i = 0; i < bufferSize; i++)
                {
                    buffer[i] = unchecked ((byte)((byte)('0') + i % 50));
                }
                WriteFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
                WriteFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
                // save image
                namenode.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
                namenode.SaveNamespace();
                namenode.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
                // write more files
                WriteFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
                WriteFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
                bpid = cluster.GetNamesystem(0).GetBlockPoolId();
            }
            finally
            {
                // shutdown
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                FileUtil.FullyDelete(new FilePath(namenodeStorage, "in_use.lock"));
                FileUtil.FullyDelete(new FilePath(datanodeStorage, "in_use.lock"));
            }
            namenodeStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType.NameNode,
                                                       new FilePath(namenodeStorage, "current"), false);
            FilePath dnCurDir = new FilePath(datanodeStorage, "current");

            datanodeStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType.DataNode,
                                                       dnCurDir, false);
            FilePath bpCurDir = new FilePath(BlockPoolSliceStorage.GetBpRoot(bpid, dnCurDir),
                                             "current");

            blockPoolStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType.DataNode
                                                        , bpCurDir, false);
            FilePath bpCurFinalizeDir = new FilePath(BlockPoolSliceStorage.GetBpRoot(bpid, dnCurDir
                                                                                     ), "current/" + DataStorage.StorageDirFinalized);

            blockPoolFinalizedStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType
                                                                 .DataNode, bpCurFinalizeDir, true);
            FilePath bpCurRbwDir = new FilePath(BlockPoolSliceStorage.GetBpRoot(bpid, dnCurDir
                                                                                ), "current/" + DataStorage.StorageDirRbw);

            blockPoolRbwStorageChecksum = ChecksumContents(HdfsServerConstants.NodeType.DataNode
                                                           , bpCurRbwDir, false);
        }
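A hedged usage sketch for the fixture above: Initialize() runs once up front, and later upgrade tests recompute directory checksums and compare them with the recorded reference values (the local variable and assertion message below are illustrative, and ChecksumContents is assumed to return a comparable checksum value, as its use above suggests):

        // Hypothetical follow-up check in an upgrade test of the same fixture.
        var checksumAfter = ChecksumContents(HdfsServerConstants.NodeType.NameNode,
                                             new FilePath(namenodeStorage, "current"), false);
        NUnit.Framework.Assert.AreEqual("NameNode storage contents changed unexpectedly",
                                        namenodeStorageChecksum, checksumAfter);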
Example #30
        public virtual void TestUpdatePipelineAfterDelete()
        {
            Configuration  conf    = new HdfsConfiguration();
            Path           file    = new Path("/test-file");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            try
            {
                FileSystem        fs       = cluster.GetFileSystem();
                NamenodeProtocols namenode = cluster.GetNameNodeRpc();
                DFSOutputStream   @out     = null;
                try
                {
                    // Create a file and make sure a block is allocated for it.
                    @out = (DFSOutputStream)(fs.Create(file).GetWrappedStream());
                    @out.Write(1);
                    @out.Hflush();
                    // Create a snapshot that includes the file.
                    SnapshotTestHelper.CreateSnapshot((DistributedFileSystem)fs, new Path("/"), "s1");
                    // Grab the block info of this file for later use.
                    FSDataInputStream @in      = null;
                    ExtendedBlock     oldBlock = null;
                    try
                    {
                        @in      = fs.Open(file);
                        oldBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
                    }
                    finally
                    {
                        IOUtils.CloseStream(@in);
                    }
                    // Allocate a new block ID/gen stamp so we can simulate pipeline
                    // recovery.
                    string       clientName      = ((DistributedFileSystem)fs).GetClient().GetClientName();
                    LocatedBlock newLocatedBlock = namenode.UpdateBlockForPipeline(oldBlock, clientName
                                                                                   );
                    ExtendedBlock newBlock = new ExtendedBlock(oldBlock.GetBlockPoolId(), oldBlock.GetBlockId
                                                                   (), oldBlock.GetNumBytes(), newLocatedBlock.GetBlock().GetGenerationStamp());
                    // Delete the file from the present FS. It will still exist the
                    // previously-created snapshot. This will log an OP_DELETE for the
                    // file in question.
                    fs.Delete(file, true);
                    // Simulate a pipeline recovery, wherein a new block is allocated
                    // for the existing block, resulting in an OP_UPDATE_BLOCKS being
                    // logged for the file in question.
                    try
                    {
                        namenode.UpdatePipeline(clientName, oldBlock, newBlock, newLocatedBlock.GetLocations
                                                    (), newLocatedBlock.GetStorageIDs());
                    }
                    catch (IOException ioe)
                    {
                        // normal
                        GenericTestUtils.AssertExceptionContains("does not exist or it is not under construction"
                                                                 , ioe);
                    }
                    // Make sure the NN can restart with the edit logs as we have them now.
                    cluster.RestartNameNode(true);
                }
                finally
                {
                    IOUtils.CloseStream(@out);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }