Example #1
        public virtual void TestDeletionWithZeroSizeBlock()
        {
            Path foo = new Path("/foo");
            Path bar = new Path(foo, "bar");

            DFSTestUtil.CreateFile(hdfs, bar, Blocksize, Replication, 0L);
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s0");
            hdfs.Append(bar);
            INodeFile barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();

            BlockInfoContiguous[] blks = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
            ExtendedBlock previous = new ExtendedBlock(fsn.GetBlockPoolId(), blks[0]);

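            // allocate one more block via the NN RPC without ever writing to
            // it, so the file ends with a zero-length block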
            cluster.GetNameNodeRpc().AddBlock(bar.ToString(), hdfs.GetClient().GetClientName(),
                                              previous, null, barNode.GetId(), null);
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s1");
            barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(2, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(0, blks[1].GetNumBytes());
            hdfs.Delete(bar, true);
            Path sbar = SnapshotTestHelper.GetSnapshotPath(foo, "s1", bar.GetName());

            barNode = fsdir.GetINode(sbar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
        }
Example #2
        public virtual void TestIsInSafemode()
        {
            // Check for the standby nn without client failover.
            NameNode nn2 = cluster.GetNameNode(1);

            NUnit.Framework.Assert.IsTrue("nn2 should be in standby state", nn2.IsStandbyState
                                              ());
            IPEndPoint            nameNodeAddress = nn2.GetNameNodeAddress();
            Configuration         conf            = new Configuration();
            DistributedFileSystem dfs             = new DistributedFileSystem();

            try
            {
                dfs.Initialize(URI.Create("hdfs://" + nameNodeAddress.GetHostName() + ":" +
                                          nameNodeAddress.Port), conf);
                dfs.IsInSafeMode();
                NUnit.Framework.Assert.Fail("StandBy should throw exception for isInSafeMode");
            }
            catch (IOException e)
            {
                if (e is RemoteException)
                {
                    IOException sbException = ((RemoteException)e).UnwrapRemoteException();
                    NUnit.Framework.Assert.IsTrue("StandBy nn should not support isInSafeMode",
                                                  sbException is StandbyException);
                }
                else
                {
                    throw;
                }
            }
            finally
            {
                if (null != dfs)
                {
                    dfs.Close();
                }
            }
            // Check with Client FailOver
            cluster.TransitionToStandby(0);
            cluster.TransitionToActive(1);
            cluster.GetNameNodeRpc(1).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                  false);
            DistributedFileSystem dfsWithFailOver = (DistributedFileSystem)fs;

            NUnit.Framework.Assert.IsTrue("ANN should be in SafeMode", dfsWithFailOver.IsInSafeMode
                                              ());
            cluster.GetNameNodeRpc(1).SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                  false);
            NUnit.Framework.Assert.IsFalse("ANN should be out of SafeMode", dfsWithFailOver.IsInSafeMode
                                               ());
        }
Example #3
 public virtual void TestFilesInGetListingOps()
 {
     CreateFile("/tmp1/t1", 3200, (short)3);
     CreateFile("/tmp1/t2", 3200, (short)3);
     CreateFile("/tmp2/t1", 3200, (short)3);
     CreateFile("/tmp2/t2", 3200, (short)3);
     cluster.GetNameNodeRpc().GetListing("/tmp1", HdfsFileStatus.EmptyName, false);
     MetricsAsserts.AssertCounter("FilesInGetListingOps", 2L, MetricsAsserts.GetMetrics(NnMetrics));
     cluster.GetNameNodeRpc().GetListing("/tmp2", HdfsFileStatus.EmptyName, false);
     MetricsAsserts.AssertCounter("FilesInGetListingOps", 4L, MetricsAsserts.GetMetrics(NnMetrics));
 }
        public virtual void TestTransactionAndCheckpointMetrics()
        {
            long lastCkptTime = MetricsAsserts.GetLongGauge("LastCheckpointTime", MetricsAsserts.GetMetrics(NsMetrics));

            MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics(NsMetrics));
            MetricsAsserts.AssertGauge("LastWrittenTransactionId", 1L, MetricsAsserts.GetMetrics(NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 1L, MetricsAsserts.GetMetrics(NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics(NsMetrics));
            fs.Mkdirs(new Path(TestRootDirPath, "/tmp"));
            UpdateMetrics();
            MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics(NsMetrics));
            MetricsAsserts.AssertGauge("LastWrittenTransactionId", 2L, MetricsAsserts.GetMetrics(NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 2L, MetricsAsserts.GetMetrics(NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 2L, MetricsAsserts.GetMetrics(NsMetrics));
            cluster.GetNameNodeRpc().RollEditLog();
            UpdateMetrics();
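            // rolling the edit log closes the current segment and opens a new
            // one, which plausibly accounts for LastWrittenTransactionId
            // jumping by two while TransactionsSinceLastLogRoll resets to 1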
            MetricsAsserts.AssertGauge("LastCheckpointTime", lastCkptTime, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("LastWrittenTransactionId", 4L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 4L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                 false);
            cluster.GetNameNodeRpc().SaveNamespace();
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                 false);
            UpdateMetrics();
            long newLastCkptTime = MetricsAsserts.GetLongGauge("LastCheckpointTime", MetricsAsserts.GetMetrics(NsMetrics));

            NUnit.Framework.Assert.IsTrue(lastCkptTime < newLastCkptTime);
            MetricsAsserts.AssertGauge("LastWrittenTransactionId", 6L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastCheckpoint", 1L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
            MetricsAsserts.AssertGauge("TransactionsSinceLastLogRoll", 1L, MetricsAsserts.GetMetrics
                                           (NsMetrics));
        }
        public virtual void TestOpenFilesWithRename()
        {
            Path path = new Path("/test");

            DoWriteAndAbort(fs, path);
            // check for zero sized blocks
            Path fileWithEmptyBlock = new Path("/test/test/test4");

            fs.Create(fileWithEmptyBlock);
            NamenodeProtocols nameNodeRpc = cluster.GetNameNodeRpc();
            string            clientName  = fs.GetClient().GetClientName();

            // create one empty block
            nameNodeRpc.AddBlock(fileWithEmptyBlock.ToString(), clientName, null, null,
                                 INodeId.GrandfatherInodeId, null);
            fs.CreateSnapshot(path, "s2");
            fs.Rename(new Path("/test/test"), new Path("/test/test-renamed"));
            fs.Delete(new Path("/test/test-renamed"), true);
            NameNode nameNode = cluster.GetNameNode();

            NameNodeAdapter.EnterSafeMode(nameNode, false);
            NameNodeAdapter.SaveNamespace(nameNode);
            NameNodeAdapter.LeaveSafeMode(nameNode);
            cluster.RestartNameNode(true);
        }
Example #6
        public virtual void Pipeline_01()
        {
            string MethodName = GenericTestUtils.GetMethodName();

            if (Log.IsDebugEnabled())
            {
                Log.Debug("Running " + MethodName);
            }
            Path filePath = new Path("/" + MethodName + ".dat");

            DFSTestUtil.CreateFile(fs, filePath, FileSize, ReplFactor, rand.NextLong());
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Invoking append but doing nothing otherwise...");
            }
            FSDataOutputStream ofs = fs.Append(filePath);

            ofs.WriteBytes("Some more stuff to write");
            ((DFSOutputStream)ofs.GetWrappedStream()).Hflush();
            IList<LocatedBlock> lb = cluster.GetNameNodeRpc().GetBlockLocations(filePath.ToString(),
                                                                                FileSize - 1, FileSize).GetLocatedBlocks();
            string bpid = cluster.GetNamesystem().GetBlockPoolId();

            foreach (DataNode dn in cluster.GetDataNodes())
            {
                Replica r = DataNodeTestUtils.FetchReplicaInfo(dn, bpid, lb[0].GetBlock().GetBlockId());
                NUnit.Framework.Assert.IsTrue("Replica on DN " + dn + " shouldn't be null", r != null);
                NUnit.Framework.Assert.AreEqual("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()",
                                                HdfsServerConstants.ReplicaState.Rbw, r.GetState());
            }
            ofs.Close();
        }
Example #7
        public virtual void TestGetBlockLocations()
        {
            NamenodeProtocols namenode = cluster.GetNameNodeRpc();
            Path               p       = new Path(BaseDir, "file2.dat");
            string             src     = p.ToString();
            FSDataOutputStream @out    = TestFileCreation.CreateFile(hdfs, p, 3);
            // write a half block
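            // ((uint)BlockSize) >> 1 is the C# rendering of Java's unsigned
            // right shift (>>>); it simply halves BlockSize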
            int len = (int)(((uint)BlockSize) >> 1);

            WriteFile(p, @out, len);
            for (int i = 1; i < NumBlocks;)
            {
                // verify consistency
                LocatedBlocks        lb     = namenode.GetBlockLocations(src, 0, len);
                IList <LocatedBlock> blocks = lb.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(i, blocks.Count);
                Block b = blocks[blocks.Count - 1].GetBlock().GetLocalBlock();
                NUnit.Framework.Assert.IsTrue(b is BlockInfoContiguousUnderConstruction);
                if (++i < NumBlocks)
                {
                    // write one more block
                    WriteFile(p, @out, BlockSize);
                    len += BlockSize;
                }
            }
            // close file
            @out.Close();
        }
        /// <exception cref="System.IO.IOException"/>
        public virtual void VerifyIncrementalBlockReports(bool splitReports)
        {
            // Get the block list for the file with the block locations.
            LocatedBlocks blocks = CreateFileGetBlocks(GenericTestUtils.GetMethodName());

            // We will send 'fake' incremental block reports to the NN that look
            // like they originated from DN 0.
            StorageReceivedDeletedBlocks[] reports = new StorageReceivedDeletedBlocks[dn0.GetFSDataset().GetVolumes().Count];
            // Lie to the NN that one block on each storage has been deleted.
            for (int i = 0; i < reports.Length; ++i)
            {
                FsVolumeSpi volume = dn0.GetFSDataset().GetVolumes()[i];
                bool        foundBlockOnStorage = false;
                ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[1];
                // Find the first block on this storage and mark it as deleted for the
                // report.
                foreach (LocatedBlock block in blocks.GetLocatedBlocks())
                {
                    if (block.GetStorageIDs()[0].Equals(volume.GetStorageID()))
                    {
                        rdbi[0] = new ReceivedDeletedBlockInfo(block.GetBlock().GetLocalBlock(),
                                                               ReceivedDeletedBlockInfo.BlockStatus.DeletedBlock, null);
                        foundBlockOnStorage = true;
                        break;
                    }
                }
                NUnit.Framework.Assert.IsTrue(foundBlockOnStorage);
                reports[i] = new StorageReceivedDeletedBlocks(volume.GetStorageID(), rdbi);
                if (splitReports)
                {
                    // If we are splitting reports then send the report for this storage now.
                    StorageReceivedDeletedBlocks[] singletonReport = new StorageReceivedDeletedBlocks[] { reports[i] };
                    cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dn0Reg, poolId, singletonReport);
                }
            }
            if (!splitReports)
            {
                // Send a combined report.
                cluster.GetNameNodeRpc().BlockReceivedAndDeleted(dn0Reg, poolId, reports);
            }
            // Make sure that the deleted block from each storage was picked up
            // by the NameNode.
            Assert.AssertThat(cluster.GetNamesystem().GetMissingBlocksCount(), IS.Is((long)reports.Length));
        }
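        /// <summary>
        /// A sketch of hypothetical NUnit entry points (names assumed, not
        /// from the source) that exercise both paths of the helper above: one
        /// combined report covering all storages, and one report per storage.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual void TestCombinedIncrementalBlockReport()
        {
            VerifyIncrementalBlockReports(false);
        }

        /// <exception cref="System.IO.IOException"/>
        public virtual void TestSplitIncrementalBlockReport()
        {
            VerifyIncrementalBlockReports(true);
        }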
 /// <summary>
 /// Return the namespace ID inherent in the currently running
 /// Namenode.
 /// </summary>
 /// <remarks>
 /// Return the namespace ID inherent in the currently running
 /// Namenode.  If no Namenode is running, return the namespace ID of
 /// the master Namenode storage directory.
 /// The UpgradeUtilities.initialize() method must be called once before
 /// calling this method.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 public static int GetCurrentNamespaceID(MiniDFSCluster cluster)
 {
     if (cluster != null)
     {
         return(cluster.GetNameNodeRpc().VersionRequest().GetNamespaceID());
     }
     return(namenodeStorageNamespaceID);
 }
 /// <summary>
 /// Return the blockpool ID inherent in the currently running
 /// Namenode.
 /// </summary>
 /// <exception cref="System.IO.IOException"/>
 public static string GetCurrentBlockPoolID(MiniDFSCluster cluster)
 {
     if (cluster != null)
     {
         return(cluster.GetNameNodeRpc().VersionRequest().GetBlockPoolID());
     }
     return(namenodeStorageBlockPoolID);
 }
 /// <summary>
 /// Return the File System State Creation Timestamp (FSSCTime) inherent
 /// in the currently running Namenode.
 /// </summary>
 /// <remarks>
 /// Return the File System State Creation Timestamp (FSSCTime) inherent
 /// in the currently running Namenode.  If no Namenode is running,
 /// return the FSSCTime of the master Namenode storage directory.
 /// The UpgradeUtilities.initialize() method must be called once before
 /// calling this method.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 public static long GetCurrentFsscTime(MiniDFSCluster cluster)
 {
     if (cluster != null)
     {
         return(cluster.GetNameNodeRpc().VersionRequest().GetCTime());
     }
     return(namenodeStorageFsscTime);
 }
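 /// <summary>
 /// A convenience sketch, not part of the original class: each getter above
 /// issues its own VersionRequest() RPC, so a caller that needs the
 /// namespace ID, blockpool ID and FSSCTime together can fetch the
 /// NamespaceInfo once and read all three fields from it.  Unlike the
 /// getters above, this sketch assumes a running Namenode (no fallback to
 /// the master Namenode storage directory).
 /// </summary>
 /// <exception cref="System.IO.IOException"/>
 public static NamespaceInfo GetCurrentVersionInfo(MiniDFSCluster cluster)
 {
     return(cluster.GetNameNodeRpc().VersionRequest());
 }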
        public virtual void TestRetryAddBlockWhileInChooseTarget()
        {
            string            src = "/testRetryAddBlockWhileInChooseTarget";
            FSNamesystem      ns  = cluster.GetNamesystem();
            NamenodeProtocols nn  = cluster.GetNameNodeRpc();

            // create file
            nn.Create(src, FsPermission.GetFileDefault(), "clientName",
                      new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Create)), true, (short)3, 1024, null);
            // start first addBlock()
            Log.Info("Starting first addBlock for " + src);
            LocatedBlock[]        onRetryBlock = new LocatedBlock[1];
            DatanodeStorageInfo[] targets      = ns.GetNewBlockTargets(src, INodeId.GrandfatherInodeId,
                                                                       "clientName", null, null, null, onRetryBlock);
            NUnit.Framework.Assert.IsNotNull("Targets must be generated", targets);
            // run second addBlock()
            Log.Info("Starting second addBlock for " + src);
            nn.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId, null);
            NUnit.Framework.Assert.IsTrue("Penultimate block must be complete", CheckFileProgress
                                              (src, false));
            LocatedBlocks lbs = nn.GetBlockLocations(src, 0, long.MaxValue);

            NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count
                                            );
            LocatedBlock lb2 = lbs.Get(0);

            NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb2.GetLocations
                                                ().Length);
            // continue first addBlock()
            LocatedBlock newBlock = ns.StoreAllocatedBlock(src, INodeId.GrandfatherInodeId, "clientName"
                                                           , null, targets);

            NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb2.GetBlock(), newBlock.
                                            GetBlock());
            // check locations
            lbs = nn.GetBlockLocations(src, 0, long.MaxValue);
            NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count
                                            );
            LocatedBlock lb1 = lbs.Get(0);

            NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb1.GetLocations
                                                ().Length);
            NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb1.GetBlock(), lb2.GetBlock
                                                ());
        }
        /// <summary>
        /// Test that getContentSummary on Standby should throw a standby
        /// exception.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestgetContentSummaryOnStandby()
        {
            Configuration nn1conf = cluster.GetConfiguration(1);

            // just reset the standby reads to default i.e False on standby.
            HAUtil.SetAllowStandbyReads(nn1conf, false);
            cluster.RestartNameNode(1);
            cluster.GetNameNodeRpc(1).GetContentSummary("/");
        }
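        // The expected-exception attribute was apparently lost in conversion;
        // an explicit sketch of the same assertion (method name assumed),
        // mirroring the StandbyException handling used elsewhere in these
        // examples:
        public virtual void TestgetContentSummaryOnStandbyExplicit()
        {
            Configuration nn1conf = cluster.GetConfiguration(1);

            HAUtil.SetAllowStandbyReads(nn1conf, false);
            cluster.RestartNameNode(1);
            try
            {
                cluster.GetNameNodeRpc(1).GetContentSummary("/");
                NUnit.Framework.Assert.Fail("getContentSummary on standby should throw StandbyException");
            }
            catch (StandbyException)
            {
                // expected
            }
        }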
 /// <summary>
 /// Test case that stops a writer after finalizing a block but
 /// before calling completeFile, recovers a file from another writer,
 /// starts writing from that writer, and then has the old lease holder
 /// call completeFile
 /// </summary>
 /// <exception cref="System.Exception"/>
 public virtual void TestCompleteOtherLeaseHoldersFile()
 {
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
     try
     {
         cluster.WaitActive();
         NamenodeProtocols preSpyNN = cluster.GetNameNodeRpc();
         NamenodeProtocols spyNN    = Org.Mockito.Mockito.Spy(preSpyNN);
         // Delay completeFile
         GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(Log);
         Org.Mockito.Mockito.DoAnswer(delayer).When(spyNN).Complete(Matchers.AnyString(),
                                                                    Matchers.AnyString(), (ExtendedBlock)Matchers.AnyObject(), Matchers.AnyLong());
         DFSClient client = new DFSClient(null, spyNN, conf, null);
         file1 = new Path("/testCompleteOtherLease");
         OutputStream stm = client.Create("/testCompleteOtherLease", true);
         // write 1/2 block
         AppendTestUtil.Write(stm, 0, 4096);
         AtomicReference <Exception> err = new AtomicReference <Exception>();
         Sharpen.Thread t = new _Thread_242(stm, err);
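         // _Thread_242 is the converter's generated name for a Java anonymous
         // Thread; judging from the log messages below it attempts to Close()
         // `stm`, recording any exception in `err`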
         t.Start();
         Log.Info("Waiting for close to get to latch...");
         delayer.WaitForCall();
         // At this point, the block is finalized on the DNs, but the file
         // has not been completed in the NN.
         // Lose the leases
         Log.Info("Killing lease checker");
         client.GetLeaseRenewer().InterruptAndJoin();
         FileSystem fs1 = cluster.GetFileSystem();
         FileSystem fs2 = AppendTestUtil.CreateHdfsWithDifferentUsername(fs1.GetConf());
         Log.Info("Recovering file");
         RecoverFile(fs2);
         Log.Info("Opening file for append from new fs");
         FSDataOutputStream appenderStream = fs2.Append(file1);
         Log.Info("Writing some data from new appender");
         AppendTestUtil.Write(appenderStream, 0, 4096);
         Log.Info("Telling old close to proceed.");
         delayer.Proceed();
         Log.Info("Waiting for close to finish.");
         t.Join();
         Log.Info("Close finished.");
         // We expect that close will get a "Lease mismatch"
         // error.
         Exception thrownByClose = err.Get();
         NUnit.Framework.Assert.IsNotNull(thrownByClose);
         NUnit.Framework.Assert.IsTrue(thrownByClose is IOException);
         if (!thrownByClose.Message.Contains("Lease mismatch"))
         {
             throw thrownByClose;
         }
         // The appender should be able to close properly
         appenderStream.Close();
     }
     finally
     {
         cluster.Shutdown();
     }
 }
        public virtual void TestDeadDatanode()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 500);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            cluster = new MiniDFSCluster.Builder(conf).Build();
            cluster.WaitActive();
            string poolId = cluster.GetNamesystem().GetBlockPoolId();
            // wait for datanode to be marked live
            DataNode             dn  = cluster.GetDataNodes()[0];
            DatanodeRegistration reg = DataNodeTestUtils.GetDNRegistrationForBP(cluster.GetDataNodes()[0], poolId);

            DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), true, 20000);
            // Shutdown and wait for datanode to be marked dead
            dn.Shutdown();
            DFSTestUtil.WaitForDatanodeState(cluster, reg.GetDatanodeUuid(), false, 20000);
            DatanodeProtocol dnp = cluster.GetNameNodeRpc();

            ReceivedDeletedBlockInfo[] blocks = new ReceivedDeletedBlockInfo[] {
                new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.ReceivedBlock, null) };
            StorageReceivedDeletedBlocks[] storageBlocks = new StorageReceivedDeletedBlocks[] {
                new StorageReceivedDeletedBlocks(reg.GetDatanodeUuid(), blocks) };
            // Ensure blockReceived call from dead datanode is rejected with IOException
            try
            {
                dnp.BlockReceivedAndDeleted(reg, poolId, storageBlocks);
                NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
            }
            catch (IOException)
            {
                // Expected
            }
            // Ensure blockReport from dead datanode is rejected with IOException
            StorageBlockReport[] report = new StorageBlockReport[] {
                new StorageBlockReport(new DatanodeStorage(reg.GetDatanodeUuid()), BlockListAsLongs.Empty) };
            try
            {
                dnp.BlockReport(reg, poolId, report, new BlockReportContext(1, 0, Runtime.NanoTime()));
                NUnit.Framework.Assert.Fail("Expected IOException is not thrown");
            }
            catch (IOException)
            {
                // Expected
            }
            // Ensure heartbeat from dead datanode is rejected with a command
            // that asks datanode to register again
            StorageReport[] rep = new StorageReport[] {
                new StorageReport(new DatanodeStorage(reg.GetDatanodeUuid()), false, 0, 0, 0, 0) };
            DatanodeCommand[] cmd = dnp.SendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null).GetCommands();
            NUnit.Framework.Assert.AreEqual(1, cmd.Length);
            NUnit.Framework.Assert.AreEqual(cmd[0].GetAction(), RegisterCommand.Register.GetAction());
        }
Example #16
 public virtual void StartUpCluster()
 {
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(ReplFactor).Build();
     NUnit.Framework.Assert.IsNotNull("Failed Cluster Creation", cluster);
     cluster.WaitClusterUp();
     dfs = cluster.GetFileSystem();
     NUnit.Framework.Assert.IsNotNull("Failed to get FileSystem", dfs);
     nn = cluster.GetNameNodeRpc();
     NUnit.Framework.Assert.IsNotNull("Failed to get NameNode", nn);
 }
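 // A matching teardown sketch (assumed, not from the source), releasing the
 // fields initialized above in reverse order as the other examples do in
 // their finally blocks:
 public virtual void ShutDownCluster()
 {
     if (dfs != null)
     {
         dfs.Close();
     }
     if (cluster != null)
     {
         cluster.Shutdown();
     }
 }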
Example #17
        public static void SetUp()
        {
            // start a cluster
            Configuration conf = new HdfsConfiguration();

            // High value of replication interval
            // so that blocks remain under-replicated
            conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 1000);
            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
            conf.SetLong(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1L);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDataNodes).Build();
            cluster.WaitActive();
            fileSys = cluster.GetFileSystem();
            nnRpc   = cluster.GetNameNodeRpc();
        }
        /// <summary>
        /// Run file operations to create edits for all op codes
        /// to be tested.
        /// </summary>
        /// <remarks>
        /// Run file operations to create edits for all op codes
        /// to be tested.
        /// the following op codes are deprecated and therefore not tested:
        /// OP_DATANODE_ADD    ( 5)
        /// OP_DATANODE_REMOVE ( 6)
        /// OP_SET_NS_QUOTA    (11)
        /// OP_CLEAR_NS_QUOTA  (12)
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        private CheckpointSignature RunOperations()
        {
            Log.Info("Creating edits by performing fs operations");
            // no explicit check needed; if it's not a DistributedFileSystem
            // this throws, which is what we want
            DistributedFileSystem dfs = cluster.GetFileSystem();

            DFSTestUtil.RunOperations(cluster, dfs, cluster.GetConfiguration(0), dfs.GetDefaultBlockSize(), 0);
            // OP_ROLLING_UPGRADE_START
            cluster.GetNamesystem().GetEditLog().LogStartRollingUpgrade(Time.Now());
            // OP_ROLLING_UPGRADE_FINALIZE
            cluster.GetNamesystem().GetEditLog().LogFinalizeRollingUpgrade(Time.Now());
            // Force a roll so we get an OP_END_LOG_SEGMENT txn
            return(cluster.GetNameNodeRpc().RollEditLog());
        }
        public virtual void TestBlockRecoveryWithLessMetafile()
        {
            Configuration conf = new Configuration();

            conf.Set(DFSConfigKeys.DfsBlockLocalPathAccessUserKey,
                     UserGroupInformation.GetCurrentUser().GetShortUserName());
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            Path file = new Path("/testRecoveryFile");
            DistributedFileSystem dfs  = cluster.GetFileSystem();
            FSDataOutputStream    @out = dfs.Create(file);
            int count = 0;

            while (count < 2 * 1024 * 1024)
            {
                @out.WriteBytes("Data");
                count += 4;
            }
            @out.Hsync();
            // abort the original stream
            ((DFSOutputStream)@out.GetWrappedStream()).Abort();
            LocatedBlocks locations = cluster.GetNameNodeRpc().GetBlockLocations(file.ToString(), 0, count);
            ExtendedBlock      block         = locations.Get(0).GetBlock();
            DataNode           dn            = cluster.GetDataNodes()[0];
            BlockLocalPathInfo localPathInfo = dn.GetBlockLocalPathInfo(block, null);
            FilePath           metafile      = new FilePath(localPathInfo.GetMetaPath());

            NUnit.Framework.Assert.IsTrue(metafile.Exists());
            // reduce the block meta file size
            RandomAccessFile raf = new RandomAccessFile(metafile, "rw");

            raf.SetLength(metafile.Length() - 20);
            raf.Close();
            // restart DN to make replica to RWR
            MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
            cluster.RestartDataNode(dnProp, true);
            // try to recover the lease
            DistributedFileSystem newdfs = (DistributedFileSystem)FileSystem.NewInstance(cluster.GetConfiguration(0));

            count = 0;
            while (++count < 10 && !newdfs.RecoverLease(file))
            {
                Sharpen.Thread.Sleep(1000);
            }
            NUnit.Framework.Assert.IsTrue("File should be closed", newdfs.RecoverLease(file));
        }
Example #20
        public virtual void TestBlockHasMultipleReplicasOnSameDN()
        {
            string filename = MakeFileName(GenericTestUtils.GetMethodName());
            Path   filePath = new Path(filename);

            // Write out a file with a few blocks.
            DFSTestUtil.CreateFile(fs, filePath, BlockSize, BlockSize * NumBlocks, BlockSize,
                                   NumDatanodes, seed);
            // Get the block list for the file with the block locations.
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(filePath.ToString(), 0,
                                                                  BlockSize * NumBlocks);
            // Generate a fake block report from one of the DataNodes, such
            // that it reports one copy of each block on either storage.
            DataNode             dn    = cluster.GetDataNodes()[0];
            DatanodeRegistration dnReg = dn.GetDNRegistrationForBP(bpid);

            StorageBlockReport[] reports = new StorageBlockReport[cluster.GetStoragesPerDatanode()];
            AList <Replica> blocks = new AList <Replica>();

            foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
            {
                Block localBlock = locatedBlock.GetBlock().GetLocalBlock();
                blocks.AddItem(new FinalizedReplica(localBlock, null, null));
            }
            BlockListAsLongs bll = BlockListAsLongs.Encode(blocks);

            for (int i = 0; i < cluster.GetStoragesPerDatanode(); ++i)
            {
                FsVolumeSpi     v   = dn.GetFSDataset().GetVolumes()[i];
                DatanodeStorage dns = new DatanodeStorage(v.GetStorageID());
                reports[i] = new StorageBlockReport(dns, bll);
            }
            // Should not assert!
            cluster.GetNameNodeRpc().BlockReport(dnReg, bpid, reports,
                                                 new BlockReportContext(1, 0, Runtime.NanoTime()));
            // Get the block locations once again.
            locatedBlocks = client.GetLocatedBlocks(filename, 0, BlockSize * NumBlocks);
            // Make sure that each block has two replicas, one on each DataNode.
            foreach (LocatedBlock locatedBlock_1 in locatedBlocks.GetLocatedBlocks())
            {
                DatanodeInfo[] locations = locatedBlock_1.GetLocations();
                Assert.AssertThat(locations.Length, IS.Is((int)NumDatanodes));
                Assert.AssertThat(locations[0].GetDatanodeUuid(), CoreMatchers.Not(locations[1].GetDatanodeUuid()));
            }
        }
        public virtual void TestMultipleSecondaryCheckpoint()
        {
            SecondaryNameNode secondary = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).ManageNameDfsDirs(false).Build();
                cluster.WaitActive();
                secondary = new SecondaryNameNode(config);
                FSImage fsImage = cluster.GetNameNode().GetFSImage();
                PrintStorages(fsImage);
                FileSystem fs       = cluster.GetFileSystem();
                Path       testPath = new Path("/", "test");
                NUnit.Framework.Assert.IsTrue(fs.Mkdirs(testPath));
                PrintStorages(fsImage);
                // Take name1 offline
                InvalidateStorage(fsImage, ImmutableSet.Of(path1));
                // Simulate a 2NN beginning a checkpoint, but not finishing. This will
                // cause name1 to be restored.
                cluster.GetNameNodeRpc().RollEditLog();
                PrintStorages(fsImage);
                // Now another 2NN comes along to do a full checkpoint.
                secondary.DoCheckpoint();
                PrintStorages(fsImage);
                // The created file should still exist in the in-memory FS state after the
                // checkpoint.
                NUnit.Framework.Assert.IsTrue("path exists before restart", fs.Exists(testPath));
                secondary.Shutdown();
                // Restart the NN so it reloads the edits from on-disk.
                cluster.RestartNameNode();
                // The created file should still exist after the restart.
                NUnit.Framework.Assert.IsTrue("path should still exist after restart", fs.Exists(
                                                  testPath));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                if (secondary != null)
                {
                    secondary.Shutdown();
                }
            }
        }
Example #22
        public virtual void Setup()
        {
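            // clear host-to-rack mappings left over from earlier tests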
            StaticMapping.ResetMap();
            Configuration conf = new HdfsConfiguration();

            string[] racks = new string[] { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
            string[] hosts = new string[] { "/host0", "/host1", "/host2", "/host3", "/host4" };
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, DefaultBlockSize / 2);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Racks(racks).Hosts(hosts).Build();
            cluster.WaitActive();
            nameNodeRpc = cluster.GetNameNodeRpc();
            namesystem  = cluster.GetNamesystem();
            perm        = new PermissionStatus("TestDefaultBlockPlacementPolicy", null, FsPermission.GetDefault());
        }
        /// <summary>
        /// Make sure a retry call does not hang because of the exception thrown in the
        /// first call.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpdatePipelineWithFailOver()
        {
            cluster.Shutdown();
            nnRpc      = null;
            filesystem = null;
            cluster    = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                             .NumDataNodes(1).Build();
            cluster.WaitActive();
            NamenodeProtocols ns0      = cluster.GetNameNodeRpc(0);
            ExtendedBlock     oldBlock = new ExtendedBlock();
            ExtendedBlock     newBlock = new ExtendedBlock();

            DatanodeID[] newNodes    = new DatanodeID[2];
            string[]     newStorages = new string[2];
            NewCall();
            try
            {
                ns0.UpdatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
                NUnit.Framework.Assert.Fail("Expect StandbyException from the updatePipeline call"
                                            );
            }
            catch (StandbyException e)
            {
                // expected, since in the beginning both nn are in standby state
                GenericTestUtils.AssertExceptionContains(HAServiceProtocol.HAServiceState.Standby.ToString(), e);
            }
            cluster.TransitionToActive(0);
            try
            {
                ns0.UpdatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
            }
            catch (IOException)
            {
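                // ignored: the point of the test is only that this retried
                // call returns instead of hanging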
            }
        }
        public virtual void TestGetBlockLocations()
        {
            Path root = new Path("/");
            Path file = new Path("/file");

            DFSTestUtil.CreateFile(hdfs, file, Blocksize, Replication, seed);
            // take a snapshot on root
            SnapshotTestHelper.CreateSnapshot(hdfs, root, "s1");
            Path fileInSnapshot = SnapshotTestHelper.GetSnapshotPath(root, "s1", file.GetName());
            FileStatus status = hdfs.GetFileStatus(fileInSnapshot);

            // make sure we record the size for the file
            NUnit.Framework.Assert.AreEqual(Blocksize, status.GetLen());
            // append data to file
            DFSTestUtil.AppendFile(hdfs, file, Blocksize - 1);
            status = hdfs.GetFileStatus(fileInSnapshot);
            // the size of snapshot file should still be BLOCKSIZE
            NUnit.Framework.Assert.AreEqual(Blocksize, status.GetLen());
            // the size of the file should be (2 * BLOCKSIZE - 1)
            status = hdfs.GetFileStatus(file);
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, status.GetLen());
            // call DFSClient#callGetBlockLocations for the file in snapshot
            LocatedBlocks blocks = DFSClientAdapter.CallGetBlockLocations(cluster.GetNameNodeRpc(),
                                                                          fileInSnapshot.ToString(), 0, long.MaxValue);
            IList<LocatedBlock> blockList = blocks.GetLocatedBlocks();

            // should be only one block
            NUnit.Framework.Assert.AreEqual(Blocksize, blocks.GetFileLength());
            NUnit.Framework.Assert.AreEqual(1, blockList.Count);
            // check the last block
            LocatedBlock lastBlock = blocks.GetLastLocatedBlock();

            NUnit.Framework.Assert.AreEqual(0, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetBlockSize());
            // take another snapshot
            SnapshotTestHelper.CreateSnapshot(hdfs, root, "s2");
            Path fileInSnapshot2 = SnapshotTestHelper.GetSnapshotPath(root, "s2", file.GetName());
            // append data to file without closing
            HdfsDataOutputStream @out = AppendFileWithoutClosing(file, Blocksize);

            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            status = hdfs.GetFileStatus(fileInSnapshot2);
            // the size of snapshot file should be BLOCKSIZE*2-1
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, status.GetLen());
            // the size of the file should be (3 * BLOCKSIZE - 1)
            status = hdfs.GetFileStatus(file);
            NUnit.Framework.Assert.AreEqual(Blocksize * 3 - 1, status.GetLen());
            blocks = DFSClientAdapter.CallGetBlockLocations(cluster.GetNameNodeRpc(),
                                                            fileInSnapshot2.ToString(), 0, long.MaxValue);
            NUnit.Framework.Assert.IsFalse(blocks.IsUnderConstruction());
            NUnit.Framework.Assert.IsTrue(blocks.IsLastBlockComplete());
            blockList = blocks.GetLocatedBlocks();
            // should be 2 blocks
            NUnit.Framework.Assert.AreEqual(Blocksize * 2 - 1, blocks.GetFileLength());
            NUnit.Framework.Assert.AreEqual(2, blockList.Count);
            // check the last block
            lastBlock = blocks.GetLastLocatedBlock();
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize, lastBlock.GetBlockSize());
            blocks = DFSClientAdapter.CallGetBlockLocations(cluster.GetNameNodeRpc(),
                                                            fileInSnapshot2.ToString(), Blocksize, 0);
            blockList = blocks.GetLocatedBlocks();
            NUnit.Framework.Assert.AreEqual(1, blockList.Count);
            // check blocks for file being written
            blocks = DFSClientAdapter.CallGetBlockLocations(cluster.GetNameNodeRpc(), file.ToString(),
                                                            0, long.MaxValue);
            blockList = blocks.GetLocatedBlocks();
            NUnit.Framework.Assert.AreEqual(3, blockList.Count);
            NUnit.Framework.Assert.IsTrue(blocks.IsUnderConstruction());
            NUnit.Framework.Assert.IsFalse(blocks.IsLastBlockComplete());
            lastBlock = blocks.GetLastLocatedBlock();
            NUnit.Framework.Assert.AreEqual(Blocksize * 2, lastBlock.GetStartOffset());
            NUnit.Framework.Assert.AreEqual(Blocksize - 1, lastBlock.GetBlockSize());
            @out.Close();
        }
        public virtual void TestBlockSynchronization()
        {
            int           OrgFileSize = 3000;
            Configuration conf        = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
            cluster.WaitActive();
            //create a file
            DistributedFileSystem dfs = cluster.GetFileSystem();
            string filestr            = "/foo";
            Path   filepath           = new Path(filestr);

            DFSTestUtil.CreateFile(dfs, filepath, OrgFileSize, ReplicationNum, 0L);
            NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
            DFSTestUtil.WaitReplication(dfs, filepath, ReplicationNum);
            //get block info for the last block
            LocatedBlock locatedblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(), filestr);

            DatanodeInfo[] datanodeinfos = locatedblock.GetLocations();
            NUnit.Framework.Assert.AreEqual(ReplicationNum, datanodeinfos.Length);
            //connect to data nodes
            DataNode[] datanodes = new DataNode[ReplicationNum];
            for (int i = 0; i < ReplicationNum; i++)
            {
                datanodes[i] = cluster.GetDataNode(datanodeinfos[i].GetIpcPort());
                NUnit.Framework.Assert.IsTrue(datanodes[i] != null);
            }
            //verify Block Info
            ExtendedBlock lastblock = locatedblock.GetBlock();

            DataNode.Log.Info("newblocks=" + lastblock);
            for (int i_1 = 0; i_1 < ReplicationNum; i_1++)
            {
                CheckMetaInfo(lastblock, datanodes[i_1]);
            }
            DataNode.Log.Info("dfs.dfs.clientName=" + dfs.dfs.clientName);
            cluster.GetNameNodeRpc().Append(filestr, dfs.dfs.clientName,
                                            new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Append)));
            // expire lease to trigger block recovery.
            WaitLeaseRecovery(cluster);
            Block[] updatedmetainfo = new Block[ReplicationNum];
            long    oldSize         = lastblock.GetNumBytes();

            lastblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(), filestr).GetBlock();
            long currentGS = lastblock.GetGenerationStamp();

            for (int i_2 = 0; i_2 < ReplicationNum; i_2++)
            {
                updatedmetainfo[i_2] = DataNodeTestUtils.GetFSDataset(datanodes[i_2]).GetStoredBlock(lastblock.GetBlockPoolId(),
                                                                                                     lastblock.GetBlockId());
                NUnit.Framework.Assert.AreEqual(lastblock.GetBlockId(), updatedmetainfo[i_2].GetBlockId());
                NUnit.Framework.Assert.AreEqual(oldSize, updatedmetainfo[i_2].GetNumBytes());
                NUnit.Framework.Assert.AreEqual(currentGS, updatedmetainfo[i_2].GetGenerationStamp());
            }
            // verify that lease recovery does not occur when namenode is in safemode
            System.Console.Out.WriteLine("Testing that lease recovery cannot happen during safemode."
                                         );
            filestr  = "/foo.safemode";
            filepath = new Path(filestr);
            dfs.Create(filepath, (short)1);
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                 false);
            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
            DFSTestUtil.WaitReplication(dfs, filepath, (short)1);
            WaitLeaseRecovery(cluster);
            // verify that we still cannot recover the lease
            LeaseManager lm = NameNodeAdapter.GetLeaseManager(cluster.GetNamesystem());

            NUnit.Framework.Assert.IsTrue("Found " + lm.CountLease() + " lease, expected 1",
                                          lm.CountLease() == 1);
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                 false);
        }
Example #26
        public virtual void TestVolumeFailure()
        {
            System.Console.Out.WriteLine("Data dir: is " + dataDir.GetPath());
            // Data dir structure is dataDir/data[1-4]/[current,tmp...]
            // data1,2 belong to datanode 1; data3,4 to datanode 2
            string filename = "/test.txt";
            Path   filePath = new Path(filename);
            // we use only small number of blocks to avoid creating subdirs in the data dir..
            int filesize = block_size * blocks_num;

            DFSTestUtil.CreateFile(fs, filePath, filesize, repl, 1L);
            DFSTestUtil.WaitReplication(fs, filePath, repl);
            System.Console.Out.WriteLine("file " + filename + "(size " + filesize + ") is created and replicated"
                                         );
            // fail the volume
            // delete/make non-writable one of the directories (failed volume)
            data_fail = new FilePath(dataDir, "data3");
            failedDir = MiniDFSCluster.GetFinalizedDir(dataDir, cluster.GetNamesystem().GetBlockPoolId());
            if (failedDir.Exists() && !DeteteBlocks(failedDir))
            {
                //!FileUtil.fullyDelete(failedDir)
                throw new IOException("Could not delete hdfs directory '" + failedDir + "'");
            }
            data_fail.SetReadOnly();
            failedDir.SetReadOnly();
            System.Console.Out.WriteLine("Deleteing " + failedDir.GetPath() + "; exist=" + failedDir
                                         .Exists());
            // access all the blocks on the "failed" DataNode,
            // we need to make sure that the "failed" volume is being accessed -
            // and that will cause failure, blocks removal, "emergency" block report
            TriggerFailure(filename, filesize);
            // make sure a block report is sent
            DataNode dn = cluster.GetDataNodes()[1];
            //corresponds to dir data3
            string bpid = cluster.GetNamesystem().GetBlockPoolId();
            DatanodeRegistration dnR = dn.GetDNRegistrationForBP(bpid);
            IDictionary<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists = dn.GetFSDataset().GetBlockReports(bpid);

            // Send block report
            StorageBlockReport[] reports = new StorageBlockReport[perVolumeBlockLists.Count];
            int reportIndex = 0;

            foreach (KeyValuePair <DatanodeStorage, BlockListAsLongs> kvPair in perVolumeBlockLists)
            {
                DatanodeStorage  dnStorage = kvPair.Key;
                BlockListAsLongs blockList = kvPair.Value;
                reports[reportIndex++] = new StorageBlockReport(dnStorage, blockList);
            }
            cluster.GetNameNodeRpc().BlockReport(dnR, bpid, reports, null);
            // verify number of blocks and files...
            Verify(filename, filesize);
            // create another file (with one volume failed).
            System.Console.Out.WriteLine("creating file test1.txt");
            Path fileName1 = new Path("/test1.txt");

            DFSTestUtil.CreateFile(fs, fileName1, filesize, repl, 1L);
            // should be able to replicate to both nodes (2 DN, repl=2)
            DFSTestUtil.WaitReplication(fs, fileName1, repl);
            System.Console.Out.WriteLine("file " + fileName1.GetName() + " is created and replicated"
                                         );
        }
        private static string GetServiceRpcServerAddress(MiniDFSCluster cluster)
        {
            NameNodeRpcServer rpcServer = (NameNodeRpcServer)cluster.GetNameNodeRpc();

            return(rpcServer.GetServiceRpcServer().GetListenerAddress().Address.ToString());
        }
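        // Hypothetical usage of the helper above; DfsNamenodeServiceRpcAddressKey
        // is assumed from the DFSConfigKeys naming convention seen in these
        // examples, and Configuration's copy constructor is assumed available.
        private static Configuration ConfForServiceRpc(MiniDFSCluster cluster)
        {
            Configuration conf = new Configuration(cluster.GetConfiguration(0));

            conf.Set(DFSConfigKeys.DfsNamenodeServiceRpcAddressKey, GetServiceRpcServerAddress(cluster));
            return(conf);
        }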