Example #1
        /// <summary>Test the snapshot path inodes while adding a new file after taking a snapshot.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotPathINodesWithAddedFile()
        {
            // Create a snapshot for the dir, and check the inodes for the path
            // pointing to a snapshot file
            hdfs.AllowSnapshot(sub1);
            hdfs.CreateSnapshot(sub1, "s4");
            // Add a new file /TestSnapshot/sub1/file3
            Path file3 = new Path(sub1, "file3");

            DFSTestUtil.CreateFile(hdfs, file3, 1024, Replication, seed);
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s4;
            {
                // Check the inodes for /TestSnapshot/sub1/.snapshot/s4/file3
                string       snapshotPath = sub1.ToString() + "/.snapshot/s4/file3";
                string[]     names        = INode.GetPathNames(snapshotPath);
                byte[][]     components   = INode.GetPathComponents(names);
                INodesInPath nodesInPath  = INodesInPath.Resolve(fsdir.rootDir, components, false);
                // Length of inodes should be (components.length - 1), since we will ignore
                // ".snapshot"
                NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length - 1);
                // The number of non-null inodes should be components.length - 2, since
                // snapshot of file3 does not exist
                NUnit.Framework.Assert.AreEqual(GetNumNonNull(nodesInPath), components.Length - 2);
                s4 = GetSnapshot(nodesInPath, "s4", 3);
                // SnapshotRootIndex should still be 3: {root, TestSnapshot, sub1, s4, null}
                AssertSnapshot(nodesInPath, true, s4, 3);
                // Check the last INode in inodes, which should be null
                NUnit.Framework.Assert.IsNull(nodesInPath.GetINode(nodesInPath.Length() - 1));
            }
            // Check the inodes for /TestSnapshot/sub1/file3
            string[]     names_1       = INode.GetPathNames(file3.ToString());
            byte[][]     components_1  = INode.GetPathComponents(names_1);
            INodesInPath nodesInPath_1 = INodesInPath.Resolve(fsdir.rootDir, components_1, false);

            // The number of inodes should be equal to components.length
            NUnit.Framework.Assert.AreEqual(nodesInPath_1.Length(), components_1.Length);
            // The returned nodesInPath should be non-snapshot
            AssertSnapshot(nodesInPath_1, false, s4, -1);
            // The last INode should be associated with file3
            NUnit.Framework.Assert.AreEqual(nodesInPath_1.GetINode(components_1.Length - 1).GetFullPathName(), file3.ToString());
            NUnit.Framework.Assert.AreEqual(nodesInPath_1.GetINode(components_1.Length - 2).GetFullPathName(), sub1.ToString());
            NUnit.Framework.Assert.AreEqual(nodesInPath_1.GetINode(components_1.Length - 3).GetFullPathName(), dir.ToString());
            hdfs.DeleteSnapshot(sub1, "s4");
            hdfs.DisallowSnapshot(sub1);
        }
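
A plausible sketch of the GetNumNonNull helper the test calls (it is not shown in this snippet); assuming it mirrors the Java original, it counts the leading run of resolved inodes:

        // Hypothetical reconstruction: counts the non-null inodes in the
        // resolved path, scanning from the end so trailing nulls are skipped.
        private static int GetNumNonNull(INodesInPath nodesInPath)
        {
            for (int i = nodesInPath.Length() - 1; i >= 0; i--)
            {
                if (nodesInPath.GetINode(i) != null)
                {
                    return i + 1;
                }
            }
            return 0;
        }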
Example #2
        public virtual void TestStablePositionAfterCorruptRead()
        {
            short             ReplFactor = 1;
            long              FileLength = 512L;
            HdfsConfiguration conf       = GetConfiguration(null);
            MiniDFSCluster    cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            FileSystem fs   = cluster.GetFileSystem();
            Path       path = new Path("/corrupted");

            DFSTestUtil.CreateFile(fs, path, FileLength, ReplFactor, 12345L);
            DFSTestUtil.WaitReplication(fs, path, ReplFactor);
            ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, path);
            int           blockFilesCorrupted = cluster.CorruptBlockOnDataNodes(block);

            NUnit.Framework.Assert.AreEqual("All replicas not corrupted", ReplFactor, blockFilesCorrupted
                                            );
            FSDataInputStream dis = cluster.GetFileSystem().Open(path);
            ByteBuffer        buf = ByteBuffer.AllocateDirect((int)FileLength);
            bool sawException     = false;

            try
            {
                dis.Read(buf);
            }
            catch (ChecksumException)
            {
                sawException = true;
            }
            NUnit.Framework.Assert.IsTrue(sawException);
            NUnit.Framework.Assert.AreEqual(0, buf.Position());
            NUnit.Framework.Assert.AreEqual(buf.Capacity(), buf.Limit());
            dis = cluster.GetFileSystem().Open(path);
            buf.Position(3);
            buf.Limit(25);
            sawException = false;
            try
            {
                dis.Read(buf);
            }
            catch (ChecksumException)
            {
                sawException = true;
            }
            NUnit.Framework.Assert.IsTrue(sawException);
            NUnit.Framework.Assert.AreEqual(3, buf.Position());
            NUnit.Framework.Assert.AreEqual(25, buf.Limit());
            cluster.Shutdown();
        }
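
A hedged companion sketch (not from the original suite): the invariant the two read attempts above exercise, factored into a reusable assertion. Only APIs already used in the test (Read, Position, Limit, ChecksumException) are assumed.

        // For any starting window, a read that fails with ChecksumException
        // must leave the buffer's position and limit exactly where they were.
        private static void AssertFailedReadLeavesBufferIntact(FSDataInputStream @in, ByteBuffer buf)
        {
            int posBefore = buf.Position();
            int limBefore = buf.Limit();
            bool sawException = false;
            try
            {
                @in.Read(buf);
            }
            catch (ChecksumException)
            {
                sawException = true;
            }
            NUnit.Framework.Assert.IsTrue(sawException);
            NUnit.Framework.Assert.AreEqual(posBefore, buf.Position());
            NUnit.Framework.Assert.AreEqual(limBefore, buf.Limit());
        }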
Example #3
        private LocatedBlock CreateLocatedBlockNoStorageMedia()
        {
            DatanodeInfo[] dnInfos = new DatanodeInfo[] {
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h1", DatanodeInfo.AdminStates.DecommissionInprogress),
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h2", DatanodeInfo.AdminStates.Decommissioned),
                DFSTestUtil.GetLocalDatanodeInfo("127.0.0.1", "h3", DatanodeInfo.AdminStates.Normal)
            };
            LocatedBlock lb = new LocatedBlock(new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);

            lb.SetBlockToken(new Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier>(
                                 Sharpen.Runtime.GetBytesForString("identifier"),
                                 Sharpen.Runtime.GetBytesForString("password"),
                                 new Text("kind"), new Text("service")));
            return(lb);
        }
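
A hedged usage sketch: a builder like this typically feeds a protobuf round-trip check. PBHelper.Convert and the Compare helper are assumptions modeled on the Java PBHelper test, not part of this snippet.

        // Convert the LocatedBlock to its protobuf form and back, then compare
        // field by field (Compare is a hypothetical comparison helper).
        public virtual void TestConvertLocatedBlockNoStorageMedia()
        {
            LocatedBlock lb = CreateLocatedBlockNoStorageMedia();
            HdfsProtos.LocatedBlockProto lbProto = PBHelper.Convert(lb);
            LocatedBlock lb2 = PBHelper.Convert(lbProto);
            Compare(lb, lb2);
        }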
        public virtual void TestWriteNewFile()
        {
            OutputStream fos = new AtomicFileOutputStream(DstFile);

            NUnit.Framework.Assert.IsFalse(DstFile.Exists());
            fos.Write(Sharpen.Runtime.GetBytesForString(TestString));
            fos.Flush();
            NUnit.Framework.Assert.IsFalse(DstFile.Exists());
            fos.Close();
            NUnit.Framework.Assert.IsTrue(DstFile.Exists());
            string readBackData = DFSTestUtil.ReadFile(DstFile);

            NUnit.Framework.Assert.AreEqual(TestString, readBackData);
        }
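
A hedged companion sketch for the atomic-write contract shown above: if the writer aborts instead of closing, the destination must never appear. Abort() is assumed to exist on AtomicFileOutputStream, mirroring the Java class.

        public virtual void TestAbortLeavesNoFile()
        {
            AtomicFileOutputStream fos = new AtomicFileOutputStream(DstFile);
            fos.Write(Sharpen.Runtime.GetBytesForString(TestString));
            fos.Abort();
            // the temporary file is discarded; DstFile was never created
            NUnit.Framework.Assert.IsFalse(DstFile.Exists());
        }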
Example #5
        public virtual void TestUnbufferClosesSockets()
        {
            Configuration conf = new Configuration();

            // Set a new ClientContext.  This way, we will have our own PeerCache,
            // rather than sharing one with other unit tests.
            conf.Set(DFSConfigKeys.DfsClientContext, "testUnbufferClosesSocketsContext");
            // Disable short-circuit reads.  With short-circuit, we wouldn't hold open a
            // TCP socket.
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, false);
            // Set a really long socket timeout to avoid test timing issues.
            conf.SetLong(DFSConfigKeys.DfsClientSocketTimeoutKey, 100000000L);
            conf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, 100000000L);
            MiniDFSCluster    cluster = null;
            FSDataInputStream stream  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                DistributedFileSystem dfs = (DistributedFileSystem)FileSystem.NewInstance(conf);
                Path TestPath             = new Path("/test1");
                DFSTestUtil.CreateFile(dfs, TestPath, 128, (short)1, 1);
                stream = dfs.Open(TestPath);
                // Read a byte.  This will trigger the creation of a block reader.
                stream.Seek(2);
                int b = stream.Read();
                NUnit.Framework.Assert.IsTrue(-1 != b);
                // The Peer cache should start off empty.
                PeerCache cache = dfs.GetClient().GetClientContext().GetPeerCache();
                NUnit.Framework.Assert.AreEqual(0, cache.Size());
                // Unbuffer should clear the block reader and return the socket to the
                // cache.
                stream.Unbuffer();
                stream.Seek(2);
                NUnit.Framework.Assert.AreEqual(1, cache.Size());
                int b2 = stream.Read();
                NUnit.Framework.Assert.AreEqual(b, b2);
            }
            finally
            {
                if (stream != null)
                {
                    IOUtils.Cleanup(null, stream);
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
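
A hedged usage sketch of the pattern this test validates: long-lived readers call Unbuffer() between read bursts so block readers are dropped and idle sockets are parked in the PeerCache. The readBurst callback is a hypothetical stand-in for real work.

        private static void ReadWithUnbuffer(FSDataInputStream stream, Action <FSDataInputStream> readBurst, int bursts)
        {
            for (int i = 0; i < bursts; i++)
            {
                readBurst(stream);   // positioned reads re-create a block reader
                stream.Unbuffer();   // release it (and the socket) until the next burst
            }
        }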
Example #6
        /// <exception cref="System.Exception"/>
        public virtual void TestDatanodeReRegistration()
        {
            // Create a test file
            DistributedFileSystem dfs = cluster.GetFileSystem();
            Path path = new Path("/testRR");

            // Create a file and shutdown the DNs, which populates InvalidateBlocks
            DFSTestUtil.CreateFile(dfs, path, dfs.GetDefaultBlockSize(), (short)NumOfDatanodes, unchecked ((int)(0xED0ED0)));
            foreach (DataNode dn in cluster.GetDataNodes())
            {
                dn.Shutdown();
            }
            dfs.Delete(path, false);
            namesystem.WriteLock();
            InvalidateBlocks invalidateBlocks;
            int expected = NumOfDatanodes;

            try
            {
                invalidateBlocks = (InvalidateBlocks)Whitebox.GetInternalState(
                    cluster.GetNamesystem().GetBlockManager(), "invalidateBlocks");
                NUnit.Framework.Assert.AreEqual("Expected invalidate blocks to be the number of DNs",
                                                (long)expected, invalidateBlocks.NumBlocks());
            }
            finally
            {
                namesystem.WriteUnlock();
            }
            // Re-register each DN and see that it wipes the invalidation work
            foreach (DataNode dn_1 in cluster.GetDataNodes())
            {
                DatanodeID           did = dn_1.GetDatanodeId();
                DatanodeRegistration reg = new DatanodeRegistration(
                    new DatanodeID(UUID.RandomUUID().ToString(), did),
                    new StorageInfo(HdfsServerConstants.NodeType.DataNode),
                    new ExportedBlockKeys(), VersionInfo.GetVersion());
                namesystem.WriteLock();
                try
                {
                    bm.GetDatanodeManager().RegisterDatanode(reg);
                    expected--;
                    NUnit.Framework.Assert.AreEqual("Expected number of invalidate blocks to decrease"
                                                    , (long)expected, invalidateBlocks.NumBlocks());
                }
                finally
                {
                    namesystem.WriteUnlock();
                }
            }
        }
Example #7
        /// <exception cref="System.Exception"/>
        private void TestBadBlockReportOnTransfer(bool corruptBlockByDeletingBlockFile)
        {
            Configuration  conf         = new HdfsConfiguration();
            FileSystem     fs           = null;
            DFSClient      dfsClient    = null;
            LocatedBlocks  blocks       = null;
            int            replicaCount = 0;
            short          replFactor   = 1;
            MiniDFSCluster cluster      = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            cluster.WaitActive();
            fs        = cluster.GetFileSystem();
            dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), conf);
            // Create file with replication factor of 1
            Path file1 = new Path("/tmp/testBadBlockReportOnTransfer/file1");

            DFSTestUtil.CreateFile(fs, file1, 1024, replFactor, 0);
            DFSTestUtil.WaitReplication(fs, file1, replFactor);
            // Corrupt the block belonging to the created file
            ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, file1);
            int blockFilesCorrupted = corruptBlockByDeletingBlockFile
                ? cluster.CorruptBlockOnDataNodesByDeletingBlockFile(block)
                : cluster.CorruptBlockOnDataNodes(block);

            NUnit.Framework.Assert.AreEqual("Corrupted too few blocks", replFactor, blockFilesCorrupted);
            // Increase replication factor, this should invoke transfer request
            // Receiving datanode fails on checksum and reports it to namenode
            replFactor = 2;
            fs.SetReplication(file1, replFactor);
            // Now get block details and check if the block is corrupt
            blocks = dfsClient.GetNamenode().GetBlockLocations(file1.ToString(), 0, long.MaxValue);
            while (!blocks.Get(0).IsCorrupt())
            {
                try
                {
                    Log.Info("Waiting until block is marked as corrupt...");
                    Sharpen.Thread.Sleep(1000);
                }
                catch (Exception)
                {
                }
                blocks = dfsClient.GetNamenode().GetBlockLocations(file1.ToString(), 0, long.MaxValue);
            }
            replicaCount = blocks.Get(0).GetLocations().Length;
            NUnit.Framework.Assert.IsTrue(replicaCount == 1);
            cluster.Shutdown();
        }
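
A hedged refactoring sketch: the sleep-poll loop above, factored into a helper. Only calls already present in the test (GetBlockLocations, IsCorrupt, Sharpen.Thread.Sleep) are assumed.

        private static LocatedBlocks WaitUntilFirstBlockCorrupt(DFSClient client, Path file)
        {
            LocatedBlocks lbs = client.GetNamenode().GetBlockLocations(file.ToString(), 0, long.MaxValue);
            while (!lbs.Get(0).IsCorrupt())
            {
                Sharpen.Thread.Sleep(1000);   // let the DN checksum report reach the NN
                lbs = client.GetNamenode().GetBlockLocations(file.ToString(), 0, long.MaxValue);
            }
            return lbs;
        }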
Example #8
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateRename()
        {
            Path foo = new Path(dir, "foo");

            dfs.Mkdirs(foo);
            Path createdFile1foo = new Path(foo, "created_file1.data");
            Path bar             = new Path(dir, "bar");

            dfs.Mkdirs(bar);
            Path createdFile1bar = new Path(bar, "created_file1.data");

            // set storage policy on directory "foo" and "bar" to ONESSD
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetStoragePolicy(bar, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 4);
            dfs.SetQuotaByStorageType(bar, StorageType.Ssd, Blocksize * 2);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 3 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 3;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1foo, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify space consumed and remaining quota
            long ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                   .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // move file from foo to bar
            try
            {
                dfs.Rename(createdFile1foo, createdFile1bar);
                NUnit.Framework.Assert.Fail("Should have failed with QuotaByStorageTypeExceededException "
                                            );
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
            }
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), file1Len);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), file1Len * 2);
        }
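
A note on the final numbers (assuming Replication = 3, as the DISK figure implies): the ONESSD policy places one replica of each block on SSD and the remaining Replication - 1 on DISK, so SpaceConsumed = file1Len * 3, SSD = file1Len, and DISK = file1Len * 2. Because the rename into bar fails (bar's SSD quota of 2 blocks cannot hold the 3-block file), the file stays charged against foo's quota, which is what GetContentSummary(foo) confirms.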
Example #9
        /// <summary>
        /// Test if the quota can be correctly updated for create file even
        /// QuotaByStorageTypeExceededException is thrown
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeExceptionWithFileCreate()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 4);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create the 1st file of size 2 * BLOCKSIZE under directory "foo" and expect no exception
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            long currentSSDConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                          .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, currentSSDConsumed);
            // Create the 2nd file of size 1.5 * BLOCKSIZE under directory "foo" and expect no exception
            Path createdFile2 = new Path(foo, "created_file2.data");
            long file2Len     = Blocksize + Blocksize / 2;

            DFSTestUtil.CreateFile(dfs, createdFile2, bufLen, file2Len, Blocksize, Replication, seed);
            currentSSDConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                     .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(file1Len + file2Len, currentSSDConsumed);
            // Create the 3rd file of size BLOCKSIZE under directory "foo" and expect quota exceeded exception
            Path createdFile3 = new Path(foo, "created_file3.data");
            long file3Len     = Blocksize;

            try
            {
                DFSTestUtil.CreateFile(dfs, createdFile3, bufLen, file3Len, Blocksize, Replication, seed);
                NUnit.Framework.Assert.Fail("Should have failed with QuotaByStorageTypeExceededException");
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
                currentSSDConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                         .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
                NUnit.Framework.Assert.AreEqual(file1Len + file2Len, currentSSDConsumed);
            }
        }
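
The quota arithmetic behind the three creates: the SSD quota on foo is 4 * Blocksize. File 1 consumes 2 blocks and file 2 another 1.5, leaving 0.5; file 3 would add 1 more (3.5 + 1 = 4.5 > 4), so its creation fails. The assertion in the catch block checks the property the test is really after: the failed create must roll its partial charge back, leaving SSD consumption at exactly file1Len + file2Len.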
Example #10
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        private void ChangeBlockLen(MiniDFSCluster cluster, int lenDelta)
        {
            Path       fileName          = new Path("/file1");
            short      ReplicationFactor = (short)1;
            FileSystem fs      = cluster.GetFileSystem();
            int        fileLen = fs.GetConf().GetInt(DFSConfigKeys.DfsBytesPerChecksumKey, 512);

            DFSTestUtil.CreateFile(fs, fileName, fileLen, ReplicationFactor, 0);
            DFSTestUtil.WaitReplication(fs, fileName, ReplicationFactor);
            ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);

            // Change the length of a replica
            for (int i = 0; i < cluster.GetDataNodes().Count; i++)
            {
                if (DFSTestUtil.ChangeReplicaLength(cluster, block, i, lenDelta))
                {
                    break;
                }
            }
            // increase the file's replication factor
            fs.SetReplication(fileName, (short)(ReplicationFactor + 1));
            // block replication triggers corrupt block detection
            DFSClient dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), fs.GetConf());
            LocatedBlocks blocks = dfsClient.GetNamenode().GetBlockLocations(fileName.ToString(), 0, fileLen);

            if (lenDelta < 0)
            {
                // replica truncated
                while (!blocks.Get(0).IsCorrupt() || ReplicationFactor != blocks.Get(0).GetLocations().Length)
                {
                    Sharpen.Thread.Sleep(100);
                    blocks = dfsClient.GetNamenode().GetBlockLocations(fileName.ToString(), 0, fileLen);
                }
            }
            else
            {
                // no corruption detected; block replicated
                while (ReplicationFactor + 1 != blocks.Get(0).GetLocations().Length)
                {
                    Sharpen.Thread.Sleep(100);
                    blocks = dfsClient.GetNamenode().GetBlockLocations(fileName.ToString(), 0, fileLen);
                }
            }
            fs.Delete(fileName, true);
        }
        /// <exception cref="System.IO.IOException"/>
        private LocatedBlocks CreateFileGetBlocks(string filenamePrefix)
        {
            Path filePath = new Path("/" + filenamePrefix + ".dat");

            // Write out a file with a few blocks, get block locations.
            DFSTestUtil.CreateFile(fs, filePath, BlockSize, BlockSize * NumBlocks, BlockSize, NumDatanodes, seed);
            // Get the block list for the file with the block locations.
            LocatedBlocks blocks = client.GetLocatedBlocks(filePath.ToString(), 0, BlockSize * NumBlocks);

            Assert.AssertThat(cluster.GetNamesystem().GetUnderReplicatedBlocks(), IS.Is(0L));
            return(blocks);
        }
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        /// <exception cref="System.Exception"/>
        internal static void WriteFile(Configuration conf, Path name, short replication,
                                       int numBlocks)
        {
            FileSystem         fileSys = FileSystem.Get(conf);
            FSDataOutputStream stm = fileSys.Create(name, true, conf.GetInt("io.file.buffer.size", 4096),
                                                    replication, (long)Blocksize);

            for (int i = 0; i < numBlocks; i++)
            {
                stm.Write(databuf);
            }
            stm.Close();
            DFSTestUtil.WaitReplication(fileSys, name, replication);
        }
Example #13
        public virtual void TestEarlierVersionEditLog()
        {
            Configuration conf    = new HdfsConfiguration();
            string tarFile = Runtime.GetProperty("test.cache.data", "build/test/cache") + "/" + Hadoop10MultiblockTgz;
            string   testDir = PathUtils.GetTestDirName(GetType());
            FilePath dfsDir  = new FilePath(testDir, "image-1.0");

            if (dfsDir.Exists() && !FileUtil.FullyDelete(dfsDir))
            {
                throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
            }
            FileUtil.UnTar(new FilePath(tarFile), new FilePath(testDir));
            FilePath nameDir = new FilePath(dfsDir, "name");

            GenericTestUtils.AssertExists(nameDir);
            FilePath dataDir = new FilePath(dfsDir, "data");

            GenericTestUtils.AssertExists(dataDir);
            conf.Set(DFSConfigKeys.DfsNamenodeNameDirKey, nameDir.GetAbsolutePath());
            conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dataDir.GetAbsolutePath());
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Format(false)
                                         .ManageDataDfsDirs(false).ManageNameDfsDirs(false)
                                         .StartupOption(HdfsServerConstants.StartupOption.Upgrade).Build();

            try
            {
                FileSystem fs       = cluster.GetFileSystem();
                Path       testPath = new Path("/user/todd/4blocks");
                // Read it without caring about the actual data within - we just need
                // to make sure that the block states and locations are OK.
                DFSTestUtil.ReadFile(fs, testPath);
                // Ensure that we can append to it - if the blocks were in some funny
                // state we'd get some kind of issue here.
                FSDataOutputStream stm = fs.Append(testPath);
                try
                {
                    stm.Write(1);
                }
                finally
                {
                    IOUtils.CloseStream(stm);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        /// <summary>
        /// The corrupt block has to be removed when the number of valid replicas
        /// matches replication factor for the file.
        /// </summary>
        /// <remarks>
        /// The corrupt block has to be removed when the number of valid replicas
        /// matches replication factor for the file. The above condition should hold
        /// true as long as there is one good replica. This test verifies that.
        /// The test strategy :
        /// Bring up Cluster with 2 DataNodes
        /// Create a file of replication factor 2
        /// Corrupt one replica of a block of the file
        /// Verify that there is one good replica and 1 corrupt replica
        /// (corrupt replica should not be removed since number of good
        /// replicas (1) is less than replication factor (2)).
        /// Set the replication factor to 1
        /// Verify that the corrupt replica is removed.
        /// (corrupt replica should be removed since number of good
        /// replicas (1) is equal to replication factor (1))
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestWithReplicationFactorAsOne()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString(2));
            MiniDFSCluster cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs         = cluster.GetFileSystem();
            FSNamesystem   namesystem = cluster.GetNamesystem();

            try
            {
                Path fileName = new Path("/foo1");
                DFSTestUtil.CreateFile(fs, fileName, 2, (short)2, 0L);
                DFSTestUtil.WaitReplication(fs, fileName, (short)2);
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);
                CorruptBlock(cluster, fs, fileName, 0, block);
                DFSTestUtil.WaitReplication(fs, fileName, (short)1);
                NUnit.Framework.Assert.AreEqual(1, CountReplicas(namesystem, block).LiveReplicas());
                NUnit.Framework.Assert.AreEqual(1, CountReplicas(namesystem, block).CorruptReplicas());
                namesystem.SetReplication(fileName.ToString(), (short)1);
                // wait up to 10 seconds so that all block reports are processed.
                for (int i = 0; i < 10; i++)
                {
                    try
                    {
                        Sharpen.Thread.Sleep(1000);
                    }
                    catch (Exception)
                    {
                    }
                    if (CountReplicas(namesystem, block).CorruptReplicas() == 0)
                    {
                        break;
                    }
                }
                NUnit.Framework.Assert.AreEqual(1, CountReplicas(namesystem, block).LiveReplicas());
                NUnit.Framework.Assert.AreEqual(0, CountReplicas(namesystem, block).CorruptReplicas());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #15
        /// <summary>Test the fsimage saving/loading while file appending.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSaveLoadImageWithAppending()
        {
            Path sub1      = new Path(dir, "sub1");
            Path sub1file1 = new Path(sub1, "sub1file1");
            Path sub1file2 = new Path(sub1, "sub1file2");

            DFSTestUtil.CreateFile(hdfs, sub1file1, Blocksize, Replication, seed);
            DFSTestUtil.CreateFile(hdfs, sub1file2, Blocksize, Replication, seed);
            // 1. create snapshot s0
            hdfs.AllowSnapshot(dir);
            hdfs.CreateSnapshot(dir, "s0");
            // 2. create snapshot s1 before appending sub1file1 finishes
            HdfsDataOutputStream @out = AppendFileWithoutClosing(sub1file1, Blocksize);

            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            // also append sub1file2
            DFSTestUtil.AppendFile(hdfs, sub1file2, Blocksize);
            hdfs.CreateSnapshot(dir, "s1");
            @out.Close();
            // 3. create snapshot s2 before appending finishes
            @out = AppendFileWithoutClosing(sub1file1, Blocksize);
            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            hdfs.CreateSnapshot(dir, "s2");
            @out.Close();
            // 4. save fsimage before appending finishes
            @out = AppendFileWithoutClosing(sub1file1, Blocksize);
            @out.Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
            // dump fsdir
            FilePath fsnBefore = DumpTree2File("before");
            // save the namesystem to a temp file
            FilePath imageFile = SaveFSImageToTempFile();

            // 5. load fsimage and compare
            // first restart the cluster, and format the cluster
            @out.Close();
            cluster.Shutdown();
            cluster = new MiniDFSCluster.Builder(conf).Format(true).NumDataNodes(Replication).Build();
            cluster.WaitActive();
            fsn  = cluster.GetNamesystem();
            hdfs = cluster.GetFileSystem();
            // then load the fsimage
            LoadFSImageFromTempFile(imageFile);
            // dump the fsdir tree again
            FilePath fsnAfter = DumpTree2File("after");

            // compare two dumped tree
            SnapshotTestHelper.CompareDumpedTreeInFile(fsnBefore, fsnAfter, true);
        }
Example #16
        public virtual void TestMultipleVolFailuresOnNode()
        {
            // Reinitialize the cluster, configured with 4 storage locations per DataNode
            // and tolerating up to 2 failures.
            TearDown();
            InitCluster(3, 4, 2);
            // Calculate the total capacity of all the datanodes. Sleep for three seconds
            // to be sure the datanodes have had a chance to heartbeat their capacities.
            Sharpen.Thread.Sleep(WaitForHeartbeats);
            DatanodeManager dm = cluster.GetNamesystem().GetBlockManager().GetDatanodeManager();
            long     origCapacity = DFSTestUtil.GetLiveDatanodeCapacity(dm);
            long     dnCapacity   = DFSTestUtil.GetDatanodeCapacity(dm, 0);
            FilePath dn1Vol1      = new FilePath(dataDir, "data" + (4 * 0 + 1));
            FilePath dn1Vol2      = new FilePath(dataDir, "data" + (4 * 0 + 2));
            FilePath dn2Vol1      = new FilePath(dataDir, "data" + (4 * 1 + 1));
            FilePath dn2Vol2      = new FilePath(dataDir, "data" + (4 * 1 + 2));

            // Make the first two volume directories on the first two datanodes
            // non-accessible.
            DataNodeTestUtils.InjectDataDirFailure(dn1Vol1, dn1Vol2, dn2Vol1, dn2Vol2);
            // Create file1 and wait for 3 replicas (ie all DNs can still store a block).
            // Then assert that all DNs are up, despite the volume failures.
            Path file1 = new Path("/test1");

            DFSTestUtil.CreateFile(fs, file1, 1024, (short)3, 1L);
            DFSTestUtil.WaitReplication(fs, file1, (short)3);
            AList <DataNode> dns = cluster.GetDataNodes();

            NUnit.Framework.Assert.IsTrue("DN1 should be up", dns[0].IsDatanodeUp());
            NUnit.Framework.Assert.IsTrue("DN2 should be up", dns[1].IsDatanodeUp());
            NUnit.Framework.Assert.IsTrue("DN3 should be up", dns[2].IsDatanodeUp());
            CheckFailuresAtDataNode(dns[0], 1, true, dn1Vol1.GetAbsolutePath(), dn1Vol2.GetAbsolutePath());
            CheckFailuresAtDataNode(dns[1], 1, true, dn2Vol1.GetAbsolutePath(), dn2Vol2.GetAbsolutePath());
            CheckFailuresAtDataNode(dns[2], 0, true);
            // Ensure we wait a sufficient amount of time
            System.Diagnostics.Debug.Assert((WaitForHeartbeats * 10) > WaitForDeath);
            // Eventually the NN should report four volume failures
            DFSTestUtil.WaitForDatanodeStatus(dm, 3, 0, 4, origCapacity - (1 * dnCapacity), WaitForHeartbeats);
            CheckAggregateFailuresAtNameNode(true, 4);
            CheckFailuresAtNameNode(dm, dns[0], true, dn1Vol1.GetAbsolutePath(), dn1Vol2.GetAbsolutePath());
            CheckFailuresAtNameNode(dm, dns[1], true, dn2Vol1.GetAbsolutePath(), dn2Vol2.GetAbsolutePath());
            CheckFailuresAtNameNode(dm, dns[2], true);
        }
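
A note on the "data" + (4 * i + v) expressions: MiniDFSCluster names its instance storage directories data1, data2, ... in order, with storagesPerDatanode (here 4) consecutive directories per DataNode, so DataNode i's volume v is directory data(4 * i + v). Failing data1/data2 and data5/data6 therefore knocks out exactly two of four volumes on each of the first two DataNodes, within the tolerated-failure limit of 2 set by InitCluster(3, 4, 2).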
Example #17
        /// <summary>TC7: Corrupted replicas are present.</summary>
        /// <exception cref="System.IO.IOException">an exception might be thrown</exception>
        /// <exception cref="System.Exception"/>
        private void TestTC7(bool appendToNewBlock)
        {
            short repl = 2;
            Path  p    = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file with replication factor of 2. Write half block of data. Close file.
            int len1 = (int)(BlockSize / 2);

            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, repl, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            DFSTestUtil.WaitReplication(fs, p, repl);
            //b. Log into one datanode that has one replica of this block.
            //   Find the block file on this datanode and truncate it to zero size.
            LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(p.ToString(), 0L, len1);

            NUnit.Framework.Assert.AreEqual(1, locatedblocks.LocatedBlockCount());
            LocatedBlock  lb  = locatedblocks.Get(0);
            ExtendedBlock blk = lb.GetBlock();

            NUnit.Framework.Assert.AreEqual(len1, lb.GetBlockSize());
            DatanodeInfo[] datanodeinfos = lb.GetLocations();
            NUnit.Framework.Assert.AreEqual(repl, datanodeinfos.Length);
            DataNode dn = cluster.GetDataNode(datanodeinfos[0].GetIpcPort());
            FilePath f = DataNodeTestUtils.GetBlockFile(dn, blk.GetBlockPoolId(), blk.GetLocalBlock());
            RandomAccessFile raf = new RandomAccessFile(f, "rw");

            AppendTestUtil.Log.Info("dn=" + dn + ", blk=" + blk + " (length=" + blk.GetNumBytes() + ")");
            NUnit.Framework.Assert.AreEqual(len1, raf.Length());
            raf.SetLength(0);
            raf.Close();
            //c. Open file in "append mode".  Append a new block worth of data. Close file.
            int len2 = (int)BlockSize;

            {
                FSDataOutputStream @out = appendToNewBlock
                    ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
                    : fs.Append(p);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //d. Reopen file and read two blocks worth of data.
            AppendTestUtil.Check(fs, p, len1 + len2);
        }
Example #18
        public virtual void TestReplDueToNodeFailRespectsRackPolicy()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 3;
            Path          filePath          = new Path("/testFile");

            // Last datanode is on a different rack
            string[] racks = new string[] { "/rack1", "/rack1", "/rack1", "/rack2", "/rack2" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(racks.Length).Racks(racks).Build();
            FSNamesystem    ns = cluster.GetNameNode().GetNamesystem();
            DatanodeManager dm = ns.GetBlockManager().GetDatanodeManager();

            try
            {
                // Create a file with one block with a replication factor of 3
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
                // Make the last datanode look like it failed to heartbeat by
                // calling removeDatanode and stopping it.
                AList <DataNode> datanodes = cluster.GetDataNodes();
                int        idx             = datanodes.Count - 1;
                DataNode   dataNode        = datanodes[idx];
                DatanodeID dnId            = dataNode.GetDatanodeId();
                cluster.StopDataNode(idx);
                dm.RemoveDatanode(dnId);
                // The block should still have sufficient # replicas, across racks.
                // The last node may not have contained a replica, but if it did
                // it should have been replicated within the same rack.
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
                // Fail the last datanode again, it's also on rack2 so there is
                // only 1 rack for all the replicas
                datanodes = cluster.GetDataNodes();
                idx       = datanodes.Count - 1;
                dataNode  = datanodes[idx];
                dnId      = dataNode.GetDatanodeId();
                cluster.StopDataNode(idx);
                dm.RemoveDatanode(dnId);
                // Make sure we have enough live replicas even though we are
                // short one rack and therefore need one replica
                DFSTestUtil.WaitForReplication(cluster, b, 1, ReplicationFactor, 1);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #19
 public virtual void SetUp()
 {
     conf = new Configuration();
     conf.SetInt(DFSConfigKeys.DfsNamenodeMaxXattrsPerInodeKey, 2);
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(Replication).Build();
     cluster.WaitActive();
     fsn   = cluster.GetNamesystem();
     fsdir = fsn.GetFSDirectory();
     hdfs  = cluster.GetFileSystem();
     DFSTestUtil.CreateFile(hdfs, file1, 1024, Replication, seed);
     DFSTestUtil.CreateFile(hdfs, file2, 1024, Replication, seed);
     DFSTestUtil.CreateFile(hdfs, file3, 1024, Replication, seed);
     DFSTestUtil.CreateFile(hdfs, file5, 1024, Replication, seed);
     hdfs.Mkdirs(sub2);
 }
Example #20
 /// <summary>Create files and add them in the fileList.</summary>
 /// <remarks>
 /// Create files and add them in the fileList. Initially the last element
 /// in the fileList is set to null (where we start file creation).
 /// </remarks>
 /// <exception cref="System.Exception"/>
 internal virtual void InitFileList(FileSystem fs, string namePrefix, long fileLen,
                                    short replication, long seed, int numFiles)
 {
     fileList = new AList <Path>(numFiles);
     for (int i = 0; i < numFiles; i++)
     {
         Path file = new Path(nodePath, namePrefix + "-f" + i);
         fileList.AddItem(file);
         if (i < numFiles - 1)
         {
             DFSTestUtil.CreateFile(fs, file, fileLen, replication, seed);
         }
     }
     nullFileIndex = numFiles - 1;
 }
        /// <summary>
        /// Run file operations to create edits for all op codes
        /// to be tested.
        /// </summary>
        /// <remarks>
        /// Run file operations to create edits for all op codes
        /// to be tested.
        /// The following op codes are deprecated and therefore not tested:
        /// OP_DATANODE_ADD    ( 5)
        /// OP_DATANODE_REMOVE ( 6)
        /// OP_SET_NS_QUOTA    (11)
        /// OP_CLEAR_NS_QUOTA  (12)
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        private CheckpointSignature RunOperations()
        {
            Log.Info("Creating edits by performing fs operations");
            // No explicit check: if this is not a DistributedFileSystem an exception is thrown, which is what we want
            DistributedFileSystem dfs = cluster.GetFileSystem();

            DFSTestUtil.RunOperations(cluster, dfs, cluster.GetConfiguration(0), dfs.GetDefaultBlockSize(), 0);
            // OP_ROLLING_UPGRADE_START
            cluster.GetNamesystem().GetEditLog().LogStartRollingUpgrade(Time.Now());
            // OP_ROLLING_UPGRADE_FINALIZE
            cluster.GetNamesystem().GetEditLog().LogFinalizeRollingUpgrade(Time.Now());
            // Force a roll so we get an OP_END_LOG_SEGMENT txn
            return(cluster.GetNameNodeRpc().RollEditLog());
        }
Example #22
 public void Run()
 {
     try
     {
         byte[] contents = DFSTestUtil.ReadFileBuffer(dfs, new Path(TestFile));
         NUnit.Framework.Assert.IsFalse(creationIsBlocked.Get());
         byte[] expected = DFSTestUtil.CalculateFileContentsFromSeed(Seed, TestFileLen);
         NUnit.Framework.Assert.IsTrue(Arrays.Equals(contents, expected));
     }
     catch (Exception e)
     {
         TestBlockReaderFactory.Log.Error("readerRunnable error", e);
         testFailed.Set(true);
     }
 }
Example #23
        public virtual void TestDeserializeHAToken()
        {
            Configuration conf = DFSTestUtil.NewHAConfiguration(LogicalName);

            Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token =
                new Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier>();
            QueryStringDecoder decoder = new QueryStringDecoder(
                WebHdfsHandler.WebhdfsPrefix + "/?" + NamenodeAddressParam.Name + "=" + LogicalName
                + "&" + DelegationParam.Name + "=" + token.EncodeToUrlString());
            ParameterParser testParser = new ParameterParser(decoder, conf);

            Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> tok2 = testParser.DelegationToken();
            NUnit.Framework.Assert.IsTrue(HAUtil.IsTokenForLogicalUri(tok2));
        }
Example #24
 /// <summary>Tests DataTransferProtocol with the given client configuration.</summary>
 /// <param name="conf">client configuration</param>
 /// <exception cref="System.IO.IOException">if there is an I/O error</exception>
 private void DoTest(HdfsConfiguration conf)
 {
     fs = FileSystem.Get(cluster.GetURI(), conf);
     FileSystemTestHelper.CreateFile(fs, Path, NumBlocks, BlockSize);
     Assert.AssertArrayEquals(FileSystemTestHelper.GetFileData(NumBlocks, BlockSize),
                              Sharpen.Runtime.GetBytesForString(DFSTestUtil.ReadFile(fs, Path), "UTF-8"));
     BlockLocation[] blockLocations = fs.GetFileBlockLocations(Path, 0, long.MaxValue);
     NUnit.Framework.Assert.IsNotNull(blockLocations);
     NUnit.Framework.Assert.AreEqual(NumBlocks, blockLocations.Length);
     foreach (BlockLocation blockLocation in blockLocations)
     {
         NUnit.Framework.Assert.IsNotNull(blockLocation.GetHosts());
         NUnit.Framework.Assert.AreEqual(3, blockLocation.GetHosts().Length);
     }
 }
Example #25
        public virtual void TestUseDelHint()
        {
            DatanodeStorageInfo delHint = new DatanodeStorageInfo(
                DFSTestUtil.GetLocalDatanodeDescriptor(), new DatanodeStorage("id"));
            IList <DatanodeStorageInfo> moreThan1Racks = Arrays.AsList(delHint);
            IList <StorageType> excessTypes = new AList <StorageType>();

            excessTypes.AddItem(StorageType.Default);
            NUnit.Framework.Assert.IsTrue(BlockManager.UseDelHint(true, delHint, null, moreThan1Racks, excessTypes));
            excessTypes.Remove(0);
            excessTypes.AddItem(StorageType.Ssd);
            NUnit.Framework.Assert.IsFalse(BlockManager.UseDelHint(true, delHint, null, moreThan1Racks, excessTypes));
        }
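
Reading the pair of asserts: UseDelHint only honors a deletion hint whose storage type is actually in excess. The hinted storage here defaults to DISK (StorageType.Default), so the hint is accepted while excessTypes contains Default and rejected once it is swapped for Ssd. This assumes the Sharpen port mirrors the Java useDelHint logic.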
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        /// <exception cref="System.Exception"/>
        private void CreateInputs(FileSystem fs, Path inDir, string fileName)
        {
            // create a multi-block file on hdfs
            Path             path        = new Path(inDir, fileName);
            short            replication = 2;
            DataOutputStream @out        = fs.Create(path, true, 4096, replication, 512, null);

            for (int i = 0; i < 1000; ++i)
            {
                @out.WriteChars("Hello\n");
            }
            @out.Close();
            System.Console.Out.WriteLine("Wrote file");
            DFSTestUtil.WaitReplication(fs, path, replication);
        }
Example #27
        /// <exception cref="System.IO.IOException"/>
        private static void RunTest(string testCaseName, bool createFiles, int numInitialStorages,
                                    int expectedStoragesAfterTest)
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1)
                              .StoragesPerDatanode(numInitialStorages).Build();
                cluster.WaitActive();
                DataNode dn0 = cluster.GetDataNodes()[0];
                // Ensure NN knows about the storage.
                DatanodeID         dnId         = dn0.GetDatanodeId();
                DatanodeDescriptor dnDescriptor = cluster.GetNamesystem().GetBlockManager()
                                                      .GetDatanodeManager().GetDatanode(dnId);
                Assert.AssertThat(dnDescriptor.GetStorageInfos().Length, IS.Is(numInitialStorages));
                string bpid = cluster.GetNamesystem().GetBlockPoolId();
                DatanodeRegistration dnReg = dn0.GetDNRegistrationForBP(bpid);
                DataNodeTestUtils.TriggerBlockReport(dn0);
                if (createFiles)
                {
                    Path path = new Path("/", testCaseName);
                    DFSTestUtil.CreateFile(cluster.GetFileSystem(), path, 1024, (short)1, unchecked ((int)(0x1BAD5EED)));
                    DataNodeTestUtils.TriggerBlockReport(dn0);
                }
                // Generate a fake StorageReport that is missing one storage.
                StorageReport[] reports       = dn0.GetFSDataset().GetStorageReports(bpid);
                StorageReport[] prunedReports = new StorageReport[numInitialStorages - 1];
                System.Array.Copy(reports, 0, prunedReports, 0, prunedReports.Length);
                // Stop the DataNode and send fake heartbeat with missing storage.
                cluster.StopDataNode(0);
                cluster.GetNameNodeRpc().SendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0, 0, null);
                // Check that the missing storage was pruned.
                Assert.AssertThat(dnDescriptor.GetStorageInfos().Length, IS.Is(expectedStoragesAfterTest));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #28
        public virtual void TestAppendWhileInSafeMode()
        {
            Banner("Starting with NN0 active and NN1 standby, creating some blocks");
            // Make 4.5 blocks so that append() will re-open an existing block
            // instead of just adding a new one
            DFSTestUtil.CreateFile(fs, new Path("/test"), 4 * BlockSize + BlockSize / 2, (short
                                                                                          )3, 1L);
            // Roll edit log so that, when the SBN restarts, it will load
            // the namespace during startup.
            nn0.GetRpcServer().RollEditLog();
            Banner("Restarting standby");
            RestartStandby();
            // It will initially have all of the blocks necessary.
            AssertSafeMode(nn1, 5, 5, 3, 0);
            // Append to a block while SBN is in safe mode. This should
            // not affect safemode initially, since the DN message
            // will get queued.
            FSDataOutputStream stm = fs.Append(new Path("/test"));

            try
            {
                AssertSafeMode(nn1, 5, 5, 3, 0);
                // if we roll edits now, the SBN should see that it's under construction
                // and change its total count and safe count down by one, since UC
                // blocks are not counted by safe mode.
                HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
                AssertSafeMode(nn1, 4, 4, 3, 0);
            }
            finally
            {
                IOUtils.CloseStream(stm);
            }
            // Delete those blocks while the SBN is in safe mode.
            // This will not ACK the deletions to the SBN, so it won't
            // notice until we roll the edit log.
            Banner("Removing the blocks without rolling the edit log");
            fs.Delete(new Path("/test"), true);
            BlockManagerTestUtil.ComputeAllPendingWork(nn0.GetNamesystem().GetBlockManager());
            Banner("Triggering deletions on DNs and Deletion Reports");
            cluster.TriggerHeartbeats();
            HATestUtil.WaitForDNDeletions(cluster);
            cluster.TriggerDeletionReports();
            AssertSafeMode(nn1, 4, 4, 3, 0);
            // When we roll the edit log, the deletions will go through.
            Banner("Waiting for standby to catch up to active namespace");
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            AssertSafeMode(nn1, 0, 0, 3, 0);
        }
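
The block arithmetic: a file of 4 * BlockSize + BlockSize / 2 occupies five blocks (four full plus one partial), so the standby initially tracks 5 safe out of 5 total. Re-opening the file for append puts the last block under construction, and safe mode does not count under-construction blocks, hence 4/4 once the edit roll makes the append visible, and 0/0 after the delete propagates.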
Example #29
        /// <exception cref="System.Exception"/>
        private void DoTestOneOfTwoRacksDecommissioned(int testIndex)
        {
            // Block originally on A1, A2, B1
            IList <DatanodeStorageInfo> origStorages = GetStorages(0, 1, 3);
            IList <DatanodeDescriptor>  origNodes    = GetNodes(origStorages);
            BlockInfoContiguous         blockInfo    = AddBlockOnNodes(testIndex, origNodes);
            // Decommission all of the nodes in rack A
            IList <DatanodeDescriptor> decomNodes = StartDecommission(0, 1, 2);

            DatanodeStorageInfo[] pipeline = ScheduleSingleReplication(blockInfo);
            NUnit.Framework.Assert.IsTrue("Source of replication should be one of the nodes the block "
                                          + "was on. Was: " + pipeline[0], origStorages.Contains(pipeline[0]));
            // Only up to two nodes can be picked per rack when there are two racks.
            NUnit.Framework.Assert.AreEqual("Should have two targets", 2, pipeline.Length);
            bool foundOneOnRackB = false;

            for (int i = 1; i < pipeline.Length; i++)
            {
                DatanodeDescriptor target = pipeline[i].GetDatanodeDescriptor();
                if (rackB.Contains(target))
                {
                    foundOneOnRackB = true;
                }
                NUnit.Framework.Assert.IsFalse(decomNodes.Contains(target));
                NUnit.Framework.Assert.IsFalse(origNodes.Contains(target));
            }
            NUnit.Framework.Assert.IsTrue("Should have at least one target on rack B. Pipeline: "
                                          + Joiner.On(",").Join(pipeline), foundOneOnRackB);
            // Mark the block as received on the target nodes in the pipeline
            FulfillPipeline(blockInfo, pipeline);
            // the block is still under-replicated. Add a new node. This should allow
            // the third off-rack replica.
            DatanodeDescriptor rackCNode = DFSTestUtil.GetDatanodeDescriptor("7.7.7.7", "/rackC");

            rackCNode.UpdateStorage(new DatanodeStorage(DatanodeStorage.GenerateUuid()));
            AddNodes(ImmutableList.Of(rackCNode));
            try
            {
                DatanodeStorageInfo[] pipeline2 = ScheduleSingleReplication(blockInfo);
                NUnit.Framework.Assert.AreEqual(2, pipeline2.Length);
                NUnit.Framework.Assert.AreEqual(rackCNode, pipeline2[1].GetDatanodeDescriptor());
            }
            finally
            {
                RemoveNode(rackCNode);
            }
        }
Example #30
        public virtual void TestDfsAdminDeleteBlockPool()
        {
            Configuration  conf    = new Configuration();
            MiniDFSCluster cluster = null;

            try
            {
                conf.Set(DFSConfigKeys.DfsNameservices, "namesServerId1,namesServerId2");
                cluster = new MiniDFSCluster.Builder(conf)
                              .NnTopology(MiniDFSNNTopology.SimpleFederatedTopology(conf.Get(DFSConfigKeys.DfsNameservices)))
                              .NumDataNodes(1).Build();
                cluster.WaitActive();
                FileSystem fs1 = cluster.GetFileSystem(0);
                FileSystem fs2 = cluster.GetFileSystem(1);
                DFSTestUtil.CreateFile(fs1, new Path("/alpha"), 1024, (short)1, 54);
                DFSTestUtil.CreateFile(fs2, new Path("/beta"), 1024, (short)1, 54);
                DataNode      dn1            = cluster.GetDataNodes()[0];
                string        bpid1          = cluster.GetNamesystem(0).GetBlockPoolId();
                string        bpid2          = cluster.GetNamesystem(1).GetBlockPoolId();
                FilePath      dn1StorageDir1 = cluster.GetInstanceStorageDir(0, 0);
                FilePath      dn1StorageDir2 = cluster.GetInstanceStorageDir(0, 1);
                Configuration nn1Conf        = cluster.GetConfiguration(0);
                nn1Conf.Set(DFSConfigKeys.DfsNameservices, "namesServerId1");
                dn1.RefreshNamenodes(nn1Conf);
                NUnit.Framework.Assert.AreEqual(1, dn1.GetAllBpOs().Length);
                DFSAdmin admin      = new DFSAdmin(nn1Conf);
                string   dn1Address = dn1.GetDatanodeId().GetIpAddr() + ":" + dn1.GetIpcPort();
                string[] args       = new string[] { "-deleteBlockPool", dn1Address, bpid2 };
                int      ret        = admin.Run(args);
                NUnit.Framework.Assert.IsFalse(0 == ret);
                VerifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
                VerifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
                string[] forceArgs = new string[] { "-deleteBlockPool", dn1Address, bpid2, "force" };
                ret = admin.Run(forceArgs);
                NUnit.Framework.Assert.AreEqual(0, ret);
                VerifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
                VerifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
                //bpid1 remains good
                VerifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
                VerifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
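
For reference, the equivalent command line to what the test drives through DFSAdmin: hdfs dfsadmin -deleteBlockPool <datanode-host:ipc_port> <blockPoolId> [force]. Without "force" the block pool directory is only removed if it is empty, which is why the first Run() returns non-zero here; with "force" the pool's directories and contents are deleted, after which the bpid1 directories are checked to confirm they survived untouched.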