Example #1
            /// <exception cref="System.Exception"/>
            public object Run()
            {
                DistributedFileSystem fs = this._enclosing.cluster.GetFileSystem();

                try
                {
                    Path ezRawEncFile = new Path(new Path(reservedRaw, zone), @base);
                    DFSTestUtil.CreateFile(fs, ezRawEncFile, len, (short)1, unchecked((int)0xFEED));
                    NUnit.Framework.Assert.Fail("access to /.reserved/raw is superuser-only operation");
                }
                catch (AccessControlException e)
                {
                    GenericTestUtils.AssertExceptionContains("Superuser privilege is required", e);
                }
                return(null);
            }
Example #2
            /// <exception cref="System.Exception"/>
            public object Run()
            {
                DistributedFileSystem fs = this._enclosing.cluster.GetFileSystem();
                Path d1 = new Path(reservedRaw, "dir1");

                try
                {
                    fs.Mkdirs(d1);
                    NUnit.Framework.Assert.Fail("access to /.reserved/raw is superuser-only operation"
                                                );
                }
                catch (AccessControlException e)
                {
                    GenericTestUtils.AssertExceptionContains("Superuser privilege is required", e);
                }
                return(null);
            }
Example #3
        public virtual void TestMkdir()
        {
            Configuration         conf    = new HdfsConfiguration();
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            DistributedFileSystem dfs     = cluster.GetFileSystem();

            try
            {
                // Create a dir in root dir, should succeed
                NUnit.Framework.Assert.IsTrue(dfs.Mkdir(new Path("/mkdir-" + Time.Now()), FsPermission.GetDefault()));
                // Create a dir when parent dir exists as a file, should fail
                IOException expectedException = null;
                string      filePath          = "/mkdir-file-" + Time.Now();
                DFSTestUtil.WriteFile(dfs, new Path(filePath), "hello world");
                try
                {
                    dfs.Mkdir(new Path(filePath + "/mkdir"), FsPermission.GetDefault());
                }
                catch (IOException e)
                {
                    expectedException = e;
                }
                NUnit.Framework.Assert.IsTrue("Create a directory when parent dir exists as file using"
                                              + " mkdir() should throw ParentNotDirectoryException ", expectedException != null &&
                                              expectedException is ParentNotDirectoryException);
                // Create a dir in a non-existent directory, should fail
                expectedException = null;
                try
                {
                    dfs.Mkdir(new Path("/non-exist/mkdir-" + Time.Now()), FsPermission.GetDefault());
                }
                catch (IOException e)
                {
                    expectedException = e;
                }
                NUnit.Framework.Assert.IsTrue("Create a directory in a non-exist parent dir using"
                                              + " mkdir() should throw FileNotFoundException ", expectedException != null &&
                                              expectedException is FileNotFoundException);
            }
            finally
            {
                dfs.Close();
                cluster.Shutdown();
            }
        }
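The contrast this test exercises is between the single-level Mkdir and the recursive Mkdirs. A minimal sketch of the difference, with illustrative paths and assuming the same dfs handle as above:

    // Mkdirs creates any missing parent directories; the single-level Mkdir
    // requires the parent to already exist and to be a directory.
    dfs.Mkdirs(new Path("/a/b/c"));                          // succeeds, creating /a and /a/b on the way
    dfs.Mkdir(new Path("/x/y"), FsPermission.GetDefault());  // throws FileNotFoundException: /x does not exist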
Example #4
        public virtual void TestAddDelegationTokensDFSApi()
        {
            UserGroupInformation  ugi   = UserGroupInformation.CreateRemoteUser("JobTracker");
            DistributedFileSystem dfs   = cluster.GetFileSystem();
            Credentials           creds = new Credentials();

            Org.Apache.Hadoop.Security.Token.Token<object>[] tokens = dfs.AddDelegationTokens("JobTracker", creds);
            NUnit.Framework.Assert.AreEqual(1, tokens.Length);
            NUnit.Framework.Assert.AreEqual(1, creds.NumberOfTokens());
            CheckTokenIdentifier(ugi, tokens[0]);
            Org.Apache.Hadoop.Security.Token.Token<object>[] tokens2 = dfs.AddDelegationTokens("JobTracker", creds);
            NUnit.Framework.Assert.AreEqual(0, tokens2.Length); // already have token
            NUnit.Framework.Assert.AreEqual(1, creds.NumberOfTokens());
        }
Example #5
        /// <exception cref="System.Exception"/>
        private void OldRename(Path src, Path dst, bool renameSucceeds, bool exception)
        {
            DistributedFileSystem fs = cluster.GetFileSystem();

            try
            {
                NUnit.Framework.Assert.AreEqual(renameSucceeds, fs.Rename(src, dst));
            }
            catch (Exception)
            {
                NUnit.Framework.Assert.IsTrue(exception);
            }
            NUnit.Framework.Assert.AreEqual(renameSucceeds, !FileContextTestHelper.Exists(fc, src));
            NUnit.Framework.Assert.AreEqual(renameSucceeds, FileContextTestHelper.Exists(fc, dst));
        }
Example #6
 /// <summary>
 /// Move hot files to warm and cold, warm files to hot and cold,
 /// and cold files to hot and warm.
 /// </summary>
 /// <exception cref="System.Exception"/>
 internal virtual void MoveAround(DistributedFileSystem dfs)
 {
     foreach (Path srcDir in map.Keys)
     {
         int i = 0;
         foreach (Path dstDir in map.Keys)
         {
             if (!srcDir.Equals(dstDir))
             {
                 Path src = new Path(srcDir, "file" + i++);
                 Path dst = new Path(dstDir, srcDir.GetName() + "2" + dstDir.GetName());
                 Log.Info("rename " + src + " to " + dst);
                 dfs.Rename(src, dst);
             }
         }
     }
 }
Example #7
        /// <summary>
        /// Check the file content, reading as user <paramref name="readingUser"/>.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        internal static void CheckFileContentDirect(URI uri, Path name, byte[] expected,
                                                    int readOffset, string readingUser, Configuration conf,
                                                    bool legacyShortCircuitFails)
        {
            // Ensure short circuit is enabled
            DistributedFileSystem fs            = GetFileSystem(readingUser, uri, conf);
            ClientContext         clientContext = ClientContext.GetFromConf(conf);

            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsTrue(clientContext.GetDisableLegacyBlockReaderLocal());
            }
            HdfsDataInputStream stm    = (HdfsDataInputStream)fs.Open(name);
            ByteBuffer          actual = ByteBuffer.AllocateDirect(expected.Length - readOffset);

            IOUtils.SkipFully(stm, readOffset);
            actual.Limit(3);
            // Read a small number of bytes first.
            int nread = stm.Read(actual);

            actual.Limit(nread + 2);
            nread += stm.Read(actual);
            // Read across chunk boundary
            actual.Limit(Math.Min(actual.Capacity(), nread + 517));
            nread += stm.Read(actual);
            CheckData(ArrayFromByteBuffer(actual), readOffset, expected, nread, "A few bytes");
            // Now read the rest of it.
            actual.Limit(actual.Capacity());
            while (actual.HasRemaining())
            {
                int nbytes = stm.Read(actual);
                if (nbytes < 0)
                {
                    throw new EOFException("End of file reached before reading fully.");
                }
                nread += nbytes;
            }
            CheckData(ArrayFromByteBuffer(actual), readOffset, expected, "Read 3");
            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsTrue(clientContext.GetDisableLegacyBlockReaderLocal());
            }
            stm.Close();
        }
Example #8
        public virtual void TestLeaseRecoveryAndAppend()
        {
            Configuration conf = new Configuration();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                Path file = new Path("/testLeaseRecovery");
                DistributedFileSystem dfs = cluster.GetFileSystem();
                // create a file with 0 bytes
                FSDataOutputStream @out = dfs.Create(file);
                @out.Hflush();
                @out.Hsync();
                // abort the original stream
                ((DFSOutputStream)@out.GetWrappedStream()).Abort();
                DistributedFileSystem newdfs = (DistributedFileSystem)FileSystem.NewInstance(cluster.GetConfiguration(0));
                // Appending to a file whose lease is held by another client should fail
                try
                {
                    newdfs.Append(file);
                    NUnit.Framework.Assert.Fail("Append to a file(lease is held by another client) should fail");
                }
                catch (RemoteException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("file lease is currently owned"));
                }
                // Lease recovery on first try should be successful
                bool recoverLease = newdfs.RecoverLease(file);
                NUnit.Framework.Assert.IsTrue(recoverLease);
                FSDataOutputStream append = newdfs.Append(file);
                append.Write(Sharpen.Runtime.GetBytesForString("test"));
                append.Close();
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                    cluster = null;
                }
            }
        }
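RecoverLease succeeding on the first try is a property of this small, freshly aborted file; in general lease recovery is asynchronous and callers poll. A hedged sketch of that pattern, reusing newdfs and file from the test above (the retry count and sleep interval are illustrative):

    // Poll RecoverLease until the NameNode reports the lease closed.
    bool recovered = false;
    for (int i = 0; i < 10 && !recovered; i++)
    {
        recovered = newdfs.RecoverLease(file);
        if (!recovered)
        {
            Sharpen.Thread.Sleep(1000);
        }
    }
    NUnit.Framework.Assert.IsTrue("lease was not recovered in time", recovered);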
Example #9
 /// <summary>Create files/directories/snapshots.</summary>
 /// <exception cref="System.Exception"/>
 internal virtual void Prepare(DistributedFileSystem dfs, short repl)
 {
     foreach (Path d in dirs)
     {
         dfs.Mkdirs(d);
     }
     foreach (Path file in files)
     {
         DFSTestUtil.CreateFile(dfs, file, fileSize, repl, 0L);
     }
     foreach (KeyValuePair<Path, IList<string>> entry in snapshotMap)
     {
         foreach (string snapshot in entry.Value)
         {
             SnapshotTestHelper.CreateSnapshot(dfs, entry.Key, snapshot);
         }
     }
 }
Example #10
 public virtual void ShutdownCluster()
 {
     if (client != null)
     {
         client.Close();
         client = null;
     }
     if (fs != null)
     {
         fs.Close();
         fs = null;
     }
     if (cluster != null)
     {
         cluster.Shutdown();
         cluster = null;
     }
 }
Example #11
        /// <summary>Get a BlockReader for the given block.</summary>
        /// <exception cref="System.IO.IOException"/>
        public static BlockReader GetBlockReader(MiniDFSCluster cluster, LocatedBlock testBlock,
                                                 int offset, int lenToRead)
        {
            IPEndPoint    targetAddr = null;
            ExtendedBlock block      = testBlock.GetBlock();

            DatanodeInfo[] nodes = testBlock.GetLocations();
            targetAddr = NetUtils.CreateSocketAddr(nodes[0].GetXferAddr());
            DistributedFileSystem fs = cluster.GetFileSystem();

            return new BlockReaderFactory(fs.GetClient().GetConf())
                   .SetInetSocketAddress(targetAddr).SetBlock(block)
                   .SetFileName(targetAddr.ToString() + ":" + block.GetBlockId())
                   .SetBlockToken(testBlock.GetBlockToken())
                   .SetStartOffset(offset).SetLength(lenToRead)
                   .SetVerifyChecksum(true).SetClientName("BlockReaderTestUtil")
                   .SetDatanodeInfo(nodes[0])
                   .SetClientCacheContext(ClientContext.GetFromConf(fs.GetConf()))
                   .SetCachingStrategy(CachingStrategy.NewDefaultStrategy())
                   .SetConfiguration(fs.GetConf())
                   .SetAllowShortCircuitLocalReads(true)
                   .SetRemotePeerFactory(new _RemotePeerFactory_196(fs))
                   .Build();
        }
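A possible caller of this helper, assuming a file has already been written to the cluster (the path and read length are illustrative, and DFSTestUtil.GetAllBlocks is the same test utility used elsewhere in this port):

    // Fetch the first located block of a test file and read its leading bytes
    // through the low-level BlockReader built by the helper above.
    DistributedFileSystem fs = cluster.GetFileSystem();
    LocatedBlock testBlock = DFSTestUtil.GetAllBlocks(fs, new Path("/testfile"))[0];
    BlockReader blockReader = GetBlockReader(cluster, testBlock, 0, 1024);
    byte[] buf = new byte[1024];
    int nread = blockReader.Read(buf, 0, buf.Length);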
Example #12
 /// <exception cref="System.Exception"/>
 private void WaitForAllReplicas(int expectedReplicaNum, Path file, DistributedFileSystem dfs)
 {
     for (int i = 0; i < 5; i++)
     {
          LocatedBlocks lbs = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0, BlockSize);
         LocatedBlock lb = lbs.Get(0);
         if (lb.GetLocations().Length >= expectedReplicaNum)
         {
             return;
         }
         else
         {
             Sharpen.Thread.Sleep(1000);
         }
     }
 }
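As written, the helper gives up silently after five attempts even if the expected replica count never appears. A stricter variant, sketched in the same style, would fail the test instead:

    // Variant that asserts if the replicas never show up within the retry budget.
    private void WaitForAllReplicasStrict(int expectedReplicaNum, Path file, DistributedFileSystem dfs)
    {
        for (int i = 0; i < 5; i++)
        {
            LocatedBlocks lbs = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0, BlockSize);
            if (lbs.Get(0).GetLocations().Length >= expectedReplicaNum)
            {
                return;
            }
            Sharpen.Thread.Sleep(1000);
        }
        NUnit.Framework.Assert.Fail("Timed out waiting for " + expectedReplicaNum + " replicas");
    }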
Example #13
        /// <summary>
        /// Is the HDFS healthy?
        /// HDFS is considered healthy if it is up and not in safemode.
        /// </summary>
        /// <param name="uri">the HDFS URI.  Note that the URI path is ignored.</param>
        /// <returns>true if HDFS is healthy; false, otherwise.</returns>
        public static bool IsHealthy(URI uri)
        {
            //check scheme
            string scheme = uri.GetScheme();

            if (!Sharpen.Runtime.EqualsIgnoreCase(HdfsConstants.HdfsUriScheme, scheme))
            {
                throw new ArgumentException("The scheme is not " + HdfsConstants.HdfsUriScheme +
                                            ", uri=" + uri);
            }
            Configuration conf = new Configuration();

            //disable FileSystem cache
            conf.SetBoolean(string.Format("fs.{0}.impl.disable.cache", scheme), true);
            //disable client retry for rpc connection and rpc calls
            conf.SetBoolean(DFSConfigKeys.DfsClientRetryPolicyEnabledKey, false);
            conf.SetInt(CommonConfigurationKeysPublic.IpcClientConnectMaxRetriesKey, 0);
            DistributedFileSystem fs = null;

            try
            {
                fs = (DistributedFileSystem)FileSystem.Get(uri, conf);
                bool safemode = fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeGet);
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Is namenode in safemode? " + safemode + "; uri=" + uri);
                }
                fs.Close();
                fs = null;
                return(!safemode);
            }
            catch (IOException e)
            {
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("Got an exception for uri=" + uri, e);
                }
                return(false);
            }
            finally
            {
                IOUtils.Cleanup(Log, fs);
            }
        }
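A minimal caller sketch (the URI below is illustrative, and the static method is assumed to live on DistributedFileSystem as in the upstream Hadoop API):

    // Probe the cluster before doing real work; an unreachable NameNode and a
    // NameNode in safemode both report as unhealthy.
    URI uri = new URI("hdfs://namenode:8020/");
    if (!DistributedFileSystem.IsHealthy(uri))
    {
        System.Console.Error.WriteLine("HDFS is not ready: " + uri);
    }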
Example #14
        /// <exception cref="System.Exception"/>
        public virtual void TestFinalize()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                FSImage fsimage = dfsCluster.GetNamesystem(0).GetFSImage();
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                dfs.Mkdirs(bar);
                QueryForPreparation(dfs);
                // The NN should have a copy of the fsimage in case of rollbacks.
                NUnit.Framework.Assert.IsTrue(fsimage.HasRollbackFSImage());
                info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Finalize);
                NUnit.Framework.Assert.IsTrue(info.IsFinalized());
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                // Once finalized, there should be no more fsimage for rollbacks.
                NUnit.Framework.Assert.IsFalse(fsimage.HasRollbackFSImage());
                // Should have no problem in restart and replaying edits that include
                // the FINALIZE op.
                dfsCluster.RestartNameNode(0);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #15
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        public virtual void TestCheckpoint()
        {
            Configuration conf = new Configuration();

            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            conf.SetInt(DFSConfigKeys.DfsNamenodeCheckpointPeriodKey, 1);
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                QueryForPreparation(dfs);
                dfs.Mkdirs(foo);
                long txid = dfs.RollEdits();
                NUnit.Framework.Assert.IsTrue(txid > 0);
                int retries = 0;
                while (++retries < 5)
                {
                    NNStorage storage = dfsCluster.GetNamesystem(1).GetFSImage().GetStorage();
                    if (storage.GetFsImageName(txid - 1) != null)
                    {
                        return;
                    }
                    Sharpen.Thread.Sleep(1000);
                }
                NUnit.Framework.Assert.Fail("new checkpoint does not exist");
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #16
        /// <summary>
        /// Check that the blocks of the dst file are cleaned up after a rename with
        /// overwrite, and restart the NN to verify that the rename succeeded.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRenameWithOverwrite()
        {
            short          replFactor = 2;
            long           blockSize  = 512;
            Configuration  conf       = new Configuration();
            MiniDFSCluster cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(replFactor).Build();
            DistributedFileSystem dfs = cluster.GetFileSystem();

            try
            {
                long   fileLen = blockSize * 3;
                string src     = "/foo/src";
                string dst     = "/foo/dst";
                Path   srcPath = new Path(src);
                Path   dstPath = new Path(dst);
                DFSTestUtil.CreateFile(dfs, srcPath, fileLen, replFactor, 1);
                DFSTestUtil.CreateFile(dfs, dstPath, fileLen, replFactor, 1);
                LocatedBlocks lbs = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), dst, 0, fileLen);
                BlockManager bm = NameNodeAdapter.GetNamesystem(cluster.GetNameNode()).GetBlockManager();
                NUnit.Framework.Assert.IsTrue(bm.GetStoredBlock(lbs.GetLocatedBlocks()[0].GetBlock().GetLocalBlock()) != null);
                dfs.Rename(srcPath, dstPath, Options.Rename.Overwrite);
                NUnit.Framework.Assert.IsTrue(bm.GetStoredBlock(lbs.GetLocatedBlocks()[0].GetBlock().GetLocalBlock()) == null);
                // Restart the NN and verify that the rename succeeded
                cluster.RestartNameNodes();
                NUnit.Framework.Assert.IsFalse(dfs.Exists(srcPath));
                NUnit.Framework.Assert.IsTrue(dfs.Exists(dstPath));
            }
            finally
            {
                if (dfs != null)
                {
                    dfs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #17
        public virtual void TestBlockMoveAcrossStorageInSameNode()
        {
            Configuration conf = new HdfsConfiguration();
            // create only one datanode in the cluster to verify movement within
            // datanode.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1)
                                         .StorageTypes(new StorageType[] { StorageType.Disk, StorageType.Archive }).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
                DFSTestUtil.CreateFile(dfs, file, 1024, (short)1, 1024);
                LocatedBlocks locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
                // get the current located block
                LocatedBlock locatedBlock = locatedBlocks.Get(0);
                ExtendedBlock  block        = locatedBlock.GetBlock();
                DatanodeInfo[] locations    = locatedBlock.GetLocations();
                NUnit.Framework.Assert.AreEqual(1, locations.Length);
                StorageType[] storageTypes = locatedBlock.GetStorageTypes();
                // current block should be written to DISK
                NUnit.Framework.Assert.IsTrue(storageTypes[0] == StorageType.Disk);
                DatanodeInfo source = locations[0];
                // move block to ARCHIVE by using same DataNodeInfo for source, proxy and
                // destination so that movement happens within datanode
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(block, source, source, source, StorageType.Archive));
                // wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
                // get the current located block
                locatedBlock = locatedBlocks.Get(0);
                NUnit.Framework.Assert.AreEqual("Storage should be only one", 1, locatedBlock.GetLocations().Length);
                NUnit.Framework.Assert.IsTrue("Block should be moved to ARCHIVE", locatedBlock.GetStorageTypes()[0] == StorageType.Archive);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #18
        /// <exception cref="System.Exception"/>
        public virtual void TestDowngrade()
        {
            Configuration    conf    = new HdfsConfiguration();
            MiniQJMHACluster cluster = null;
            Path             foo     = new Path("/foo");
            Path             bar     = new Path("/bar");

            try
            {
                cluster = new MiniQJMHACluster.Builder(conf).Build();
                MiniDFSCluster dfsCluster = cluster.GetDfsCluster();
                dfsCluster.WaitActive();
                // let NN1 tail editlog every 1s
                dfsCluster.GetConfiguration(1).SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
                dfsCluster.RestartNameNode(1);
                dfsCluster.TransitionToActive(0);
                DistributedFileSystem dfs = dfsCluster.GetFileSystem(0);
                dfs.Mkdirs(foo);
                // start rolling upgrade
                RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
                NUnit.Framework.Assert.IsTrue(info.IsStarted());
                dfs.Mkdirs(bar);
                TestRollingUpgrade.QueryForPreparation(dfs);
                dfs.Close();
                dfsCluster.RestartNameNode(0, true, "-rollingUpgrade", "downgrade");
                // Once downgraded, there should be no more fsimage for rollbacks.
                NUnit.Framework.Assert.IsFalse(dfsCluster.GetNamesystem(0).GetFSImage().HasRollbackFSImage());
                // shutdown NN1
                dfsCluster.ShutdownNameNode(1);
                dfsCluster.TransitionToActive(0);
                dfs = dfsCluster.GetFileSystem(0);
                NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
                NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #19
            /// <exception cref="System.IO.IOException"/>
            public virtual int Run(Configuration conf, IList<string> args)
            {
                string path = StringUtils.PopOptionWithArgument("-path", args);

                if (path == null)
                {
                    System.Console.Error.WriteLine("Please specify the path with -path.\nUsage:" + GetLongUsage
                                                       ());
                    return(1);
                }
                DistributedFileSystem dfs = AdminHelper.GetDFS(conf);

                try
                {
                    HdfsFileStatus status = dfs.GetClient().GetFileInfo(path);
                    if (status == null)
                    {
                        System.Console.Error.WriteLine("File/Directory does not exist: " + path);
                        return(2);
                    }
                    byte storagePolicyId = status.GetStoragePolicy();
                    if (storagePolicyId == BlockStoragePolicySuite.IdUnspecified)
                    {
                        System.Console.Out.WriteLine("The storage policy of " + path + " is unspecified");
                        return(0);
                    }
                    BlockStoragePolicy[] policies = dfs.GetStoragePolicies();
                    foreach (BlockStoragePolicy p in policies)
                    {
                        if (p.GetId() == storagePolicyId)
                        {
                            System.Console.Out.WriteLine("The storage policy of " + path + ":\n" + p);
                            return(0);
                        }
                    }
                }
                catch (Exception e)
                {
                    System.Console.Error.WriteLine(AdminHelper.PrettifyException(e));
                    return(2);
                }
                System.Console.Error.WriteLine("Cannot identify the storage policy for " + path);
                return(2);
            }
Example #20
        public virtual void TestLeaseExpireEmptyFiles()
        {
            Sharpen.Thread.UncaughtExceptionHandler oldUEH = Sharpen.Thread.GetDefaultUncaughtExceptionHandler();
            Sharpen.Thread.SetDefaultUncaughtExceptionHandler(new _UncaughtExceptionHandler_43(this));
            System.Console.Out.WriteLine("testLeaseExpireEmptyFiles start");
            long          leasePeriod = 1000;
            int           DatanodeNum = 3;
            Configuration conf        = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
            conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            // create cluster
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(DatanodeNum).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                // create a new file.
                TestFileCreation.CreateFile(dfs, new Path("/foo"), DatanodeNum);
                TestFileCreation.CreateFile(dfs, new Path("/foo2"), DatanodeNum);
                TestFileCreation.CreateFile(dfs, new Path("/foo3"), DatanodeNum);
                // set the soft and hard limit to be 1 second so that the
                // namenode triggers lease recovery
                cluster.SetLeasePeriod(leasePeriod, leasePeriod);
                // wait for the lease to expire
                try
                {
                    Sharpen.Thread.Sleep(5 * leasePeriod);
                }
                catch (Exception)
                {
                }
                NUnit.Framework.Assert.IsFalse(isConcurrentModificationException);
            }
            finally
            {
                Sharpen.Thread.SetDefaultUncaughtExceptionHandler(oldUEH);
                cluster.Shutdown();
            }
        }
Example #21
        public virtual void TestDelegationTokenWithDoAs()
        {
            DistributedFileSystem dfs   = cluster.GetFileSystem();
            Credentials           creds = new Credentials();

            Org.Apache.Hadoop.Security.Token.Token<object>[] tokens = dfs.AddDelegationTokens("JobTracker", creds);
            NUnit.Framework.Assert.AreEqual(1, tokens.Length);
            Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier> token =
                (Org.Apache.Hadoop.Security.Token.Token<DelegationTokenIdentifier>)tokens[0];
            UserGroupInformation longUgi = UserGroupInformation.CreateRemoteUser("JobTracker/foo.com@FOO.COM");
            UserGroupInformation shortUgi = UserGroupInformation.CreateRemoteUser("JobTracker");

            longUgi.DoAs(new _PrivilegedExceptionAction_212(this, token, longUgi));
            shortUgi.DoAs(new _PrivilegedExceptionAction_223(this, token));
            longUgi.DoAs(new _PrivilegedExceptionAction_230(this, token, longUgi));
        }
Example #22
        /// <exception cref="System.IO.IOException"/>
        private static void StartRollingUpgrade(Path foo, Path bar, Path file, byte[] data,
                                                MiniDFSCluster cluster)
        {
            DistributedFileSystem dfs = cluster.GetFileSystem();

            //start rolling upgrade
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            dfs.Mkdirs(bar);
            NUnit.Framework.Assert.IsTrue(dfs.Exists(foo));
            NUnit.Framework.Assert.IsTrue(dfs.Exists(bar));
            //truncate a file
            int newLength = DFSUtil.GetRandom().Next(data.Length - 1) + 1;

            dfs.Truncate(file, newLength);
            TestFileTruncate.CheckBlockRecovery(file, dfs);
            AppendTestUtil.CheckFullFile(dfs, file, newLength, data);
        }
Example #23
        public virtual void SetupCluster()
        {
            SecurityUtilTestHelper.SetTokenServiceUseIp(true);
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeDelegationTokenAlwaysUseKey, true);
            conf.Set(CommonConfigurationKeysPublic.HadoopSecurityAuthToLocal, "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//"
                     + "DEFAULT");
            cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology()).NumDataNodes(0).Build();
            cluster.WaitActive();
            string logicalName = HATestUtil.GetLogicalHostname(cluster);

            HATestUtil.SetFailoverConfigurations(cluster, conf, logicalName, 0);
            nn0 = cluster.GetNameNode(0);
            nn1 = cluster.GetNameNode(1);
            fs  = HATestUtil.ConfigureFailoverFs(cluster, conf);
            dfs = (DistributedFileSystem)fs;
            cluster.TransitionToActive(0);
            dtSecretManager = NameNodeAdapter.GetDtSecretManager(nn0.GetNamesystem());
        }
Example #24
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        internal static void QueryForPreparation(DistributedFileSystem dfs)
        {
            RollingUpgradeInfo info;
            int retries = 0;

            while (++retries < 10)
            {
                info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Query);
                if (info.CreatedRollbackImages())
                {
                    break;
                }
                Sharpen.Thread.Sleep(1000);
            }
            if (retries >= 10)
            {
                NUnit.Framework.Assert.Fail("Query return false");
            }
        }
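For context, this polling helper sits between the Prepare and Finalize steps of a rolling upgrade, as the tests above use it:

    // Typical call sequence around the helper (mirrors Examples #14 and #15).
    RollingUpgradeInfo info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Prepare);
    QueryForPreparation(dfs);  // blocks until a rollback fsimage has been created
    info = dfs.RollingUpgrade(HdfsConstants.RollingUpgradeAction.Finalize);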
Example #25
        public virtual void TestWebHdfsCreateSnapshot()
        {
            MiniDFSCluster cluster = null;
            Configuration  conf    = WebHdfsTestUtil.CreateConf();

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
                cluster.WaitActive();
                DistributedFileSystem dfs     = cluster.GetFileSystem();
                FileSystem            webHdfs = WebHdfsTestUtil.GetWebHdfsFileSystem(conf, WebHdfsFileSystem.Scheme);
                Path foo = new Path("/foo");
                dfs.Mkdirs(foo);
                try
                {
                    webHdfs.CreateSnapshot(foo);
                    NUnit.Framework.Assert.Fail("Cannot create snapshot on a non-snapshottable directory"
                                                );
                }
                catch (Exception e)
                {
                    GenericTestUtils.AssertExceptionContains("Directory is not a snapshottable directory"
                                                             , e);
                }
                // allow snapshots on /foo
                dfs.AllowSnapshot(foo);
                // create snapshots on foo using WebHdfs
                webHdfs.CreateSnapshot(foo, "s1");
                // create snapshot without specifying name
                Path spath = webHdfs.CreateSnapshot(foo, null);
                NUnit.Framework.Assert.IsTrue(webHdfs.Exists(spath));
                Path s1path = SnapshotTestHelper.GetSnapshotRoot(foo, "s1");
                NUnit.Framework.Assert.IsTrue(webHdfs.Exists(s1path));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #26
        /// <summary>Stress test for pipeline/lease recovery.</summary>
        /// <remarks>
        /// Stress test for pipeline/lease recovery. Starts a number of
        /// threads, each of which creates a file and has another client
        /// break the lease. While these threads run, failover proceeds
        /// back and forth between two namenodes.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestPipelineRecoveryStress()
        {
            HAStressTestHarness harness = new HAStressTestHarness();

            // Disable permissions so that another user can recover the lease.
            harness.conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, false);
            // This test triggers rapid NN failovers.  The client retry policy uses an
            // exponential backoff.  This can quickly lead to long sleep times and even
            // timeout the whole test.  Cap the sleep time at 1s to prevent this.
            harness.conf.SetInt(DFSConfigKeys.DfsClientFailoverSleeptimeMaxKey, 1000);
            MiniDFSCluster cluster = harness.StartCluster();

            try
            {
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                FileSystem                        fs            = harness.GetFailoverFs();
                DistributedFileSystem             fsAsOtherUser = CreateFsAsOtherUser(cluster, harness.conf);
                MultithreadedTestUtil.TestContext testers       = new MultithreadedTestUtil.TestContext();
                for (int i = 0; i < StressNumThreads; i++)
                {
                    Path p = new Path("/test-" + i);
                    testers.AddThread(new TestPipelinesFailover.PipelineTestThread(testers, fs, fsAsOtherUser, p));
                }
                // Start a separate thread which will make sure that replication
                // happens quickly by triggering deletion reports and replication
                // work calculation frequently.
                harness.AddReplicationTriggerThread(500);
                harness.AddFailoverThread(5000);
                harness.StartThreads();
                testers.StartThreads();
                testers.WaitFor(StressRuntime);
                testers.Stop();
                harness.StopThreads();
            }
            finally
            {
                System.Console.Error.WriteLine("===========================\n\n\n\n");
                harness.Shutdown();
            }
        }
Example #27
        public virtual void TestComputePacketChunkSize()
        {
            DistributedFileSystem fs  = cluster.GetFileSystem();
            FSDataOutputStream    os  = fs.Create(new Path("/test"));
            DFSOutputStream       dos = (DFSOutputStream)Whitebox.GetInternalState(os, "wrappedStream");
            int        packetSize       = 64 * 1024;
            int        bytesPerChecksum = 512;
            MethodInfo method           = Sharpen.Runtime.GetDeclaredMethod(dos.GetType(), "computePacketChunkSize", typeof(int), typeof(int));

            method.Invoke(dos, packetSize, bytesPerChecksum);
            FieldInfo field = Sharpen.Runtime.GetDeclaredField(dos.GetType(), "packetSize");

            NUnit.Framework.Assert.IsTrue((int)field.GetValue(dos) + 33 < packetSize);
            // If PKT_MAX_HEADER_LEN is 257, actual packet size come to over 64KB
            // without a fix on HDFS-7308.
            NUnit.Framework.Assert.IsTrue((int)field.GetValue(dos) + 257 < packetSize);
        }
Example #28
        public virtual void TestReceivePacketMetrics()
        {
            Configuration conf     = new HdfsConfiguration();
            int           interval = 1;

            conf.Set(DFSConfigKeys.DfsMetricsPercentilesIntervalsKey, string.Empty + interval);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path testFile            = new Path("/testFlushNanosMetric.txt");
                FSDataOutputStream fout  = fs.Create(testFile);
                fout.Write(new byte[1]);
                fout.Hsync();
                fout.Close();
                IList<DataNode>      datanodes = cluster.GetDataNodes();
                DataNode             datanode  = datanodes[0];
                MetricsRecordBuilder dnMetrics = MetricsAsserts.GetMetrics(datanode.GetMetrics().Name());
                // Expect two flushes, 1 for the flush that occurs after writing,
                // 1 that occurs on closing the data and metadata files.
                MetricsAsserts.AssertCounter("FlushNanosNumOps", 2L, dnMetrics);
                // Expect two syncs, one from the hsync, one on close.
                MetricsAsserts.AssertCounter("FsyncNanosNumOps", 2L, dnMetrics);
                // Wait for at least 1 rollover
                Sharpen.Thread.Sleep((interval + 1) * 1000);
                // Check the receivePacket percentiles that should be non-zero
                string sec = interval + "s";
                MetricsAsserts.AssertQuantileGauges("FlushNanos" + sec, dnMetrics);
                MetricsAsserts.AssertQuantileGauges("FsyncNanos" + sec, dnMetrics);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #29
        /// <summary>
        /// Test that when we have an uncache request, and the client refuses to release
        /// the replica for a long time, we will un-mlock it.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRevocation()
        {
            Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
            BlockReaderTestUtil.EnableHdfsCachingTracing();
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            Configuration conf = GetDefaultConf();

            // Set a really short revocation timeout.
            conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationTimeoutMs, 250L);
            // Poll very often
            conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationPollingMs, 2L);
            MiniDFSCluster cluster = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            DistributedFileSystem dfs = cluster.GetFileSystem();
            // Create and cache a file.
            string TestFile = "/test_file2";

            DFSTestUtil.CreateFile(dfs, new Path(TestFile), BlockSize, (short)1, unchecked((int)0xcafe));
            dfs.AddCachePool(new CachePoolInfo("pool"));
            long cacheDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder()
                                        .SetPool("pool").SetPath(new Path(TestFile)).SetReplication((short)1).Build());
            FsDatasetSpi<object> fsd = cluster.GetDataNodes()[0].GetFSDataset();

            DFSTestUtil.VerifyExpectedCacheUsage(BlockSize, 1, fsd);
            // Mmap the file.
            FSDataInputStream @in = dfs.Open(new Path(TestFile));
            ByteBuffer        buf = @in.Read(null, BlockSize, EnumSet.NoneOf<ReadOption>());

            // Attempt to uncache file.  The file should get uncached.
            Log.Info("removing cache directive {}", cacheDirectiveId);
            dfs.RemoveCacheDirective(cacheDirectiveId);
            Log.Info("finished removing cache directive {}", cacheDirectiveId);
            Sharpen.Thread.Sleep(1000);
            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            // Cleanup
            @in.ReleaseBuffer(buf);
            @in.Close();
            cluster.Shutdown();
        }
Example #30
        public static void Setup()
        {
            string currentUser = Runtime.GetProperty("user.name");

            config.Set("fs.permissions.umask-mode", "u=rwx,g=,o=");
            config.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserGroupConfKey(currentUser), "*");
            config.Set(DefaultImpersonationProvider.GetTestProvider().GetProxySuperuserIpConfKey(currentUser), "*");
            fsHelper = new FileSystemTestHelper();
            // Set up java key store
            string testRoot = fsHelper.GetTestRootDir();

            testRootDir = new FilePath(testRoot).GetAbsoluteFile();
            Path jksPath = new Path(testRootDir.ToString(), "test.jks");

            config.Set(DFSConfigKeys.DfsEncryptionKeyProviderUri, JavaKeyStoreProvider.SchemeName + "://file" + jksPath.ToUri());
            ProxyUsers.RefreshSuperUserGroupsConfiguration(config);
            cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).Build();
            cluster.WaitActive();
            hdfs     = cluster.GetFileSystem();
            nn       = cluster.GetNameNode();
            dfsAdmin = new HdfsAdmin(cluster.GetURI(), config);
            // Use ephemeral ports in case tests are running in parallel
            config.SetInt("nfs3.mountd.port", 0);
            config.SetInt("nfs3.server.port", 0);
            // Start NFS with allowed.hosts set to "* rw"
            config.Set("dfs.nfs.exports.allowed.hosts", "* rw");
            nfs = new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3(config);
            nfs.StartServiceInternal(false);
            nfsd = (RpcProgramNfs3)nfs.GetRpcProgram();
            hdfs.GetClient().SetKeyProvider(nn.GetNamesystem().GetProvider());
            DFSTestUtil.CreateKey(TestKey, cluster, config);
            // Mock SecurityHandler which returns system user.name
            securityHandler = Org.Mockito.Mockito.Mock <SecurityHandler>();
            Org.Mockito.Mockito.When(securityHandler.GetUser()).ThenReturn(currentUser);
            // Mock SecurityHandler which returns a dummy username "harry"
            securityHandlerUnpriviledged = Org.Mockito.Mockito.Mock <SecurityHandler>();
            Org.Mockito.Mockito.When(securityHandlerUnpriviledged.GetUser()).ThenReturn("harry");
        }