public virtual void BlockLengthHintIsPropagated()
        {
            string        MethodName = GenericTestUtils.GetMethodName();
            Path          path       = new Path("/" + MethodName + ".dat");
            Configuration conf       = new HdfsConfiguration();

            TestWriteBlockGetsBlockLengthHint.FsDatasetChecker.SetFactory(conf);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockLength);
            conf.SetInt(DFSConfigKeys.DfsDatanodeScanPeriodHoursKey, -1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitActive();
                // FsDatasetChecker.CreateRbw asserts on the block length hint
                // during block creation, failing the test if the hint was not
                // propagated.
                DFSTestUtil.CreateFile(cluster.GetFileSystem(), path, 4096 /* buffer size */,
                                       ExpectedBlockLength, ExpectedBlockLength, (short)1,
                                       unchecked ((int)(0x1BAD5EED)));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example 2
        /// <exception cref="System.Exception"/>
        private void DoTestFSOutputSummer(string checksumType)
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, BytesPerChecksum);
            conf.Set(DFSConfigKeys.DfsChecksumTypeKey, checksumType);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumOfDatanodes).Build();

            fileSys = cluster.GetFileSystem();
            try
            {
                Path   file = new Path("try.dat");
                Random rand = new Random(seed);
                rand.NextBytes(expected);
                WriteFile1(file);
                WriteFile2(file);
                WriteFile3(file);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
        }
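A sketch of how DoTestFSOutputSummer is presumably driven from the public tests, one entry point per checksum type; the NUnit attribute and the test names are assumptions modeled on Hadoop's TestFSOutputSummer.
        [NUnit.Framework.Test]
        public virtual void TestFSOutputSummerCRC32()
        {
            // Default checksum type.
            DoTestFSOutputSummer("CRC32");
        }

        [NUnit.Framework.Test]
        public virtual void TestFSOutputSummerCRC32C()
        {
            // Hardware-accelerated variant.
            DoTestFSOutputSummer("CRC32C");
        }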
        /// <exception cref="System.IO.IOException"/>
        internal static void Setrep(int fromREP, int toREP, bool simulatedStorage)
        {
            Configuration conf = new HdfsConfiguration();

            if (simulatedStorage)
            {
                SimulatedFSDataset.SetFactory(conf);
            }
            conf.Set(DFSConfigKeys.DfsReplicationKey, string.Empty + fromREP);
            conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 1000L);
            conf.Set(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, Sharpen.Extensions.ToString(2));
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(10).Build();
            FileSystem fs = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            try
            {
                Path root = TestDFSShell.Mkdir(fs, new Path("/test/setrep" + fromREP + "-" + toREP));
                Path f = TestDFSShell.WriteFile(fs, new Path(root, "foo"));
                {
                    // Verify setrep for changing replication
                    string[] args = new string[] { "-setrep", "-w", string.Empty + toREP, string.Empty + f };
                    FsShell shell = new FsShell();
                    shell.SetConf(conf);
                    try
                    {
                        NUnit.Framework.Assert.AreEqual(0, shell.Run(args));
                    }
                    catch (Exception e)
                    {
                        NUnit.Framework.Assert.IsTrue("-setrep " + e, false);
                    }
                }
                //get fs again since the old one may be closed
                fs = cluster.GetFileSystem();
                FileStatus file = fs.GetFileStatus(f);
                long       len  = file.GetLen();
                foreach (BlockLocation locations in fs.GetFileBlockLocations(file, 0, len))
                {
                    NUnit.Framework.Assert.IsTrue(locations.GetHosts().Length == toREP);
                }
                TestDFSShell.Show("done setrep waiting: " + root);
            }
            finally
            {
                try
                {
                    fs.Close();
                }
                catch (Exception)
                {
                }
                cluster.Shutdown();
            }
        }
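A sketch of how Setrep might be invoked; the interesting axes are the direction of the replication change and whether SimulatedFSDataset backs the datanodes. The test names and replication values here are illustrative only.
        [NUnit.Framework.Test]
        public virtual void TestSetrepIncreasing()
        {
            Setrep(3, 7, false);   // raise replication 3 -> 7 on real storage
        }

        [NUnit.Framework.Test]
        public virtual void TestSetrepIncreasingSimulatedStorage()
        {
            Setrep(3, 7, true);    // same change backed by SimulatedFSDataset
        }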
Example 4
        public static void SetupCluster()
        {
            if (DomainSocket.GetLoadingFailureReason() != null)
            {
                return;
            }
            sockDir = new TemporarySocketDirectory();
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(), "TestParallelShortCircuitReadUnCached._PORT.sock"
                                                                        ).GetAbsolutePath());
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            // Enabling data transfer encryption should have no effect when using
            // short-circuit local reads.  This is a regression test for HDFS-5353.
            conf.SetBoolean(DFSConfigKeys.DfsEncryptDataTransferKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            conf.SetBoolean(DFSConfigKeys.DfsClientDomainSocketDataTraffic, true);
            // We want to test reading from stale sockets.
            conf.SetInt(DFSConfigKeys.DfsDatanodeSocketReuseKeepaliveKey, 1);
            conf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, 5 * 60 * 1000);
            conf.SetInt(DFSConfigKeys.DfsClientSocketCacheCapacityKey, 32);
            // Avoid using the FileInputStreamCache.
            conf.SetInt(DFSConfigKeys.DfsClientReadShortcircuitStreamsCacheSizeKey, 0);
            DomainSocket.DisableBindPathValidation();
            DFSInputStream.tcpReadsDisabledForTesting = true;
            SetupCluster(1, conf);
        }
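SetupCluster returns silently when native domain sockets cannot be loaded, so each test in the enclosing class presumably guards itself the same way; a sketch of such a guard, using the Assume pattern that appears verbatim in RunBlockReaderLocalTest later in this listing.
        [NUnit.Framework.SetUp]
        public virtual void Before()
        {
            // Skip short-circuit tests on platforms without native
            // domain-socket support (e.g. when libhadoop is unavailable).
            Assume.AssumeThat(DomainSocket.GetLoadingFailureReason(), CoreMatchers.EqualTo(null));
        }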
Example 5
        public virtual void HSyncEndBlock_02()
        {
            Configuration conf = new HdfsConfiguration();
            int           customPerChecksumSize = 512;
            int           customBlockSize       = customPerChecksumSize * 3;

            // Modify default filesystem settings
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, customPerChecksumSize);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, customBlockSize);
            DoTheJob(conf, fName, customBlockSize, (short)2, true,
                     EnumSet.Of(HdfsDataOutputStream.SyncFlag.EndBlock));
        }
Example 6
        public virtual void HFlush_03()
        {
            Configuration conf = new HdfsConfiguration();
            int           customPerChecksumSize = 400;
            int           customBlockSize       = customPerChecksumSize * 3;

            // Modify default filesystem settings
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, customPerChecksumSize);
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, customBlockSize);
            DoTheJob(conf, fName, customBlockSize, (short)2, false,
                     EnumSet.NoneOf<HdfsDataOutputStream.SyncFlag>());
        }
        public virtual void Pipeline_02_03()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
            // create cluster
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();

            try
            {
                //change the lease limits.
                cluster.SetLeasePeriod(SoftLeaseLimit, HardLeaseLimit);
                //wait for the cluster
                cluster.WaitActive();
                FileSystem fs   = cluster.GetFileSystem();
                Path       p    = new Path(Dir, "file1");
                int        half = BlockSize / 2;
                {
                    //a. On Machine M1, Create file. Write half block of data.
                    //   Invoke DFSOutputStream.hflush() on the dfs file handle.
                    //   Do not close file yet.
                    FSDataOutputStream @out = fs.Create(p, true, fs.GetConf().GetInt(CommonConfigurationKeys
                                                                                     .IoFileBufferSizeKey, 4096), (short)3, BlockSize);
                    Write(@out, 0, half);
                    //hflush
                    ((DFSOutputStream)@out.GetWrappedStream()).Hflush();
                }
                //b. On another machine M2, open file and verify that the half-block
                //   of data can be read successfully.
                CheckFile(p, half, conf);
                AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
                ((DistributedFileSystem)fs).dfs.GetLeaseRenewer().InterruptAndJoin();
                {
                    //c. On M1, append another half block of data.  Close file on M1.
                    // Sleep to let the lease expire.
                    Sharpen.Thread.Sleep(2 * SoftLeaseLimit);
                    UserGroupInformation current = UserGroupInformation.GetCurrentUser();
                    UserGroupInformation ugi     = UserGroupInformation.CreateUserForTesting(
                        current.GetShortUserName() + "x", new string[] { "supergroup" });
                    DistributedFileSystem dfs  = ugi.DoAs(new _PrivilegedExceptionAction_102(conf));
                    FSDataOutputStream    @out = Append(dfs, p);
                    Write(@out, 0, half);
                    @out.Close();
                }
                //d. On M2, open file and read 1 block of data from it. Close file.
                CheckFile(p, 2 * half, conf);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
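The _PrivilegedExceptionAction_102 helper above is a Sharpen-generated anonymous class whose body is not shown; a plausible reconstruction, assuming it only obtains a DistributedFileSystem as the impersonated user:
        private sealed class _PrivilegedExceptionAction_102 : PrivilegedExceptionAction<DistributedFileSystem>
        {
            private readonly Configuration conf;

            public _PrivilegedExceptionAction_102(Configuration conf)
            {
                this.conf = conf;
            }

            /// <exception cref="System.Exception"/>
            public DistributedFileSystem Run()
            {
                // Executed via ugi.DoAs, so the append above runs under a
                // different lease holder than the original writer.
                return (DistributedFileSystem)FileSystem.Get(conf);
            }
        }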
Example 8
        public virtual void TestReadFromOneDN()
        {
            HdfsConfiguration configuration = new HdfsConfiguration();
            // One of the goals of this test is to verify that we don't open more
            // than one socket.  So use a different client context, so that we
            // get our own socket cache, rather than sharing with the other test
            // instances.  Also use a really long socket timeout so that nothing
            // gets closed before we get around to checking the cache size at the end.
            string contextName = "testReadFromOneDNContext";

            configuration.Set(DFSConfigKeys.DfsClientContext, contextName);
            configuration.SetLong(DFSConfigKeys.DfsClientSocketTimeoutKey, 100000000L);
            BlockReaderTestUtil util = new BlockReaderTestUtil(1, configuration);
            Path testFile            = new Path("/testConnCache.dat");

            byte[]    authenticData = util.WriteFile(testFile, FileSize / 1024);
            DFSClient client        = new DFSClient(new IPEndPoint("localhost",
                util.GetCluster().GetNameNodePort()), util.GetConf());
            ClientContext  cacheContext = ClientContext.Get(contextName, client.GetConf());
            DFSInputStream @in          = client.Open(testFile.ToString());

            Log.Info("opened " + testFile.ToString());
            byte[] dataBuf = new byte[BlockSize];
            // Initial read
            Pread(@in, 0, dataBuf, 0, dataBuf.Length, authenticData);
            // Read again and verify that the socket is the same
            Pread(@in, FileSize - dataBuf.Length, dataBuf, 0, dataBuf.Length, authenticData);
            Pread(@in, 1024, dataBuf, 0, dataBuf.Length, authenticData);
            // No seek; just read
            Pread(@in, -1, dataBuf, 0, dataBuf.Length, authenticData);
            Pread(@in, 64, dataBuf, 0, dataBuf.Length / 2, authenticData);
            @in.Close();
            client.Close();
            NUnit.Framework.Assert.AreEqual(1, ClientContext.GetFromConf(configuration).GetPeerCache().Size());
        }
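A minimal sketch of the Pread helper the calls above depend on, under two assumptions: a negative position means "read at the current offset without seeking", and every byte read is verified against the authentic data written earlier.
        private void Pread(DFSInputStream @in, long pos, byte[] buffer, int offset,
                           int length, byte[] authenticData)
        {
            if (pos >= 0)
            {
                @in.Seek(pos);
            }
            long verifyFrom = @in.GetPos();
            int  nread      = 0;
            while (nread < length)
            {
                int n = @in.Read(buffer, offset + nread, length - nread);
                NUnit.Framework.Assert.IsTrue("unexpected EOF", n > 0);
                nread += n;
            }
            // Every byte must match what WriteFile produced.
            for (int i = 0; i < length; i++)
            {
                NUnit.Framework.Assert.AreEqual(authenticData[verifyFrom + i], buffer[offset + i]);
            }
        }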
Example 9
        public virtual void TestFSInputChecker()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, BytesPerSum);
            rand.NextBytes(expected);
            // test DFS
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
            FileSystem     fileSys = cluster.GetFileSystem();

            try
            {
                TestChecker(fileSys, true);
                TestChecker(fileSys, false);
                TestSeekAndRead(fileSys);
            }
            finally
            {
                fileSys.Close();
                cluster.Shutdown();
            }
            // test Local FS
            fileSys = FileSystem.GetLocal(conf);
            try
            {
                TestChecker(fileSys, true);
                TestChecker(fileSys, false);
                TestFileCorruption((LocalFileSystem)fileSys);
                TestSeekAndRead(fileSys);
            }
            finally
            {
                fileSys.Close();
            }
        }
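The fields used above (seed, rand, expected, fileSys) are fixture state of the enclosing test class; a sketch of plausible declarations, with the buffer size chosen only for illustration and Random assumed to be the Sharpen binding of java.util.Random (which accepts a long seed).
        private const long seed = 0xDEADBEEFL;
        private static readonly Random rand     = new Random(seed);
        private static readonly byte[] expected = new byte[2 * BlockSize];
        private FileSystem fileSys;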
Example 10
        public virtual void TestBlockSynchronization()
        {
            int           OrgFileSize = 3000;
            Configuration conf        = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
            cluster.WaitActive();
            //create a file
            DistributedFileSystem dfs = cluster.GetFileSystem();
            string filestr            = "/foo";
            Path   filepath           = new Path(filestr);

            DFSTestUtil.CreateFile(dfs, filepath, OrgFileSize, ReplicationNum, 0L);
            NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
            DFSTestUtil.WaitReplication(dfs, filepath, ReplicationNum);
            //get block info for the last block
            LocatedBlock locatedblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs
                                                                                      .GetNamenode(), filestr);

            DatanodeInfo[] datanodeinfos = locatedblock.GetLocations();
            NUnit.Framework.Assert.AreEqual(ReplicationNum, datanodeinfos.Length);
            //connect to data nodes
            DataNode[] datanodes = new DataNode[ReplicationNum];
            for (int i = 0; i < ReplicationNum; i++)
            {
                datanodes[i] = cluster.GetDataNode(datanodeinfos[i].GetIpcPort());
                NUnit.Framework.Assert.IsTrue(datanodes[i] != null);
            }
            //verify Block Info
            ExtendedBlock lastblock = locatedblock.GetBlock();

            DataNode.Log.Info("newblocks=" + lastblock);
            for (int i_1 = 0; i_1 < ReplicationNum; i_1++)
            {
                CheckMetaInfo(lastblock, datanodes[i_1]);
            }
            DataNode.Log.Info("dfs.dfs.clientName=" + dfs.dfs.clientName);
            cluster.GetNameNodeRpc().Append(filestr, dfs.dfs.clientName,
                                            new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Append)));
            // expire lease to trigger block recovery.
            WaitLeaseRecovery(cluster);
            Block[] updatedmetainfo = new Block[ReplicationNum];
            long    oldSize         = lastblock.GetNumBytes();

            lastblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(),
                                                                      filestr).GetBlock();
            long currentGS = lastblock.GetGenerationStamp();

            for (int i_2 = 0; i_2 < ReplicationNum; i_2++)
            {
                updatedmetainfo[i_2] = DataNodeTestUtils.GetFSDataset(datanodes[i_2]).GetStoredBlock(
                    lastblock.GetBlockPoolId(), lastblock.GetBlockId());
                NUnit.Framework.Assert.AreEqual(lastblock.GetBlockId(), updatedmetainfo[i_2].GetBlockId());
                NUnit.Framework.Assert.AreEqual(oldSize, updatedmetainfo[i_2].GetNumBytes());
                NUnit.Framework.Assert.AreEqual(currentGS, updatedmetainfo[i_2].GetGenerationStamp());
            }
            // verify that lease recovery does not occur when namenode is in safemode
            System.Console.Out.WriteLine("Testing that lease recovery cannot happen during safemode."
                                         );
            filestr  = "/foo.safemode";
            filepath = new Path(filestr);
            dfs.Create(filepath, (short)1);
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter,
                                                 false);
            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
            DFSTestUtil.WaitReplication(dfs, filepath, (short)1);
            WaitLeaseRecovery(cluster);
            // verify that we still cannot recover the lease
            LeaseManager lm = NameNodeAdapter.GetLeaseManager(cluster.GetNamesystem());

            NUnit.Framework.Assert.IsTrue("Found " + lm.CountLease() + " lease, expected 1",
                                          lm.CountLease() == 1);
            cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave,
                                                 false);
        }
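WaitLeaseRecovery is not shown above; a hedged sketch of what it presumably does, modeled on Hadoop's TestLeaseRecovery helper, with LeasePeriod assumed to be fixture state: shrink both lease limits so the lease expires quickly, then give the NameNode time to run recovery.
        internal static void WaitLeaseRecovery(MiniDFSCluster cluster)
        {
            cluster.SetLeasePeriod(LeasePeriod, LeasePeriod);
            try
            {
                // Wait for the shortened lease to expire and recovery to start.
                Sharpen.Thread.Sleep(2 * 3000);
            }
            catch (Exception)
            {
            }
        }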
        /// <summary>Tests all FsEditLogOps that are converted to inotify events.</summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Sharpen.URISyntaxException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Inotify.MissingEventsException"/>
        public virtual void TestBasic()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAclsEnabledKey, true);
            // so that we can get an atime change
            conf.SetLong(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, 1);
            MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
            builder.GetDfsBuilder().NumDataNodes(2);
            MiniQJMHACluster cluster = builder.Build();

            try
            {
                cluster.GetDfsCluster().WaitActive();
                cluster.GetDfsCluster().TransitionToActive(0);
                DFSClient client = new DFSClient(
                    cluster.GetDfsCluster().GetNameNode(0).GetNameNodeAddress(), conf);
                FileSystem fs = cluster.GetDfsCluster().GetFileSystem(0);
                DFSTestUtil.CreateFile(fs, new Path("/file"), BlockSize, (short)1, 0L);
                DFSTestUtil.CreateFile(fs, new Path("/file3"), BlockSize, (short)1, 0L);
                DFSTestUtil.CreateFile(fs, new Path("/file5"), BlockSize, (short)1, 0L);
                DFSInotifyEventInputStream eis = client.GetInotifyEventStream();
                client.Rename("/file", "/file4", null);
                // RenameOp -> RenameEvent
                client.Rename("/file4", "/file2");
                // RenameOldOp -> RenameEvent
                // DeleteOp, AddOp -> UnlinkEvent, CreateEvent
                OutputStream os = client.Create("/file2", true, (short)2, BlockSize);
                os.Write(new byte[BlockSize]);
                os.Close();
                // CloseOp -> CloseEvent
                // AddOp -> AppendEvent
                os = client.Append("/file2", BlockSize, EnumSet.Of(CreateFlag.Append), null, null
                                   );
                os.Write(new byte[BlockSize]);
                os.Close();
                // CloseOp -> CloseEvent
                Sharpen.Thread.Sleep(10);
                // so that the atime will get updated on the next line
                client.Open("/file2").Read(new byte[1]);
                // TimesOp -> MetadataUpdateEvent
                // SetReplicationOp -> MetadataUpdateEvent
                client.SetReplication("/file2", (short)1);
                // ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent
                client.Concat("/file2", new string[] { "/file3" });
                client.Delete("/file2", false);
                // DeleteOp -> UnlinkEvent
                client.Mkdirs("/dir", null, false);
                // MkdirOp -> CreateEvent
                // SetPermissionsOp -> MetadataUpdateEvent
                client.SetPermission("/dir", FsPermission.ValueOf("-rw-rw-rw-"));
                // SetOwnerOp -> MetadataUpdateEvent
                client.SetOwner("/dir", "username", "groupname");
                client.CreateSymlink("/dir", "/dir2", false);
                // SymlinkOp -> CreateEvent
                client.SetXAttr("/file5", "user.field", Sharpen.Runtime.GetBytesForString("value"
                                                                                          ), EnumSet.Of(XAttrSetFlag.Create));
                // SetXAttrOp -> MetadataUpdateEvent
                // RemoveXAttrOp -> MetadataUpdateEvent
                client.RemoveXAttr("/file5", "user.field");
                // SetAclOp -> MetadataUpdateEvent
                client.SetAcl("/file5", AclEntry.ParseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---"
                                                              , true));
                client.RemoveAcl("/file5");
                // SetAclOp -> MetadataUpdateEvent
                client.Rename("/file5", "/dir");
                // RenameOldOp -> RenameEvent
                EventBatch batch = null;
                // RenameOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                long txid = batch.GetTxid();
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Rename);
                Event.RenameEvent re = (Event.RenameEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual("/file4", re.GetDstPath());
                NUnit.Framework.Assert.AreEqual("/file", re.GetSrcPath());
                NUnit.Framework.Assert.IsTrue(re.GetTimestamp() > 0);
                long eventsBehind = eis.GetTxidsBehindEstimate();
                // RenameOldOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Rename);
                Event.RenameEvent re2 = (Event.RenameEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(re2.GetDstPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(re2.GetSrcPath().Equals("/file4"));
                NUnit.Framework.Assert.IsTrue(re.GetTimestamp() > 0);
                // AddOp with overwrite
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                Event.CreateEvent ce = (Event.CreateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ce.GetiNodeType() == Event.CreateEvent.INodeType.File
                                              );
                NUnit.Framework.Assert.IsTrue(ce.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(ce.GetCtime() > 0);
                NUnit.Framework.Assert.IsTrue(ce.GetReplication() > 0);
                NUnit.Framework.Assert.IsTrue(ce.GetSymlinkTarget() == null);
                NUnit.Framework.Assert.IsTrue(ce.GetOverwrite());
                NUnit.Framework.Assert.AreEqual(BlockSize, ce.GetDefaultBlockSize());
                // CloseOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Close);
                Event.CloseEvent ce2 = (Event.CloseEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ce2.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(ce2.GetFileSize() > 0);
                NUnit.Framework.Assert.IsTrue(ce2.GetTimestamp() > 0);
                // AppendOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Append);
                Event.AppendEvent append2 = (Event.AppendEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.AreEqual("/file2", append2.GetPath());
                NUnit.Framework.Assert.IsFalse(append2.ToNewBlock());
                // CloseOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Close);
                NUnit.Framework.Assert.IsTrue(((Event.CloseEvent)batch.GetEvents()[0]).GetPath().
                                              Equals("/file2"));
                // TimesOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(mue.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Times);
                // SetReplicationOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue2 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue2.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(mue2.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Replication);
                NUnit.Framework.Assert.IsTrue(mue2.GetReplication() == 1);
                // ConcatDeleteOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(3, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Append);
                NUnit.Framework.Assert.IsTrue(((Event.AppendEvent)batch.GetEvents()[0]).GetPath()
                                              .Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[1].GetEventType() == Event.EventType
                                              .Unlink);
                Event.UnlinkEvent ue2 = (Event.UnlinkEvent)batch.GetEvents()[1];
                NUnit.Framework.Assert.IsTrue(ue2.GetPath().Equals("/file3"));
                NUnit.Framework.Assert.IsTrue(ue2.GetTimestamp() > 0);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[2].GetEventType() == Event.EventType
                                              .Close);
                Event.CloseEvent ce3 = (Event.CloseEvent)batch.GetEvents()[2];
                NUnit.Framework.Assert.IsTrue(ce3.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(ce3.GetTimestamp() > 0);
                // DeleteOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Unlink);
                Event.UnlinkEvent ue = (Event.UnlinkEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ue.GetPath().Equals("/file2"));
                NUnit.Framework.Assert.IsTrue(ue.GetTimestamp() > 0);
                // MkdirOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                Event.CreateEvent ce4 = (Event.CreateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ce4.GetiNodeType() == Event.CreateEvent.INodeType.Directory
                                              );
                NUnit.Framework.Assert.IsTrue(ce4.GetPath().Equals("/dir"));
                NUnit.Framework.Assert.IsTrue(ce4.GetCtime() > 0);
                NUnit.Framework.Assert.IsTrue(ce4.GetReplication() == 0);
                NUnit.Framework.Assert.IsTrue(ce4.GetSymlinkTarget() == null);
                // SetPermissionsOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue3 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue3.GetPath().Equals("/dir"));
                NUnit.Framework.Assert.IsTrue(mue3.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Perms);
                NUnit.Framework.Assert.IsTrue(mue3.GetPerms().ToString().Contains("rw-rw-rw-"));
                // SetOwnerOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue4 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue4.GetPath().Equals("/dir"));
                NUnit.Framework.Assert.IsTrue(mue4.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Owner);
                NUnit.Framework.Assert.IsTrue(mue4.GetOwnerName().Equals("username"));
                NUnit.Framework.Assert.IsTrue(mue4.GetGroupName().Equals("groupname"));
                // SymlinkOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Create);
                Event.CreateEvent ce5 = (Event.CreateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(ce5.GetiNodeType() == Event.CreateEvent.INodeType.Symlink
                                              );
                NUnit.Framework.Assert.IsTrue(ce5.GetPath().Equals("/dir2"));
                NUnit.Framework.Assert.IsTrue(ce5.GetCtime() > 0);
                NUnit.Framework.Assert.IsTrue(ce5.GetReplication() == 0);
                NUnit.Framework.Assert.IsTrue(ce5.GetSymlinkTarget().Equals("/dir"));
                // SetXAttrOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue5 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue5.GetPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(mue5.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Xattrs);
                NUnit.Framework.Assert.IsTrue(mue5.GetxAttrs().Count == 1);
                NUnit.Framework.Assert.IsTrue(mue5.GetxAttrs()[0].GetName().Contains("field"));
                NUnit.Framework.Assert.IsTrue(!mue5.IsxAttrsRemoved());
                // RemoveXAttrOp
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue6 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue6.GetPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(mue6.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Xattrs);
                NUnit.Framework.Assert.IsTrue(mue6.GetxAttrs().Count == 1);
                NUnit.Framework.Assert.IsTrue(mue6.GetxAttrs()[0].GetName().Contains("field"));
                NUnit.Framework.Assert.IsTrue(mue6.IsxAttrsRemoved());
                // SetAclOp (1)
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue7 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue7.GetPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(mue7.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Acls);
                NUnit.Framework.Assert.IsTrue(mue7.GetAcls().Contains(AclEntry.ParseAclEntry("user::rwx"
                                                                                             , true)));
                // SetAclOp (2)
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Metadata);
                Event.MetadataUpdateEvent mue8 = (Event.MetadataUpdateEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(mue8.GetPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(mue8.GetMetadataType() == Event.MetadataUpdateEvent.MetadataType
                                              .Acls);
                NUnit.Framework.Assert.IsTrue(mue8.GetAcls() == null);
                // RenameOp (2)
                batch = WaitForNextEvents(eis);
                NUnit.Framework.Assert.AreEqual(1, batch.GetEvents().Length);
                txid = CheckTxid(batch, txid);
                NUnit.Framework.Assert.IsTrue(batch.GetEvents()[0].GetEventType() == Event.EventType
                                              .Rename);
                Event.RenameEvent re3 = (Event.RenameEvent)batch.GetEvents()[0];
                NUnit.Framework.Assert.IsTrue(re3.GetDstPath().Equals("/dir/file5"));
                NUnit.Framework.Assert.IsTrue(re3.GetSrcPath().Equals("/file5"));
                NUnit.Framework.Assert.IsTrue(re.GetTimestamp() > 0);
                // Returns null when there are no further events
                NUnit.Framework.Assert.IsTrue(eis.Poll() == null);
                // make sure the estimate hasn't changed since the above assertion
                // tells us that we are fully caught up to the current namesystem state
                // and we should not have been behind at all when eventsBehind was set
                // either, since there were few enough events that they should have all
                // been read to the client during the first poll() call
                NUnit.Framework.Assert.IsTrue(eis.GetTxidsBehindEstimate() == eventsBehind);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
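Sketches of the two helpers that every assertion block in TestBasic relies on; the real implementations live elsewhere in the test class, so treat these as assumptions about their behavior: WaitForNextEvents polls until a batch arrives, and CheckTxid asserts that transaction ids strictly increase from batch to batch.
        private static EventBatch WaitForNextEvents(DFSInotifyEventInputStream eis)
        {
            EventBatch batch = null;
            while ((batch = eis.Poll()) == null)
            {
                // The edit log may not have shipped the next ops yet; poll again shortly.
                Sharpen.Thread.Sleep(10);
            }
            return batch;
        }

        private static long CheckTxid(EventBatch batch, long prevTxid)
        {
            NUnit.Framework.Assert.IsTrue("txids must strictly increase across batches",
                                          batch.GetTxid() > prevTxid);
            return batch.GetTxid();
        }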
 /// <exception cref="System.IO.IOException"/>
 public virtual void StartDataNodes(Configuration conf, int numDataNodes, StorageType
                                    [][] storageTypes, bool manageDfsDirs, HdfsServerConstants.StartupOption operation
                                    , string[] racks, string[] nodeGroups, string[] hosts, long[][] storageCapacities
                                    , long[] simulatedCapacities, bool setupHostsFile, bool checkDataNodeAddrConfig,
                                    bool checkDataNodeHostConfig)
 {
     lock (this)
     {
          System.Diagnostics.Debug.Assert(storageCapacities == null || simulatedCapacities == null);
          System.Diagnostics.Debug.Assert(storageTypes == null || storageTypes.Length == numDataNodes);
          System.Diagnostics.Debug.Assert(storageCapacities == null || storageCapacities.Length == numDataNodes);
         if (operation == HdfsServerConstants.StartupOption.Recover)
         {
             return;
         }
         if (checkDataNodeHostConfig)
         {
             conf.SetIfUnset(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
         }
          else
          {
              conf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, "127.0.0.1");
          }
         int curDatanodesNum = dataNodes.Count;
          // For the minicluster, the default initial delay for block reports is 0.
         if (conf.Get(DFSConfigKeys.DfsBlockreportInitialDelayKey) == null)
         {
             conf.SetLong(DFSConfigKeys.DfsBlockreportInitialDelayKey, 0);
         }
          // If the minicluster's name node is null, assume that the conf has been
          // set with the right address:port of the name node.
         if (racks != null && numDataNodes > racks.Length)
         {
             throw new ArgumentException("The length of racks [" + racks.Length + "] is less than the number of datanodes ["
                                         + numDataNodes + "].");
         }
         if (nodeGroups != null && numDataNodes > nodeGroups.Length)
         {
             throw new ArgumentException("The length of nodeGroups [" + nodeGroups.Length + "] is less than the number of datanodes ["
                                         + numDataNodes + "].");
         }
         if (hosts != null && numDataNodes > hosts.Length)
         {
             throw new ArgumentException("The length of hosts [" + hosts.Length + "] is less than the number of datanodes ["
                                         + numDataNodes + "].");
         }
         //Generate some hostnames if required
         if (racks != null && hosts == null)
         {
             hosts = new string[numDataNodes];
             for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++)
             {
                 hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
             }
         }
         if (simulatedCapacities != null && numDataNodes > simulatedCapacities.Length)
         {
             throw new ArgumentException("The length of simulatedCapacities [" + simulatedCapacities
                                         .Length + "] is less than the number of datanodes [" + numDataNodes + "].");
         }
         string[] dnArgs = (operation == null || operation != HdfsServerConstants.StartupOption
                            .Rollback) ? null : new string[] { operation.GetName() };
         DataNode[] dns = new DataNode[numDataNodes];
         for (int i_1 = curDatanodesNum; i_1 < curDatanodesNum + numDataNodes; i_1++)
         {
             Configuration dnConf = new HdfsConfiguration(conf);
             // Set up datanode address
             SetupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
             if (manageDfsDirs)
             {
                  string dirs = MakeDataNodeDirs(i_1, storageTypes == null ? null : storageTypes[i_1]);
                 dnConf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
                 conf.Set(DFSConfigKeys.DfsDatanodeDataDirKey, dirs);
             }
             if (simulatedCapacities != null)
             {
                 SimulatedFSDataset.SetFactory(dnConf);
                  dnConf.SetLong(SimulatedFSDataset.ConfigPropertyCapacity,
                                 simulatedCapacities[i_1 - curDatanodesNum]);
             }
             Log.Info("Starting DataNode " + i_1 + " with " + DFSConfigKeys.DfsDatanodeDataDirKey
                      + ": " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey));
             if (hosts != null)
             {
                 dnConf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, hosts[i_1 - curDatanodesNum]);
                 Log.Info("Starting DataNode " + i_1 + " with hostname set to: " + dnConf.Get(DFSConfigKeys
                                                                                              .DfsDatanodeHostNameKey));
             }
             if (racks != null)
             {
                 string name = hosts[i_1 - curDatanodesNum];
                 if (nodeGroups == null)
                 {
                     Log.Info("Adding node with hostname : " + name + " to rack " + racks[i_1 - curDatanodesNum
                              ]);
                     StaticMapping.AddNodeToRack(name, racks[i_1 - curDatanodesNum]);
                 }
                 else
                 {
                     Log.Info("Adding node with hostname : " + name + " to serverGroup " + nodeGroups[
                                  i_1 - curDatanodesNum] + " and rack " + racks[i_1 - curDatanodesNum]);
                     StaticMapping.AddNodeToRack(name, racks[i_1 - curDatanodesNum] + nodeGroups[i_1 -
                                                                                                 curDatanodesNum]);
                 }
             }
             Configuration newconf = new HdfsConfiguration(dnConf);
             // save config
             if (hosts != null)
             {
                 NetUtils.AddStaticResolution(hosts[i_1 - curDatanodesNum], "localhost");
             }
             SecureDataNodeStarter.SecureResources secureResources = null;
             if (UserGroupInformation.IsSecurityEnabled())
             {
                 try
                 {
                     secureResources = SecureDataNodeStarter.GetSecureResources(dnConf);
                 }
                 catch (Exception ex)
                 {
                     Sharpen.Runtime.PrintStackTrace(ex);
                 }
             }
             DataNode dn = DataNode.InstantiateDataNode(dnArgs, dnConf, secureResources);
             if (dn == null)
             {
                 throw new IOException("Cannot start DataNode in " + dnConf.Get(DFSConfigKeys.DfsDatanodeDataDirKey
                                                                                ));
             }
              // Since HDFS does things based on IP:port, we need to add the
              // mapping from IP:port to rackId.
             string ipAddr = dn.GetXferAddress().Address.GetHostAddress();
             if (racks != null)
             {
                 int port = dn.GetXferAddress().Port;
                 if (nodeGroups == null)
                 {
                     Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack " + racks
                              [i_1 - curDatanodesNum]);
                     StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i_1 - curDatanodesNum]);
                 }
                 else
                 {
                     Log.Info("Adding node with IP:port : " + ipAddr + ":" + port + " to nodeGroup " +
                              nodeGroups[i_1 - curDatanodesNum] + " and rack " + racks[i_1 - curDatanodesNum]
                              );
                     StaticMapping.AddNodeToRack(ipAddr + ":" + port, racks[i_1 - curDatanodesNum] + nodeGroups
                                                 [i_1 - curDatanodesNum]);
                 }
             }
             dn.RunDatanodeDaemon();
              dataNodes.AddItem(new MiniDFSCluster.DataNodeProperties(
                  this, dn, newconf, dnArgs, secureResources, dn.GetIpcPort()));
             dns[i_1 - curDatanodesNum] = dn;
         }
         curDatanodesNum   += numDataNodes;
         this.numDataNodes += numDataNodes;
         WaitActive();
         if (storageCapacities != null)
         {
              for (int i = 0; i < numDataNodes; ++i)
              {
                  // dns and storageCapacities are both indexed by the batch of
                  // datanodes just added, so both have numDataNodes entries.
                  IList <FsVolumeSpi> volumes = dns[i].GetFSDataset().GetVolumes();
                  System.Diagnostics.Debug.Assert(volumes.Count == storagesPerDatanode);
                  for (int j = 0; j < volumes.Count; ++j)
                  {
                      FsVolumeImpl volume = (FsVolumeImpl)volumes[j];
                      volume.SetCapacityForTesting(storageCapacities[i][j]);
                  }
             }
         }
     }
 }
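An illustrative call of the overload above, assuming a running MiniDFSCluster: add two datanodes on one rack with 1 GB of simulated capacity each, leaving every other knob at its default. Note that the asserts at the top forbid passing both storageCapacities and simulatedCapacities.
 /// <exception cref="System.IO.IOException"/>
 internal void AddTwoSimulatedDataNodes(Configuration conf)
 {
     StartDataNodes(conf, 2,
                    null,                                // storageTypes
                    true,                                // manageDfsDirs
                    null,                                // no special startup option
                    new string[] { "/rack1", "/rack1" }, // racks
                    null,                                // nodeGroups
                    null,                                // hosts
                    null,                                // storageCapacities
                    new long[] { 1L << 30, 1L << 30 },   // 1 GB simulated capacity each
                    false, false, false);
 }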
Example 13
        /// <summary>
        /// Test that blocks should get replicated if we have corrupted blocks and
        /// having good replicas at least equal or greater to minreplication
        /// Simulate rbw blocks by creating dummy copies, then a DN restart to detect
        /// those corrupted blocks asap.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestReplicationWhenBlockCorruption()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                conf.SetLong(DFSConfigKeys.DfsNamenodeReplicationPendingTimeoutSecKey, 1);
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
                FileSystem         fs     = cluster.GetFileSystem();
                FSDataOutputStream create = fs.Create(new Path("/test"));
                fs.SetReplication(new Path("/test"), (short)1);
                create.Write(new byte[1024]);
                create.Close();
                IList <FilePath> nonParticipatedNodeDirs = new AList <FilePath>();
                FilePath         participatedNodeDirs    = null;
                for (int i = 0; i < cluster.GetDataNodes().Count; i++)
                {
                    FilePath storageDir = cluster.GetInstanceStorageDir(i, 0);
                    string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
                    FilePath data_dir   = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                    if (data_dir.ListFiles().Length == 0)
                    {
                        nonParticipatedNodeDirs.AddItem(data_dir);
                    }
                    else
                    {
                        participatedNodeDirs = data_dir;
                    }
                }
                string     blockFile = null;
                FilePath[] listFiles = participatedNodeDirs.ListFiles();
                foreach (FilePath file in listFiles)
                {
                    if (file.GetName().StartsWith(Block.BlockFilePrefix) && !file.GetName().EndsWith(
                            "meta"))
                    {
                        blockFile = file.GetName();
                        foreach (FilePath file1 in nonParticipatedNodeDirs)
                        {
                            file1.Mkdirs();
                            new FilePath(file1, blockFile).CreateNewFile();
                            new FilePath(file1, blockFile + "_1000.meta").CreateNewFile();
                        }
                        break;
                    }
                }
                fs.SetReplication(new Path("/test"), (short)3);
                cluster.RestartDataNodes();
                // Let every DN detect the dummy copied blocks.
                cluster.WaitActive();
                cluster.TriggerBlockReports();
                DFSTestUtil.WaitReplication(fs, new Path("/test"), (short)3);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example 14
        /// <exception cref="System.IO.IOException"/>
        public virtual void RunBlockReaderLocalTest(TestBlockReaderLocal.BlockReaderLocalTest
                                                    test, bool checksum, long readahead)
        {
            Assume.AssumeThat(DomainSocket.GetLoadingFailureReason(), CoreMatchers.EqualTo(null));
            MiniDFSCluster    cluster = null;
            HdfsConfiguration conf    = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, !checksum);
            conf.SetLong(DFSConfigKeys.DfsBytesPerChecksumKey, TestBlockReaderLocal.BlockReaderLocalTest
                         .BytesPerChecksum);
            conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "CRC32C");
            conf.SetLong(DFSConfigKeys.DfsClientCacheReadahead, readahead);
            test.SetConfiguration(conf);
            FileInputStream   dataIn           = null;
            FileInputStream   metaIn           = null;
            Path              TestPath         = new Path("/a");
            long              RandomSeed       = 4567L;
            BlockReaderLocal  blockReaderLocal = null;
            FSDataInputStream fsIn             = null;

            byte[]           original = new byte[TestBlockReaderLocal.BlockReaderLocalTest.TestLength];
            FileSystem       fs       = null;
            ShortCircuitShm  shm      = null;
            RandomAccessFile raf      = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestBlockReaderLocal.BlockReaderLocalTest.TestLength,
                                       (short)1, RandomSeed);
                try
                {
                    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                }
                catch (Exception e)
                {
                    NUnit.Framework.Assert.Fail("unexpected InterruptedException during " + "waitReplication: "
                                                + e);
                }
                catch (TimeoutException e)
                {
                    NUnit.Framework.Assert.Fail("unexpected TimeoutException during " + "waitReplication: "
                                                + e);
                }
                fsIn = fs.Open(TestPath);
                IOUtils.ReadFully(fsIn, original, 0, TestBlockReaderLocal.BlockReaderLocalTest.TestLength);
                fsIn.Close();
                fsIn = null;
                ExtendedBlock     block             = DFSTestUtil.GetFirstBlock(fs, TestPath);
                FilePath          dataFile          = cluster.GetBlockFile(0, block);
                FilePath          metaFile          = cluster.GetBlockMetadataFile(0, block);
                ShortCircuitCache shortCircuitCache = ClientContext.GetFromConf(conf).GetShortCircuitCache();
                cluster.Shutdown();
                cluster = null;
                test.Setup(dataFile, checksum);
                FileInputStream[] streams = new FileInputStream[] { new FileInputStream(dataFile),
                                                                    new FileInputStream(metaFile) };
                dataIn = streams[0];
                metaIn = streams[1];
                ExtendedBlockId key = new ExtendedBlockId(block.GetBlockId(), block.GetBlockPoolId());
                raf = new RandomAccessFile(new FilePath(sockDir.GetDir().GetAbsolutePath(),
                                                        UUID.RandomUUID().ToString()), "rw");
                raf.SetLength(8192);
                FileInputStream shmStream = new FileInputStream(raf.GetFD());
                shm = new ShortCircuitShm(ShortCircuitShm.ShmId.CreateRandom(), shmStream);
                ShortCircuitReplica replica = new ShortCircuitReplica(
                    key, dataIn, metaIn, shortCircuitCache, Time.Now(),
                    shm.AllocAndRegisterSlot(ExtendedBlockId.FromExtendedBlock(block)));
                blockReaderLocal = new BlockReaderLocal.Builder(new DFSClient.Conf(conf))
                                   .SetFilename(TestPath.GetName())
                                   .SetBlock(block)
                                   .SetShortCircuitReplica(replica)
                                   .SetCachingStrategy(new CachingStrategy(false, readahead))
                                   .SetVerifyChecksum(checksum)
                                   .Build();
                dataIn = null;
                metaIn = null;
                test.DoTest(blockReaderLocal, original);
                // BlockReaderLocal should not alter the file position.
                NUnit.Framework.Assert.AreEqual(0, streams[0].GetChannel().Position());
                NUnit.Framework.Assert.AreEqual(0, streams[1].GetChannel().Position());
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                if (dataIn != null)
                {
                    dataIn.Close();
                }
                if (metaIn != null)
                {
                    metaIn.Close();
                }
                if (blockReaderLocal != null)
                {
                    blockReaderLocal.Close();
                }
                if (shm != null)
                {
                    shm.Free();
                }
                if (raf != null)
                {
                    raf.Close();
                }
            }
        }
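        // The harness above drives a per-case "test" object through two
        // hooks: Setup(dataFile, checksum) may pre-corrupt the raw block
        // file, and DoTest(reader, original) reads through the
        // BlockReaderLocal and compares against the expected bytes. The base
        // class is not part of this excerpt; a minimal sketch of the assumed
        // contract (the names mirror the calls above; the TestLength value
        // is a guess):
        private class BlockReaderLocalTest
        {
            public const int TestLength = 12345;

            // Hook for mutating the on-disk block file before it is read.
            public virtual void Setup(FilePath blockFile, bool usingChecksums)
            {
            }

            // Hook for exercising the reader and asserting on the data.
            public virtual void DoTest(BlockReaderLocal reader, byte[] original)
            {
            }
        }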
Example n. 15
        public virtual void TestGetBlocks()
        {
            Configuration Conf = new HdfsConfiguration();
            short         ReplicationFactor = (short)2;
            int           DefaultBlockSize  = 1024;
            Random        r = new Random();

            Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(ReplicationFactor
                                                                                   ).Build();

            try
            {
                cluster.WaitActive();
                // create a file with two blocks
                FileSystem         fs   = cluster.GetFileSystem();
                FSDataOutputStream @out = fs.Create(new Path("/tmp.txt"), ReplicationFactor);
                byte[]             data = new byte[1024];
                long fileLen            = 2 * DefaultBlockSize;
                long bytesToWrite       = fileLen;
                while (bytesToWrite > 0)
                {
                    r.NextBytes(data);
                    int bytesToWriteNext = (1024 < bytesToWrite) ? 1024 : (int)bytesToWrite;
                    @out.Write(data, 0, bytesToWriteNext);
                    bytesToWrite -= bytesToWriteNext;
                }
                @out.Close();
                // get blocks & data nodes
                IList <LocatedBlock> locatedBlocks;
                DatanodeInfo[]       dataNodes = null;
                bool notWritten;
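                // Replication may still be in progress right after close();
                // poll until both blocks report ReplicationFactor locations.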
                do
                {
                    DFSClient dfsclient = new DFSClient(NameNode.GetAddress(Conf), Conf);
                    locatedBlocks = dfsclient.GetNamenode().GetBlockLocations("/tmp.txt", 0, fileLen)
                                    .GetLocatedBlocks();
                    NUnit.Framework.Assert.AreEqual(2, locatedBlocks.Count);
                    notWritten = false;
                    for (int i = 0; i < 2; i++)
                    {
                        dataNodes = locatedBlocks[i].GetLocations();
                        if (dataNodes.Length != ReplicationFactor)
                        {
                            notWritten = true;
                            try
                            {
                                Sharpen.Thread.Sleep(10);
                            }
                            catch (Exception)
                            {
                            }
                            break;
                        }
                    }
                }while (notWritten);
                // get RPC client to namenode
                IPEndPoint       addr     = new IPEndPoint("localhost", cluster.GetNameNodePort());
                NamenodeProtocol namenode = NameNodeProxies.CreateProxy <NamenodeProtocol>(Conf, NameNode
                                                                                           .GetUri(addr)).GetProxy();
                // get blocks of size fileLen from dataNodes[0]
                BlocksWithLocations.BlockWithLocations[] locs;
                locs = namenode.GetBlocks(dataNodes[0], fileLen).GetBlocks();
                NUnit.Framework.Assert.AreEqual(locs.Length, 2);
                NUnit.Framework.Assert.AreEqual(locs[0].GetStorageIDs().Length, 2);
                NUnit.Framework.Assert.AreEqual(locs[1].GetStorageIDs().Length, 2);
                // get blocks of size BlockSize from dataNodes[0]
                locs = namenode.GetBlocks(dataNodes[0], DefaultBlockSize).GetBlocks();
                NUnit.Framework.Assert.AreEqual(locs.Length, 1);
                NUnit.Framework.Assert.AreEqual(locs[0].GetStorageIDs().Length, 2);
                // get blocks of size 1 from dataNodes[0]
                locs = namenode.GetBlocks(dataNodes[0], 1).GetBlocks();
                NUnit.Framework.Assert.AreEqual(locs.Length, 1);
                NUnit.Framework.Assert.AreEqual(locs[0].GetStorageIDs().Length, 2);
                // get blocks of size 0 from dataNodes[0]
                GetBlocksWithException(namenode, dataNodes[0], 0);
                // get blocks of size -1 from dataNodes[0]
                GetBlocksWithException(namenode, dataNodes[0], -1);
                // get blocks of size 2 from a non-existent datanode
                DatanodeInfo info = DFSTestUtil.GetDatanodeInfo("1.2.3.4");
                GetBlocksWithException(namenode, info, 2);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
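        // GetBlocksWithException is called above but not included in this
        // excerpt. A minimal sketch of the assumed behavior: issue the
        // GetBlocks RPC and assert that the namenode rejects the request
        // (the exact exception type caught here is an assumption):
        private static void GetBlocksWithException(NamenodeProtocol namenode, DatanodeInfo
                                                   datanode, long size)
        {
            bool gotException = false;
            try
            {
                namenode.GetBlocks(datanode, size);
            }
            catch (RemoteException)
            {
                // Expected: size <= 0 and unknown datanodes are rejected.
                gotException = true;
            }
            NUnit.Framework.Assert.IsTrue(gotException);
        }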
Example n. 16
        public virtual void TestReadSelectNonStaleDatanode()
        {
            HdfsConfiguration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsNamenodeAvoidStaleDatanodeForReadKey, true);
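            // 30 minutes in milliseconds: long enough that no datanode goes
            // stale on its own, so staleness below is only ever injected
            // explicitly through ResetLastUpdatesWithOffset.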
            long staleInterval = 30 * 1000 * 60;

            conf.SetLong(DFSConfigKeys.DfsNamenodeStaleDatanodeIntervalKey, staleInterval);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes
                                                                                   ).Racks(racks).Build();

            cluster.WaitActive();
            IPEndPoint addr   = new IPEndPoint("localhost", cluster.GetNameNodePort());
            DFSClient  client = new DFSClient(addr, conf);
            IList <DatanodeDescriptor> nodeInfoList = cluster.GetNameNode().GetNamesystem().GetBlockManager
                                                          ().GetDatanodeManager().GetDatanodeListForReport(HdfsConstants.DatanodeReportType
                                                                                                           .Live);

            NUnit.Framework.Assert.AreEqual("Unexpected number of datanodes", numDatanodes, nodeInfoList
                                            .Count);
            FileSystem         fileSys = cluster.GetFileSystem();
            FSDataOutputStream stm     = null;

            try
            {
                // do the writing but do not close the FSDataOutputStream
                // in order to mimic the ongoing writing
                Path fileName = new Path("/file1");
                stm = fileSys.Create(fileName, true, fileSys.GetConf().GetInt(CommonConfigurationKeys
                                                                              .IoFileBufferSizeKey, 4096), (short)3, blockSize);
                stm.Write(new byte[(blockSize * 3) / 2]);
                // We do not close the stream so that
                // the writing seems to be still ongoing
                stm.Hflush();
                LocatedBlocks blocks = client.GetNamenode().GetBlockLocations(fileName.ToString()
                                                                              , 0, blockSize);
                DatanodeInfo[] nodes = blocks.Get(0).GetLocations();
                NUnit.Framework.Assert.AreEqual(nodes.Length, 3);
                DataNode           staleNode     = null;
                DatanodeDescriptor staleNodeInfo = null;
                // stop the heartbeat of the first node
                staleNode = this.StopDataNodeHeartbeat(cluster, nodes[0].GetHostName());
                NUnit.Framework.Assert.IsNotNull(staleNode);
                // set the first node as stale
                staleNodeInfo = cluster.GetNameNode().GetNamesystem().GetBlockManager().GetDatanodeManager
                                    ().GetDatanode(staleNode.GetDatanodeId());
                DFSTestUtil.ResetLastUpdatesWithOffset(staleNodeInfo, -(staleInterval + 1));
                LocatedBlocks blocksAfterStale = client.GetNamenode().GetBlockLocations(fileName.
                                                                                        ToString(), 0, blockSize);
                DatanodeInfo[] nodesAfterStale = blocksAfterStale.Get(0).GetLocations();
                NUnit.Framework.Assert.AreEqual(nodesAfterStale.Length, 3);
                NUnit.Framework.Assert.AreEqual(nodesAfterStale[2].GetHostName(), nodes[0].GetHostName
                                                    ());
                // restart the staleNode's heartbeat
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(staleNode, false);
                // reset the first node as non-stale, so as to avoid two stale nodes
                DFSTestUtil.ResetLastUpdatesWithOffset(staleNodeInfo, 0);
                LocatedBlock lastBlock = client.GetLocatedBlocks(fileName.ToString(), 0, long.MaxValue
                                                                 ).GetLastLocatedBlock();
                nodes = lastBlock.GetLocations();
                NUnit.Framework.Assert.AreEqual(nodes.Length, 3);
                // stop the heartbeat of the first node for the last block
                staleNode = this.StopDataNodeHeartbeat(cluster, nodes[0].GetHostName());
                NUnit.Framework.Assert.IsNotNull(staleNode);
                // set the node as stale
                DatanodeDescriptor dnDesc = cluster.GetNameNode().GetNamesystem().GetBlockManager
                                                ().GetDatanodeManager().GetDatanode(staleNode.GetDatanodeId());
                DFSTestUtil.ResetLastUpdatesWithOffset(dnDesc, -(staleInterval + 1));
                LocatedBlock lastBlockAfterStale = client.GetLocatedBlocks(fileName.ToString(), 0
                                                                           , long.MaxValue).GetLastLocatedBlock();
                nodesAfterStale = lastBlockAfterStale.GetLocations();
                NUnit.Framework.Assert.AreEqual(nodesAfterStale.Length, 3);
                NUnit.Framework.Assert.AreEqual(nodesAfterStale[2].GetHostName(), nodes[0].GetHostName
                                                    ());
            }
            finally
            {
                if (stm != null)
                {
                    stm.Close();
                }
                client.Close();
                cluster.Shutdown();
            }
        }
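        // StopDataNodeHeartbeat is called above but not part of this excerpt.
        // A minimal sketch, assuming it finds the datanode by host name and
        // disables its heartbeats via the same DataNodeTestUtils hook that
        // re-enables them above:
        private DataNode StopDataNodeHeartbeat(MiniDFSCluster cluster, string hostName)
        {
            foreach (DataNode dn in cluster.GetDataNodes())
            {
                if (dn.GetDatanodeId().GetHostName().Equals(hostName))
                {
                    DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
                    return dn;
                }
            }
            return null;
        }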
Example n. 17
        public virtual void TestQuotaCommands()
        {
            Configuration conf = new HdfsConfiguration();
            // set a smaller block size so that we can test with smaller
            // Space quotas
            int DefaultBlockSize = 512;

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            DistributedFileSystem dfs = (DistributedFileSystem)fs;
            DFSAdmin admin            = new DFSAdmin(conf);

            try
            {
                int   fileLen     = 1024;
                short replication = 5;
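                // 1024 * 5 * 15 / 8 = 9600 bytes: room for one fully
                // replicated 1024-byte file (5120 bytes) but not for two.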
                long  spaceQuota  = fileLen * replication * 15 / 8;
                // 1: create a directory /test and set its quota to be 3
                Path parent = new Path("/test");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(parent));
                string[] args = new string[] { "-setQuota", "3", parent.ToString() };
                RunCommand(admin, args, false);
                //try setting space quota with a 'binary prefix'
                RunCommand(admin, false, "-setSpaceQuota", "2t", parent.ToString());
                NUnit.Framework.Assert.AreEqual(2L << 40, dfs.GetContentSummary(parent).GetSpaceQuota
                                                    ());
                // set the disk space quota to spaceQuota (9600 bytes)
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota), parent
                           .ToString());
                // 2: create directory /test/data0
                Path childDir0 = new Path(parent, "data0");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir0));
                // 3: create a file /test/datafile0
                Path childFile0 = new Path(parent, "datafile0");
                DFSTestUtil.CreateFile(fs, childFile0, fileLen, replication, 0);
                // 4: count -q /test
                ContentSummary c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 5: count -q /test/data0
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // check disk space consumed
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                // 6: create a directory /test/data1
                Path childDir1    = new Path(parent, "data1");
                bool hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(childDir1));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                OutputStream fout;
                // 7: create a file /test/datafile1
                Path childFile1 = new Path(parent, "datafile1");
                hasException = false;
                try
                {
                    fout = dfs.Create(childFile1);
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // 8: clear quota /test
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 9: clear quota /test/data0
                RunCommand(admin, new string[] { "-clrQuota", childDir0.ToString() }, false);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // 10: create a file /test/datafile1
                fout = dfs.Create(childFile1, replication);
                // 10.s: but writing fileLen bytes should result in a quota exception
                try
                {
                    fout.Write(new byte[fileLen]);
                    fout.Close();
                    NUnit.Framework.Assert.Fail();
                }
                catch (QuotaExceededException)
                {
                    IOUtils.CloseStream(fout);
                }
                //delete the file
                dfs.Delete(childFile1, false);
                // 9.s: clear diskspace quota
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), -1);
                // now creating childFile1 should succeed
                DFSTestUtil.CreateFile(dfs, childFile1, fileLen, replication, 0);
                // 11: set the quota of /test to be 1
                // HADOOP-5872 - we can set quota even if it is immediately violated
                args = new string[] { "-setQuota", "1", parent.ToString() };
                RunCommand(admin, args, false);
                RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(fileLen),
                           args[2]);
                // for space quota
                // 12: set the quota of /test/data0 to be 1
                args = new string[] { "-setQuota", "1", childDir0.ToString() };
                RunCommand(admin, args, false);
                // 13: not able to create a directory under data0
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(new Path(childDir0, "in")));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount() + c.GetFileCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 1);
                // 14a: set quota on a non-existent directory
                Path nonExistentPath = new Path("/test1");
                NUnit.Framework.Assert.IsFalse(dfs.Exists(nonExistentPath));
                args = new string[] { "-setQuota", "1", nonExistentPath.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "1g", nonExistentPath.ToString());
                // for space quota
                // 14b: set quota on a file
                NUnit.Framework.Assert.IsTrue(dfs.IsFile(childFile0));
                args[1] = childFile0.ToString();
                RunCommand(admin, args, true);
                // same for space quota
                RunCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
                // 15a: clear quota on a file
                args[0] = "-clrQuota";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 15b: clear quota on a non-existent directory
                args[1] = nonExistentPath.ToString();
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 16a: set the quota of /test to be 0
                args = new string[] { "-setQuota", "0", parent.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "0", args[2]);
                // 16b: set the quota of /test to be -1
                args[1] = "-1";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16c: set the quota of /test to be Long.MAX_VALUE+1
                args[1] = (long.MaxValue + 1L).ToString();
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16d: set the quota of /test to be a non integer
                args[1] = "33aa1.5";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16e: set space quota with a value larger than Long.MAX_VALUE
                RunCommand(admin, true, "-setSpaceQuota", (long.MaxValue / 1024 / 1024 + 1024) +
                           "m", args[2]);
                // 17:  setQuota by a non-administrator
                string username          = "******";
                UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(username, new
                                                                                     string[] { "groupyy" });
                string[] args2 = (string[])args.Clone();
                // need final ref for doAs block
                ugi.DoAs(new _PrivilegedExceptionAction_275(this, username, conf, args2, parent));
                // 18: clrQuota by a non-administrator
                // 19: clrQuota on the root directory ("/") should fail
                RunCommand(admin, true, "-clrQuota", "/");
                // 20: setQuota on the root directory ("/") should succeed
                RunCommand(admin, false, "-setQuota", "1000000", "/");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                // 2: create directory /test/data2
                Path childDir2 = new Path(parent, "data2");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir2));
                Path childFile2  = new Path(childDir2, "datafile2");
                Path childFile3  = new Path(childDir2, "datafile3");
                long spaceQuota2 = DefaultBlockSize * replication;
                long fileLen2    = DefaultBlockSize;
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                // clear space quota
                RunCommand(admin, false, "-clrSpaceQuota", childDir2.ToString());
                // create a file that fills the entire space quota
                DFSTestUtil.CreateFile(fs, childFile2, fileLen2, replication, 0);
                // now set space quota again. This should succeed
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile3, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // now test the same for root
                Path childFile4 = new Path("/", "datafile2");
                Path childFile5 = new Path("/", "datafile3");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                DFSTestUtil.CreateFile(fs, childFile4, fileLen2, replication, 0);
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile5, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                NUnit.Framework.Assert.AreEqual(4, cluster.GetNamesystem().GetFSDirectory().GetYieldCount
                                                    ());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
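        // RunCommand is used throughout the test above but is not part of
        // this excerpt. A minimal sketch, assuming it runs the DFSAdmin tool
        // and asserts on the exit code (negative when an error is expected):
        private void RunCommand(DFSAdmin admin, bool expectError, params string[] args)
        {
            RunCommand(admin, args, expectError);
        }

        private void RunCommand(DFSAdmin admin, string[] args, bool expectError)
        {
            int val = admin.Run(args);
            if (expectError)
            {
                NUnit.Framework.Assert.AreEqual(-1, val);
            }
            else
            {
                NUnit.Framework.Assert.IsTrue(val >= 0);
            }
        }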