Example #1
        public virtual void TestDeletionWithZeroSizeBlock()
        {
            Path foo = new Path("/foo");
            Path bar = new Path(foo, "bar");

            DFSTestUtil.CreateFile(hdfs, bar, Blocksize, Replication, 0L);
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s0");
            hdfs.Append(bar);
            INodeFile barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();

            BlockInfoContiguous[] blks = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
            ExtendedBlock previous = new ExtendedBlock(fsn.GetBlockPoolId(), blks[0]);

            cluster.GetNameNodeRpc().AddBlock(bar.ToString(), hdfs.GetClient().GetClientName(),
                                              previous, null, barNode.GetId(), null);
            SnapshotTestHelper.CreateSnapshot(hdfs, foo, "s1");
            barNode = fsdir.GetINode4Write(bar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(2, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
            NUnit.Framework.Assert.AreEqual(0, blks[1].GetNumBytes());
            hdfs.Delete(bar, true);
            Path sbar = SnapshotTestHelper.GetSnapshotPath(foo, "s1", bar.GetName());

            barNode = fsdir.GetINode(sbar.ToString()).AsFile();
            blks    = barNode.GetBlocks();
            NUnit.Framework.Assert.AreEqual(1, blks.Length);
            NUnit.Framework.Assert.AreEqual(Blocksize, blks[0].GetNumBytes());
        }
Example #2
        /// <exception cref="System.Exception"/>
        public virtual void TestTwoReplicaSameStorageTypeShouldNotSelect()
        {
            // HDFS-8147
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StorageTypes(
                new StorageType[][] {
                    new StorageType[] { StorageType.Disk, StorageType.Archive },
                    new StorageType[] { StorageType.Disk, StorageType.Disk },
                    new StorageType[] { StorageType.Disk, StorageType.Archive }
                }).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
                // write to DISK
                FSDataOutputStream @out = dfs.Create(new Path(file), (short)2);
                @out.WriteChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
                @out.Close();
                // verify before movement
                LocatedBlock  lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                StorageType[] storageTypes = lb.GetStorageTypes();
                foreach (StorageType storageType in storageTypes)
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Disk == storageType);
                }
                // move to ARCHIVE
                dfs.SetStoragePolicy(new Path(file), "COLD");
                int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", file });
                NUnit.Framework.Assert.AreEqual("Movement to ARCHIVE should be successful", 0, rc);
                // Wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                storageTypes = lb.GetStorageTypes();
                int archiveCount = 0;
                foreach (StorageType storageType_1 in storageTypes)
                {
                    if (StorageType.Archive == storageType_1)
                    {
                        archiveCount++;
                    }
                }
                NUnit.Framework.Assert.AreEqual(2, archiveCount);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #3
        public virtual void TestHedgedReadLoopTooManyTimes()
        {
            Configuration conf = new Configuration();
            int           numHedgedReadPoolThreads = 5;
            int           hedgedReadTimeoutMillis  = 50;

            conf.SetInt(DFSConfigKeys.DfsDfsclientHedgedReadThreadpoolSize, numHedgedReadPoolThreads);
            conf.SetLong(DFSConfigKeys.DfsDfsclientHedgedReadThresholdMillis, hedgedReadTimeoutMillis);
            conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 0);
            // Set up the InjectionHandler
            DFSClientFaultInjector.instance = Org.Mockito.Mockito.Mock <DFSClientFaultInjector>();
            DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
            int sleepMs = 100;

            Org.Mockito.Mockito.DoAnswer(new _Answer_296(hedgedReadTimeoutMillis, sleepMs))
                .When(injector).FetchFromDatanodeException();
            Org.Mockito.Mockito.DoAnswer(new _Answer_309(sleepMs))
                .When(injector).ReadFromDatanodeDelay();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Format(true).Build();
            DistributedFileSystem fileSys   = cluster.GetFileSystem();
            DFSClient             dfsClient = fileSys.GetClient();
            FSDataOutputStream    output    = null;
            DFSInputStream        input     = null;
            string filename = "/hedgedReadMaxOut.dat";

            try
            {
                Path file = new Path(filename);
                output = fileSys.Create(file, (short)2);
                byte[] data = new byte[64 * 1024];
                output.Write(data);
                output.Flush();
                output.Write(data);
                output.Flush();
                output.Write(data);
                output.Flush();
                output.Close();
                byte[] buffer = new byte[64 * 1024];
                input = dfsClient.Open(filename);
                input.Read(0, buffer, 0, 1024);
                input.Close();
                NUnit.Framework.Assert.AreEqual(3, input.GetHedgedReadOpsLoopNumForTesting());
            }
            catch (BlockMissingException)
            {
                NUnit.Framework.Assert.Fail("unexpected BlockMissingException during hedged read");
            }
            finally
            {
                Org.Mockito.Mockito.Reset(injector);
                IOUtils.Cleanup(null, input);
                IOUtils.Cleanup(null, output);
                fileSys.Close();
                cluster.Shutdown();
            }
        }
Example #4
        // Regression test for HDFS-8070
        /// <exception cref="System.Exception"/>
        public virtual void TestPreReceiptVerificationDfsClientCanDoScr()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
            Configuration conf = CreateShortCircuitConf("testPreReceiptVerificationDfsClientCanDoScr", sockDir);

            conf.SetLong(DFSConfigKeys.DfsClientReadShortcircuitStreamsCacheExpiryMsKey, 1000000000L);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            DistributedFileSystem fs = cluster.GetFileSystem();

            fs.GetClient().GetConf().brfFailureInjector =
                new TestShortCircuitCache.TestPreReceiptVerificationFailureInjector();
            Path TestPath1 = new Path("/test_file1");

            DFSTestUtil.CreateFile(fs, TestPath1, 4096, (short)1, unchecked ((int)(0xFADE2)));
            Path TestPath2 = new Path("/test_file2");

            DFSTestUtil.CreateFile(fs, TestPath2, 4096, (short)1, unchecked ((int)(0xFADE2)));
            DFSTestUtil.ReadFileBuffer(fs, TestPath1);
            DFSTestUtil.ReadFileBuffer(fs, TestPath2);
            ShortCircuitRegistry registry = cluster.GetDataNodes()[0].GetShortCircuitRegistry();

            registry.Visit(new _Visitor_780());
            cluster.Shutdown();
            sockDir.Close();
        }
Example #5
        public virtual void TestMoverFailedRetry()
        {
            // HDFS-8147
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsMoverRetryMaxAttemptsKey, "2");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StorageTypes(
                new StorageType[][] {
                    new StorageType[] { StorageType.Disk, StorageType.Archive },
                    new StorageType[] { StorageType.Disk, StorageType.Archive },
                    new StorageType[] { StorageType.Disk, StorageType.Archive }
                }).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string file = "/testMoverFailedRetry";
                // write to DISK
                FSDataOutputStream @out = dfs.Create(new Path(file), (short)2);
                @out.WriteChars("testMoverFailedRetry");
                @out.Close();
                // Delete the block file so the block move will fail with FileNotFoundException
                LocatedBlock lb = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                cluster.CorruptBlockOnDataNodesByDeletingBlockFile(lb.GetBlock());
                // move to ARCHIVE
                dfs.SetStoragePolicy(new Path(file), "COLD");
                int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", file });
                NUnit.Framework.Assert.AreEqual("Movement should fail after some retry",
                                                ExitStatus.IoException.GetExitCode(), rc);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #6
        public virtual void TestScheduleSameBlock()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string file = "/testScheduleSameBlock/file";
                {
                    FSDataOutputStream @out = dfs.Create(new Path(file));
                    @out.WriteChars("testScheduleSameBlock");
                    @out.Close();
                }
                Org.Apache.Hadoop.Hdfs.Server.Mover.Mover mover = NewMover(conf);
                mover.Init();
                Mover.Processor         processor    = new Mover.Processor(this);
                LocatedBlock            lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                IList <Mover.MLocation> locations    = Mover.MLocation.ToLocations(lb);
                Mover.MLocation         ml           = locations[0];
                Dispatcher.DBlock       db           = mover.NewDBlock(lb.GetBlock().GetLocalBlock(), locations);
                IList <StorageType>     storageTypes = new AList <StorageType>(
                    Arrays.AsList(StorageType.Default, StorageType.Default));
                NUnit.Framework.Assert.IsTrue(processor.ScheduleMoveReplica(db, ml, storageTypes));
                NUnit.Framework.Assert.IsFalse(processor.ScheduleMoveReplica(db, ml, storageTypes));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #7
        /// <exception cref="System.Exception"/>
        public virtual void TestShmBasedStaleness()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
            Configuration            conf    = CreateShortCircuitConf("testShmBasedStaleness", sockDir);
            MiniDFSCluster           cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            DistributedFileSystem fs    = cluster.GetFileSystem();
            ShortCircuitCache     cache = fs.GetClient().GetClientContext().GetShortCircuitCache();
            string TestFile    = "/test_file";
            int    TestFileLen = 8193;
            int    Seed        = unchecked ((int)(0xFADED));

            DFSTestUtil.CreateFile(fs, new Path(TestFile), TestFileLen, (short)1, Seed);
            FSDataInputStream fis = fs.Open(new Path(TestFile));
            int           first   = fis.Read();
            ExtendedBlock block   = DFSTestUtil.GetFirstBlock(fs, new Path(TestFile));

            NUnit.Framework.Assert.IsTrue(first != -1);
            cache.Accept(new _CacheVisitor_502(block));
            // Stop the DataNode.  This will close the socket keeping the client's
            // shared memory segment alive, and make it stale.
            cluster.GetDataNodes()[0].Shutdown();
            cache.Accept(new _CacheVisitor_518(block));
            cluster.Shutdown();
            sockDir.Close();
        }
Example #8
 /// <param name="blockSize"/>
 /// <param name="perVolumeCapacity">
 /// limit the capacity of each volume to the given
 /// value. If negative, then don't limit.
 /// </param>
 /// <exception cref="System.IO.IOException"/>
 private void StartCluster(int blockSize, int numDatanodes, long perVolumeCapacity)
 {
     InitConfig(blockSize);
     cluster = new MiniDFSCluster.Builder(conf).StoragesPerDatanode(StoragesPerDatanode)
                   .NumDataNodes(numDatanodes).Build();
     fs     = cluster.GetFileSystem();
     client = fs.GetClient();
     cluster.WaitActive();
     if (perVolumeCapacity >= 0)
     {
         foreach (DataNode dn in cluster.GetDataNodes())
         {
             foreach (FsVolumeSpi volume in dn.GetFSDataset().GetVolumes())
             {
                 ((FsVolumeImpl)volume).SetCapacityForTesting(perVolumeCapacity);
             }
         }
     }
     if (numDatanodes == 1)
     {
         IList <FsVolumeSpi> volumes = cluster.GetDataNodes()[0].GetFSDataset().GetVolumes();
         Assert.AssertThat(volumes.Count, IS.Is(1));
         singletonVolume = ((FsVolumeImpl)volumes[0]);
     }
 }
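The helper above wires up the cluster before a test runs; the sketch below shows how a test body might call it. It is illustrative only: the test name, block size, and per-volume cap are assumptions, not part of the original source.

 // Usage sketch (assumed names/values): 4 KB blocks, 2 datanodes,
 // and each volume artificially capped at 1 GB.
 public virtual void TestWithCappedVolumes()
 {
     long oneGb = 1024L * 1024 * 1024;
     StartCluster(4096, 2, oneGb);
     // ... write files and assert on per-volume usage here ...
 }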
Example #9
        public virtual void TestOpenFilesWithRename()
        {
            Path path = new Path("/test");

            DoWriteAndAbort(fs, path);
            // check for zero sized blocks
            Path fileWithEmptyBlock = new Path("/test/test/test4");

            fs.Create(fileWithEmptyBlock);
            NamenodeProtocols nameNodeRpc = cluster.GetNameNodeRpc();
            string            clientName  = fs.GetClient().GetClientName();

            // create one empty block
            nameNodeRpc.AddBlock(fileWithEmptyBlock.ToString(), clientName, null, null,
                                 INodeId.GrandfatherInodeId, null);
            fs.CreateSnapshot(path, "s2");
            fs.Rename(new Path("/test/test"), new Path("/test/test-renamed"));
            fs.Delete(new Path("/test/test-renamed"), true);
            NameNode nameNode = cluster.GetNameNode();

            NameNodeAdapter.EnterSafeMode(nameNode, false);
            NameNodeAdapter.SaveNamespace(nameNode);
            NameNodeAdapter.LeaveSafeMode(nameNode);
            cluster.RestartNameNode(true);
        }
Example #10
        public virtual void TestTC2ForAppend2()
        {
            Path p = new Path("/TC2/foo2");
            //a. Create file with one and a half block of data. Close file.
            int len1 = (int)(BlockSize + BlockSize / 2);

            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            AppendTestUtil.Check(fs, p, len1);
            //b. Reopen file to append a quarter block of data. Close file.
            int len2 = (int)BlockSize / 4;

            {
                FSDataOutputStream @out = fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock),
                                                    4096, null);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //c. Reopen file and read 1.75 blocks of data. Close file.
            AppendTestUtil.Check(fs, p, len1 + len2);
            IList <LocatedBlock> blocks = fs.GetClient().GetLocatedBlocks(p.ToString(), 0L).GetLocatedBlocks();

            NUnit.Framework.Assert.AreEqual(3, blocks.Count);
            NUnit.Framework.Assert.AreEqual(BlockSize, blocks[0].GetBlockSize());
            NUnit.Framework.Assert.AreEqual(BlockSize / 2, blocks[1].GetBlockSize());
            NUnit.Framework.Assert.AreEqual(BlockSize / 4, blocks[2].GetBlockSize());
        }
Example #11
        public virtual void SetupCluster()
        {
            conf = new Configuration();
            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            HAUtil.SetAllowStandbyReads(conf, true);
            fsHelper = new FileSystemTestHelper();
            string testRoot = fsHelper.GetTestRootDir();

            testRootDir = new FilePath(testRoot).GetAbsoluteFile();
            conf.Set(DFSConfigKeys.DfsEncryptionKeyProviderUri, JavaKeyStoreProvider.SchemeName
                     + "://file" + new Path(testRootDir.ToString(), "test.jks").ToUri());
            cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology.SimpleHATopology())
                          .NumDataNodes(1).Build();
            cluster.WaitActive();
            cluster.TransitionToActive(0);
            fs = (DistributedFileSystem)HATestUtil.ConfigureFailoverFs(cluster, conf);
            DFSTestUtil.CreateKey(TestKey, cluster, 0, conf);
            DFSTestUtil.CreateKey(TestKey, cluster, 1, conf);
            nn0       = cluster.GetNameNode(0);
            nn1       = cluster.GetNameNode(1);
            dfsAdmin0 = new HdfsAdmin(cluster.GetURI(0), conf);
            dfsAdmin1 = new HdfsAdmin(cluster.GetURI(1), conf);
            KeyProviderCryptoExtension nn0Provider = cluster.GetNameNode(0).GetNamesystem().GetProvider();

            fs.GetClient().SetKeyProvider(nn0Provider);
        }
Example #12
        public virtual void TestScheduleBlockWithinSameNode()
        {
            Configuration  conf    = new HdfsConfiguration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3)
                                         .StorageTypes(new StorageType[] { StorageType.Disk, StorageType.Archive })
                                         .Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                string file = "/testScheduleWithinSameNode/file";
                Path   dir  = new Path("/testScheduleWithinSameNode");
                dfs.Mkdirs(dir);
                // write to DISK
                dfs.SetStoragePolicy(dir, "HOT");
                {
                    FSDataOutputStream @out = dfs.Create(new Path(file));
                    @out.WriteChars("testScheduleWithinSameNode");
                    @out.Close();
                }
                //verify before movement
                LocatedBlock  lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                StorageType[] storageTypes = lb.GetStorageTypes();
                foreach (StorageType storageType in storageTypes)
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Disk == storageType);
                }
                // move to ARCHIVE
                dfs.SetStoragePolicy(dir, "COLD");
                int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", dir.ToString() });
                NUnit.Framework.Assert.AreEqual("Movement to ARCHIVE should be successful", 0, rc);
                // Wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                lb           = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
                storageTypes = lb.GetStorageTypes();
                foreach (StorageType storageType_1 in storageTypes)
                {
                    NUnit.Framework.Assert.IsTrue(StorageType.Archive == storageType_1);
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #13
 public virtual void StartUpCluster()
 {
     conf    = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).Build();
     fs      = cluster.GetFileSystem();
     client  = fs.GetClient();
     bpid    = cluster.GetNamesystem().GetBlockPoolId();
 }
Example #14
 /// <exception cref="System.Exception"/>
 private BlockLocation[] GetBlockLocations(Path p)
 {
     DFSTestUtil.WaitReplication(dfs, p, (short)3);
     BlockLocation[] locations = dfs.GetClient().GetBlockLocations(p.ToUri().GetPath(), 0, long.MaxValue);
     NUnit.Framework.Assert.IsTrue(locations.Length == 1 && locations[0].GetHosts().Length == 3);
     return(locations);
 }
Example #15
        public virtual void TestBlockMoveAcrossStorageInSameNode()
        {
            Configuration conf = new HdfsConfiguration();
            // create only one datanode in the cluster to verify movement within
            // datanode.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1)
                                         .StorageTypes(new StorageType[] { StorageType.Disk, StorageType.Archive })
                                         .Build();

            try
            {
                cluster.WaitActive();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
                DFSTestUtil.CreateFile(dfs, file, 1024, (short)1, 1024);
                LocatedBlocks locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
                // get the current located block
                LocatedBlock   locatedBlock = locatedBlocks.Get(0);
                ExtendedBlock  block        = locatedBlock.GetBlock();
                DatanodeInfo[] locations    = locatedBlock.GetLocations();
                NUnit.Framework.Assert.AreEqual(1, locations.Length);
                StorageType[] storageTypes = locatedBlock.GetStorageTypes();
                // current block should be written to DISK
                NUnit.Framework.Assert.IsTrue(storageTypes[0] == StorageType.Disk);
                DatanodeInfo source = locations[0];
                // move block to ARCHIVE by using same DataNodeInfo for source, proxy and
                // destination so that movement happens within datanode
                NUnit.Framework.Assert.IsTrue(ReplaceBlock(block, source, source, source, StorageType.Archive));
                // wait till namenode notified
                Sharpen.Thread.Sleep(3000);
                locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
                // get the current located block
                locatedBlock = locatedBlocks.Get(0);
                NUnit.Framework.Assert.AreEqual("Storage should be only one", 1,
                                                locatedBlock.GetLocations().Length);
                NUnit.Framework.Assert.IsTrue("Block should be moved to ARCHIVE",
                                              locatedBlock.GetStorageTypes()[0] == StorageType.Archive);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #16
        /// <summary>
        /// If ramDiskStorageLimit is &gt;=0, then RAM_DISK capacity is artificially
        /// capped.
        /// </summary>
        /// <remarks>
        /// If ramDiskStorageLimit is &gt;=0, then RAM_DISK capacity is artificially
        /// capped. If ramDiskStorageLimit &lt; 0 then it is ignored.
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        protected internal void StartUpCluster(bool hasTransientStorage, int ramDiskReplicaCapacity
                                               , bool useSCR, bool useLegacyBlockReaderLocal)
        {
            Configuration conf = new Configuration();

            conf.SetLong(DfsBlockSizeKey, BlockSize);
            conf.SetInt(DfsNamenodeLazyPersistFileScrubIntervalSec, LazyWriteFileScrubberIntervalSec);
            conf.SetLong(DfsHeartbeatIntervalKey, HeartbeatIntervalSec);
            conf.SetInt(DfsNamenodeHeartbeatRecheckIntervalKey, HeartbeatRecheckIntervalMsec);
            conf.SetInt(DfsDatanodeLazyWriterIntervalSec, LazyWriterIntervalSec);
            conf.SetInt(DfsDatanodeRamDiskLowWatermarkBytes, EvictionLowWatermark * BlockSize);
            if (useSCR)
            {
                conf.SetBoolean(DfsClientReadShortcircuitKey, true);
                // Do not share a client context across tests.
                conf.Set(DfsClientContext, UUID.RandomUUID().ToString());
                if (useLegacyBlockReaderLocal)
                {
                    conf.SetBoolean(DfsClientUseLegacyBlockreaderlocal, true);
                    conf.Set(DfsBlockLocalPathAccessUserKey,
                             UserGroupInformation.GetCurrentUser().GetShortUserName());
                }
                else
                {
                    sockDir = new TemporarySocketDirectory();
                    conf.Set(DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(),
                             this.GetType().Name + "._PORT.sock").GetAbsolutePath());
                }
            }
            long[] capacities = null;
            if (hasTransientStorage && ramDiskReplicaCapacity >= 0)
            {
                // Convert replica count to byte count, add some delta for .meta and
                // VERSION files.
                long ramDiskStorageLimit = ((long)ramDiskReplicaCapacity * BlockSize) + (BlockSize - 1);
                capacities = new long[] { ramDiskStorageLimit, -1 };
            }
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(ReplFactor)
                          .StorageCapacities(capacities)
                          .StorageTypes(hasTransientStorage
                                        ? new StorageType[] { StorageType.RamDisk, StorageType.Default }
                                        : null)
                          .Build();
            fs     = cluster.GetFileSystem();
            client = fs.GetClient();
            try
            {
                jmx = InitJMX();
            }
            catch (Exception e)
            {
                NUnit.Framework.Assert.Fail("Failed initialize JMX for testing: " + e);
            }
            Log.Info("Cluster startup complete");
        }
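A caller picks the four knobs per scenario; below is a minimal, hypothetical invocation. The test name and argument values are assumptions for illustration, not from the original source.

        // Usage sketch (assumed values): transient RAM_DISK storage, capacity for
        // 3 lazy-persist replicas, short-circuit reads via the domain-socket path
        // (not the legacy local block reader).
        public virtual void TestLazyPersistWithScr()
        {
            StartUpCluster(true, 3, true, false);
            // ... create LAZY_PERSIST files and assert on eviction behavior ...
        }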
Example #17
        // Regression test for HADOOP-11802
        /// <exception cref="System.Exception"/>
        public virtual void TestDataXceiverHandlesRequestShortCircuitShmFailure()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
            Configuration conf = CreateShortCircuitConf("testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);

            conf.SetLong(DFSConfigKeys.DfsClientReadShortcircuitStreamsCacheExpiryMsKey, 1000000000L);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            DistributedFileSystem fs = cluster.GetFileSystem();
            Path TestPath1           = new Path("/test_file1");

            DFSTestUtil.CreateFile(fs, TestPath1, 4096, (short)1, unchecked ((int)(0xFADE1)));
            Log.Info("Setting failure injector and performing a read which " + "should fail..."
                     );
            DataNodeFaultInjector failureInjector = Org.Mockito.Mockito.Mock <DataNodeFaultInjector
                                                                              >();

            Org.Mockito.Mockito.DoAnswer(new _Answer_710()).When(failureInjector).SendShortCircuitShmResponse
                ();
            DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;

            DataNodeFaultInjector.instance = failureInjector;
            try
            {
                // The first read will try to allocate a shared memory segment and slot.
                // The shared memory segment allocation will fail because of the failure
                // injector.
                DFSTestUtil.ReadFileBuffer(fs, TestPath1);
                NUnit.Framework.Assert.Fail("expected readFileBuffer to fail, but it succeeded.");
            }
            catch (Exception t)
            {
                GenericTestUtils.AssertExceptionContains("TCP reads were disabled for "
                                                         + "testing, but we failed to do a non-TCP read.", t);
            }
            CheckNumberOfSegmentsAndSlots(0, 0, cluster.GetDataNodes()[0].GetShortCircuitRegistry());
            Log.Info("Clearing failure injector and performing another read...");
            DataNodeFaultInjector.instance = prevInjector;
            fs.GetClient().GetClientContext().GetDomainSocketFactory().ClearPathMap();
            // The second read should succeed.
            DFSTestUtil.ReadFileBuffer(fs, TestPath1);
            // We should have added a new short-circuit shared memory segment and slot.
            CheckNumberOfSegmentsAndSlots(1, 1, cluster.GetDataNodes()[0].GetShortCircuitRegistry());
            cluster.Shutdown();
            sockDir.Close();
        }
Example #18
        /// <summary>Get the internet address of the currently-active NN.</summary>
        /// <remarks>
        /// Get the internet address of the currently-active NN. This should rarely be
        /// used, since callers of this method who connect directly to the NN using the
        /// resulting InetSocketAddress will not be able to connect to the active NN if
        /// a failover were to occur after this method has been called.
        /// </remarks>
        /// <param name="fs">the file system to get the active address of.</param>
        /// <returns>the internet address of the currently-active NN.</returns>
        /// <exception cref="System.IO.IOException">if an error occurs while resolving the active NN.
        ///     </exception>
        public static IPEndPoint GetAddressOfActive(FileSystem fs)
        {
            if (!(fs is DistributedFileSystem))
            {
                throw new ArgumentException("FileSystem " + fs + " is not a DFS.");
            }
            // force client address resolution.
            fs.Exists(new Path("/"));
            DistributedFileSystem dfs       = (DistributedFileSystem)fs;
            DFSClient             dfsClient = dfs.GetClient();

            return(RPC.GetServerAddress(dfsClient.GetNamenode()));
        }
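A minimal usage sketch, assuming a Configuration conf that points at an HA-enabled cluster; the variable names are illustrative and not part of the original source.

        // Usage sketch (assumed context): resolve the active NameNode's RPC endpoint.
        FileSystem fs = FileSystem.Get(conf);
        IPEndPoint active = GetAddressOfActive(fs);   // throws ArgumentException if fs is not a DFS
        System.Console.Out.WriteLine("Active NN at " + active);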
Example #19
        public virtual void TestCommitWithInvalidGenStamp()
        {
            Path file = new Path("/file");
            FSDataOutputStream @out = null;

            try
            {
                @out = dfs.Create(file, (short)1);
                INodeFile     fileNode = dir.GetINode4Write(file.ToString()).AsFile();
                ExtendedBlock previous = null;
                Block         newBlock = DFSTestUtil.AddBlockToFile(
                    cluster.GetDataNodes(), dfs, cluster.GetNamesystem(), file.ToString(),
                    fileNode, dfs.GetClient().GetClientName(), previous, 100);
                Block newBlockClone = new Block(newBlock);
                previous = new ExtendedBlock(cluster.GetNamesystem().GetBlockPoolId(), newBlockClone);
                previous.SetGenerationStamp(123);
                try
                {
                    dfs.GetClient().GetNamenode().Complete(file.ToString(),
                                                           dfs.GetClient().GetClientName(), previous, fileNode.GetId());
                    NUnit.Framework.Assert.Fail("should throw exception because invalid genStamp");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.ToString().Contains("Commit block with mismatching GS. NN has "
                                                                        + newBlock + ", client submits " + newBlockClone));
                }
                previous = new ExtendedBlock(cluster.GetNamesystem().GetBlockPoolId(), newBlock);
                bool complete = dfs.GetClient().GetNamenode().Complete(
                    file.ToString(), dfs.GetClient().GetClientName(), previous, fileNode.GetId());
                NUnit.Framework.Assert.IsTrue("should complete successfully", complete);
            }
            finally
            {
                IOUtils.Cleanup(null, @out);
            }
        }
Example #20
        public virtual void TestUnbufferClosesSockets()
        {
            Configuration conf = new Configuration();

            // Set a new ClientContext.  This way, we will have our own PeerCache,
            // rather than sharing one with other unit tests.
            conf.Set(DFSConfigKeys.DfsClientContext, "testUnbufferClosesSocketsContext");
            // Disable short-circuit reads.  With short-circuit, we wouldn't hold open a
            // TCP socket.
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, false);
            // Set a really long socket timeout to avoid test timing issues.
            conf.SetLong(DFSConfigKeys.DfsClientSocketTimeoutKey, 100000000L);
            conf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, 100000000L);
            MiniDFSCluster    cluster = null;
            FSDataInputStream stream  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                DistributedFileSystem dfs = (DistributedFileSystem)FileSystem.NewInstance(conf);
                Path TestPath             = new Path("/test1");
                DFSTestUtil.CreateFile(dfs, TestPath, 128, (short)1, 1);
                stream = dfs.Open(TestPath);
                // Read a byte.  This will trigger the creation of a block reader.
                stream.Seek(2);
                int b = stream.Read();
                NUnit.Framework.Assert.IsTrue(-1 != b);
                // The Peer cache should start off empty.
                PeerCache cache = dfs.GetClient().GetClientContext().GetPeerCache();
                NUnit.Framework.Assert.AreEqual(0, cache.Size());
                // Unbuffer should clear the block reader and return the socket to the
                // cache.
                stream.Unbuffer();
                stream.Seek(2);
                NUnit.Framework.Assert.AreEqual(1, cache.Size());
                int b2 = stream.Read();
                NUnit.Framework.Assert.AreEqual(b, b2);
            }
            finally
            {
                if (stream != null)
                {
                    IOUtils.Cleanup(null, stream);
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #21
        public virtual void TestAppend2AfterSoftLimit()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            //Set small soft-limit for lease
            long           softLimit = 1L;
            long           hardLimit = 9999999L;
            MiniDFSCluster cluster   = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.SetLeasePeriod(softLimit, hardLimit);
            cluster.WaitActive();
            DistributedFileSystem fs  = cluster.GetFileSystem();
            DistributedFileSystem fs2 = new DistributedFileSystem();

            fs2.Initialize(fs.GetUri(), conf);
            Path testPath = new Path("/testAppendAfterSoftLimit");

            byte[] fileContents = AppendTestUtil.InitBuffer(32);
            // create a new file without closing
            FSDataOutputStream @out = fs.Create(testPath);

            @out.Write(fileContents);
            //Wait for > soft-limit
            Sharpen.Thread.Sleep(250);
            try
            {
                FSDataOutputStream appendStream2 = fs2.Append(testPath,
                                                              EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock),
                                                              4096, null);
                appendStream2.Write(fileContents);
                appendStream2.Close();
                NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
                // make sure we now have 1 block since the first writer was revoked
                LocatedBlocks blks = fs.GetClient().GetLocatedBlocks(testPath.ToString(), 0L);
                NUnit.Framework.Assert.AreEqual(1, blks.GetLocatedBlocks().Count);
                foreach (LocatedBlock blk in blks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(fileContents.Length, blk.GetBlockSize());
                }
            }
            finally
            {
                fs.Close();
                fs2.Close();
                cluster.Shutdown();
            }
        }
Example #22
            /// <exception cref="System.IO.IOException"/>
            public virtual int Run(Configuration conf, IList <string> args)
            {
                string idString = StringUtils.PopFirstNonOption(args);

                if (idString == null)
                {
                    System.Console.Error.WriteLine("You must specify a directive ID to remove.");
                    return(1);
                }
                long id;

                try
                {
                    id = long.Parse(idString);
                }
                catch (FormatException)
                {
                    System.Console.Error.WriteLine("Invalid directive ID " + idString + ": expected "
                                                   + "a numeric value.");
                    return(1);
                }
                if (id <= 0)
                {
                    System.Console.Error.WriteLine("Invalid directive ID " + id + ": ids must " + "be greater than 0."
                                                   );
                    return(1);
                }
                if (!args.IsEmpty())
                {
                    System.Console.Error.WriteLine("Can't understand argument: " + args[0]);
                    System.Console.Error.WriteLine("Usage is " + GetShortUsage());
                    return(1);
                }
                DistributedFileSystem dfs = AdminHelper.GetDFS(conf);

                try
                {
                    dfs.GetClient().RemoveCacheDirective(id);
                    System.Console.Out.WriteLine("Removed cached directive " + id);
                }
                catch (IOException e)
                {
                    System.Console.Error.WriteLine(AdminHelper.PrettifyException(e));
                    return(2);
                }
                return(0);
            }
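The command's enclosing class is not shown in this excerpt; assuming a hypothetical RemoveDirectiveCommand wrapper around this Run method, it could be driven directly like this (all names and the directive ID are illustrative):

            // Usage sketch (hypothetical wrapper class; directive ID 42 is arbitrary):
            Configuration conf = new HdfsConfiguration();
            IList <string> args = new AList <string>(Arrays.AsList("42"));
            int exit = new RemoveDirectiveCommand().Run(conf, args);
            System.Console.Out.WriteLine("exit code: " + exit);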
Example #23
 /// <exception cref="System.Exception"/>
 private void WaitForAllReplicas(int expectedReplicaNum, Path file, DistributedFileSystem
                                 dfs)
 {
     for (int i = 0; i < 5; i++)
     {
         LocatedBlocks lbs = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0, BlockSize);
         LocatedBlock lb = lbs.Get(0);
         if (lb.GetLocations().Length >= expectedReplicaNum)
         {
             return;
         }
         else
         {
             Sharpen.Thread.Sleep(1000);
         }
     }
 }
Example #24
        /// <summary>Get a BlockReader for the given block.</summary>
        /// <exception cref="System.IO.IOException"/>
        public static BlockReader GetBlockReader(MiniDFSCluster cluster, LocatedBlock testBlock
                                                 , int offset, int lenToRead)
        {
            IPEndPoint    targetAddr = null;
            ExtendedBlock block      = testBlock.GetBlock();

            DatanodeInfo[] nodes = testBlock.GetLocations();
            targetAddr = NetUtils.CreateSocketAddr(nodes[0].GetXferAddr());
            DistributedFileSystem fs = cluster.GetFileSystem();

            return(new BlockReaderFactory(fs.GetClient().GetConf())
                   .SetInetSocketAddress(targetAddr)
                   .SetBlock(block)
                   .SetFileName(targetAddr.ToString() + ":" + block.GetBlockId())
                   .SetBlockToken(testBlock.GetBlockToken())
                   .SetStartOffset(offset)
                   .SetLength(lenToRead)
                   .SetVerifyChecksum(true)
                   .SetClientName("BlockReaderTestUtil")
                   .SetDatanodeInfo(nodes[0])
                   .SetClientCacheContext(ClientContext.GetFromConf(fs.GetConf()))
                   .SetCachingStrategy(CachingStrategy.NewDefaultStrategy())
                   .SetConfiguration(fs.GetConf())
                   .SetAllowShortCircuitLocalReads(true)
                   .SetRemotePeerFactory(new _RemotePeerFactory_196(fs))
                   .Build());
        }
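Callers obtain a LocatedBlock first and then pull bytes through the reader. A minimal sketch follows, assuming cluster and fs from the surrounding test utilities and that BlockReader exposes Read/Close as in the Hadoop API; the path and buffer size are illustrative.

            // Usage sketch (assumed context): read the first kilobyte of a file's first block.
            LocatedBlock testBlock = fs.GetClient().GetLocatedBlocks("/some/file", 0).Get(0);
            BlockReader  reader    = GetBlockReader(cluster, testBlock, 0, 1024);
            byte[] buf = new byte[1024];
            int nRead = reader.Read(buf, 0, buf.Length);   // may return fewer bytes than requested
            reader.Close();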
Example #25
            /// <exception cref="System.IO.IOException"/>
            public virtual int Run(Configuration conf, IList <string> args)
            {
                string path = StringUtils.PopOptionWithArgument("-path", args);

                if (path == null)
                {
                    System.Console.Error.WriteLine("Please specify the path with -path.\nUsage:" + GetLongUsage
                                                       ());
                    return(1);
                }
                DistributedFileSystem dfs = AdminHelper.GetDFS(conf);

                try
                {
                    HdfsFileStatus status = dfs.GetClient().GetFileInfo(path);
                    if (status == null)
                    {
                        System.Console.Error.WriteLine("File/Directory does not exist: " + path);
                        return(2);
                    }
                    byte storagePolicyId = status.GetStoragePolicy();
                    if (storagePolicyId == BlockStoragePolicySuite.IdUnspecified)
                    {
                        System.Console.Out.WriteLine("The storage policy of " + path + " is unspecified");
                        return(0);
                    }
                    BlockStoragePolicy[] policies = dfs.GetStoragePolicies();
                    foreach (BlockStoragePolicy p in policies)
                    {
                        if (p.GetId() == storagePolicyId)
                        {
                            System.Console.Out.WriteLine("The storage policy of " + path + ":\n" + p);
                            return(0);
                        }
                    }
                }
                catch (Exception e)
                {
                    System.Console.Error.WriteLine(AdminHelper.PrettifyException(e));
                    return(2);
                }
                System.Console.Error.WriteLine("Cannot identify the storage policy for " + path);
                return(2);
            }
Example #26
        /// <summary>Test unlinking a file whose blocks we are caching in the DFSClient.</summary>
        /// <remarks>
        /// Test unlinking a file whose blocks we are caching in the DFSClient.
        /// The DataNode will notify the DFSClient that the replica is stale via the
        /// ShortCircuitShm.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestUnlinkingReplicasInFileDescriptorCache()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
            Configuration conf = CreateShortCircuitConf("testUnlinkingReplicasInFileDescriptorCache", sockDir);

            // We don't want the CacheCleaner to time out short-circuit shared memory
            // segments during the test, so set the timeout really high.
            conf.SetLong(DFSConfigKeys.DfsClientReadShortcircuitStreamsCacheExpiryMsKey, 1000000000L);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            DistributedFileSystem fs    = cluster.GetFileSystem();
            ShortCircuitCache     cache = fs.GetClient().GetClientContext().GetShortCircuitCache();

            // The ClientShmManager starts off empty.
            cache.GetDfsClientShmManager().Visit(new _Visitor_556());
            Path TestPath    = new Path("/test_file");
            int  TestFileLen = 8193;
            int  Seed        = unchecked ((int)(0xFADE0));

            DFSTestUtil.CreateFile(fs, TestPath, TestFileLen, (short)1, Seed);
            byte[] contents = DFSTestUtil.ReadFileBuffer(fs, TestPath);
            byte[] expected = DFSTestUtil.CalculateFileContentsFromSeed(Seed, TestFileLen);
            NUnit.Framework.Assert.IsTrue(Arrays.Equals(contents, expected));
            // Loading this file brought the ShortCircuitReplica into our local
            // replica cache.
            DatanodeInfo datanode = new DatanodeInfo(cluster.GetDataNodes()[0].GetDatanodeId());

            cache.GetDfsClientShmManager().Visit(new _Visitor_577(datanode));
            // Remove the file whose blocks we just read.
            fs.Delete(TestPath, false);
            // Wait for the replica to be purged from the DFSClient's cache.
            GenericTestUtils.WaitFor(new _Supplier_593(this, cache, datanode), 10, 60000);
            // Check that all slots have been invalidated.
            cluster.Shutdown();
            sockDir.Close();
        }
Example #27
        public static void Setup()
        {
            string currentUser = Runtime.GetProperty("user.name");

            config.Set("fs.permissions.umask-mode", "u=rwx,g=,o=");
            config.Set(DefaultImpersonationProvider.GetTestProvider()
                           .GetProxySuperuserGroupConfKey(currentUser), "*");
            config.Set(DefaultImpersonationProvider.GetTestProvider()
                           .GetProxySuperuserIpConfKey(currentUser), "*");
            fsHelper = new FileSystemTestHelper();
            // Set up java key store
            string testRoot = fsHelper.GetTestRootDir();

            testRootDir = new FilePath(testRoot).GetAbsoluteFile();
            Path jksPath = new Path(testRootDir.ToString(), "test.jks");

            config.Set(DFSConfigKeys.DfsEncryptionKeyProviderUri, JavaKeyStoreProvider.SchemeName
                       + "://file" + jksPath.ToUri());
            ProxyUsers.RefreshSuperUserGroupsConfiguration(config);
            cluster = new MiniDFSCluster.Builder(config).NumDataNodes(1).Build();
            cluster.WaitActive();
            hdfs     = cluster.GetFileSystem();
            nn       = cluster.GetNameNode();
            dfsAdmin = new HdfsAdmin(cluster.GetURI(), config);
            // Use ephemeral ports in case tests are running in parallel
            config.SetInt("nfs3.mountd.port", 0);
            config.SetInt("nfs3.server.port", 0);
            // Start NFS with allowed.hosts set to "* rw"
            config.Set("dfs.nfs.exports.allowed.hosts", "* rw");
            nfs = new Org.Apache.Hadoop.Hdfs.Nfs.Nfs3.Nfs3(config);
            nfs.StartServiceInternal(false);
            nfsd = (RpcProgramNfs3)nfs.GetRpcProgram();
            hdfs.GetClient().SetKeyProvider(nn.GetNamesystem().GetProvider());
            DFSTestUtil.CreateKey(TestKey, cluster, config);
            // Mock SecurityHandler which returns system user.name
            securityHandler = Org.Mockito.Mockito.Mock <SecurityHandler>();
            Org.Mockito.Mockito.When(securityHandler.GetUser()).ThenReturn(currentUser);
            // Mock SecurityHandler which returns a dummy username "harry"
            securityHandlerUnpriviledged = Org.Mockito.Mockito.Mock <SecurityHandler>();
            Org.Mockito.Mockito.When(securityHandlerUnpriviledged.GetUser()).ThenReturn("harry");
        }
Example #28
        /// <summary>
        /// If ramDiskStorageLimit is &gt;=0, then RAM_DISK capacity is artificially
        /// capped.
        /// </summary>
        /// <remarks>
        /// If ramDiskStorageLimit is &gt;=0, then RAM_DISK capacity is artificially
        /// capped. If ramDiskStorageLimit &lt; 0 then it is ignored.
        /// </remarks>
        /// <exception cref="System.IO.IOException"/>
        protected internal void StartUpCluster(int numDataNodes, StorageType[] storageTypes
                                               , long ramDiskStorageLimit, bool useSCR)
        {
            Configuration conf = new Configuration();

            conf.SetLong(DfsBlockSizeKey, BlockSize);
            conf.SetInt(DfsNamenodeLazyPersistFileScrubIntervalSec, LazyWriteFileScrubberIntervalSec
                        );
            conf.SetLong(DfsHeartbeatIntervalKey, HeartbeatIntervalSec);
            conf.SetInt(DfsNamenodeHeartbeatRecheckIntervalKey, HeartbeatRecheckIntervalMsec);
            conf.SetInt(DfsDatanodeLazyWriterIntervalSec, LazyWriterIntervalSec);
            if (useSCR)
            {
                conf.SetBoolean(DfsClientReadShortcircuitKey, useSCR);
                conf.Set(DfsClientContext, UUID.RandomUUID().ToString());
                sockDir = new TemporarySocketDirectory();
                conf.Set(DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(),
                         this.GetType().Name + "._PORT.sock").GetAbsolutePath());
                conf.Set(DfsBlockLocalPathAccessUserKey,
                         UserGroupInformation.GetCurrentUser().GetShortUserName());
            }
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDataNodes)
                          .StorageTypes(storageTypes != null
                                        ? storageTypes
                                        : new StorageType[] { StorageType.Default, StorageType.Default })
                          .Build();
            fs     = cluster.GetFileSystem();
            client = fs.GetClient();
            // Artificially cap the storage capacity of the RAM_DISK volume.
            if (ramDiskStorageLimit >= 0)
            {
                IList <FsVolumeSpi> volumes = cluster.GetDataNodes()[0].GetFSDataset().GetVolumes();
                foreach (FsVolumeSpi volume in volumes)
                {
                    if (volume.GetStorageType() == StorageType.RamDisk)
                    {
                        ((FsVolumeImpl)volume).SetCapacityForTesting(ramDiskStorageLimit);
                    }
                }
            }
            Log.Info("Cluster startup complete");
        }
Example #29
        // Regression test for HDFS-7915
        /// <exception cref="System.Exception"/>
        public virtual void TestDataXceiverCleansUpSlotsOnFailure()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
            Configuration conf = CreateShortCircuitConf("testDataXceiverCleansUpSlotsOnFailure", sockDir);

            conf.SetLong(DFSConfigKeys.DfsClientReadShortcircuitStreamsCacheExpiryMsKey, 1000000000L);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            DistributedFileSystem fs = cluster.GetFileSystem();
            Path TestPath1           = new Path("/test_file1");
            Path TestPath2           = new Path("/test_file2");
            int  TestFileLen         = 4096;
            int  Seed = unchecked ((int)(0xFADE1));

            DFSTestUtil.CreateFile(fs, TestPath1, TestFileLen, (short)1, Seed);
            DFSTestUtil.CreateFile(fs, TestPath2, TestFileLen, (short)1, Seed);
            // The first read should allocate one shared memory segment and slot.
            DFSTestUtil.ReadFileBuffer(fs, TestPath1);
            // The second read should fail, and we should only have 1 segment and 1 slot
            // left.
            fs.GetClient().GetConf().brfFailureInjector =
                new TestShortCircuitCache.TestCleanupFailureInjector();
            try
            {
                DFSTestUtil.ReadFileBuffer(fs, TestPath2);
            }
            catch (Exception t)
            {
                GenericTestUtils.AssertExceptionContains("TCP reads were disabled for "
                                                         + "testing, but we failed to do a non-TCP read.", t);
            }
            CheckNumberOfSegmentsAndSlots(1, 1, cluster.GetDataNodes()[0].GetShortCircuitRegistry());
            cluster.Shutdown();
            sockDir.Close();
        }
Example #30
        public virtual void Setup()
        {
            conf     = new HdfsConfiguration();
            fsHelper = new FileSystemTestHelper();
            // Set up java key store
            string   testRoot    = fsHelper.GetTestRootDir();
            FilePath testRootDir = new FilePath(testRoot).GetAbsoluteFile();
            Path     jksPath     = new Path(testRootDir.ToString(), "test.jks");

            conf.Set(DFSConfigKeys.DfsEncryptionKeyProviderUri, JavaKeyStoreProvider.SchemeName
                     + "://file" + jksPath.ToUri());
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            Logger.GetLogger(typeof(EncryptionZoneManager)).SetLevel(Level.Trace);
            fs        = cluster.GetFileSystem();
            fsWrapper = new FileSystemTestWrapper(cluster.GetFileSystem());
            fcWrapper = new FileContextTestWrapper(FileContext.GetFileContext(cluster.GetURI(), conf));
            dfsAdmin = new HdfsAdmin(cluster.GetURI(), conf);
            // Need to set the client's KeyProvider to the NN's for JKS,
            // else the updates do not get flushed properly
            fs.GetClient().SetKeyProvider(cluster.GetNameNode().GetNamesystem().GetProvider());
            DFSTestUtil.CreateKey(TestKey, cluster, conf);
        }