public virtual void TestFallbackRead()
        {
            HdfsConfiguration conf     = InitZeroCopyTest();
            MiniDFSCluster    cluster  = null;
            Path TestPath              = new Path("/a");
            int  TestFileLength        = 16385;
            int  RandomSeed            = 23453;
            FSDataInputStream     fsIn = null;
            DistributedFileSystem fs   = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
                try
                {
                    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                }
                catch (TimeoutException e)
                {
                    NUnit.Framework.Assert.Fail("unexpected TimeoutException during waitReplication: "
                                                + e);
                }
                catch (Exception e)
                {
                    NUnit.Framework.Assert.Fail("unexpected InterruptedException during waitReplication: "
                                                + e);
                }
                fsIn = fs.Open(TestPath);
                byte[] original = new byte[TestFileLength];
                IOUtils.ReadFully(fsIn, original, 0, TestFileLength);
                fsIn.Close();
                fsIn = fs.Open(TestPath);
                TestFallbackImpl(fsIn, original);
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
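        // TestFallbackImpl is a helper defined elsewhere and not shown in this example.
        // A minimal sketch of what such a verifier might look like (an assumption, not
        // the actual implementation): re-read the reopened stream through the ordinary
        // read path and compare the bytes against the copy captured above.
        private static void TestFallbackImpl(FSDataInputStream fsIn, byte[] original)
        {
            byte[] again = new byte[original.Length];
            IOUtils.ReadFully(fsIn, again, 0, original.Length);   // plain, non-zero-copy read
            NUnit.Framework.Assert.AreEqual(original, again);     // must match byte-for-byte
        }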
Example #2
        /// <exception cref="System.Exception"/>
        public virtual void TestShmBasedStaleness()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
            Configuration            conf    = CreateShortCircuitConf("testShmBasedStaleness", sockDir);
            MiniDFSCluster           cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            DistributedFileSystem fs    = cluster.GetFileSystem();
            ShortCircuitCache     cache = fs.GetClient().GetClientContext().GetShortCircuitCache(
                );
            string TestFile    = "/test_file";
            int    TestFileLen = 8193;
            int    Seed        = unchecked ((int)(0xFADED));

            DFSTestUtil.CreateFile(fs, new Path(TestFile), TestFileLen, (short)1, Seed);
            FSDataInputStream fis = fs.Open(new Path(TestFile));
            int           first   = fis.Read();
            ExtendedBlock block   = DFSTestUtil.GetFirstBlock(fs, new Path(TestFile));

            NUnit.Framework.Assert.IsTrue(first != -1);
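            // _CacheVisitor_502 is an anonymous visitor defined elsewhere; presumably it
            // asserts that the shared-memory slot for the block is still valid (not
            // stale) at this point (an assumption; its body is not shown here).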
            cache.Accept(new _CacheVisitor_502(block));
            // Stop the DataNode.  This will close the socket keeping the client's
            // shared memory segment alive, and make it stale.
            cluster.GetDataNodes()[0].Shutdown();
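            // _CacheVisitor_518 presumably asserts the opposite: that the slot for the
            // block has now become stale (again an assumption).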
            cache.Accept(new _CacheVisitor_518(block));
            cluster.Shutdown();
            sockDir.Close();
        }
        public virtual void TestShortCircuitTraceHooks()
        {
            Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
            conf = new Configuration();
            conf.Set(DFSConfigKeys.DfsClientHtracePrefix + SpanReceiverHost.SpanReceiversConfSuffix,
                     typeof(TestTracing.SetSpanReceiver).FullName);
            conf.SetLong("dfs.blocksize", 100 * 1024);
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "testShortCircuitTraceHooks._PORT");
            conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "CRC32C");
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            dfs     = cluster.GetFileSystem();
            try
            {
                DFSTestUtil.CreateFile(dfs, TestPath, TestLength, (short)1, 5678L);
                TraceScope        ts     = Trace.StartSpan("testShortCircuitTraceHooks", Sampler.Always);
                FSDataInputStream stream = dfs.Open(TestPath);
                byte[]            buf    = new byte[TestLength];
                IOUtils.ReadFully(stream, buf, 0, TestLength);
                stream.Close();
                ts.Close();
                string[] expectedSpanNames = new string[] { "OpRequestShortCircuitAccessProto", "ShortCircuitShmRequestProto" };
                TestTracing.AssertSpanNamesFound(expectedSpanNames);
            }
            finally
            {
                dfs.Close();
                cluster.Shutdown();
            }
        }
        /// <summary>
        /// Test for the case where the client begins to read a long block, but doesn't
        /// read bytes off the stream quickly.
        /// </summary>
        /// <remarks>
        /// Test for the case where the client begins to read a long block, but doesn't
        /// read bytes off the stream quickly. The datanode should time out sending the
        /// chunks and the transceiver should die, even if it has a long keepalive.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestSlowReader()
        {
            // Set a client socket cache expiry time much longer than
            // the datanode-side expiration time.
            long          ClientExpiryMs = 600000L;
            Configuration clientConf     = new Configuration(conf);

            clientConf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, ClientExpiryMs
                               );
            clientConf.Set(DFSConfigKeys.DfsClientContext, "testSlowReader");
            DistributedFileSystem fs = (DistributedFileSystem)FileSystem.Get(cluster.GetURI()
                                                                             , clientConf);

            // Restart the DN with a shorter write timeout.
            MiniDFSCluster.DataNodeProperties props = cluster.StopDataNode(0);
            props.conf.SetInt(DFSConfigKeys.DfsDatanodeSocketWriteTimeoutKey, WriteTimeout);
            props.conf.SetInt(DFSConfigKeys.DfsDatanodeSocketReuseKeepaliveKey, 120000);
            NUnit.Framework.Assert.IsTrue(cluster.RestartDataNode(props, true));
            dn = cluster.GetDataNodes()[0];
            // Wait for heartbeats to avoid a startup race where we
            // try to write the block while the DN is still starting.
            cluster.TriggerHeartbeats();
            DFSTestUtil.CreateFile(fs, TestFile, 1024 * 1024 * 8L, (short)1, 0L);
            FSDataInputStream stm = fs.Open(TestFile);

            stm.Read();
            AssertXceiverCount(1);
            GenericTestUtils.WaitFor(new _Supplier_193(this), 500, 50000);
            // DN should time out in sendChunks, and this should force
            // the xceiver to exit.
            IOUtils.CloseStream(stm);
        }
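        // _Supplier_193 is an anonymous class that is not shown in this example. A
        // plausible sketch (an assumption based on the comment above about the xceiver
        // exiting) is a poll condition for GenericTestUtils.WaitFor that returns true
        // once the DataNode reports no active transceivers. The enclosing class name
        // and the GetXceiverCount() accessor are hypothetical.
        private sealed class _Supplier_193 : Supplier<bool>
        {
            private readonly TestDataTransferKeepalive _enclosing;

            public _Supplier_193(TestDataTransferKeepalive enclosing)
            {
                this._enclosing = enclosing;
            }

            public bool Get()
            {
                // True once the DN's transceiver thread has exited.
                return this._enclosing.dn.GetXceiverCount() == 0;
            }
        }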
Example #5
        public virtual void TestAppendLessThanChecksumChunk()
        {
            byte[]         buf     = new byte[1024];
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).NumDataNodes
                                         (1).Build();

            cluster.WaitActive();
            try
            {
                using (DistributedFileSystem fs = cluster.GetFileSystem())
                {
                    int  len1 = 200;
                    int  len2 = 300;
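                    // len1 + len2 = 500 bytes, which stays below the default 512-byte
                    // checksum chunk (io.bytes.per.checksum), so the append finishes
                    // inside a single, partially written chunk.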
                    Path p    = new Path("/foo");
                    FSDataOutputStream @out = fs.Create(p);
                    @out.Write(buf, 0, len1);
                    @out.Close();
                    @out = fs.Append(p);
                    @out.Write(buf, 0, len2);
                    // flush but leave open
                    @out.Hflush();
                    // read data to verify the replica's content and checksum are correct
                    FSDataInputStream @in = fs.Open(p);
                    int length            = @in.Read(0, buf, 0, len1 + len2);
                    NUnit.Framework.Assert.IsTrue(length > 0);
                    @in.Close();
                    @out.Close();
                }
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #6
        public virtual void TestOpenManyFilesViaTcp()
        {
            int           NumOpens = 500;
            Configuration conf     = new Configuration();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, false);
            MiniDFSCluster cluster = null;

            FSDataInputStream[] streams = new FSDataInputStream[NumOpens];
            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                Path TestPath             = new Path("/testFile");
                DFSTestUtil.CreateFile(dfs, TestPath, 131072, (short)1, 1);
                for (int i = 0; i < NumOpens; i++)
                {
                    streams[i] = dfs.Open(TestPath);
                    Log.Info("opening file " + i + "...");
                    NUnit.Framework.Assert.IsTrue(-1 != streams[i].Read());
                    streams[i].Unbuffer();
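                    // Unbuffer() drops the block reader and returns the TCP socket to
                    // the peer cache, so holding NumOpens streams open does not pin
                    // NumOpens sockets.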
                }
            }
            finally
            {
                foreach (FSDataInputStream stream in streams)
                {
                    IOUtils.Cleanup(null, stream);
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #7
        /// <summary>
        /// Tests the file length when we sync the file, restart the cluster, and the
        /// DataNodes have not yet reported to the NameNode.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
        {
            Configuration conf = new HdfsConfiguration();

            // create cluster
            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 512);
            MiniDFSCluster      cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            HdfsDataInputStream @in     = null;

            try
            {
                Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
                DistributedFileSystem dfs  = cluster.GetFileSystem();
                FSDataOutputStream    @out = dfs.Create(path);
                int fileLength             = 1030;
                @out.Write(new byte[fileLength]);
                @out.Hsync();
                cluster.RestartNameNode();
                cluster.WaitActive();
                @in = (HdfsDataInputStream)dfs.Open(path, 1024);
                // Verify the length when we just restart NN. DNs will register
                // immediately.
                NUnit.Framework.Assert.AreEqual(fileLength, @in.GetVisibleLength());
                cluster.ShutdownDataNodes();
                cluster.RestartNameNode(false);
                // This is just to ensure the NN has started.
                VerifyNNIsInSafeMode(dfs);
                try
                {
                    @in = (HdfsDataInputStream)dfs.Open(path);
                    NUnit.Framework.Assert.Fail("Expected IOException");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage().IndexOf("Name node is in safe mode"
                                                                                  ) >= 0);
                }
            }
            finally
            {
                if (null != @in)
                {
                    @in.Close();
                }
                cluster.Shutdown();
            }
        }
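        // VerifyNNIsInSafeMode is a helper defined elsewhere. A minimal sketch of what
        // it might look like (an assumption; it presumes the Sharpen'd
        // DistributedFileSystem exposes IsInSafeMode(), mirroring Java's
        // DistributedFileSystem#isInSafeMode()):
        private static void VerifyNNIsInSafeMode(DistributedFileSystem dfs)
        {
            NUnit.Framework.Assert.IsTrue("NN should be in safe mode", dfs.IsInSafeMode());
        }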
Example #8
        /// <summary>
        /// Check file content, reading as user
        /// <paramref name="readingUser"/>
        ///
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        internal static void CheckFileContent(URI uri, Path name, byte[] expected, int readOffset
                                              , string readingUser, Configuration conf, bool legacyShortCircuitFails)
        {
            // Ensure short circuit is enabled
            DistributedFileSystem fs = GetFileSystem(readingUser, uri, conf);
            ClientContext         clientContext = ClientContext.GetFromConf(conf);

            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsFalse(clientContext.GetDisableLegacyBlockReaderLocal());
            }
            FSDataInputStream stm = fs.Open(name);

            byte[] actual = new byte[expected.Length - readOffset];
            stm.ReadFully(readOffset, actual);
            CheckData(actual, readOffset, expected, "Read 2");
            stm.Close();
            // Now read using a different API.
            actual = new byte[expected.Length - readOffset];
            stm    = fs.Open(name);
            IOUtils.SkipFully(stm, readOffset);
            //Read a small number of bytes first.
            int nread = stm.Read(actual, 0, 3);

            nread += stm.Read(actual, nread, 2);
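            // 3 + 2 = 5 bytes read so far; the 517-byte read below is what carries the
            // stream across the default 512-byte checksum chunk boundary.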
            //Read across chunk boundary
            nread += stm.Read(actual, nread, 517);
            CheckData(actual, readOffset, expected, nread, "A few bytes");
            //Now read rest of it
            while (nread < actual.Length)
            {
                int nbytes = stm.Read(actual, nread, actual.Length - nread);
                if (nbytes < 0)
                {
                    throw new EOFException("End of file reached before reading fully.");
                }
                nread += nbytes;
            }
            CheckData(actual, readOffset, expected, "Read 3");
            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsTrue(clientContext.GetDisableLegacyBlockReaderLocal());
            }
            stm.Close();
        }
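        // CheckData is defined elsewhere. A minimal sketch of the two overloads used
        // above (an assumption inferred from the call sites; the message-first assert
        // overload follows the JUnit-style shim used throughout these examples):
        // verify that actual[i] == expected[from + i] for the first len bytes.
        internal static void CheckData(byte[] actual, int from, byte[] expected, string message)
        {
            CheckData(actual, from, expected, actual.Length, message);
        }

        internal static void CheckData(byte[] actual, int from, byte[] expected, int len,
                                       string message)
        {
            for (int i = 0; i < len; i++)
            {
                NUnit.Framework.Assert.AreEqual(message + " byte " + (from + i) + " differs",
                                                expected[from + i], actual[i]);
            }
        }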
Example #9
        /// <summary>Test to verify the race between finalizeBlock and Lease recovery</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRaceBetweenReplicaRecoveryAndFinalizeBlock()
        {
            // Stop the mocked DN started in startup().
            TearDown();
            Configuration conf = new HdfsConfiguration();

            conf.Set(DFSConfigKeys.DfsDatanodeXceiverStopTimeoutMillisKey, "1000");
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            try
            {
                cluster.WaitClusterUp();
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path path = new Path("/test");
                FSDataOutputStream @out = fs.Create(path);
                @out.WriteBytes("data");
                @out.Hsync();
                IList <LocatedBlock> blocks             = DFSTestUtil.GetAllBlocks(fs.Open(path));
                LocatedBlock         block              = blocks[0];
                DataNode             dataNode           = cluster.GetDataNodes()[0];
                AtomicBoolean        recoveryInitResult = new AtomicBoolean(true);
                Sharpen.Thread       recoveryThread     = new _Thread_612(block, dataNode, recoveryInitResult
                                                                          );
                recoveryThread.Start();
                try
                {
                    @out.Close();
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue("Writing should fail", e.Message.Contains("are bad. Aborting..."
                                                                                            ));
                }
                finally
                {
                    recoveryThread.Join();
                }
                NUnit.Framework.Assert.IsTrue("Recovery should be initiated successfully", recoveryInitResult
                                              .Get());
                dataNode.UpdateReplicaUnderRecovery(block.GetBlock(), block.GetBlock().GetGenerationStamp
                                                        () + 1, block.GetBlock().GetBlockId(), block.GetBlockSize());
            }
            finally
            {
                if (null != cluster)
                {
                    cluster.Shutdown();
                    cluster = null;
                }
            }
        }
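        // _Thread_612 is an anonymous thread class not shown in this example. A
        // plausible sketch (an assumption inferred from the constructor arguments and
        // the assertions above): a thread that initiates replica recovery on the
        // DataNode with a bumped generation stamp and records whether initialization
        // succeeded. The InitReplicaRecovery call mirrors Java's
        // DataNode#initReplicaRecovery() and is assumed here.
        private sealed class _Thread_612 : Sharpen.Thread
        {
            private readonly LocatedBlock block;
            private readonly DataNode dataNode;
            private readonly AtomicBoolean recoveryInitResult;

            public _Thread_612(LocatedBlock block, DataNode dataNode, AtomicBoolean recoveryInitResult)
            {
                this.block = block;
                this.dataNode = dataNode;
                this.recoveryInitResult = recoveryInitResult;
            }

            public override void Run()
            {
                try
                {
                    dataNode.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock(block
                                                     .GetBlock(), null, block.GetBlock().GetGenerationStamp() + 1));
                }
                catch (Exception)
                {
                    recoveryInitResult.Set(false);
                }
            }
        }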
Example #10
        public virtual void TestUnbufferClosesSockets()
        {
            Configuration conf = new Configuration();

            // Set a new ClientContext.  This way, we will have our own PeerCache,
            // rather than sharing one with other unit tests.
            conf.Set(DFSConfigKeys.DfsClientContext, "testUnbufferClosesSocketsContext");
            // Disable short-circuit reads.  With short-circuit, we wouldn't hold open a
            // TCP socket.
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, false);
            // Set a really long socket timeout to avoid test timing issues.
            conf.SetLong(DFSConfigKeys.DfsClientSocketTimeoutKey, 100000000L);
            conf.SetLong(DFSConfigKeys.DfsClientSocketCacheExpiryMsecKey, 100000000L);
            MiniDFSCluster    cluster = null;
            FSDataInputStream stream  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).Build();
                DistributedFileSystem dfs = (DistributedFileSystem)FileSystem.NewInstance(conf);
                Path TestPath             = new Path("/test1");
                DFSTestUtil.CreateFile(dfs, TestPath, 128, (short)1, 1);
                stream = dfs.Open(TestPath);
                // Read a byte.  This will trigger the creation of a block reader.
                stream.Seek(2);
                int b = stream.Read();
                NUnit.Framework.Assert.IsTrue(-1 != b);
                // The Peer cache should start off empty.
                PeerCache cache = dfs.GetClient().GetClientContext().GetPeerCache();
                NUnit.Framework.Assert.AreEqual(0, cache.Size());
                // Unbuffer should clear the block reader and return the socket to the
                // cache.
                stream.Unbuffer();
                stream.Seek(2);
                NUnit.Framework.Assert.AreEqual(1, cache.Size());
                int b2 = stream.Read();
                NUnit.Framework.Assert.AreEqual(b, b2);
            }
            finally
            {
                if (stream != null)
                {
                    IOUtils.Cleanup(null, stream);
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #11
        /// <summary>Test NN crash and client crash/stuck immediately after block allocation</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestOpenFileWhenNNAndClientCrashAfterAddBlock()
        {
            cluster.GetConfiguration(0).Set(DFSConfigKeys.DfsNamenodeSafemodeThresholdPctKey,
                                            "1.0f");
            string testData = "testData";

            // To make sure we write the full block before creating the dummy block at the NN.
            cluster.GetConfiguration(0).SetInt("io.bytes.per.checksum", testData.Length);
            cluster.RestartNameNode(0);
            try
            {
                cluster.WaitActive();
                cluster.TransitionToActive(0);
                cluster.TransitionToStandby(1);
                DistributedFileSystem dfs     = cluster.GetFileSystem(0);
                string             pathString = "/tmp1.txt";
                Path               filePath   = new Path(pathString);
                FSDataOutputStream create     = dfs.Create(filePath, FsPermission.GetDefault(), true,
                                                           1024, (short)3, testData.Length, null);
                create.Write(Sharpen.Runtime.GetBytesForString(testData));
                create.Hflush();
                long       fileId     = ((DFSOutputStream)create.GetWrappedStream()).GetFileId();
                FileStatus fileStatus = dfs.GetFileStatus(filePath);
                DFSClient  client     = DFSClientAdapter.GetClient(dfs);
                // add one dummy block at NN, but not write to DataNode
                ExtendedBlock previousBlock = DFSClientAdapter.GetPreviousBlock(client, fileId);
                DFSClientAdapter.GetNamenode(client).AddBlock(pathString, client.GetClientName(),
                                                              new ExtendedBlock(previousBlock), new DatanodeInfo[0], DFSClientAdapter.GetFileId
                                                                  ((DFSOutputStream)create.GetWrappedStream()), null);
                cluster.RestartNameNode(0, true);
                cluster.RestartDataNode(0);
                cluster.TransitionToActive(0);
                // let the block reports be processed.
                Sharpen.Thread.Sleep(2000);
                FSDataInputStream @is = dfs.Open(filePath);
                @is.Close();
                dfs.RecoverLease(filePath);
                // initiate recovery
                NUnit.Framework.Assert.IsTrue("Recovery also should be success", dfs.RecoverLease
                                                  (filePath));
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #12
        /// <summary>
        /// Check the file content, reading as user
        /// <paramref name="readingUser"/>
        ///
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        internal static void CheckFileContentDirect(URI uri, Path name, byte[] expected,
                                                    int readOffset, string readingUser, Configuration conf, bool legacyShortCircuitFails
                                                    )
        {
            // Ensure short circuit is enabled
            DistributedFileSystem fs            = GetFileSystem(readingUser, uri, conf);
            ClientContext         clientContext = ClientContext.GetFromConf(conf);

            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsTrue(clientContext.GetDisableLegacyBlockReaderLocal());
            }
            HdfsDataInputStream stm    = (HdfsDataInputStream)fs.Open(name);
            ByteBuffer          actual = ByteBuffer.AllocateDirect(expected.Length - readOffset);

            IOUtils.SkipFully(stm, readOffset);
            actual.Limit(3);
            //Read a small number of bytes first.
            int nread = stm.Read(actual);

            actual.Limit(nread + 2);
            nread += stm.Read(actual);
            // Read across chunk boundary
            actual.Limit(Math.Min(actual.Capacity(), nread + 517));
            nread += stm.Read(actual);
            CheckData(ArrayFromByteBuffer(actual), readOffset, expected, nread, "A few bytes"
                      );
            //Now read rest of it
            actual.Limit(actual.Capacity());
            while (actual.HasRemaining())
            {
                int nbytes = stm.Read(actual);
                if (nbytes < 0)
                {
                    throw new EOFException("End of file reached before reading fully.");
                }
                nread += nbytes;
            }
            CheckData(ArrayFromByteBuffer(actual), readOffset, expected, "Read 3");
            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsTrue(clientContext.GetDisableLegacyBlockReaderLocal());
            }
            stm.Close();
        }
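        // ArrayFromByteBuffer is a helper defined elsewhere. A minimal sketch (an
        // assumption): copy the buffer's contents from position 0 up to its capacity
        // without disturbing the caller's position and limit.
        private static byte[] ArrayFromByteBuffer(ByteBuffer buf)
        {
            ByteBuffer alt = buf.Duplicate();
            alt.Clear();                          // position = 0, limit = capacity
            byte[] arr = new byte[alt.Remaining()];
            alt.Get(arr);
            return arr;
        }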
Example #13
        /// <exception cref="System.Exception"/>
        private byte[] GetFileContentsUsingDfs(string fileName, int len)
        {
            FSDataInputStream @in = hdfs.Open(new Path(fileName));

            byte[] ret = new byte[len];
            @in.ReadFully(ret);
            try
            {
                @in.ReadByte();
                NUnit.Framework.Assert.Fail("expected end of file");
            }
            catch (EOFException)
            {
                // expected; unfortunately there is no associated message to check
            }
            @in.Close();
            return(ret);
        }
        /// <summary>
        /// Test that when we have an uncache request and the client refuses to release
        /// the replica for a long time, we will un-mlock it anyway.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestRevocation()
        {
            Assume.AssumeTrue(NativeCodeLoader.IsNativeCodeLoaded() && !Path.Windows);
            BlockReaderTestUtil.EnableHdfsCachingTracing();
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            Configuration conf = GetDefaultConf();

            // Set a really short revocation timeout.
            conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationTimeoutMs, 250L);
            // Poll very often
            conf.SetLong(DFSConfigKeys.DfsDatanodeCacheRevocationPollingMs, 2L);
            MiniDFSCluster cluster = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            DistributedFileSystem dfs = cluster.GetFileSystem();
            // Create and cache a file.
            string TestFile = "/test_file2";

            DFSTestUtil.CreateFile(dfs, new Path(TestFile), BlockSize, (short)1, unchecked ((int
                                                                                             )(0xcafe)));
            dfs.AddCachePool(new CachePoolInfo("pool"));
            long cacheDirectiveId = dfs.AddCacheDirective(new CacheDirectiveInfo.Builder().SetPool
                                                              ("pool").SetPath(new Path(TestFile)).SetReplication((short)1).Build());
            FsDatasetSpi <object> fsd = cluster.GetDataNodes()[0].GetFSDataset();

            DFSTestUtil.VerifyExpectedCacheUsage(BlockSize, 1, fsd);
            // Mmap the file.
            FSDataInputStream @in = dfs.Open(new Path(TestFile));
            ByteBuffer        buf = @in.Read(null, BlockSize, EnumSet.NoneOf <ReadOption>());

            // Attempt to uncache file.  The file should get uncached.
            Log.Info("removing cache directive {}", cacheDirectiveId);
            dfs.RemoveCacheDirective(cacheDirectiveId);
            Log.Info("finished removing cache directive {}", cacheDirectiveId);
            Sharpen.Thread.Sleep(1000);
            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            // Cleanup
            @in.ReleaseBuffer(buf);
            @in.Close();
            cluster.Shutdown();
        }
Example #15
        /// <exception cref="System.IO.IOException"/>
        public static void Check(DistributedFileSystem fs, Path p, int position, int length
                                 )
        {
            byte[] buf = new byte[length];
            int    i   = 0;

            try
            {
                FSDataInputStream @in = fs.Open(p);
                @in.Read(position, buf, 0, buf.Length);
                for (i = position; i < length + position; i++)
                {
                    NUnit.Framework.Assert.AreEqual(unchecked ((byte)i), buf[i - position]);
                }
                @in.Close();
            }
            catch (IOException ioe)
            {
                throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
            }
        }
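        // Check assumes the file was written with the position-indexed pattern
        // data[i] == (byte)i. A matching writer sketch (an assumption, shown for
        // illustration only):
        public static void WriteFile(DistributedFileSystem fs, Path p, int length)
        {
            byte[] buf = new byte[length];
            for (int i = 0; i < length; i++)
            {
                buf[i] = unchecked ((byte)i);
            }
            FSDataOutputStream @out = fs.Create(p);
            @out.Write(buf, 0, length);
            @out.Close();
        }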
Example #16
        /// <exception cref="System.Exception"/>
        private void VerifyFilesUnreadablebyHDFS(MiniDFSCluster cluster, Path root)
        {
            DistributedFileSystem fs    = cluster.GetFileSystem();
            Queue <Path>          paths = new List <Path>();

            paths.AddItem(root);
            while (!paths.IsEmpty())
            {
                Path       p    = paths.Poll();
                FileStatus stat = fs.GetFileStatus(p);
                if (!stat.IsDirectory())
                {
                    try
                    {
                        Log.Warn("\n\n ##Testing path [" + p + "]\n\n");
                        fs.Open(p);
                        NUnit.Framework.Assert.Fail("Super user should not be able to read [" + UserGroupInformation
                                                    .GetCurrentUser() + "] [" + p.GetName() + "]");
                    }
                    catch (AccessControlException e)
                    {
                        NUnit.Framework.Assert.IsTrue(e.Message.Contains("superuser is not allowed to perform this operation"
                                                                         ));
                    }
                    catch (Exception)
                    {
                        NUnit.Framework.Assert.Fail("Should get an AccessControlException here");
                    }
                }
                if (stat.IsDirectory())
                {
                    FileStatus[] ls = fs.ListStatus(p);
                    foreach (FileStatus f in ls)
                    {
                        paths.AddItem(f.GetPath());
                    }
                }
            }
        }
Example #17
        /// <exception cref="System.Exception"/>
        public virtual void TestSymlinkHdfsDisable()
        {
            Configuration conf = new HdfsConfiguration();

            // disable symlink resolution
            conf.SetBoolean(CommonConfigurationKeys.FsClientResolveRemoteSymlinksKey, false);
            // spin up minicluster, get dfs and filecontext
            MiniDFSCluster        cluster = new MiniDFSCluster.Builder(conf).Build();
            DistributedFileSystem dfs     = cluster.GetFileSystem();
            FileContext           fc      = FileContext.GetFileContext(cluster.GetURI(0), conf);
            // Create test files/links
            FileContextTestHelper helper = new FileContextTestHelper("/tmp/TestSymlinkHdfsDisable"
                                                                     );
            Path root   = helper.GetTestRootPath(fc);
            Path target = new Path(root, "target");
            Path link   = new Path(root, "link");

            DFSTestUtil.CreateFile(dfs, target, 4096, (short)1, unchecked ((int)(0xDEADDEAD)));
            fc.CreateSymlink(target, link, false);
            // Try to resolve links with FileSystem and FileContext
            try
            {
                fc.Open(link);
                NUnit.Framework.Assert.Fail("Expected error when attempting to resolve link");
            }
            catch (IOException e)
            {
                GenericTestUtils.AssertExceptionContains("resolution is disabled", e);
            }
            try
            {
                dfs.Open(link);
                NUnit.Framework.Assert.Fail("Expected error when attempting to resolve link");
            }
            catch (IOException e)
            {
                GenericTestUtils.AssertExceptionContains("resolution is disabled", e);
            }
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestManyClosedSocketsInCache()
        {
            // Make a small file
            Configuration clientConf = new Configuration(conf);

            clientConf.Set(DFSConfigKeys.DfsClientContext, "testManyClosedSocketsInCache");
            DistributedFileSystem fs = (DistributedFileSystem)FileSystem.Get(cluster.GetURI()
                                                                             , clientConf);
            PeerCache peerCache = ClientContext.GetFromConf(clientConf).GetPeerCache();

            DFSTestUtil.CreateFile(fs, TestFile, 1L, (short)1, 0L);
            // Insert a bunch of dead sockets in the cache, by opening
            // many streams concurrently, reading all of the data,
            // and then closing them.
            InputStream[] stms = new InputStream[5];
            try
            {
                for (int i = 0; i < stms.Length; i++)
                {
                    stms[i] = fs.Open(TestFile);
                }
                foreach (InputStream stm in stms)
                {
                    IOUtils.CopyBytes(stm, new IOUtils.NullOutputStream(), 1024);
                }
            }
            finally
            {
                IOUtils.Cleanup(null, stms);
            }
            NUnit.Framework.Assert.AreEqual(5, peerCache.Size());
            // Let all the xceivers time out
            Sharpen.Thread.Sleep(1500);
            AssertXceiverCount(0);
            // Client side still has the sockets cached
            NUnit.Framework.Assert.AreEqual(5, peerCache.Size());
            // Reading should not throw an exception.
            DFSTestUtil.ReadFile(fs, TestFile);
        }
Example #19
        /// <summary>
        /// Test that we cannot read a file beyond its snapshot length
        /// when accessing it via a snapshot path.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestSnapshotfileLength()
        {
            hdfs.Mkdirs(sub);
            int bytesRead;

            byte[]            buffer     = new byte[Blocksize * 8];
            int               origLen    = Blocksize + 1;
            int               toAppend   = Blocksize;
            FSDataInputStream fis        = null;
            FileStatus        fileStatus = null;
            // Create and write a file.
            Path file1 = new Path(sub, file1Name);

            DFSTestUtil.CreateFile(hdfs, file1, Blocksize, 0, Blocksize, Replication, Seed);
            DFSTestUtil.AppendFile(hdfs, file1, origLen);
            // Create a snapshot on the parent directory.
            hdfs.AllowSnapshot(sub);
            hdfs.CreateSnapshot(sub, snapshot1);
            Path         file1snap1  = SnapshotTestHelper.GetSnapshotPath(sub, snapshot1, file1Name);
            FileChecksum snapChksum1 = hdfs.GetFileChecksum(file1snap1);

            Assert.AssertThat("file and snapshot file checksums are not equal", hdfs.GetFileChecksum
                                  (file1), CoreMatchers.Is(snapChksum1));
            // Append to the file.
            FSDataOutputStream @out = hdfs.Append(file1);

            // Nothing has been appended yet. All checksums should still be equal.
            Assert.AssertThat("file and snapshot checksums (open for append) are not equal",
                              hdfs.GetFileChecksum(file1), CoreMatchers.Is(snapChksum1));
            Assert.AssertThat("snapshot checksum (post-open for append) has changed", hdfs.GetFileChecksum
                                  (file1snap1), CoreMatchers.Is(snapChksum1));
            try
            {
                AppendTestUtil.Write(@out, 0, toAppend);
                // Test reading from snapshot of file that is open for append
                byte[] dataFromSnapshot = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
                Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshot.Length, CoreMatchers.Is
                                      (origLen));
                // Verify that checksum didn't change
                Assert.AssertThat("snapshot file checksum (pre-close) has changed", hdfs.GetFileChecksum
                                      (file1), CoreMatchers.Is(snapChksum1));
                Assert.AssertThat("snapshot checksum (post-append) has changed", hdfs.GetFileChecksum
                                      (file1snap1), CoreMatchers.Is(snapChksum1));
            }
            finally
            {
                @out.Close();
            }
            Assert.AssertThat("file and snapshot file checksums (post-close) are equal", hdfs
                              .GetFileChecksum(file1), CoreMatchers.Not(snapChksum1));
            Assert.AssertThat("snapshot file checksum (post-close) has changed", hdfs.GetFileChecksum
                                  (file1snap1), CoreMatchers.Is(snapChksum1));
            // Make sure we can read the entire file via its non-snapshot path.
            fileStatus = hdfs.GetFileStatus(file1);
            Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen + toAppend));
            fis       = hdfs.Open(file1);
            bytesRead = fis.Read(0, buffer, 0, buffer.Length);
            Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen + toAppend));
            fis.Close();
            // Try to open the file via its snapshot path.
            fis        = hdfs.Open(file1snap1);
            fileStatus = hdfs.GetFileStatus(file1snap1);
            Assert.AssertThat(fileStatus.GetLen(), CoreMatchers.Is((long)origLen));
            // Make sure we can only read up to the snapshot length.
            bytesRead = fis.Read(0, buffer, 0, buffer.Length);
            Assert.AssertThat(bytesRead, CoreMatchers.Is(origLen));
            fis.Close();
            byte[] dataFromSnapshot_1 = DFSTestUtil.ReadFileBuffer(hdfs, file1snap1);
            Assert.AssertThat("Wrong data size in snapshot.", dataFromSnapshot_1.Length, CoreMatchers.Is
                                  (origLen));
        }
        public virtual void TestZeroCopyMmapCache()
        {
            HdfsConfiguration conf           = InitZeroCopyTest();
            MiniDFSCluster    cluster        = null;
            Path              TestPath       = new Path("/a");
            int               TestFileLength = 5 * BlockSize;
            int               RandomSeed     = 23453;
            string            Context        = "testZeroCopyMmapCacheContext";
            FSDataInputStream fsIn           = null;

            ByteBuffer[]          results = new ByteBuffer[] { null, null, null, null };
            DistributedFileSystem fs      = null;

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            fs = cluster.GetFileSystem();
            DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
            try
            {
                DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
            }
            catch (TimeoutException e)
            {
                NUnit.Framework.Assert.Fail("unexpected TimeoutException during waitReplication: "
                                            + e);
            }
            catch (Exception e)
            {
                NUnit.Framework.Assert.Fail("unexpected InterruptedException during waitReplication: "
                                            + e);
            }
            fsIn = fs.Open(TestPath);
            byte[] original = new byte[TestFileLength];
            IOUtils.ReadFully(fsIn, original, 0, TestFileLength);
            fsIn.Close();
            fsIn = fs.Open(TestPath);
            ShortCircuitCache cache = ClientContext.Get(Context, new DFSClient.Conf(conf)).GetShortCircuitCache
                                          ();

            cache.Accept(new TestEnhancedByteBufferAccess.CountingVisitor(0, 5, 5, 0));
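            // The CountingVisitor arguments appear to be (outstanding mmaps, replicas,
            // evictable replicas, evictable mmapped replicas), with -1 meaning "don't
            // check" (an assumption inferred from the checks later in this test). The
            // call above thus expects no mmaps yet, with all 5 block replicas cached
            // and evictable after the full sequential read.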
            results[0] = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
            fsIn.Seek(0);
            results[1] = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
            // The mmap should be of the first block of the file.
            ExtendedBlock firstBlock = DFSTestUtil.GetFirstBlock(fs, TestPath);

            cache.Accept(new _CacheVisitor_373(firstBlock));
            // The replica should not yet be evictable, since we have it open.
            // Read more blocks.
            results[2] = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
            results[3] = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums));
            // we should have 3 mmaps, 1 evictable
            cache.Accept(new TestEnhancedByteBufferAccess.CountingVisitor(3, 5, 2, 0));
            // After we close the cursors, the mmaps should be evictable for
            // a brief period of time.  Then, they should be closed (we're
            // using a very quick timeout)
            foreach (ByteBuffer buffer in results)
            {
                if (buffer != null)
                {
                    fsIn.ReleaseBuffer(buffer);
                }
            }
            fsIn.Close();
            GenericTestUtils.WaitFor(new _Supplier_407(cache), 10, 60000);
            cache.Accept(new TestEnhancedByteBufferAccess.CountingVisitor(0, -1, -1, -1));
            fs.Close();
            cluster.Shutdown();
        }
        /// <summary>
        /// Test that we can zero-copy read cached data even without disabling
        /// checksums.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestZeroCopyReadOfCachedData()
        {
            BlockReaderTestUtil.EnableShortCircuitShmTracing();
            BlockReaderTestUtil.EnableBlockReaderFactoryTracing();
            BlockReaderTestUtil.EnableHdfsCachingTracing();
            int  TestFileLength    = BlockSize;
            Path TestPath          = new Path("/a");
            int  RandomSeed        = 23453;
            HdfsConfiguration conf = InitZeroCopyTest();

            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitSkipChecksumKey, false);
            string Context = "testZeroCopyReadOfCachedData";

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            conf.SetLong(DFSConfigKeys.DfsDatanodeMaxLockedMemoryKey, DFSTestUtil.RoundUpToMultiple
                             (TestFileLength, (int)NativeIO.POSIX.GetCacheManipulator().GetOperatingSystemPageSize
                                 ()));
            MiniDFSCluster cluster = null;
            ByteBuffer     result  = null;
            ByteBuffer     result2 = null;

            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            cluster.WaitActive();
            FsDatasetSpi <object> fsd = cluster.GetDataNodes()[0].GetFSDataset();
            DistributedFileSystem fs  = cluster.GetFileSystem();

            DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
            DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
            byte[] original = DFSTestUtil.CalculateFileContentsFromSeed(RandomSeed, TestFileLength
                                                                        );
            // Prior to caching, the file can't be read via zero-copy
            FSDataInputStream fsIn = fs.Open(TestPath);

            try
            {
                result = fsIn.Read(null, TestFileLength / 2, EnumSet.NoneOf <ReadOption>());
                NUnit.Framework.Assert.Fail("expected UnsupportedOperationException");
            }
            catch (NotSupportedException)
            {
                // expected
            }
            // Cache the file
            fs.AddCachePool(new CachePoolInfo("pool1"));
            long directiveId = fs.AddCacheDirective(new CacheDirectiveInfo.Builder().SetPath(
                                                        TestPath).SetReplication((short)1).SetPool("pool1").Build());
            int numBlocks = (int)Math.Ceiling((double)TestFileLength / BlockSize);

            DFSTestUtil.VerifyExpectedCacheUsage(DFSTestUtil.RoundUpToMultiple(TestFileLength
                                                                               , BlockSize), numBlocks, cluster.GetDataNodes()[0].GetFSDataset());
            try
            {
                result = fsIn.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
                                            );
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray
                                         (result));
            // Test that files opened after the cache operation has finished
            // still get the benefits of zero-copy (regression test for HDFS-6086)
            FSDataInputStream fsIn2 = fs.Open(TestPath);

            try
            {
                result2 = fsIn2.Read(null, TestFileLength, EnumSet.NoneOf <ReadOption>());
            }
            catch (NotSupportedException)
            {
                NUnit.Framework.Assert.Fail("expected to be able to read cached file via zero-copy"
                                            );
            }
            Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray
                                         (result2));
            fsIn2.ReleaseBuffer(result2);
            fsIn2.Close();
            // check that the replica is anchored
            ExtendedBlock     firstBlock = DFSTestUtil.GetFirstBlock(fs, TestPath);
            ShortCircuitCache cache      = ClientContext.Get(Context, new DFSClient.Conf(conf)).GetShortCircuitCache
                                               ();

            WaitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
            // Uncache the replica
            fs.RemoveCacheDirective(directiveId);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, true, 1);
            fsIn.ReleaseBuffer(result);
            WaitForReplicaAnchorStatus(cache, firstBlock, false, false, 1);
            DFSTestUtil.VerifyExpectedCacheUsage(0, 0, fsd);
            fsIn.Close();
            fs.Close();
            cluster.Shutdown();
        }
        public virtual void TestClientMmapDisable()
        {
            HdfsConfiguration conf = InitZeroCopyTest();

            conf.SetBoolean(DFSConfigKeys.DfsClientMmapEnabled, false);
            MiniDFSCluster        cluster        = null;
            Path                  TestPath       = new Path("/a");
            int                   TestFileLength = 16385;
            int                   RandomSeed     = 23453;
            string                Context        = "testClientMmapDisable";
            FSDataInputStream     fsIn           = null;
            DistributedFileSystem fs             = null;

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            try
            {
                // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
                // mapped reads.
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
                DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                fsIn = fs.Open(TestPath);
                try
                {
                    fsIn.Read(null, 1, EnumSet.Of(ReadOption.SkipChecksums));
                    NUnit.Framework.Assert.Fail("expected zero-copy read to fail when client mmaps "
                                                + "were disabled.");
                }
                catch (NotSupportedException)
                {
                }
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
            fsIn    = null;
            fs      = null;
            cluster = null;
            try
            {
                // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0.  It should work.
                conf.SetBoolean(DFSConfigKeys.DfsClientMmapEnabled, true);
                conf.SetInt(DFSConfigKeys.DfsClientMmapCacheSize, 0);
                conf.Set(DFSConfigKeys.DfsClientContext, Context + ".1");
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, RandomSeed);
                DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                fsIn = fs.Open(TestPath);
                ByteBuffer buf = fsIn.Read(null, 1, EnumSet.Of(ReadOption.SkipChecksums));
                fsIn.ReleaseBuffer(buf);
                // Test EOF behavior
                IOUtils.SkipFully(fsIn, TestFileLength - 1);
                buf = fsIn.Read(null, 1, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(null, buf);
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
        public virtual void Test2GBMmapLimit()
        {
            Assume.AssumeTrue(BlockReaderTestUtil.ShouldTestLargeFiles());
            HdfsConfiguration conf = InitZeroCopyTest();
            long TestFileLength    = 2469605888L;

            conf.Set(DFSConfigKeys.DfsChecksumTypeKey, "NULL");
            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, TestFileLength);
            MiniDFSCluster cluster  = null;
            Path           TestPath = new Path("/a");
            string         Context  = "test2GBMmapLimit";

            conf.Set(DFSConfigKeys.DfsClientContext, Context);
            FSDataInputStream fsIn  = null;
            FSDataInputStream fsIn2 = null;
            ByteBuffer        buf1  = null;
            ByteBuffer        buf2  = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                DistributedFileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, unchecked ((int)(0xB
                                                                                                )));
                DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                fsIn = fs.Open(TestPath);
                buf1 = fsIn.Read(null, 1, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(1, buf1.Remaining());
                fsIn.ReleaseBuffer(buf1);
                buf1 = null;
                fsIn.Seek(2147483640L);
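                // 2147483640 + 7 = 2147483647 = int.MaxValue: a single zero-copy mmap
                // cannot cross the 2 GB boundary, so the 1024-byte request below should
                // return only 7 bytes.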
                buf1 = fsIn.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(7, buf1.Remaining());
                NUnit.Framework.Assert.AreEqual(int.MaxValue, buf1.Limit());
                fsIn.ReleaseBuffer(buf1);
                buf1 = null;
                NUnit.Framework.Assert.AreEqual(2147483647L, fsIn.GetPos());
                try
                {
                    buf1 = fsIn.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                    NUnit.Framework.Assert.Fail("expected UnsupportedOperationException");
                }
                catch (NotSupportedException)
                {
                    // expected; can't read past the 2GB boundary
                }
                fsIn.Close();
                fsIn = null;
                // Now create another file with normal-sized blocks, and verify we
                // can read past 2GB
                Path TestPath2 = new Path("/b");
                conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 268435456L);
                DFSTestUtil.CreateFile(fs, TestPath2, 1024 * 1024, TestFileLength, 268435456L, (short
                                                                                                )1, unchecked ((int)(0xA)));
                fsIn2 = fs.Open(TestPath2);
                fsIn2.Seek(2147483640L);
                buf2 = fsIn2.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(8, buf2.Remaining());
                NUnit.Framework.Assert.AreEqual(2147483648L, fsIn2.GetPos());
                fsIn2.ReleaseBuffer(buf2);
                buf2 = null;
                buf2 = fsIn2.Read(null, 1024, EnumSet.Of(ReadOption.SkipChecksums));
                NUnit.Framework.Assert.AreEqual(1024, buf2.Remaining());
                NUnit.Framework.Assert.AreEqual(2147484672L, fsIn2.GetPos());
                fsIn2.ReleaseBuffer(buf2);
                buf2 = null;
            }
            finally
            {
                if (buf1 != null)
                {
                    fsIn.ReleaseBuffer(buf1);
                }
                if (buf2 != null)
                {
                    fsIn2.ReleaseBuffer(buf2);
                }
                IOUtils.Cleanup(null, fsIn, fsIn2);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #24
        /// <exception cref="System.Exception"/>
        public virtual void HardLeaseRecoveryRestartHelper(bool doRename, int size)
        {
            if (size < 0)
            {
                size = AppendTestUtil.NextInt(FileSize + 1);
            }
            //create a file
            string fileStr = "/hardLeaseRecovery";

            AppendTestUtil.Log.Info("filestr=" + fileStr);
            Path filePath          = new Path(fileStr);
            FSDataOutputStream stm = dfs.Create(filePath, true, BufSize, ReplicationNum, BlockSize
                                                );

            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(fileStr));
            // write bytes into the file.
            AppendTestUtil.Log.Info("size=" + size);
            stm.Write(buffer, 0, size);
            string originalLeaseHolder = NameNodeAdapter.GetLeaseHolderForPath(cluster.GetNameNode
                                                                                   (), fileStr);

            NUnit.Framework.Assert.IsFalse("original lease holder should not be the NN", originalLeaseHolder
                                           .Equals(HdfsServerConstants.NamenodeLeaseHolder));
            // hflush file
            AppendTestUtil.Log.Info("hflush");
            stm.Hflush();
            // check visible length
            HdfsDataInputStream @in = (HdfsDataInputStream)dfs.Open(filePath);

            NUnit.Framework.Assert.AreEqual(size, @in.GetVisibleLength());
            @in.Close();
            if (doRename)
            {
                fileStr += ".renamed";
                Path renamedPath = new Path(fileStr);
                NUnit.Framework.Assert.IsTrue(dfs.Rename(filePath, renamedPath));
                filePath = renamedPath;
            }
            // kill the lease renewal thread
            AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
            dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
            // Make sure the DNs don't send a heartbeat for a while, so the blocks
            // won't actually get completed during lease recovery.
            foreach (DataNode dn in cluster.GetDataNodes())
            {
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
            }
            // set the hard limit to be 1 second
            cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
            // Make sure lease recovery begins.
            Sharpen.Thread.Sleep(HdfsServerConstants.NamenodeLeaseRecheckInterval * 2);
            CheckLease(fileStr, size);
            cluster.RestartNameNode(false);
            CheckLease(fileStr, size);
            // Let the DNs send heartbeats again.
            foreach (DataNode dn_1 in cluster.GetDataNodes())
            {
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn_1, false);
            }
            cluster.WaitActive();
            // set the hard limit to be 1 second, to initiate lease recovery.
            cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
            // wait for lease recovery to complete
            LocatedBlocks locatedBlocks;

            do
            {
                Sharpen.Thread.Sleep(ShortLeasePeriod);
                locatedBlocks = dfs.dfs.GetLocatedBlocks(fileStr, 0L, size);
            } while (locatedBlocks.IsUnderConstruction());
            NUnit.Framework.Assert.AreEqual(size, locatedBlocks.GetFileLength());
            // make sure that the client can't write data anymore.
            try
            {
                stm.Write('b');
                stm.Hflush();
                NUnit.Framework.Assert.Fail("Should not be able to flush after we've lost the lease"
                                            );
            }
            catch (IOException e)
            {
                Log.Info("Expceted exception on write/hflush", e);
            }
            try
            {
                stm.Close();
                NUnit.Framework.Assert.Fail("Should not be able to close after we've lost the lease"
                                            );
            }
            catch (IOException e)
            {
                Log.Info("Expected exception on close", e);
            }
            // verify data
            AppendTestUtil.Log.Info("File size is good. Now validating sizes from datanodes..."
                                    );
            AppendTestUtil.CheckFullFile(dfs, filePath, size, buffer, fileStr);
        }
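A helper like this is normally driven by thin wrapper tests that choose the doRename and size arguments; a hedged sketch of such drivers (the test names are illustrative, not taken from this listing):

        public virtual void TestHardLeaseRecovery()
        {
            // A negative size makes the helper pick a random size via AppendTestUtil.NextInt.
            HardLeaseRecoveryRestartHelper(false, -1);
        }

        public virtual void TestHardLeaseRecoveryWithRename()
        {
            HardLeaseRecoveryRestartHelper(true, -1);
        }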
Example #25
        public virtual void TestReplaceDatanodeOnFailure()
        {
            Configuration conf = new HdfsConfiguration();

            //always replace a datanode
            ReplaceDatanodeOnFailure.Write(ReplaceDatanodeOnFailure.Policy.Always, true, conf
                                           );
            string[] racks = new string[Replication];
            Arrays.Fill(racks, Rack0);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Racks(racks).NumDataNodes
                                         (Replication).Build();

            try
            {
                DistributedFileSystem fs = cluster.GetFileSystem();
                Path dir = new Path(Dir);
                TestReplaceDatanodeOnFailure.SlowWriter[] slowwriters = new TestReplaceDatanodeOnFailure.SlowWriter
                                                                        [10];
                for (int i = 1; i <= slowwriters.Length; i++)
                {
                    // create slow writers with different speeds
                    slowwriters[i - 1] = new TestReplaceDatanodeOnFailure.SlowWriter(fs, new Path(dir
                                                                                                  , "file" + i), i * 200L);
                }
                foreach (TestReplaceDatanodeOnFailure.SlowWriter s in slowwriters)
                {
                    s.Start();
                }
                // Let the slow writers write something.
                // Some of them are so slow that they may not have started yet.
                SleepSeconds(1);
                //start new datanodes
                cluster.StartDataNodes(conf, 2, true, null, new string[] { Rack1, Rack1 });
                //stop an old datanode
                cluster.StopDataNode(AppendTestUtil.NextInt(Replication));
                // Let the slow writers write for a few more seconds.
                // Everyone should have written something by then.
                SleepSeconds(5);
                //check replication and interrupt.
                foreach (TestReplaceDatanodeOnFailure.SlowWriter s_1 in slowwriters)
                {
                    s_1.CheckReplication();
                    s_1.InterruptRunning();
                }
                //close files
                foreach (TestReplaceDatanodeOnFailure.SlowWriter s_2 in slowwriters)
                {
                    s_2.JoinAndClose();
                }
                //Verify the file
                Log.Info("Verify the file");
                for (int i_1 = 0; i_1 < slowwriters.Length; i_1++)
                {
                    Log.Info(slowwriters[i_1].filepath + ": length=" + fs.GetFileStatus(slowwriters[i_1
                                                                                        ].filepath).GetLen());
                    FSDataInputStream @in = null;
                    try
                    {
                        @in = fs.Open(slowwriters[i_1].filepath);
                        int x;
                        for (int j = 0; (x = @in.Read()) != -1; j++)
                        {
                            NUnit.Framework.Assert.AreEqual(j, x);
                        }
                    }
                    finally
                    {
                        IOUtils.CloseStream(@in);
                    }
                }
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
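The test above pins the replace-datanode policy to Always via ReplaceDatanodeOnFailure.Write before building the cluster. A hedged sketch of selecting a different policy with the same writer; Policy.Default is an assumption about the enum's other members:

            // Sketch: choose a non-Always replacement policy; Policy.Default is assumed.
            Configuration conf = new HdfsConfiguration();
            ReplaceDatanodeOnFailure.Write(ReplaceDatanodeOnFailure.Policy.Default, true, conf);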
Example #26
        public virtual void TestConcat()
        {
            int               numFiles = 10;
            long              fileLen  = blockSize * 3;
            HdfsFileStatus    fStatus;
            FSDataInputStream stm;
            string            trg     = "/trg";
            Path              trgPath = new Path(trg);

            DFSTestUtil.CreateFile(dfs, trgPath, fileLen, ReplFactor, 1);
            fStatus = nn.GetFileInfo(trg);
            long trgLen    = fStatus.GetLen();
            long trgBlocks = nn.GetBlockLocations(trg, 0, trgLen).LocatedBlockCount();

            Path[]   files = new Path[numFiles];
            byte[][] bytes = new byte[numFiles + 1][];
            for (int k = 0; k < bytes.Length; k++)
            {
                bytes[k] = new byte[(int)fileLen];
            }
            LocatedBlocks[] lblocks = new LocatedBlocks[numFiles];
            long[]          lens    = new long[numFiles];
            stm = dfs.Open(trgPath);
            stm.ReadFully(0, bytes[0]);
            stm.Close();
            int i;

            for (i = 0; i < files.Length; i++)
            {
                files[i] = new Path("/file" + i);
                Path path = files[i];
                System.Console.Out.WriteLine("Creating file " + path);
                // make files with different content
                DFSTestUtil.CreateFile(dfs, path, fileLen, ReplFactor, i);
                fStatus = nn.GetFileInfo(path.ToUri().GetPath());
                lens[i] = fStatus.GetLen();
                NUnit.Framework.Assert.AreEqual(trgLen, lens[i]);
                // file of the same length.
                lblocks[i] = nn.GetBlockLocations(path.ToUri().GetPath(), 0, lens[i]);
                //read the file
                stm = dfs.Open(path);
                stm.ReadFully(0, bytes[i + 1]);
                //bytes[i][10] = 10;
                stm.Close();
            }
            // check permissions: try the operation with the "wrong" user
            UserGroupInformation user1 = UserGroupInformation.CreateUserForTesting("theDoctor"
                                                                                   , new string[] { "tardis" });
            DistributedFileSystem hdfs = (DistributedFileSystem)DFSTestUtil.GetFileSystemAs(user1
                                                                                            , conf);

            try
            {
                hdfs.Concat(trgPath, files);
                NUnit.Framework.Assert.Fail("Permission exception expected");
            }
            catch (IOException ie)
            {
                System.Console.Out.WriteLine("Got expected exception for permissions:" + ie.GetLocalizedMessage
                                                 ());
            }
            // expected
            // check count update
            ContentSummary cBefore = dfs.GetContentSummary(trgPath.GetParent());

            // re-sort the file array so that the INode ids are not in sorted order.
            for (int j = 0; j < files.Length / 2; j++)
            {
                Path tempPath = files[j];
                files[j] = files[files.Length - 1 - j];
                files[files.Length - 1 - j] = tempPath;
                byte[] tempBytes = bytes[1 + j];
                bytes[1 + j] = bytes[files.Length - 1 - j + 1];
                bytes[files.Length - 1 - j + 1] = tempBytes;
            }
            // now concatenate
            dfs.Concat(trgPath, files);
            // verify count
            ContentSummary cAfter = dfs.GetContentSummary(trgPath.GetParent());

            NUnit.Framework.Assert.AreEqual(cBefore.GetFileCount(), cAfter.GetFileCount() + files
                                            .Length);
            // verify total length and block count
            long totalLen    = trgLen;
            long totalBlocks = trgBlocks;

            for (i = 0; i < files.Length; i++)
            {
                totalLen    += lens[i];
                totalBlocks += lblocks[i].LocatedBlockCount();
            }
            System.Console.Out.WriteLine("total len=" + totalLen + "; totalBlocks=" + totalBlocks
                                         );
            fStatus = nn.GetFileInfo(trg);
            trgLen  = fStatus.GetLen();
            // new length
            // read the resulting file
            stm = dfs.Open(trgPath);
            byte[] byteFileConcat = new byte[(int)trgLen];
            stm.ReadFully(0, byteFileConcat);
            stm.Close();
            trgBlocks = nn.GetBlockLocations(trg, 0, trgLen).LocatedBlockCount();
            //verifications
            // 1. number of blocks
            NUnit.Framework.Assert.AreEqual(trgBlocks, totalBlocks);
            // 2. file lengths
            NUnit.Framework.Assert.AreEqual(trgLen, totalLen);
            // 3. removal of the src file
            foreach (Path p in files)
            {
                fStatus = nn.GetFileInfo(p.ToUri().GetPath());
                NUnit.Framework.Assert.IsNull("File " + p + " still exists", fStatus);
                // file shouldn't exist
                // try to create a file with the same name
                DFSTestUtil.CreateFile(dfs, p, fileLen, ReplFactor, 1);
            }
            // 4. content
            CheckFileContent(byteFileConcat, bytes);
            // add a small file (less than a block)
            Path smallFile = new Path("/sfile");
            int  sFileLen  = 10;

            DFSTestUtil.CreateFile(dfs, smallFile, sFileLen, ReplFactor, 1);
            dfs.Concat(trgPath, new Path[] { smallFile });
            fStatus = nn.GetFileInfo(trg);
            trgLen  = fStatus.GetLen();
            // new length
            // check number of blocks
            trgBlocks = nn.GetBlockLocations(trg, 0, trgLen).LocatedBlockCount();
            NUnit.Framework.Assert.AreEqual(trgBlocks, totalBlocks + 1);
            // and length
            NUnit.Framework.Assert.AreEqual(trgLen, totalLen + sFileLen);
        }
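The content-summary assertion in the middle of the test encodes concat's inode bookkeeping: concatenating N source files into one target removes N inodes, so fileCount(before) equals fileCount(after) + N. A minimal hedged sketch of that invariant in isolation (the paths are assumptions):

            // Sketch of the concat inode-count invariant; the paths are illustrative.
            ContentSummary before = dfs.GetContentSummary(new Path("/"));
            dfs.Concat(new Path("/trg"), new Path[] { new Path("/src1"), new Path("/src2") });
            ContentSummary after = dfs.GetContentSummary(new Path("/"));
            NUnit.Framework.Assert.AreEqual(before.GetFileCount(), after.GetFileCount() + 2);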
Example #27
        /// <summary>
        /// Starts a new cluster with the given Configuration; creates a file with the
        /// specified block_size and writes 10 equal sections into it, calling
        /// hflush/hsync after each write; throws an IOException in case of an error.
        /// </summary>
        /// <param name="conf">cluster configuration</param>
        /// <param name="fileName">of the file to be created and processed as required</param>
        /// <param name="block_size">value to be used for the file's creation</param>
        /// <param name="replicas">is the number of replicas</param>
        /// <param name="isSync">hsync or hflush</param>
        /// <param name="syncFlags">specify the semantic of the sync/flush</param>
        /// <exception cref="System.IO.IOException">in case of any errors</exception>
        public static void DoTheJob(Configuration conf, string fileName, long block_size,
                                    short replicas, bool isSync, EnumSet <HdfsDataOutputStream.SyncFlag> syncFlags)
        {
            byte[] fileContent;
            int    Sections = 10;

            fileContent = AppendTestUtil.InitBuffer(AppendTestUtil.FileSize);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(replicas).
                                     Build();
            // Make sure we work with DFS in order to utilize all its functionality
            DistributedFileSystem fileSystem = cluster.GetFileSystem();
            FSDataInputStream     @is;

            try
            {
                Path   path     = new Path(fileName);
                string pathName = new Path(fileSystem.GetWorkingDirectory(), path).ToUri().GetPath
                                      ();
                FSDataOutputStream stm = fileSystem.Create(path, false, 4096, replicas, block_size
                                                           );
                System.Console.Out.WriteLine("Created file " + fileName);
                int tenth    = AppendTestUtil.FileSize / Sections;
                int rounding = AppendTestUtil.FileSize - tenth * Sections;
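                // 'rounding' holds the leftover bytes when FileSize is not an exact
                // multiple of Sections; they are written after the loop below.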
                for (int i = 0; i < Sections; i++)
                {
                    System.Console.Out.WriteLine("Writing " + (tenth * i) + " to " + (tenth * (i + 1)
                                                                                      ) + " section to file " + fileName);
                    // write to the file
                    stm.Write(fileContent, tenth * i, tenth);
                    // Wait while hflush/hsync pushes all packets through the pipeline
                    if (isSync)
                    {
                        ((DFSOutputStream)stm.GetWrappedStream()).Hsync(syncFlags);
                    }
                    else
                    {
                        ((DFSOutputStream)stm.GetWrappedStream()).Hflush();
                    }
                    // Check file length if updatelength is required
                    if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.UpdateLength))
                    {
                        long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                        NUnit.Framework.Assert.AreEqual("File size doesn't match for hsync/hflush with updating the length"
                                                        , tenth * (i + 1), currentFileLength);
                    }
                    else
                    {
                        if (isSync && syncFlags.Contains(HdfsDataOutputStream.SyncFlag.EndBlock))
                        {
                            LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(pathName, 0);
                            NUnit.Framework.Assert.AreEqual(i + 1, blocks.GetLocatedBlocks().Count);
                        }
                    }
                    byte[] toRead   = new byte[tenth];
                    byte[] expected = new byte[tenth];
                    System.Array.Copy(fileContent, tenth * i, expected, 0, tenth);
                    // Open the same file for read. Need to create new reader after every write operation(!)
                    @is = fileSystem.Open(path);
                    @is.Seek(tenth * i);
                    int readBytes = @is.Read(toRead, 0, tenth);
                    System.Console.Out.WriteLine("Has read " + readBytes);
                    NUnit.Framework.Assert.IsTrue("Should've get more bytes", (readBytes > 0) && (readBytes
                                                                                                  <= tenth));
                    @is.Close();
                    CheckData(toRead, 0, readBytes, expected, "Partial verification");
                }
                System.Console.Out.WriteLine("Writing " + (tenth * Sections) + " to " + (tenth *
                                                                                         Sections + rounding) + " section to file " + fileName);
                stm.Write(fileContent, tenth * Sections, rounding);
                stm.Close();
                NUnit.Framework.Assert.AreEqual("File size doesn't match ", AppendTestUtil.FileSize
                                                , fileSystem.GetFileStatus(path).GetLen());
                AppendTestUtil.CheckFullFile(fileSystem, path, fileContent.Length, fileContent, "hflush()"
                                             );
            }
            finally
            {
                fileSystem.Close();
                cluster.Shutdown();
            }
        }
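A hedged example of invoking DoTheJob with length updates enabled; the file name and the AppendTestUtil.BlockSize constant are assumptions:

            // Illustrative call; the path and block-size constant are assumed names.
            Configuration conf = new HdfsConfiguration();
            DoTheJob(conf, "/hsyncUpdateLength", AppendTestUtil.BlockSize, (short)2, true,
                     EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));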
Example #28
        public virtual void TestMissingBlocksAlert()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                //minimize test delay
                conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, 0);
                conf.SetInt(DFSConfigKeys.DfsClientRetryWindowBase, 10);
                int fileLen = 10 * 1024;
                conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, fileLen / 2);
                //start a cluster with single datanode
                cluster = new MiniDFSCluster.Builder(conf).Build();
                cluster.WaitActive();
                BlockManager          bm  = cluster.GetNamesystem().GetBlockManager();
                DistributedFileSystem dfs = cluster.GetFileSystem();
                // create a normal file
                DFSTestUtil.CreateFile(dfs, new Path("/testMissingBlocksAlert/file1"), fileLen, (
                                           short)3, 0);
                Path corruptFile = new Path("/testMissingBlocks/corruptFile");
                DFSTestUtil.CreateFile(dfs, corruptFile, fileLen, (short)3, 0);
                // Corrupt the block
                ExtendedBlock block = DFSTestUtil.GetFirstBlock(dfs, corruptFile);
                NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(0, block));
                // read the file so that the corrupt block is reported to NN
                FSDataInputStream @in = dfs.Open(corruptFile);
                try
                {
                    @in.ReadFully(new byte[fileLen]);
                }
                catch (ChecksumException)
                {
                    // checksum error is expected.
                }
                @in.Close();
                Log.Info("Waiting for missing blocks count to increase...");
                while (dfs.GetMissingBlocksCount() <= 0)
                {
                    Sharpen.Thread.Sleep(100);
                }
                NUnit.Framework.Assert.IsTrue(dfs.GetMissingBlocksCount() == 1);
                NUnit.Framework.Assert.AreEqual(4, dfs.GetUnderReplicatedBlocksCount());
                NUnit.Framework.Assert.AreEqual(3, bm.GetUnderReplicatedNotMissingBlocks());
                MBeanServer mbs        = ManagementFactory.GetPlatformMBeanServer();
                ObjectName  mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo"
                                                        );
                NUnit.Framework.Assert.AreEqual(1, (long)mbs.GetAttribute(mxbeanName, "NumberOfMissingBlocks"));
                // now do the reverse: remove the file and expect the number of
                // missing blocks to go to zero
                dfs.Delete(corruptFile, true);
                Log.Info("Waiting for missing blocks count to be zero...");
                while (dfs.GetMissingBlocksCount() > 0)
                {
                    Sharpen.Thread.Sleep(100);
                }
                NUnit.Framework.Assert.AreEqual(2, dfs.GetUnderReplicatedBlocksCount());
                NUnit.Framework.Assert.AreEqual(2, bm.GetUnderReplicatedNotMissingBlocks());
                NUnit.Framework.Assert.AreEqual(0, (long)mbs.GetAttribute(mxbeanName, "NumberOfMissingBlocks"));
                Path replOneFile = new Path("/testMissingBlocks/replOneFile");
                DFSTestUtil.CreateFile(dfs, replOneFile, fileLen, (short)1, 0);
                ExtendedBlock replOneBlock = DFSTestUtil.GetFirstBlock(dfs, replOneFile);
                NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(0, replOneBlock));
                // read the file so that the corrupt block is reported to NN
                @in = dfs.Open(replOneFile);
                try
                {
                    @in.ReadFully(new byte[fileLen]);
                }
                catch (ChecksumException)
                {
                    // checksum error is expected.
                }
                @in.Close();
                NUnit.Framework.Assert.AreEqual(1, dfs.GetMissingReplOneBlocksCount());
                NUnit.Framework.Assert.AreEqual(1, (long)mbs.GetAttribute(mxbeanName, "NumberOfMissingBlocksWithReplicationFactorOne"));
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }