Example #1
        public virtual void TestRamDiskShortCircuitRead()
        {
            StartUpCluster(ReplFactor, new StorageType[] { StorageType.RamDisk, StorageType.Default }, 2 * BlockSize - 1, true);
            // 1 replica + delta, SCR read
            string MethodName = GenericTestUtils.GetMethodName();
            int    Seed       = unchecked ((int)(0xFADED));
            Path   path       = new Path("/" + MethodName + ".dat");

            MakeRandomTestFile(path, BlockSize, true, Seed);
            EnsureFileReplicasOnStorageType(path, StorageType.RamDisk);
            // Sleep for a short time to allow the lazy writer thread to do its job
            Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
            //assertThat(verifyReadRandomFile(path, BLOCK_SIZE, SEED), is(true));
            FSDataInputStream fis = fs.Open(path);

            // Verify SCR read counters
            try
            {
                byte[] buf = new byte[BufferLength];
                fis.Read(0, buf, 0, BufferLength);
                HdfsDataInputStream dfsis = (HdfsDataInputStream)fis;
                NUnit.Framework.Assert.AreEqual(BufferLength, dfsis.GetReadStatistics().GetTotalBytesRead
                                                    ());
                NUnit.Framework.Assert.AreEqual(BufferLength, dfsis.GetReadStatistics().GetTotalShortCircuitBytesRead
                                                    ());
            }
            finally
            {
                fis.Close();
                fis = null;
            }
        }
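The idiom to note above: fs.Open() returns a generic FSDataInputStream, and the HDFS-specific read counters only become reachable after a downcast to HdfsDataInputStream. A minimal sketch of that idiom on its own, assuming an initialized FileSystem `fs` and an existing HDFS `path` as in the fixture above:

            // Minimal sketch, assuming `fs` and `path` as in the fixture above.
            // The downcast is valid only for streams opened against HDFS.
            FSDataInputStream fis = fs.Open(path);
            try
            {
                byte[] buf = new byte[1024];
                fis.Read(0, buf, 0, buf.Length);
                HdfsDataInputStream dfsis = (HdfsDataInputStream)fis;
                long totalBytes = dfsis.GetReadStatistics().GetTotalBytesRead();
                long scrBytes   = dfsis.GetReadStatistics().GetTotalShortCircuitBytesRead();
            }
            finally
            {
                fis.Close();
            }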
Example #2
        /// <summary>
        /// Tests the file length when we hsync the file and then restart the
        /// cluster before the Datanodes have registered with the Namenode.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
        {
            Configuration conf = new HdfsConfiguration();

            // create cluster
            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 512);
            MiniDFSCluster      cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            HdfsDataInputStream @in     = null;

            try
            {
                Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
                DistributedFileSystem dfs  = cluster.GetFileSystem();
                FSDataOutputStream    @out = dfs.Create(path);
                int fileLength             = 1030;
                @out.Write(new byte[fileLength]);
                @out.Hsync();
                cluster.RestartNameNode();
                cluster.WaitActive();
                @in = (HdfsDataInputStream)dfs.Open(path, 1024);
                // Verify the length when we just restart NN. DNs will register
                // immediately.
                NUnit.Framework.Assert.AreEqual(fileLength, @in.GetVisibleLength());
                cluster.ShutdownDataNodes();
                cluster.RestartNameNode(false);
                // This is just for ensuring NN started.
                VerifyNNIsInSafeMode(dfs);
                try
                {
                    @in = (HdfsDataInputStream)dfs.Open(path);
                    NUnit.Framework.Assert.Fail("Expected IOException");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage().IndexOf("Name node is in safe mode"
                                                                                  ) >= 0);
                }
            }
            finally
            {
                if (null != @in)
                {
                    @in.Close();
                }
                cluster.Shutdown();
            }
        }
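The key API here is GetVisibleLength(): after Hsync() the written bytes are visible to new readers even though the file is still open for write, while the plain file-status length may lag. A hedged sketch of just that check, assuming `dfs` and `path` as in the example above:

            // Sketch, assuming `dfs` and `path` from the example above. Hsync()
            // makes the written bytes durable on the Datanodes, so a fresh
            // reader's visible length covers them before the file is closed.
            FSDataOutputStream @out = dfs.Create(path);
            @out.Write(new byte[1030]);
            @out.Hsync();
            HdfsDataInputStream @in = (HdfsDataInputStream)dfs.Open(path);
            try
            {
                NUnit.Framework.Assert.AreEqual(1030, @in.GetVisibleLength());
            }
            finally
            {
                @in.Close();
                // (the writer is left open here on purpose, as in the test)
            }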
Example #3
        /// <exception cref="System.IO.IOException"/>
        private bool CheckUnsupportedMethod(FileSystem fs, Path file, byte[] expected, int
                                            readOffset)
        {
            HdfsDataInputStream stm    = (HdfsDataInputStream)fs.Open(file);
            ByteBuffer          actual = ByteBuffer.AllocateDirect(expected.Length - readOffset);

            IOUtils.SkipFully(stm, readOffset);
            try
            {
                stm.Read(actual);
            }
            catch (NotSupportedException)
            {
                return(true);
            }
            return(false);
        }
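A ByteBuffer-based Read() is optional for the underlying block reader, so callers probe for NotSupportedException before committing to the direct-buffer path. A hypothetical caller of the helper above, assuming `fs`, `file` and the original contents `expected` are at hand:

            // Hypothetical caller of the probe above.
            if (CheckUnsupportedMethod(fs, file, expected, 0))
            {
                // The block reader rejected direct ByteBuffer reads; fall back
                // to the byte[]-based Read() overloads instead.
            }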
Example #4
        /// <summary>
        /// Check the file content, reading as user <paramref name="readingUser"/>.
        /// </summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        internal static void CheckFileContentDirect(URI uri, Path name, byte[] expected,
                                                    int readOffset, string readingUser, Configuration conf, bool legacyShortCircuitFails
                                                    )
        {
            // Ensure short circuit is enabled
            DistributedFileSystem fs            = GetFileSystem(readingUser, uri, conf);
            ClientContext         clientContext = ClientContext.GetFromConf(conf);

            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsTrue(clientContext.GetDisableLegacyBlockReaderLocal());
            }
            HdfsDataInputStream stm    = (HdfsDataInputStream)fs.Open(name);
            ByteBuffer          actual = ByteBuffer.AllocateDirect(expected.Length - readOffset);

            IOUtils.SkipFully(stm, readOffset);
            actual.Limit(3);
            //Read a small number of bytes first.
            int nread = stm.Read(actual);

            actual.Limit(nread + 2);
            nread += stm.Read(actual);
            // Read across chunk boundary
            actual.Limit(Math.Min(actual.Capacity(), nread + 517));
            nread += stm.Read(actual);
            CheckData(ArrayFromByteBuffer(actual), readOffset, expected, nread, "A few bytes"
                      );
            //Now read rest of it
            actual.Limit(actual.Capacity());
            while (actual.HasRemaining())
            {
                int nbytes = stm.Read(actual);
                if (nbytes < 0)
                {
                    throw new EOFException("End of file reached before reading fully.");
                }
                nread += nbytes;
            }
            CheckData(ArrayFromByteBuffer(actual), readOffset, expected, "Read 3");
            if (legacyShortCircuitFails)
            {
                NUnit.Framework.Assert.IsTrue(clientContext.GetDisableLegacyBlockReaderLocal());
            }
            stm.Close();
        }
Example #5
        //check the file
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        internal static void CheckFile(Path p, int expectedsize, Configuration conf)
        {
            //open the file with another user account
            string username = UserGroupInformation.GetCurrentUser().GetShortUserName() + "_"
                              + ++userCount;
            UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(username, new
                                                                                 string[] { "supergroup" });
            FileSystem          fs  = DFSTestUtil.GetFileSystemAs(ugi, conf);
            HdfsDataInputStream @in = (HdfsDataInputStream)fs.Open(p);

            //Check visible length
            NUnit.Framework.Assert.IsTrue(@in.GetVisibleLength() >= expectedsize);
            //Able to read?
            for (int i = 0; i < expectedsize; i++)
            {
                NUnit.Framework.Assert.AreEqual(unchecked ((byte)i), unchecked ((byte)@in.Read()));
            }
            @in.Close();
        }
Example #6
        /// <exception cref="System.IO.IOException"/>
        private void OnOpen(ChannelHandlerContext ctx)
        {
            string nnId                  = @params.NamenodeId();
            int    bufferSize            = @params.BufferSize();
            long   offset                = @params.Offset();
            long   length                = @params.Length();
            DefaultHttpResponse response = new DefaultHttpResponse(HttpVersion.Http11, HttpResponseStatus
                                                                   .Ok);
            HttpHeaders headers = response.Headers();

            // Allow the UI to access the file
            headers.Set(HttpHeaders.Names.AccessControlAllowMethods, HttpMethod.Get);
            headers.Set(HttpHeaders.Names.AccessControlAllowOrigin, "*");
            headers.Set(HttpHeaders.Names.ContentType, ApplicationOctetStream);
            headers.Set(HttpHeaders.Names.Connection, HttpHeaders.Values.Close);
            DFSClient           dfsclient = NewDfsClient(nnId, conf);
            HdfsDataInputStream @in       = dfsclient.CreateWrappedInputStream(dfsclient.Open(path,
                                                                                              bufferSize, true));

            @in.Seek(offset);
            long contentLength = @in.GetVisibleLength() - offset;

            if (length >= 0)
            {
                contentLength = Math.Min(contentLength, length);
            }
            InputStream data;

            if (contentLength >= 0)
            {
                headers.Set(HttpHeaders.Names.ContentLength, contentLength);
                data = new LimitInputStream(@in, contentLength);
            }
            else
            {
                data = @in;
            }
            ctx.Write(response);
            ctx.WriteAndFlush(new _ChunkedStream_221(dfsclient, data)).AddListener(ChannelFutureListener
                                                                                   .Close);
        }
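The Content-Length arithmetic above is the subtle part: the stream's visible length (not Namenode metadata) bounds what can be served, and a negative requested length means "to the end". The same arithmetic as a hypothetical helper (ComputeContentLength is not part of the handler; the name is illustrative):

        // Hypothetical helper mirroring the arithmetic above; a negative
        // requestedLength means "serve to the end of the visible data".
        private static long ComputeContentLength(HdfsDataInputStream @in, long offset,
                                                 long requestedLength)
        {
            long remaining = @in.GetVisibleLength() - offset;
            return requestedLength >= 0 ? Math.Min(remaining, requestedLength) : remaining;
        }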
Example #7
        public virtual void TestRamDiskEvictionWithShortCircuitReadHandle()
        {
            StartUpCluster(ReplFactor, new StorageType[] { StorageType.RamDisk, StorageType.Default }, (6 * BlockSize - 1), true);
            // 5 replica + delta, SCR.
            string MethodName = GenericTestUtils.GetMethodName();
            Path   path1      = new Path("/" + MethodName + ".01.dat");
            Path   path2      = new Path("/" + MethodName + ".02.dat");
            int    Seed       = unchecked ((int)(0xFADED));

            MakeRandomTestFile(path1, BlockSize, true, Seed);
            EnsureFileReplicasOnStorageType(path1, StorageType.RamDisk);
            // Sleep for a short time to allow the lazy writer thread to do its job.
            // However the block replica should not be evicted from RAM_DISK yet.
            Sharpen.Thread.Sleep(3 * LazyWriterIntervalSec * 1000);
            // No eviction should happen as the free ratio is below the threshold
            FSDataInputStream fis = fs.Open(path1);

            try
            {
                // Keep an open read handle to path1 while creating path2
                byte[] buf = new byte[BufferLength];
                fis.Read(0, buf, 0, BufferLength);
                // Create the 2nd file that will trigger RAM_DISK eviction.
                MakeTestFile(path2, BlockSize * 2, true);
                EnsureFileReplicasOnStorageType(path2, StorageType.RamDisk);
                // Ensure path1 is still readable from the open SCR handle.
                fis.Read(fis.GetPos(), buf, 0, BufferLength);
                HdfsDataInputStream dfsis = (HdfsDataInputStream)fis;
                NUnit.Framework.Assert.AreEqual(2 * BufferLength, dfsis.GetReadStatistics().GetTotalBytesRead
                                                    ());
                NUnit.Framework.Assert.AreEqual(2 * BufferLength, dfsis.GetReadStatistics().GetTotalShortCircuitBytesRead
                                                    ());
            }
            finally
            {
                IOUtils.CloseQuietly(fis);
            }
            // After the open handle is closed, path1 should be evicted to DISK.
            TriggerBlockReport();
            EnsureFileReplicasOnStorageType(path1, StorageType.Default);
        }
        /// <exception cref="System.Exception"/>
        private void TestStatistics(bool isShortCircuit)
        {
            Assume.AssumeTrue(DomainSocket.GetLoadingFailureReason() == null);
            HdfsConfiguration        conf    = new HdfsConfiguration();
            TemporarySocketDirectory sockDir = null;

            if (isShortCircuit)
            {
                DFSInputStream.tcpReadsDisabledForTesting = true;
                sockDir = new TemporarySocketDirectory();
                conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, new FilePath(sockDir.GetDir(), "TestStatisticsForLocalRead.%d.sock"
                                                                            ).GetAbsolutePath());
                conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
                DomainSocket.DisableBindPathValidation();
            }
            else
            {
                conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, false);
            }
            MiniDFSCluster    cluster    = null;
            Path              TestPath   = new Path("/a");
            long              RandomSeed = 4567L;
            FSDataInputStream fsIn       = null;

            byte[]     original = new byte[TestBlockReaderLocal.BlockReaderLocalTest.TestLength];
            FileSystem fs       = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestBlockReaderLocal.BlockReaderLocalTest.TestLength
                                       , (short)1, RandomSeed);
                try
                {
                    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                }
                catch (Exception e)
                {
                    NUnit.Framework.Assert.Fail("unexpected InterruptedException during " + "waitReplication: "
                                                + e);
                }
                catch (TimeoutException e)
                {
                    NUnit.Framework.Assert.Fail("unexpected TimeoutException during " + "waitReplication: "
                                                + e);
                }
                fsIn = fs.Open(TestPath);
                IOUtils.ReadFully(fsIn, original, 0, TestBlockReaderLocal.BlockReaderLocalTest.TestLength
                                  );
                HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
                NUnit.Framework.Assert.AreEqual(TestBlockReaderLocal.BlockReaderLocalTest.TestLength
                                                , dfsIn.GetReadStatistics().GetTotalBytesRead());
                NUnit.Framework.Assert.AreEqual(TestBlockReaderLocal.BlockReaderLocalTest.TestLength
                                                , dfsIn.GetReadStatistics().GetTotalLocalBytesRead());
                if (isShortCircuit)
                {
                    NUnit.Framework.Assert.AreEqual(TestBlockReaderLocal.BlockReaderLocalTest.TestLength
                                                    , dfsIn.GetReadStatistics().GetTotalShortCircuitBytesRead());
                }
                else
                {
                    NUnit.Framework.Assert.AreEqual(0, dfsIn.GetReadStatistics().GetTotalShortCircuitBytesRead
                                                        ());
                }
                fsIn.Close();
                fsIn = null;
            }
            finally
            {
                DFSInputStream.tcpReadsDisabledForTesting = false;
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
                if (sockDir != null)
                {
                    sockDir.Close();
                }
            }
        }
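The short-circuit branch of this test doubles as a recipe for enabling SCR on the client: a Unix domain socket path plus the short-circuit flag. Pulled out as a sketch (the socket path is illustrative, and DisableBindPathValidation() is a test-only relaxation):

            // Sketch of the short-circuit-read client configuration used above.
            HdfsConfiguration conf = new HdfsConfiguration();
            conf.Set(DFSConfigKeys.DfsDomainSocketPathKey, "/tmp/scr.%d.sock");
            conf.SetBoolean(DFSConfigKeys.DfsClientReadShortcircuitKey, true);
            DomainSocket.DisableBindPathValidation();   // test-only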
Example #9
        /// <exception cref="Javax.Servlet.ServletException"/>
        /// <exception cref="System.IO.IOException"/>
        protected override void DoGet(HttpServletRequest request, HttpServletResponse response
                                      )
        {
            string path        = ServletUtil.GetDecodedPath(request, "/streamFile");
            string rawPath     = ServletUtil.GetRawPath(request, "/streamFile");
            string filename    = JspHelper.ValidatePath(path);
            string rawFilename = JspHelper.ValidatePath(rawPath);

            if (filename == null)
            {
                response.SetContentType("text/plain");
                PrintWriter @out = response.GetWriter();
                @out.Write("Invalid input");
                return;
            }
            Enumeration <string> reqRanges = request.GetHeaders("Range");

            if (reqRanges != null && !reqRanges.MoveNext())
            {
                reqRanges = null;
            }
            DFSClient dfs;

            try
            {
                dfs = GetDFSClient(request);
            }
            catch (Exception e)
            {
                response.SendError(400, e.Message);
                return;
            }
            HdfsDataInputStream @in   = null;
            OutputStream        out_1 = null;

            try
            {
                @in   = dfs.CreateWrappedInputStream(dfs.Open(filename));
                out_1 = response.GetOutputStream();
                long fileLen = @in.GetVisibleLength();
                if (reqRanges != null)
                {
                    IList <InclusiveByteRange> ranges = InclusiveByteRange.SatisfiableRanges(reqRanges
                                                                                             , fileLen);
                    StreamFile.SendPartialData(@in, out_1, response, fileLen, ranges);
                }
                else
                {
                    // No ranges, so send entire file
                    response.SetHeader("Content-Disposition", "attachment; filename=\"" + rawFilename
                                       + "\"");
                    response.SetContentType("application/octet-stream");
                    response.SetHeader(ContentLength, string.Empty + fileLen);
                    StreamFile.CopyFromOffset(@in, out_1, 0L, fileLen);
                }
                @in.Close();
                @in = null;
                out_1.Close();
                out_1 = null;
                dfs.Close();
                dfs = null;
            }
            catch (IOException ioe)
            {
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("response.isCommitted()=" + response.IsCommitted(), ioe);
                }
                throw;
            }
            finally
            {
                IOUtils.Cleanup(Log, @in);
                IOUtils.Cleanup(Log, out_1);
                IOUtils.Cleanup(Log, dfs);
            }
        }
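When working at the DFSClient level rather than through FileSystem, CreateWrappedInputStream() is what upgrades the raw stream to an HdfsDataInputStream, as both servlet-style examples here do. A minimal sketch, assuming an initialized DFSClient `dfs`, a `filename` string, and the class's `Log` field as above:

            // Sketch, assuming `dfs`, `filename` and `Log` as in the servlet above.
            HdfsDataInputStream @in = dfs.CreateWrappedInputStream(dfs.Open(filename));
            try
            {
                long fileLen = @in.GetVisibleLength();
                // ... stream up to fileLen bytes to the caller ...
            }
            finally
            {
                IOUtils.Cleanup(Log, @in);
            }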
        /// <exception cref="System.Exception"/>
        public virtual void HardLeaseRecoveryRestartHelper(bool doRename, int size)
        {
            if (size < 0)
            {
                size = AppendTestUtil.NextInt(FileSize + 1);
            }
            //create a file
            string fileStr = "/hardLeaseRecovery";

            AppendTestUtil.Log.Info("filestr=" + fileStr);
            Path filePath          = new Path(fileStr);
            FSDataOutputStream stm = dfs.Create(filePath, true, BufSize, ReplicationNum, BlockSize
                                                );

            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(fileStr));
            // write bytes into the file.
            AppendTestUtil.Log.Info("size=" + size);
            stm.Write(buffer, 0, size);
            string originalLeaseHolder = NameNodeAdapter.GetLeaseHolderForPath(cluster.GetNameNode
                                                                                   (), fileStr);

            NUnit.Framework.Assert.IsFalse("original lease holder should not be the NN", originalLeaseHolder
                                           .Equals(HdfsServerConstants.NamenodeLeaseHolder));
            // hflush file
            AppendTestUtil.Log.Info("hflush");
            stm.Hflush();
            // check visible length
            HdfsDataInputStream @in = (HdfsDataInputStream)dfs.Open(filePath);

            NUnit.Framework.Assert.AreEqual(size, @in.GetVisibleLength());
            @in.Close();
            if (doRename)
            {
                fileStr += ".renamed";
                Path renamedPath = new Path(fileStr);
                NUnit.Framework.Assert.IsTrue(dfs.Rename(filePath, renamedPath));
                filePath = renamedPath;
            }
            // kill the lease renewal thread
            AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
            dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
            // Make sure the DNs don't send a heartbeat for a while, so the blocks
            // won't actually get completed during lease recovery.
            foreach (DataNode dn in cluster.GetDataNodes())
            {
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
            }
            // set the hard limit to be 1 second
            cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
            // Make sure lease recovery begins.
            Sharpen.Thread.Sleep(HdfsServerConstants.NamenodeLeaseRecheckInterval * 2);
            CheckLease(fileStr, size);
            cluster.RestartNameNode(false);
            CheckLease(fileStr, size);
            // Let the DNs send heartbeats again.
            foreach (DataNode dn_1 in cluster.GetDataNodes())
            {
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn_1, false);
            }
            cluster.WaitActive();
            // set the hard limit to be 1 second, to initiate lease recovery.
            cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
            // wait for lease recovery to complete
            LocatedBlocks locatedBlocks;

            do
            {
                Sharpen.Thread.Sleep(ShortLeasePeriod);
                locatedBlocks = dfs.dfs.GetLocatedBlocks(fileStr, 0L, size);
            } while (locatedBlocks.IsUnderConstruction());
            NUnit.Framework.Assert.AreEqual(size, locatedBlocks.GetFileLength());
            // make sure that the client can't write data anymore.
            try
            {
                stm.Write('b');
                stm.Hflush();
                NUnit.Framework.Assert.Fail("Should not be able to flush after we've lost the lease"
                                            );
            }
            catch (IOException e)
            {
                Log.Info("Expceted exception on write/hflush", e);
            }
            try
            {
                stm.Close();
                NUnit.Framework.Assert.Fail("Should not be able to close after we've lost the lease"
                                            );
            }
            catch (IOException e)
            {
                Log.Info("Expected exception on close", e);
            }
            // verify data
            AppendTestUtil.Log.Info("File size is good. Now validating sizes from datanodes..."
                                    );
            AppendTestUtil.CheckFullFile(dfs, filePath, size, buffer, fileStr);
        }
Example #11
        public virtual void TestZeroCopyReads()
        {
            HdfsConfiguration conf    = InitZeroCopyTest();
            MiniDFSCluster    cluster = null;
            Path TestPath             = new Path("/a");
            FSDataInputStream fsIn    = null;
            int        TestFileLength = 3 * BlockSize;
            FileSystem fs             = null;

            try
            {
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
                cluster.WaitActive();
                fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, TestPath, TestFileLength, (short)1, 7567L);
                try
                {
                    DFSTestUtil.WaitReplication(fs, TestPath, (short)1);
                }
                catch (Exception e)
                {
                    NUnit.Framework.Assert.Fail("unexpected InterruptedException during " + "waitReplication: "
                                                + e);
                }
                catch (TimeoutException e)
                {
                    NUnit.Framework.Assert.Fail("unexpected TimeoutException during " + "waitReplication: "
                                                + e);
                }
                fsIn = fs.Open(TestPath);
                byte[] original = new byte[TestFileLength];
                IOUtils.ReadFully(fsIn, original, 0, TestFileLength);
                fsIn.Close();
                fsIn = fs.Open(TestPath);
                ByteBuffer result = fsIn.Read(null, BlockSize, EnumSet.Of(ReadOption.SkipChecksums
                                                                          ));
                NUnit.Framework.Assert.AreEqual(BlockSize, result.Remaining());
                HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
                NUnit.Framework.Assert.AreEqual(BlockSize, dfsIn.GetReadStatistics().GetTotalBytesRead
                                                    ());
                NUnit.Framework.Assert.AreEqual(BlockSize, dfsIn.GetReadStatistics().GetTotalZeroCopyBytesRead
                                                    ());
                Assert.AssertArrayEquals(Arrays.CopyOfRange(original, 0, BlockSize), ByteBufferToArray
                                             (result));
                fsIn.ReleaseBuffer(result);
            }
            finally
            {
                if (fsIn != null)
                {
                    fsIn.Close();
                }
                if (fs != null)
                {
                    fs.Close();
                }
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
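The zero-copy path has its own contract: Read(null, maxLength, ReadOption.SkipChecksums) returns a ByteBuffer backed by a memory-mapped region of the block, and the caller must hand it back with ReleaseBuffer(). The core of it as a hedged sketch, assuming `fsIn` is open over an HDFS file with at least BlockSize readable bytes, as in the test above:

            // Sketch, assuming `fsIn` and `BlockSize` as in the test above.
            ByteBuffer result = fsIn.Read(null, BlockSize,
                                          EnumSet.Of(ReadOption.SkipChecksums));
            try
            {
                // Consume up to result.Remaining() bytes from the buffer here.
                long zeroCopy = ((HdfsDataInputStream)fsIn).GetReadStatistics()
                                .GetTotalZeroCopyBytesRead();
            }
            finally
            {
                // Zero-copy buffers are pooled mmaps; they must be returned.
                fsIn.ReleaseBuffer(result);
            }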