Example #1
        /// <summary>
        /// Tests the file length when we hsync the file, restart the cluster, and
        /// the Datanodes have not yet reported to the Namenode.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestFileLengthWithHSyncAndClusterRestartWithOutDNsRegister()
        {
            Configuration conf = new HdfsConfiguration();

            // create cluster
            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 512);
            MiniDFSCluster      cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            HdfsDataInputStream @in     = null;

            try
            {
                Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
                DistributedFileSystem dfs  = cluster.GetFileSystem();
                FSDataOutputStream    @out = dfs.Create(path);
                int fileLength             = 1030;
                @out.Write(new byte[fileLength]);
                @out.Hsync();
                cluster.RestartNameNode();
                cluster.WaitActive();
                @in = (HdfsDataInputStream)dfs.Open(path, 1024);
                // Verify the length when we just restart NN. DNs will register
                // immediately.
                NUnit.Framework.Assert.AreEqual(fileLength, @in.GetVisibleLength());
                cluster.ShutdownDataNodes();
                cluster.RestartNameNode(false);
                // This is just for ensuring NN started.
                VerifyNNIsInSafeMode(dfs);
                try
                {
                    @in = (HdfsDataInputStream)dfs.Open(path);
                    NUnit.Framework.Assert.Fail("Expected IOException");
                }
                catch (IOException e)
                {
                    NUnit.Framework.Assert.IsTrue(e.GetLocalizedMessage().IndexOf("Name node is in safe mode"
                                                                                  ) >= 0);
                }
            }
            finally
            {
                if (null != @in)
                {
                    @in.Close();
                }
                cluster.Shutdown();
            }
        }
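The VerifyNNIsInSafeMode helper called above is not part of this example. A minimal sketch of what it could look like, assuming this Sharpen port exposes DistributedFileSystem.IsInSafeMode() (hypothetical, not the original helper):

        // Hypothetical helper: assert that the NameNode is still in safe mode
        // after restarting without any registered DataNodes.
        private static void VerifyNNIsInSafeMode(DistributedFileSystem dfs)
        {
            // IsInSafeMode() is assumed to mirror Java's DistributedFileSystem.isInSafeMode()
            NUnit.Framework.Assert.IsTrue(dfs.IsInSafeMode());
        }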
Example #2
        //check the file
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        internal static void CheckFile(Path p, int expectedsize, Configuration conf)
        {
            //open the file with another user account
            string username = UserGroupInformation.GetCurrentUser().GetShortUserName() + "_"
                              + ++userCount;
            UserGroupInformation ugi = UserGroupInformation.CreateUserForTesting(username, new
                                                                                 string[] { "supergroup" });
            FileSystem          fs  = DFSTestUtil.GetFileSystemAs(ugi, conf);
            HdfsDataInputStream @in = (HdfsDataInputStream)fs.Open(p);

            //Check visible length
            NUnit.Framework.Assert.IsTrue(@in.GetVisibleLength() >= expectedsize);
            //Able to read?
            for (int i = 0; i < expectedsize; i++)
            {
                NUnit.Framework.Assert.AreEqual(unchecked ((byte)i), unchecked ((byte)@in.Read()));
            }
            @in.Close();
        }
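CheckFile expects the file to contain the byte pattern 0, 1, 2, ... written by another writer. A hypothetical invocation, assuming an existing DistributedFileSystem dfs and a Configuration conf (names assumed, not from the original snippet):

            // Hypothetical usage: write the expected byte pattern, hflush so it
            // becomes visible to other readers, then verify it as a second user.
            Path p = new Path("/tmp/checkFileExample");
            FSDataOutputStream @out = dfs.Create(p);
            int n = 100;
            for (int i = 0; i < n; i++)
            {
                @out.Write(i);   // low byte of i, matching CheckFile's expectation
            }
            @out.Hflush();       // make the written bytes visible without closing
            CheckFile(p, n, conf);
            @out.Close();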
Example #3
        /// <exception cref="System.IO.IOException"/>
        private void OnOpen(ChannelHandlerContext ctx)
        {
            string nnId                  = @params.NamenodeId();
            int    bufferSize            = @params.BufferSize();
            long   offset                = @params.Offset();
            long   length                = @params.Length();
            DefaultHttpResponse response = new DefaultHttpResponse(HttpVersion.Http11, HttpResponseStatus
                                                                   .Ok);
            HttpHeaders headers = response.Headers();

            // Allow the UI to access the file
            headers.Set(HttpHeaders.Names.AccessControlAllowMethods, HttpMethod.Get);
            headers.Set(HttpHeaders.Names.AccessControlAllowOrigin, "*");
            headers.Set(HttpHeaders.Names.ContentType, ApplicationOctetStream);
            headers.Set(HttpHeaders.Names.Connection, HttpHeaders.Values.Close);
            DFSClient           dfsclient = NewDfsClient(nnId, conf);
            HdfsDataInputStream @in       = dfsclient.CreateWrappedInputStream(dfsclient.Open(path,
                                                                                              bufferSize, true));

            @in.Seek(offset);
            long contentLength = @in.GetVisibleLength() - offset;

            if (length >= 0)
            {
                contentLength = Math.Min(contentLength, length);
            }
            InputStream data;

            if (contentLength >= 0)
            {
                headers.Set(HttpHeaders.Names.ContentLength, contentLength);
                data = new LimitInputStream(@in, contentLength);
            }
            else
            {
                data = @in;
            }
            ctx.Write(response);
            ctx.WriteAndFlush(new _ChunkedStream_221(dfsclient, data)).AddListener(ChannelFutureListener
                                                                                   .Close);
        }
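_ChunkedStream_221 is a Sharpen-generated name for an anonymous class. A rough sketch of what such a wrapper usually does, assuming the ported Netty ChunkedStream type takes an InputStream in its constructor (hypothetical, not the generated code): it streams the data in chunks and closes the DFSClient once the stream is closed.

        // Hypothetical sketch: a ChunkedStream that also releases the DFSClient
        // when the HTTP transfer is finished and the stream is closed.
        private sealed class _ChunkedStream_221 : ChunkedStream
        {
            private readonly DFSClient dfsclient;

            public _ChunkedStream_221(DFSClient dfsclient, InputStream data)
                : base(data)
            {
                this.dfsclient = dfsclient;
            }

            public override void Close()
            {
                base.Close();
                dfsclient.Close();   // free the client once streaming ends
            }
        }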
Example #4
        /// <exception cref="Javax.Servlet.ServletException"/>
        /// <exception cref="System.IO.IOException"/>
        protected override void DoGet(HttpServletRequest request, HttpServletResponse response
                                      )
        {
            string path        = ServletUtil.GetDecodedPath(request, "/streamFile");
            string rawPath     = ServletUtil.GetRawPath(request, "/streamFile");
            string filename    = JspHelper.ValidatePath(path);
            string rawFilename = JspHelper.ValidatePath(rawPath);

            if (filename == null)
            {
                response.SetContentType("text/plain");
                PrintWriter @out = response.GetWriter();
                @out.Write("Invalid input");
                return;
            }
            Enumeration <string> reqRanges = request.GetHeaders("Range");

            if (reqRanges != null && !reqRanges.MoveNext())
            {
                reqRanges = null;
            }
            DFSClient dfs;

            try
            {
                dfs = GetDFSClient(request);
            }
            catch (Exception e)
            {
                response.SendError(400, e.Message);
                return;
            }
            HdfsDataInputStream @in   = null;
            OutputStream        out_1 = null;

            try
            {
                @in   = dfs.CreateWrappedInputStream(dfs.Open(filename));
                out_1 = response.GetOutputStream();
                long fileLen = @in.GetVisibleLength();
                if (reqRanges != null)
                {
                    IList <InclusiveByteRange> ranges = InclusiveByteRange.SatisfiableRanges(reqRanges
                                                                                             , fileLen);
                    StreamFile.SendPartialData(@in, out_1, response, fileLen, ranges);
                }
                else
                {
                    // No ranges, so send entire file
                    response.SetHeader("Content-Disposition", "attachment; filename=\"" + rawFilename
                                       + "\"");
                    response.SetContentType("application/octet-stream");
                    response.SetHeader(ContentLength, string.Empty + fileLen);
                    StreamFile.CopyFromOffset(@in, out_1, 0L, fileLen);
                }
                @in.Close();
                @in = null;
                out_1.Close();
                out_1 = null;
                dfs.Close();
                dfs = null;
            }
            catch (IOException ioe)
            {
                if (Log.IsDebugEnabled())
                {
                    Log.Debug("response.isCommitted()=" + response.IsCommitted(), ioe);
                }
                throw;
            }
            finally
            {
                IOUtils.Cleanup(Log, @in);
                IOUtils.Cleanup(Log, out_1);
                IOUtils.Cleanup(Log, dfs);
            }
        }
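StreamFile.CopyFromOffset is referenced but not shown. A hedged sketch of what that helper typically does, assuming Hadoop's IOUtils.CopyBytes(in, out, count, close) overload exists in this port (assumption, not the original implementation):

        // Hypothetical sketch: seek to the requested offset and copy exactly
        // 'count' bytes from the HDFS stream to the servlet output stream.
        internal static void CopyFromOffset(FSDataInputStream @in, OutputStream @out,
                                            long offset, long count)
        {
            @in.Seek(offset);
            IOUtils.CopyBytes(@in, @out, count, false);   // false: leave streams open
        }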
Example #5
        /// <exception cref="System.Exception"/>
        public virtual void HardLeaseRecoveryRestartHelper(bool doRename, int size)
        {
            if (size < 0)
            {
                size = AppendTestUtil.NextInt(FileSize + 1);
            }
            //create a file
            string fileStr = "/hardLeaseRecovery";

            AppendTestUtil.Log.Info("filestr=" + fileStr);
            Path filePath          = new Path(fileStr);
            FSDataOutputStream stm = dfs.Create(filePath, true, BufSize, ReplicationNum, BlockSize
                                                );

            NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(fileStr));
            // write bytes into the file.
            AppendTestUtil.Log.Info("size=" + size);
            stm.Write(buffer, 0, size);
            string originalLeaseHolder = NameNodeAdapter.GetLeaseHolderForPath(cluster.GetNameNode
                                                                                   (), fileStr);

            NUnit.Framework.Assert.IsFalse("original lease holder should not be the NN", originalLeaseHolder
                                           .Equals(HdfsServerConstants.NamenodeLeaseHolder));
            // hflush file
            AppendTestUtil.Log.Info("hflush");
            stm.Hflush();
            // check visible length
            HdfsDataInputStream @in = (HdfsDataInputStream)dfs.Open(filePath);

            NUnit.Framework.Assert.AreEqual(size, @in.GetVisibleLength());
            @in.Close();
            if (doRename)
            {
                fileStr += ".renamed";
                Path renamedPath = new Path(fileStr);
                NUnit.Framework.Assert.IsTrue(dfs.Rename(filePath, renamedPath));
                filePath = renamedPath;
            }
            // kill the lease renewal thread
            AppendTestUtil.Log.Info("leasechecker.interruptAndJoin()");
            dfs.dfs.GetLeaseRenewer().InterruptAndJoin();
            // Make sure the DNs don't send a heartbeat for a while, so the blocks
            // won't actually get completed during lease recovery.
            foreach (DataNode dn in cluster.GetDataNodes())
            {
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
            }
            // set the hard limit to be 1 second
            cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
            // Make sure lease recovery begins.
            Sharpen.Thread.Sleep(HdfsServerConstants.NamenodeLeaseRecheckInterval * 2);
            CheckLease(fileStr, size);
            cluster.RestartNameNode(false);
            CheckLease(fileStr, size);
            // Let the DNs send heartbeats again.
            foreach (DataNode dn_1 in cluster.GetDataNodes())
            {
                DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn_1, false);
            }
            cluster.WaitActive();
            // set the hard limit to be 1 second, to initiate lease recovery.
            cluster.SetLeasePeriod(LongLeasePeriod, ShortLeasePeriod);
            // wait for lease recovery to complete
            LocatedBlocks locatedBlocks;

            do
            {
                Sharpen.Thread.Sleep(ShortLeasePeriod);
                locatedBlocks = dfs.dfs.GetLocatedBlocks(fileStr, 0L, size);
            }while (locatedBlocks.IsUnderConstruction());
            NUnit.Framework.Assert.AreEqual(size, locatedBlocks.GetFileLength());
            // make sure that the client can't write data anymore.
            try
            {
                stm.Write('b');
                stm.Hflush();
                NUnit.Framework.Assert.Fail("Should not be able to flush after we've lost the lease"
                                            );
            }
            catch (IOException e)
            {
                Log.Info("Expceted exception on write/hflush", e);
            }
            try
            {
                stm.Close();
                NUnit.Framework.Assert.Fail("Should not be able to close after we've lost the lease"
                                            );
            }
            catch (IOException e)
            {
                Log.Info("Expected exception on close", e);
            }
            // verify data
            AppendTestUtil.Log.Info("File size is good. Now validating sizes from datanodes..."
                                    );
            AppendTestUtil.CheckFullFile(dfs, filePath, size, buffer, fileStr);
        }
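The CheckLease helper called before and after the NameNode restart is not shown. A hypothetical sketch, reusing the NameNodeAdapter and HdfsServerConstants identifiers that already appear above (the assertion messages and exact checks are assumptions):

        // Hypothetical sketch: once the client's lease renewer is gone, the
        // NameNode itself should hold the lease until recovery completes.
        private void CheckLease(string f, int size)
        {
            string holder = NameNodeAdapter.GetLeaseHolderForPath(cluster.GetNameNode(), f);
            if (size == 0)
            {
                // a closed, zero-length file should no longer have a lease holder
                NUnit.Framework.Assert.IsNull(holder);
            }
            else
            {
                NUnit.Framework.Assert.AreEqual(HdfsServerConstants.NamenodeLeaseHolder, holder);
            }
        }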