Example #1
 public bool Get()
 {
     try
     {
         LocatedBlocks  locs = NameNodeAdapter.GetBlockLocations(nn, path, 0, 1000);
         DatanodeInfo[] dnis = locs.GetLastLocatedBlock().GetLocations();
         foreach (DatanodeInfo dni in dnis)
         {
             NUnit.Framework.Assert.IsNotNull(dni);
         }
         int numReplicas = dnis.Length;
         Org.Apache.Hadoop.Hdfs.Server.Namenode.HA.TestStandbyIsHot.Log.Info("Got " + numReplicas
                                                                             + " locs: " + locs);
         if (numReplicas > expectedReplicas)
         {
             cluster.TriggerDeletionReports();
         }
         cluster.TriggerHeartbeats();
         return(numReplicas == expectedReplicas);
     }
     catch (IOException e)
     {
         Org.Apache.Hadoop.Hdfs.Server.Namenode.HA.TestStandbyIsHot.Log.Warn("No block locations yet: "
                                                                             + e.Message);
         return(false);
     }
 }
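A hedged sketch (not part of the original source): in the upstream Java test this Get() is polled repeatedly until the replica count settles (via GenericTestUtils.waitFor). A minimal stand-alone polling helper in plain C#, assuming only the BCL and a modern .NET runtime; the WaitFor name is hypothetical:
 // Polls a predicate such as Get() above until it returns true or the
 // timeout expires. Hypothetical helper, not from the converted codebase.
 public static void WaitFor(System.Func<bool> check, int intervalMs, int timeoutMs)
 {
     long deadline = System.Environment.TickCount64 + timeoutMs;
     while (!check())
     {
         if (System.Environment.TickCount64 > deadline)
         {
             throw new System.TimeoutException("condition not met within " + timeoutMs + " ms");
         }
         System.Threading.Thread.Sleep(intervalMs);
     }
 }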
Example #2
        /// <summary>Create a redirection URL</summary>
        /// <exception cref="System.IO.IOException"/>
        private Uri CreateRedirectURL(string path, string encodedPath, HdfsFileStatus status
                                      , UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request,
                                      string dt)
        {
            string        scheme = request.GetScheme();
            LocatedBlocks blks   = nnproxy.GetBlockLocations(status.GetFullPath(new Path(path))
                                                             .ToUri().GetPath(), 0, 1);
            Configuration conf = NameNodeHttpServer.GetConfFromContext(GetServletContext());
            DatanodeID    host = PickSrcDatanode(blks, status, conf);
            string        hostname;

            if (host is DatanodeInfo)
            {
                hostname = host.GetHostName();
            }
            else
            {
                hostname = host.GetIpAddr();
            }
            int    port    = "https".Equals(scheme) ? host.GetInfoSecurePort() : host.GetInfoPort();
            string dtParam = string.Empty;

            if (dt != null)
            {
                dtParam = JspHelper.GetDelegationTokenUrlParam(dt);
            }
            // Add namenode address to the url params
            NameNode nn        = NameNodeHttpServer.GetNameNodeFromContext(GetServletContext());
            string   addr      = nn.GetNameNodeAddressHostPortString();
            string   addrParam = JspHelper.GetUrlParam(JspHelper.NamenodeAddress, addr);

            return(new Uri(scheme, hostname, port, "/streamFile" + encodedPath + '?' + "ugi="
                           + ServletUtil.EncodeQueryValue(ugi.GetShortUserName()) + dtParam + addrParam));
        }
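A note on the return statement above: the four-argument Uri(scheme, host, port, file) constructor comes from the Sharpen runtime (mirroring java.net.URL) and does not exist on System.Uri. A hedged sketch of the same composition in standard .NET, with illustrative placeholder values:
            // Hedged sketch, not from the original source: building the redirect
            // URL with System.UriBuilder instead of the Sharpen Uri constructor.
            System.UriBuilder builder = new System.UriBuilder();
            builder.Scheme = "https";                        // request.GetScheme()
            builder.Host   = "dn1.example.com";              // hostname chosen above (placeholder)
            builder.Port   = 50475;                          // host.GetInfoSecurePort() (placeholder)
            builder.Path   = "/streamFile/user/alice/f.dat"; // "/streamFile" + encodedPath (placeholder)
            builder.Query  = "ugi=alice";                    // plus dtParam and addrParam (placeholder)
            System.Uri redirect = builder.Uri;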
Example #3
        public virtual void TestBlockTokenInLastLocatedBlock()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetBoolean(DFSConfigKeys.DfsBlockAccessTokenEnableKey, true);
            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 512);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.WaitActive();
            try
            {
                FileSystem         fs       = cluster.GetFileSystem();
                string             fileName = "/testBlockTokenInLastLocatedBlock";
                Path               filePath = new Path(fileName);
                FSDataOutputStream @out     = fs.Create(filePath, (short)1);
                @out.Write(new byte[1000]);
                // ensure that the first block is written out (see FSOutputSummer#flush)
                @out.Flush();
                LocatedBlocks locatedBlocks = cluster.GetNameNodeRpc().GetBlockLocations(fileName
                                                                                         , 0, 1000);
                while (locatedBlocks.GetLastLocatedBlock() == null)
                {
                    Sharpen.Thread.Sleep(100);
                    locatedBlocks = cluster.GetNameNodeRpc().GetBlockLocations(fileName, 0, 1000);
                }
                Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> token = locatedBlocks
                                                                                      .GetLastLocatedBlock().GetBlockToken();
                NUnit.Framework.Assert.AreEqual(BlockTokenIdentifier.KindName, token.GetKind());
                @out.Close();
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #4
        public virtual void TestGetBlockLocations()
        {
            NamenodeProtocols namenode = cluster.GetNameNodeRpc();
            Path               p       = new Path(BaseDir, "file2.dat");
            string             src     = p.ToString();
            FSDataOutputStream @out    = TestFileCreation.CreateFile(hdfs, p, 3);
            // write a half block
            int len = (int)(((uint)BlockSize) >> 1);

            WriteFile(p, @out, len);
            for (int i = 1; i < NumBlocks;)
            {
                // verify consistency
                LocatedBlocks        lb     = namenode.GetBlockLocations(src, 0, len);
                IList <LocatedBlock> blocks = lb.GetLocatedBlocks();
                NUnit.Framework.Assert.AreEqual(i, blocks.Count);
                Block b = blocks[blocks.Count - 1].GetBlock().GetLocalBlock();
                NUnit.Framework.Assert.IsTrue(b is BlockInfoContiguousUnderConstruction);
                if (++i < NumBlocks)
                {
                    // write one more block
                    WriteFile(p, @out, BlockSize);
                    len += BlockSize;
                }
            }
            // close file
            @out.Close();
        }
Example #5
        /// <summary>Create a file with one block and corrupt some/all of the block replicas.</summary>
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
        /// <exception cref="System.IO.FileNotFoundException"/>
        /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        private void CreateAFileWithCorruptedBlockReplicas(Path filePath, short repl, int
                                                           corruptBlockCount)
        {
            DFSTestUtil.CreateFile(dfs, filePath, BlockSize, repl, 0);
            DFSTestUtil.WaitReplication(dfs, filePath, repl);
            // Locate the file blocks by asking name node
            LocatedBlocks locatedblocks = dfs.dfs.GetNamenode().GetBlockLocations(filePath.ToString(), 0L, BlockSize);

            NUnit.Framework.Assert.AreEqual(repl, locatedblocks.Get(0).GetLocations().Length);
            // The file only has one block
            LocatedBlock lblock = locatedblocks.Get(0);

            DatanodeInfo[] datanodeinfos = lblock.GetLocations();
            ExtendedBlock  block         = lblock.GetBlock();

            // corrupt some/all of the block replicas
            for (int i = 0; i < corruptBlockCount; i++)
            {
                DatanodeInfo dninfo = datanodeinfos[i];
                DataNode     dn     = cluster.GetDataNode(dninfo.GetIpcPort());
                CorruptBlock(block, dn);
                Log.Debug("Corrupted block " + block.GetBlockName() + " on data node " + dninfo);
            }
        }
Example #6
        /// <summary>TC12: Append to partial CRC chunk</summary>
        /// <exception cref="System.Exception"/>
        private void TestTC12(bool appendToNewBlock)
        {
            Path p = new Path("/TC12/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file with a block size of 64KB
            //   and a default io.bytes.per.checksum of 512 bytes.
            //   Write 25687 bytes of data. Close file.
            int len1 = 25687;
            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            //b. Reopen file in "append" mode. Append another 5877 bytes of data. Close file.
            int len2 = 5877;

            {
                FSDataOutputStream @out = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append
                                                                                     , CreateFlag.NewBlock), 4096, null) : fs.Append(p);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //c. Reopen file and read 25687+5877 bytes of data from file. Close file.
            AppendTestUtil.Check(fs, p, len1 + len2);
            if (appendToNewBlock)
            {
                LocatedBlocks blks = fs.dfs.GetLocatedBlocks(p.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(2, blks.GetLocatedBlocks().Count);
                NUnit.Framework.Assert.AreEqual(len1, blks.GetLocatedBlocks()[0].GetBlockSize());
                NUnit.Framework.Assert.AreEqual(len2, blks.GetLocatedBlocks()[1].GetBlockSize());
                AppendTestUtil.Check(fs, p, 0, len1);
                AppendTestUtil.Check(fs, p, len1, len2);
            }
        }
Example #7
        /// <exception cref="System.IO.IOException"/>
        public static DatanodeInfo BestNode(LocatedBlocks blks, Configuration conf)
        {
            Dictionary <DatanodeInfo, JspHelper.NodeRecord> map = new Dictionary <DatanodeInfo,
                                                                                  JspHelper.NodeRecord>();

            foreach (LocatedBlock block in blks.GetLocatedBlocks())
            {
                DatanodeInfo[] nodes = block.GetLocations();
                foreach (DatanodeInfo node in nodes)
                {
                    // Dictionary's indexer throws on a missing key in standard .NET,
                    // so probe with TryGetValue (the Java original relied on
                    // HashMap.get returning null).
                    if (!map.TryGetValue(node, out JspHelper.NodeRecord record))
                    {
                        map[node] = new JspHelper.NodeRecord(node, 1);
                    }
                    else
                    {
                        record.frequency++;
                    }
                }
            }
            JspHelper.NodeRecord[] nodes_1 = Sharpen.Collections.ToArray(map.Values, new JspHelper.NodeRecord[map.Count]);
            Arrays.Sort(nodes_1, new JspHelper.NodeRecordComparator());
            return(BestNode(nodes_1, false));
        }
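For comparison, the same "most represented datanode" tally can be expressed with System.Linq. A hedged sketch (note that the upstream helper also randomizes among equally frequent nodes, which this chain does not):
        // Hedged alternative, not in the original source. Assumes the
        // LocatedBlocks/DatanodeInfo types above and `using System.Linq;`.
        public static DatanodeInfo MostRepresentedNode(LocatedBlocks blks)
        {
            return blks.GetLocatedBlocks()
                       .SelectMany(b => b.GetLocations())      // flatten all replica locations
                       .GroupBy(node => node)                  // count appearances per datanode
                       .OrderByDescending(g => g.Count())
                       .Select(g => g.Key)
                       .First();
        }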
Example #8
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        protected internal bool VerifyDeletedBlocks(LocatedBlocks locatedBlocks)
        {
            Log.Info("Verifying replica has no saved copy after deletion.");
            TriggerBlockReport();
            while (DataNodeTestUtils.GetPendingAsyncDeletions(cluster.GetDataNodes()[0]) > 0L)
            {
                Sharpen.Thread.Sleep(1000);
            }
            string bpid = cluster.GetNamesystem().GetBlockPoolId();
            IList <FsVolumeSpi> volumes = cluster.GetDataNodes()[0].GetFSDataset().GetVolumes();

            // Make sure deleted replica does not have a copy on either finalized dir of
            // transient volume or finalized dir of non-transient volume
            foreach (FsVolumeSpi v in volumes)
            {
                FsVolumeImpl volume    = (FsVolumeImpl)v;
                FilePath     targetDir = v.IsTransientStorage()
                                             ? volume.GetBlockPoolSlice(bpid).GetFinalizedDir()
                                             : volume.GetBlockPoolSlice(bpid).GetLazypersistDir();
                if (!VerifyBlockDeletedFromDir(targetDir, locatedBlocks))
                {
                    return(false);
                }
            }
            return(true);
        }
Example #9
        public virtual void TestFallbackToDiskPartial()
        {
            StartUpCluster(true, 2);
            string MethodName = GenericTestUtils.GetMethodName();
            Path   path       = new Path("/" + MethodName + ".dat");

            MakeTestFile(path, BlockSize * 5, true);
            // Sleep for a short time to allow the lazy writer thread to do its job
            Sharpen.Thread.Sleep(6 * LazyWriterIntervalSec * 1000);
            TriggerBlockReport();
            int           numBlocksOnRamDisk = 0;
            int           numBlocksOnDisk    = 0;
            long          fileLength         = client.GetFileInfo(path.ToString()).GetLen();
            LocatedBlocks locatedBlocks      = client.GetLocatedBlocks(path.ToString(), 0, fileLength
                                                                       );

            foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
            {
                if (locatedBlock.GetStorageTypes()[0] == StorageType.RamDisk)
                {
                    numBlocksOnRamDisk++;
                }
                else
                {
                    if (locatedBlock.GetStorageTypes()[0] == StorageType.Default)
                    {
                        numBlocksOnDisk++;
                    }
                }
            }
            // Since eviction is asynchronous, depending on the timing of eviction
            // relative to the writes, we may get 2 or fewer blocks on RAM disk.
            // Note: Debug.Assert only runs in DEBUG builds; NUnit asserts would
            // fail unconditionally.
            System.Diagnostics.Debug.Assert(numBlocksOnRamDisk <= 2);
            System.Diagnostics.Debug.Assert(numBlocksOnDisk >= 3);
        }
Example #10
        public virtual void TestLocatedBlocks2Locations()
        {
            DatanodeInfo d = DFSTestUtil.GetLocalDatanodeInfo();

            DatanodeInfo[] ds = new DatanodeInfo[1];
            ds[0] = d;
            // ok
            ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
            LocatedBlock  l1 = new LocatedBlock(b1, ds, 0, false);
            // corrupt
            ExtendedBlock        b2  = new ExtendedBlock("bpid", 2, 1, 1);
            LocatedBlock         l2  = new LocatedBlock(b2, ds, 0, true);
            IList <LocatedBlock> ls  = Arrays.AsList(l1, l2);
            LocatedBlocks        lbs = new LocatedBlocks(10, false, ls, l2, true, null);

            BlockLocation[] bs = DFSUtil.LocatedBlocks2Locations(lbs);
            NUnit.Framework.Assert.IsTrue("expected 2 blocks but got " + bs.Length, bs.Length
                                          == 2);
            int corruptCount = 0;

            foreach (BlockLocation b in bs)
            {
                if (b.IsCorrupt())
                {
                    corruptCount++;
                }
            }
            NUnit.Framework.Assert.IsTrue("expected 1 corrupt files but got " + corruptCount,
                                          corruptCount == 1);
            // test an empty location
            bs = DFSUtil.LocatedBlocks2Locations(new LocatedBlocks());
            NUnit.Framework.Assert.AreEqual(0, bs.Length);
        }
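The counting loop above collapses to a one-liner with System.Linq; a hedged equivalent (assuming `using System.Linq;`):
            // Hedged sketch, not part of the original test.
            int corruptCount = bs.Count(b => b.IsCorrupt());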
Example #11
        public virtual void TestAbandonBlock()
        {
            string src = FileNamePrefix + "foo";
            // Start writing a file but do not close it
            FSDataOutputStream fout = fs.Create(new Path(src), true, 4096, (short)1, 512L);

            for (int i = 0; i < 1024; i++)
            {
                fout.Write(123);
            }
            fout.Hflush();
            long fileId = ((DFSOutputStream)fout.GetWrappedStream()).GetFileId();
            // Now abandon the last block
            DFSClient     dfsclient = DFSClientAdapter.GetDFSClient(fs);
            LocatedBlocks blocks    = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue
                                                                                );
            int          originalNumBlocks = blocks.LocatedBlockCount();
            LocatedBlock b = blocks.GetLastLocatedBlock();

            dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName
                                                 );
            // call abandonBlock again to make sure the operation is idempotent
            dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName
                                                 );
            // And close the file
            fout.Close();
            // Close cluster and check the block has been abandoned after restart
            cluster.RestartNameNode();
            blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
            NUnit.Framework.Assert.AreEqual("Blocks " + b + " has not been abandoned.", orginalNumBlocks
                                            , blocks.LocatedBlockCount() + 1);
        }
Example #12
        /// <summary>Create FileStatus with location info by file INode</summary>
        /// <exception cref="System.IO.IOException"/>
        private static HdfsLocatedFileStatus CreateLocatedFileStatus(FSDirectory fsd, string
                                                                     fullPath, byte[] path, INode node, byte storagePolicy, int snapshot, bool isRawPath
                                                                     , INodesInPath iip)
        {
            System.Diagnostics.Debug.Assert(fsd.HasReadLock());
            long size = 0;
            // length is zero for directories
            short              replication = 0;
            long               blocksize   = 0;
            LocatedBlocks      loc         = null;
            bool               isEncrypted;
            FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node, snapshot
                                                                                     , iip);

            if (node.IsFile())
            {
                INodeFile fileNode = node.AsFile();
                size        = fileNode.ComputeFileSize(snapshot);
                replication = fileNode.GetFileReplication(snapshot);
                blocksize   = fileNode.GetPreferredBlockSize();
                bool inSnapshot = snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot
                                  .CurrentStateId;
                bool isUc     = !inSnapshot && fileNode.IsUnderConstruction();
                long fileSize = !inSnapshot && isUc
                                    ? fileNode.ComputeFileSizeNotIncludingLastUcBlock()
                                    : size;

                loc = fsd.GetFSNamesystem().GetBlockManager().CreateLocatedBlocks(fileNode.GetBlocks(snapshot),
                                                                                  fileSize, isUc, 0L, size, false, inSnapshot, feInfo);
                if (loc == null)
                {
                    loc = new LocatedBlocks();
                }
                isEncrypted = (feInfo != null) || (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode(node)));
            }
            else
            {
                isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
            }
            int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot
                                                                                     ) : 0;
            INodeAttributes       nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);
            HdfsLocatedFileStatus status    = new HdfsLocatedFileStatus(
                size, node.IsDirectory(), replication, blocksize,
                node.GetModificationTime(snapshot), node.GetAccessTime(snapshot),
                GetPermissionForFileStatus(nodeAttrs, isEncrypted),
                nodeAttrs.GetUserName(), nodeAttrs.GetGroupName(),
                node.IsSymlink() ? node.AsSymlink().GetSymlink() : null,
                path, node.GetId(), loc, childrenNum, feInfo, storagePolicy);

            // Set caching information for the located blocks.
            if (loc != null)
            {
                CacheManager cacheManager = fsd.GetFSNamesystem().GetCacheManager();
                foreach (LocatedBlock lb in loc.GetLocatedBlocks())
                {
                    cacheManager.SetCachedLocations(lb);
                }
            }
            return(status);
        }
Example #13
        /// <summary>Verify the first block of the file is corrupted (for all its replicas).</summary>
        /// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
        /// <exception cref="System.IO.FileNotFoundException"/>
        /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
        /// <exception cref="System.IO.IOException"/>
        private void VerifyFirstBlockCorrupted(Path filePath, bool isCorrupted)
        {
            LocatedBlocks locatedBlocks = dfs.dfs.GetNamenode().GetBlockLocations(filePath.ToUri().GetPath(),
                                                                                  0, long.MaxValue);
            LocatedBlock firstLocatedBlock = locatedBlocks.Get(0);

            NUnit.Framework.Assert.AreEqual(isCorrupted, firstLocatedBlock.IsCorrupt());
        }
Example #14
        /// <exception cref="System.IO.IOException"/>
        private LocatedBlock GetLocatedBlock()
        {
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(Path.ToString(), 0, BlockSize
                                                                  );

            Assert.AssertThat(locatedBlocks.GetLocatedBlocks().Count, CoreMatchers.Is(1));
            return(Iterables.GetOnlyElement(locatedBlocks.GetLocatedBlocks()));
        }
Example #15
        /// <summary>
        /// Regression test for HDFS-2795:
        /// - Start an HA cluster with a DN.
        /// </summary>
        /// <remarks>
        /// Regression test for HDFS-2795:
        /// - Start an HA cluster with a DN.
        /// - Write several blocks to the FS with replication 1.
        /// - Shutdown the DN
        /// - Wait for the NNs to declare the DN dead. All blocks will be under-replicated.
        /// - Restart the DN.
        /// In the bug, the standby node would only very slowly notice the blocks returning
        /// to the cluster.
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestDatanodeRestarts()
        {
            Configuration conf = new Configuration();

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, 1024);
            // We read from the standby to watch block locations
            HAUtil.SetAllowStandbyReads(conf, true);
            conf.SetLong(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, 0);
            conf.SetInt(DFSConfigKeys.DfsHaTaileditsPeriodKey, 1);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NnTopology(MiniDFSNNTopology
                                                                                 .SimpleHATopology()).NumDataNodes(1).Build();

            try
            {
                NameNode nn0 = cluster.GetNameNode(0);
                NameNode nn1 = cluster.GetNameNode(1);
                cluster.TransitionToActive(0);
                // Create 5 blocks.
                DFSTestUtil.CreateFile(cluster.GetFileSystem(0), TestFilePath, 5 * 1024, (short)1
                                       , 1L);
                HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
                // Stop the DN.
                DataNode dn     = cluster.GetDataNodes()[0];
                string   dnName = dn.GetDatanodeId().GetXferAddr();
                MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
                // Make sure both NNs register it as dead.
                BlockManagerTestUtil.NoticeDeadDatanode(nn0, dnName);
                BlockManagerTestUtil.NoticeDeadDatanode(nn1, dnName);
                BlockManagerTestUtil.UpdateState(nn0.GetNamesystem().GetBlockManager());
                BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
                NUnit.Framework.Assert.AreEqual(5, nn0.GetNamesystem().GetUnderReplicatedBlocks()
                                                );
                // The SBN will not have any blocks in its neededReplication queue
                // since the SBN doesn't process replication.
                NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetUnderReplicatedBlocks()
                                                );
                LocatedBlocks locs = nn1.GetRpcServer().GetBlockLocations(TestFile, 0, 1);
                NUnit.Framework.Assert.AreEqual("Standby should have registered that the block has no replicas"
                                                , 0, locs.Get(0).GetLocations().Length);
                cluster.RestartDataNode(dnProps);
                // Wait for both NNs to re-register the DN.
                cluster.WaitActive(0);
                cluster.WaitActive(1);
                BlockManagerTestUtil.UpdateState(nn0.GetNamesystem().GetBlockManager());
                BlockManagerTestUtil.UpdateState(nn1.GetNamesystem().GetBlockManager());
                NUnit.Framework.Assert.AreEqual(0, nn0.GetNamesystem().GetUnderReplicatedBlocks()
                                                );
                NUnit.Framework.Assert.AreEqual(0, nn1.GetNamesystem().GetUnderReplicatedBlocks()
                                                );
                locs = nn1.GetRpcServer().GetBlockLocations(TestFile, 0, 1);
                NUnit.Framework.Assert.AreEqual("Standby should have registered that the block has replicas again"
                                                , 1, locs.Get(0).GetLocations().Length);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Example #16
        public virtual void HSyncEndBlock_00()
        {
            int           preferredBlockSize = 1024;
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsBlockSizeKey, preferredBlockSize);
            MiniDFSCluster        cluster    = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            DistributedFileSystem fileSystem = cluster.GetFileSystem();
            FSDataOutputStream    stm        = null;

            try
            {
                Path path = new Path("/" + fName);
                stm = fileSystem.Create(path, true, 4096, (short)2, AppendTestUtil.BlockSize);
                System.Console.Out.WriteLine("Created file " + path.ToString());
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag
                                                                           .EndBlock));
                long currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(0L, currentFileLength);
                LocatedBlocks blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(0, blocks.GetLocatedBlocks().Count);
                // write a block and call hsync(end_block) at the block boundary
                stm.Write(new byte[preferredBlockSize]);
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag
                                                                           .EndBlock));
                currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(preferredBlockSize, currentFileLength);
                blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(1, blocks.GetLocatedBlocks().Count);
                // call hsync then call hsync(end_block) immediately
                stm.Write(new byte[preferredBlockSize / 2]);
                stm.Hsync();
                ((DFSOutputStream)stm.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag
                                                                           .EndBlock));
                currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(preferredBlockSize + preferredBlockSize / 2, currentFileLength
                                                );
                blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(2, blocks.GetLocatedBlocks().Count);
                stm.Write(new byte[preferredBlockSize / 4]);
                stm.Hsync();
                currentFileLength = fileSystem.GetFileStatus(path).GetLen();
                NUnit.Framework.Assert.AreEqual(preferredBlockSize + preferredBlockSize / 2 + preferredBlockSize
                                                / 4, currentFileLength);
                blocks = fileSystem.dfs.GetLocatedBlocks(path.ToString(), 0);
                NUnit.Framework.Assert.AreEqual(3, blocks.GetLocatedBlocks().Count);
            }
            finally
            {
                IOUtils.Cleanup(null, stm, fileSystem);
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Example #17
        /// <summary>
        /// Verify the number of corrupted block replicas by fetching the block
        /// location from name node.
        /// </summary>
        /// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
        /// <exception cref="System.IO.FileNotFoundException"/>
        /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
        /// <exception cref="System.IO.IOException"/>
        private void VerifyCorruptedBlockCount(Path filePath, int expectedReplicas)
        {
            LocatedBlocks lBlocks = dfs.dfs.GetNamenode().GetBlockLocations(filePath.ToUri().GetPath(),
                                                                            0, long.MaxValue);
            // we expect only the first block of the file is used for this test
            LocatedBlock firstLocatedBlock = lBlocks.Get(0);

            NUnit.Framework.Assert.AreEqual(expectedReplicas, firstLocatedBlock.GetLocations().Length);
        }
Example #18
        public virtual void TestReplicationError()
        {
            // create a file of replication factor of 1
            Path fileName = new Path("/test.txt");
            int  fileLen  = 1;

            DFSTestUtil.CreateFile(fs, fileName, 1, (short)1, 1L);
            DFSTestUtil.WaitReplication(fs, fileName, (short)1);
            // get the block belonged to the created file
            LocatedBlocks blocks = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), fileName
                                                                     .ToString(), 0, (long)fileLen);

            NUnit.Framework.Assert.AreEqual("Should only find 1 block", blocks.LocatedBlockCount
                                                (), 1);
            LocatedBlock block = blocks.Get(0);

            // bring up a second datanode
            cluster.StartDataNodes(conf, 1, true, null, null);
            cluster.WaitActive();
            int      sndNode  = 1;
            DataNode datanode = cluster.GetDataNodes()[sndNode];
            // replicate the block to the second datanode
            IPEndPoint target = datanode.GetXferAddress();
            Socket     s      = Sharpen.Extensions.CreateSocket(target.Address, target.Port);
            // write the header.
            DataOutputStream @out     = new DataOutputStream(s.GetOutputStream());
            DataChecksum     checksum = DataChecksum.NewDataChecksum(DataChecksum.Type.Crc32, 512
                                                                     );

            new Sender(@out).WriteBlock(block.GetBlock(), StorageType.Default,
                                        BlockTokenSecretManager.DummyToken, string.Empty,
                                        new DatanodeInfo[0], new StorageType[0], null,
                                        BlockConstructionStage.PipelineSetupCreate, 1, 0L, 0L, 0L,
                                        checksum, CachingStrategy.NewDefaultStrategy(), false, false, null);
            @out.Flush();
            // close the connection before sending the content of the block
            @out.Close();
            // the temporary block & meta files should be deleted
            string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
            FilePath storageDir = cluster.GetInstanceStorageDir(sndNode, 0);
            FilePath dir1       = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            storageDir = cluster.GetInstanceStorageDir(sndNode, 1);
            FilePath dir2 = MiniDFSCluster.GetRbwDir(storageDir, bpid);

            while (dir1.ListFiles().Length != 0 || dir2.ListFiles().Length != 0)
            {
                Sharpen.Thread.Sleep(100);
            }
            // then increase the file's replication factor
            fs.SetReplication(fileName, (short)2);
            // replication should succeed
            DFSTestUtil.WaitReplication(fs, fileName, (short)1);
            // clean up the file
            fs.Delete(fileName, false);
        }
Example #19
        /// <exception cref="System.IO.IOException"/>
        public static LocatedBlock GetLastLocatedBlock(ClientProtocol namenode, string src
                                                       )
        {
            //get block info for the last block
            LocatedBlocks        locations = namenode.GetBlockLocations(src, 0, long.MaxValue);
            IList <LocatedBlock> blocks    = locations.GetLocatedBlocks();

            DataNode.Log.Info("blocks.size()=" + blocks.Count);
            NUnit.Framework.Assert.IsTrue(blocks.Count > 0);
            return(blocks[blocks.Count - 1]);
        }
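A hedged usage sketch for the helper above; the call site is hypothetical and not part of the original source:
            // Hypothetical call site: `cluster` would come from the surrounding
            // test fixture, as in the other examples on this page.
            LocatedBlock last = GetLastLocatedBlock(cluster.GetNameNodeRpc(), "/test.txt");
            DataNode.Log.Info("last block: " + last.GetBlock());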
Example #20
 /// <summary>Select a datanode to service this request.</summary>
 /// <remarks>
 /// Select a datanode to service this request.
 /// Currently, this looks at no more than the first five blocks of a file,
 /// selecting a datanode randomly from the most represented.
 /// </remarks>
 /// <param name="conf"></param>
 /// <exception cref="System.IO.IOException"/>
 private DatanodeID PickSrcDatanode(LocatedBlocks blks, HdfsFileStatus i, Configuration
                                    conf)
 {
     if (i.GetLen() == 0 || blks.GetLocatedBlocks().Count <= 0)
     {
         // pick a random datanode
         NameNode nn = NameNodeHttpServer.GetNameNodeFromContext(GetServletContext());
         return(NamenodeJspHelper.GetRandomDatanode(nn));
     }
     return(JspHelper.BestNode(blks, conf));
 }
Example #21
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Exception"/>
        /// <exception cref="Sharpen.TimeoutException"/>
        private void ChangeBlockLen(MiniDFSCluster cluster, int lenDelta)
        {
            Path       fileName          = new Path("/file1");
            short      ReplicationFactor = (short)1;
            FileSystem fs      = cluster.GetFileSystem();
            int        fileLen = fs.GetConf().GetInt(DFSConfigKeys.DfsBytesPerChecksumKey, 512);

            DFSTestUtil.CreateFile(fs, fileName, fileLen, ReplicationFactor, 0);
            DFSTestUtil.WaitReplication(fs, fileName, ReplicationFactor);
            ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, fileName);

            // Change the length of a replica
            for (int i = 0; i < cluster.GetDataNodes().Count; i++)
            {
                if (DFSTestUtil.ChangeReplicaLength(cluster, block, i, lenDelta))
                {
                    break;
                }
            }
            // increase the file's replication factor
            fs.SetReplication(fileName, (short)(ReplicationFactor + 1));
            // block replication triggers corrupt block detection
            DFSClient dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()),
                                                fs.GetConf());
            LocatedBlocks blocks = dfsClient.GetNamenode().GetBlockLocations(fileName.ToString(), 0, fileLen);

            if (lenDelta < 0)
            {
                // replica truncated
                while (!blocks.Get(0).IsCorrupt() || ReplicationFactor != blocks.Get(0).GetLocations().Length)
                {
                    Sharpen.Thread.Sleep(100);
                    blocks = dfsClient.GetNamenode().GetBlockLocations(fileName.ToString(), 0, fileLen
                                                                       );
                }
            }
            else
            {
                // no corruption detected; block replicated
                while (ReplicationFactor + 1 != blocks.Get(0).GetLocations().Length)
                {
                    Sharpen.Thread.Sleep(100);
                    blocks = dfsClient.GetNamenode().GetBlockLocations(fileName.ToString(), 0, fileLen
                                                                       );
                }
            }
            fs.Delete(fileName, true);
        }
Example #22
        /// <exception cref="System.Exception"/>
        private void TestBadBlockReportOnTransfer(bool corruptBlockByDeletingBlockFile)
        {
            Configuration  conf         = new HdfsConfiguration();
            FileSystem     fs           = null;
            DFSClient      dfsClient    = null;
            LocatedBlocks  blocks       = null;
            int            replicaCount = 0;
            short          replFactor   = 1;
            MiniDFSCluster cluster      = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();

            cluster.WaitActive();
            fs        = cluster.GetFileSystem();
            dfsClient = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()),
                                      conf);
            // Create file with replication factor of 1
            Path file1 = new Path("/tmp/testBadBlockReportOnTransfer/file1");

            DFSTestUtil.CreateFile(fs, file1, 1024, replFactor, 0);
            DFSTestUtil.WaitReplication(fs, file1, replFactor);
            // Corrupt the block belonging to the created file
            ExtendedBlock block = DFSTestUtil.GetFirstBlock(fs, file1);
            int           blockFilesCorrupted = corruptBlockByDeletingBlockFile
                                                    ? cluster.CorruptBlockOnDataNodesByDeletingBlockFile(block)
                                                    : cluster.CorruptBlockOnDataNodes(block);

            NUnit.Framework.Assert.AreEqual("Corrupted too few blocks", replFactor, blockFilesCorrupted
                                            );
            // Increase replication factor, this should invoke transfer request
            // Receiving datanode fails on checksum and reports it to namenode
            replFactor = 2;
            fs.SetReplication(file1, replFactor);
            // Now get block details and check if the block is corrupt
            blocks = dfsClient.GetNamenode().GetBlockLocations(file1.ToString(), 0, long.MaxValue
                                                               );
            while (!blocks.Get(0).IsCorrupt())
            {
                try
                {
                    Log.Info("Waiting until block is marked as corrupt...");
                    Sharpen.Thread.Sleep(1000);
                }
                catch (Exception)
                {
                    // ignore the interruption and keep polling
                }
                blocks = dfsClient.GetNamenode().GetBlockLocations(file1.ToString(), 0, long.MaxValue
                                                                   );
            }
            replicaCount = blocks.Get(0).GetLocations().Length;
            NUnit.Framework.Assert.IsTrue(replicaCount == 1);
            cluster.Shutdown();
        }
Example #23
        /// <exception cref="System.IO.IOException"/>
        private LocatedBlocks CreateFileGetBlocks(string filenamePrefix)
        {
            Path filePath = new Path("/" + filenamePrefix + ".dat");

            // Write out a file with a few blocks, get block locations.
            DFSTestUtil.CreateFile(fs, filePath, BlockSize, BlockSize * NumBlocks, BlockSize,
                                   NumDatanodes, seed);
            // Get the block list for the file with the block locations.
            LocatedBlocks blocks = client.GetLocatedBlocks(filePath.ToString(), 0, BlockSize
                                                           * NumBlocks);

            Assert.AssertThat(cluster.GetNamesystem().GetUnderReplicatedBlocks(), IS.Is(0L));
            return(blocks);
        }
Example #24
        /// <summary>Test assumes that the file has a single block</summary>
        /// <exception cref="System.IO.IOException"/>
        private FilePath GetBlockForFile(Path path, bool exists)
        {
            LocatedBlocks blocks = nn.GetRpcServer().GetBlockLocations(path.ToString(), 0, long.MaxValue
                                                                       );

            NUnit.Framework.Assert.AreEqual("The test helper functions assume that each file has a single block"
                                            , 1, blocks.GetLocatedBlocks().Count);
            ExtendedBlock      block     = blocks.GetLocatedBlocks()[0].GetBlock();
            BlockLocalPathInfo bInfo     = dn0.GetFSDataset().GetBlockLocalPathInfo(block);
            FilePath           blockFile = new FilePath(bInfo.GetBlockPath());

            NUnit.Framework.Assert.AreEqual(exists, blockFile.Exists());
            return(blockFile);
        }
Example #25
        /// <summary>TC11: Racing rename</summary>
        /// <exception cref="System.Exception"/>
        private void TestTC11(bool appendToNewBlock)
        {
            Path p = new Path("/TC11/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file and write one block of data. Close file.
            int len1 = (int)BlockSize;
            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            //b. Reopen file in "append" mode. Append half block of data.
            FSDataOutputStream out_1 = appendToNewBlock
                                           ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null)
                                           : fs.Append(p);
            int len2 = (int)BlockSize / 2;

            AppendTestUtil.Write(out_1, len1, len2);
            out_1.Hflush();
            //c. Rename file to file.new.
            Path pnew = new Path(p + ".new");

            NUnit.Framework.Assert.IsTrue(fs.Rename(p, pnew));
            //d. Close file handle that was opened in (b).
            out_1.Close();
            //check block sizes
            long          len           = fs.GetFileStatus(pnew).GetLen();
            LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(pnew.ToString(), 0L, len);
            int numblock = locatedblocks.LocatedBlockCount();

            for (int i = 0; i < numblock; i++)
            {
                LocatedBlock  lb   = locatedblocks.Get(i);
                ExtendedBlock blk  = lb.GetBlock();
                long          size = lb.GetBlockSize();
                if (i < numblock - 1)
                {
                    NUnit.Framework.Assert.AreEqual(BlockSize, size);
                }
                foreach (DatanodeInfo datanodeinfo in lb.GetLocations())
                {
                    DataNode dn       = cluster.GetDataNode(datanodeinfo.GetIpcPort());
                    Block    metainfo = DataNodeTestUtils.GetFSDataset(dn).GetStoredBlock(blk.GetBlockPoolId(),
                                                                                          blk.GetBlockId());
                    NUnit.Framework.Assert.AreEqual(size, metainfo.GetNumBytes());
                }
            }
        }
Example #26
        /// <summary>TC7: Corrupted replicas are present.</summary>
        /// <exception cref="System.IO.IOException">an exception might be thrown</exception>
        /// <exception cref="System.Exception"/>
        private void TestTC7(bool appendToNewBlock)
        {
            short repl = 2;
            Path  p    = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));

            System.Console.Out.WriteLine("p=" + p);
            //a. Create file with replication factor of 2. Write half block of data. Close file.
            int len1 = (int)(BlockSize / 2);

            {
                FSDataOutputStream @out = fs.Create(p, false, buffersize, repl, BlockSize);
                AppendTestUtil.Write(@out, 0, len1);
                @out.Close();
            }
            DFSTestUtil.WaitReplication(fs, p, repl);
            //b. Log into one datanode that has one replica of this block.
            //   Find the block file on this datanode and truncate it to zero size.
            LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(p.ToString()
                                                                                 , 0L, len1);

            NUnit.Framework.Assert.AreEqual(1, locatedblocks.LocatedBlockCount());
            LocatedBlock  lb  = locatedblocks.Get(0);
            ExtendedBlock blk = lb.GetBlock();

            NUnit.Framework.Assert.AreEqual(len1, lb.GetBlockSize());
            DatanodeInfo[] datanodeinfos = lb.GetLocations();
            NUnit.Framework.Assert.AreEqual(repl, datanodeinfos.Length);
            DataNode dn = cluster.GetDataNode(datanodeinfos[0].GetIpcPort());
            FilePath f  = DataNodeTestUtils.GetBlockFile(dn, blk.GetBlockPoolId(), blk.GetLocalBlock());
            RandomAccessFile raf = new RandomAccessFile(f, "rw");

            AppendTestUtil.Log.Info("dn=" + dn + ", blk=" + blk + " (length=" + blk.GetNumBytes
                                        () + ")");
            NUnit.Framework.Assert.AreEqual(len1, raf.Length());
            raf.SetLength(0);
            raf.Close();
            //c. Open file in "append mode".  Append a new block worth of data. Close file.
            int len2 = (int)BlockSize;

            {
                FSDataOutputStream @out = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append
                                                                                     , CreateFlag.NewBlock), 4096, null) : fs.Append(p);
                AppendTestUtil.Write(@out, len1, len2);
                @out.Close();
            }
            //d. Reopen file and read two blocks worth of data.
            AppendTestUtil.Check(fs, p, len1 + len2);
        }
Example #27
        /// <summary>Convert LocatedBlocks to a Json string.</summary>
        /// <exception cref="System.IO.IOException"/>
        public static string ToJsonString(LocatedBlocks locatedblocks)
        {
            if (locatedblocks == null)
            {
                return(null);
            }
            IDictionary <string, object> m = new SortedDictionary <string, object>();

            m["fileLength"]          = locatedblocks.GetFileLength();
            m["isUnderConstruction"] = locatedblocks.IsUnderConstruction();
            m["locatedBlocks"]       = ToJsonArray(locatedblocks.GetLocatedBlocks());
            m["lastLocatedBlock"]    = ToJsonMap(locatedblocks.GetLastLocatedBlock());
            m["isLastBlockComplete"] = locatedblocks.IsLastBlockComplete();
            return(ToJsonString(typeof(LocatedBlocks), m));
        }
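Outside of the internal ToJsonString/ToJsonArray/ToJsonMap helpers, the same five-key shape could be produced with System.Text.Json. A hedged sketch with placeholder values:
            // Hedged sketch, not from the original source. The two nested values
            // are placeholders for the arrays/maps built by the helpers above.
            var m = new System.Collections.Generic.SortedDictionary<string, object>
            {
                ["fileLength"]          = 26112L,
                ["isUnderConstruction"] = false,
                ["locatedBlocks"]       = new object[0],
                ["lastLocatedBlock"]    = null,
                ["isLastBlockComplete"] = true
            };
            string json = System.Text.Json.JsonSerializer.Serialize(m);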
Example #28
        public virtual void TestAppend2AfterSoftLimit()
        {
            Configuration conf = new HdfsConfiguration();

            conf.SetInt(DFSConfigKeys.DfsReplicationKey, 1);
            //Set small soft-limit for lease
            long           softLimit = 1L;
            long           hardLimit = 9999999L;
            MiniDFSCluster cluster   = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();

            cluster.SetLeasePeriod(softLimit, hardLimit);
            cluster.WaitActive();
            DistributedFileSystem fs  = cluster.GetFileSystem();
            DistributedFileSystem fs2 = new DistributedFileSystem();

            fs2.Initialize(fs.GetUri(), conf);
            Path testPath = new Path("/testAppendAfterSoftLimit");

            byte[] fileContents = AppendTestUtil.InitBuffer(32);
            // create a new file without closing
            FSDataOutputStream @out = fs.Create(testPath);

            @out.Write(fileContents);
            //Wait for > soft-limit
            Sharpen.Thread.Sleep(250);
            try
            {
                FSDataOutputStream appendStream2 = fs2.Append(testPath, EnumSet.Of(CreateFlag.Append
                                                                                   , CreateFlag.NewBlock), 4096, null);
                appendStream2.Write(fileContents);
                appendStream2.Close();
                NUnit.Framework.Assert.AreEqual(fileContents.Length, fs.GetFileStatus(testPath).GetLen());
                // make sure we now have 1 block since the first writer was revoked
                LocatedBlocks blks = fs.GetClient().GetLocatedBlocks(testPath.ToString(), 0L);
                NUnit.Framework.Assert.AreEqual(1, blks.GetLocatedBlocks().Count);
                foreach (LocatedBlock blk in blks.GetLocatedBlocks())
                {
                    NUnit.Framework.Assert.AreEqual(fileContents.Length, blk.GetBlockSize());
                }
            }
            finally
            {
                fs.Close();
                fs2.Close();
                cluster.Shutdown();
            }
        }
Example #29
        /// <exception cref="System.IO.IOException"/>
        protected internal LocatedBlocks EnsureFileReplicasOnStorageType(Path path, StorageType
                                                                         storageType)
        {
            // Ensure that the returned block locations are correct!
            Log.Info("Ensure path: " + path + " is on StorageType: " + storageType);
            Assert.AssertThat(fs.Exists(path), IS.Is(true));
            long          fileLength    = client.GetFileInfo(path.ToString()).GetLen();
            LocatedBlocks locatedBlocks = client.GetLocatedBlocks(path.ToString(), 0, fileLength
                                                                  );

            foreach (LocatedBlock locatedBlock in locatedBlocks.GetLocatedBlocks())
            {
                Assert.AssertThat(locatedBlock.GetStorageTypes()[0], IS.Is(storageType));
            }
            return(locatedBlocks);
        }
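A hedged usage sketch (hypothetical call site) showing how a test might consume the helper above:
            // Hypothetical call site: assert that a lazy-persist file landed on
            // RAM disk and reuse the returned locations for further checks.
            Path path = new Path("/lazyPersist.dat");
            LocatedBlocks lb = EnsureFileReplicasOnStorageType(path, StorageType.RamDisk);
            Log.Info("blocks checked: " + lb.GetLocatedBlocks().Count);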
Example #30
        public virtual void TestBlockRecoveryWithLessMetafile()
        {
            Configuration conf = new Configuration();

            conf.Set(DFSConfigKeys.DfsBlockLocalPathAccessUserKey,
                     UserGroupInformation.GetCurrentUser().GetShortUserName());
            cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
            Path file = new Path("/testRecoveryFile");
            DistributedFileSystem dfs  = cluster.GetFileSystem();
            FSDataOutputStream    @out = dfs.Create(file);
            int count = 0;

            while (count < 2 * 1024 * 1024)
            {
                @out.WriteBytes("Data");
                count += 4;
            }
            @out.Hsync();
            // abort the original stream
            ((DFSOutputStream)@out.GetWrappedStream()).Abort();
            LocatedBlocks locations = cluster.GetNameNodeRpc().GetBlockLocations(file.ToString(), 0, count);
            ExtendedBlock      block         = locations.Get(0).GetBlock();
            DataNode           dn            = cluster.GetDataNodes()[0];
            BlockLocalPathInfo localPathInfo = dn.GetBlockLocalPathInfo(block, null);
            FilePath           metafile      = new FilePath(localPathInfo.GetMetaPath());

            NUnit.Framework.Assert.IsTrue(metafile.Exists());
            // reduce the block meta file size
            RandomAccessFile raf = new RandomAccessFile(metafile, "rw");

            raf.SetLength(metafile.Length() - 20);
            raf.Close();
            // restart DN to make replica to RWR
            MiniDFSCluster.DataNodeProperties dnProp = cluster.StopDataNode(0);
            cluster.RestartDataNode(dnProp, true);
            // try to recover the lease
            DistributedFileSystem newdfs = (DistributedFileSystem)FileSystem.NewInstance(cluster
                                                                                         .GetConfiguration(0));

            count = 0;
            while (++count < 10 && !newdfs.RecoverLease(file))
            {
                Sharpen.Thread.Sleep(1000);
            }
            NUnit.Framework.Assert.IsTrue("File should be closed", newdfs.RecoverLease(file));
        }