Code Example #1
 /// <exception cref="System.Exception"/>
 private void WaitForReplicaAnchorStatus(ShortCircuitCache cache, ExtendedBlock block,
                                         bool expectedIsAnchorable, bool expectedIsAnchored,
                                         int expectedOutstandingMmaps)
 {
     GenericTestUtils.WaitFor(new _Supplier_683(cache, expectedOutstandingMmaps, block,
                                                expectedIsAnchorable, expectedIsAnchored),
                              10, 60000);
 }
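_Supplier_683 is a class that the Sharpen Java-to-C# converter generated from an anonymous Supplier<Boolean> in the original Java test. As a rough sketch of the intent only, assuming a delegate-accepting WaitFor overload and a hypothetical CheckAnchorStatus helper (neither appears in this port):

 // Hypothetical lambda form of the wait above; the generated _Supplier_683
 // class wraps an equivalent check. Following the Java GenericTestUtils.waitFor
 // convention, the last two arguments mean: poll every 10 ms, give up after 60 s.
 GenericTestUtils.WaitFor(
     () => CheckAnchorStatus(cache, block, expectedIsAnchorable,
                             expectedIsAnchored, expectedOutstandingMmaps),
     10, 60000);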
Code Example #2
 private void UpdateDeletedBlockId(ExtendedBlock block)
 {
     lock (this)
     {
         ICollection<long> blockIds = deletedBlockIds[block.GetBlockPoolId()];
         if (blockIds == null)
         {
             blockIds = new HashSet<long>();
             deletedBlockIds[block.GetBlockPoolId()] = blockIds;
         }
         blockIds.AddItem(block.GetBlockId());
         numDeletedBlocks++;
         if (numDeletedBlocks == MaxDeletedBlocks)
         {
             foreach (KeyValuePair<string, ICollection<long>> e in deletedBlockIds)
             {
                 string            bpid = e.Key;
                 ICollection<long> bs   = e.Value;
                 fsdatasetImpl.RemoveDeletedBlocks(bpid, bs);
                 bs.Clear();
             }
             numDeletedBlocks = 0;
         }
     }
 }
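UpdateDeletedBlockId batches deleted block IDs per block pool and flushes every pool's batch to fsdatasetImpl.RemoveDeletedBlocks once the running count reaches MaxDeletedBlocks. A minimal, self-contained sketch of the same threshold-batching pattern; class and member names here (DeletedIdBatcher, Record, Flush, MaxDeleted) are illustrative, not from the Hadoop port:

 using System;
 using System.Collections.Generic;

 class DeletedIdBatcher
 {
     private const int MaxDeleted = 64;  // stand-in for MaxDeletedBlocks
     private readonly Dictionary<string, HashSet<long>> idsByPool =
         new Dictionary<string, HashSet<long>>();
     private int count;

     public void Record(string poolId, long blockId)
     {
         lock (this)
         {
             // Lazily create the per-pool set, mirroring the null check above.
             if (!idsByPool.TryGetValue(poolId, out HashSet<long> ids))
             {
                 ids = new HashSet<long>();
                 idsByPool[poolId] = ids;
             }
             ids.Add(blockId);
             // Flush every pool's batch once the overall count hits the cap.
             if (++count == MaxDeleted)
             {
                 foreach (KeyValuePair<string, HashSet<long>> e in idsByPool)
                 {
                     Flush(e.Key, e.Value);  // stand-in for RemoveDeletedBlocks
                     e.Value.Clear();
                 }
                 count = 0;
             }
         }
     }

     private static void Flush(string poolId, ICollection<long> ids) =>
         Console.WriteLine("pool " + poolId + ": flushing " + ids.Count + " ids");
 }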
Code Example #3
 /// <exception cref="System.IO.IOException"/>
 private BlockReaderLocalLegacy(DFSClient.Conf conf, string hdfsfile, ExtendedBlock block,
                                Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token,
                                long startOffset, long length, BlockLocalPathInfo pathinfo,
                                FileInputStream dataIn)
     : this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
            DataChecksum.NewDataChecksum(DataChecksum.Type.Null, 4),
            false, dataIn, startOffset, null)
 {
 }
Code Example #4
        public virtual void TestUnderReplicatedUsesNewRacks()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 3;
            Path          filePath          = new Path("/testFile");

            // All datanodes are on the same rack
            string[]       racks   = new string[] { "/rack1", "/rack1", "/rack1", "/rack1", "/rack1" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                     .NumDataNodes(racks.Length).Racks(racks).Build();
            FSNamesystem ns = cluster.GetNameNode().GetNamesystem();

            try
            {
                // Create a file with one block
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
                DFSTestUtil.WaitForReplication(cluster, b, 1, ReplicationFactor, 0);
                // Add new datanodes on a different rack and increase the
                // replication factor so the block is underreplicated and make
                // sure at least one of the hosts on the new rack is used.
                string[] newRacks = new string[] { "/rack2", "/rack2" };
                cluster.StartDataNodes(conf, 2, true, null, newRacks);
                ReplicationFactor = 5;
                NameNodeAdapter.SetReplication(ns, "/testFile", ReplicationFactor);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Code Example #5
        /*
         * Replace block
         */
        /// <exception cref="System.IO.IOException"/>
        /// <exception cref="System.Net.Sockets.SocketException"/>
        private bool ReplaceBlock(ExtendedBlock block, DatanodeInfo source,
                                  DatanodeInfo sourceProxy, DatanodeInfo destination,
                                  StorageType targetStorageType)
        {
            Socket sock = new Socket();

            try
            {
                sock.Connect(NetUtils.CreateSocketAddr(destination.GetXferAddr()),
                             HdfsServerConstants.ReadTimeout);
                sock.SetKeepAlive(true);
                // sendRequest
                DataOutputStream @out = new DataOutputStream(sock.GetOutputStream());
                new Sender(@out).ReplaceBlock(block, targetStorageType,
                                              BlockTokenSecretManager.DummyToken,
                                              source.GetDatanodeUuid(), sourceProxy);
                @out.Flush();
                // receiveResponse
                DataInputStream reply = new DataInputStream(sock.GetInputStream());
                DataTransferProtos.BlockOpResponseProto proto =
                    DataTransferProtos.BlockOpResponseProto.ParseDelimitedFrom(reply);
                while (proto.GetStatus() == DataTransferProtos.Status.InProgress)
                {
                    proto = DataTransferProtos.BlockOpResponseProto.ParseDelimitedFrom(reply);
                }
                return(proto.GetStatus() == DataTransferProtos.Status.Success);
            }
            finally
            {
                sock.Close();
            }
        }
Code Example #6
        /// <summary>
        /// Generate testing environment and return a collection of blocks
        /// on which to run the tests.
        /// </summary>
        /// <param name="bpid">Block pool ID to generate blocks for</param>
        /// <param name="dataSet">Namespace in which to insert blocks</param>
        /// <returns>Contrived blocks for further testing.</returns>
        /// <exception cref="System.IO.IOException"/>
        private ExtendedBlock[] Setup(string bpid, FsDatasetImpl dataSet)
        {
            // setup replicas map
            ExtendedBlock[] blocks = new ExtendedBlock[] {
                new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002),
                new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
                new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
            };
            ReplicaMap replicasMap = dataSet.volumeMap;
            FsVolumeImpl vol = (FsVolumeImpl)dataSet.volumes
                               .GetNextVolume(StorageType.Default, 0).GetVolume();
            ReplicaInfo replicaInfo = new FinalizedReplica(
                blocks[Finalized].GetLocalBlock(), vol, vol.GetCurrentDir().GetParentFile());

            replicasMap.Add(bpid, replicaInfo);
            replicaInfo.GetBlockFile().CreateNewFile();
            replicaInfo.GetMetaFile().CreateNewFile();
            replicasMap.Add(bpid, new ReplicaInPipeline(
                blocks[Temporary].GetBlockId(), blocks[Temporary].GetGenerationStamp(), vol,
                vol.CreateTmpFile(bpid, blocks[Temporary].GetLocalBlock()).GetParentFile(), 0));
            replicaInfo = new ReplicaBeingWritten(
                blocks[Rbw].GetLocalBlock(), vol,
                vol.CreateRbwFile(bpid, blocks[Rbw].GetLocalBlock()).GetParentFile(), null);
            replicasMap.Add(bpid, replicaInfo);
            replicaInfo.GetBlockFile().CreateNewFile();
            replicaInfo.GetMetaFile().CreateNewFile();
            replicasMap.Add(bpid, new ReplicaWaitingToBeRecovered(
                blocks[Rwr].GetLocalBlock(), vol,
                vol.CreateRbwFile(bpid, blocks[Rwr].GetLocalBlock()).GetParentFile()));
            replicasMap.Add(bpid, new ReplicaUnderRecovery(
                new FinalizedReplica(blocks[Rur].GetLocalBlock(), vol,
                                     vol.GetCurrentDir().GetParentFile()), 2007));
            return(blocks);
        }
Code Example #7
 public ReportBadBlockAction(ExtendedBlock block, string storageUuid,
                             StorageType storageType)
 {
     this.block       = block;
     this.storageUuid = storageUuid;
     this.storageType = storageType;
 }
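The constructor simply captures the three fields that identify the bad replica. A hedged instantiation sketch, reusing the ExtendedBlock(poolId, blockId, length, genStamp) shape seen in the other examples; the storage UUID here is a made-up placeholder:

 // Illustrative only: report block 1 of pool "bpid" on a default-storage volume.
 ExtendedBlock bad = new ExtendedBlock("bpid", 1, 1, 1);
 ReportBadBlockAction action = new ReportBadBlockAction(bad, "DS-fake-storage-uuid",
                                                        StorageType.Default);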
Code Example #8
File: TestDFSUtil.cs Project: orf53975/hadoop.net
        public virtual void TestLocatedBlocks2Locations()
        {
            DatanodeInfo d = DFSTestUtil.GetLocalDatanodeInfo();

            DatanodeInfo[] ds = new DatanodeInfo[1];
            ds[0] = d;
            // ok
            ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
            LocatedBlock  l1 = new LocatedBlock(b1, ds, 0, false);
            // corrupt
            ExtendedBlock        b2  = new ExtendedBlock("bpid", 2, 1, 1);
            LocatedBlock         l2  = new LocatedBlock(b2, ds, 0, true);
            IList <LocatedBlock> ls  = Arrays.AsList(l1, l2);
            LocatedBlocks        lbs = new LocatedBlocks(10, false, ls, l2, true, null);

            BlockLocation[] bs = DFSUtil.LocatedBlocks2Locations(lbs);
            NUnit.Framework.Assert.IsTrue("expected 2 blocks but got " + bs.Length, bs.Length
                                          == 2);
            int corruptCount = 0;

            foreach (BlockLocation b in bs)
            {
                if (b.IsCorrupt())
                {
                    corruptCount++;
                }
            }
            NUnit.Framework.Assert.IsTrue("expected 1 corrupt files but got " + corruptCount,
                                          corruptCount == 1);
            // test an empty location
            bs = DFSUtil.LocatedBlocks2Locations(new LocatedBlocks());
            NUnit.Framework.Assert.AreEqual(0, bs.Length);
        }
Code Example #9
        public virtual void TestGetMetaData()
        {
            SimulatedFSDataset fsdataset = GetSimulatedFSDataset();
            ExtendedBlock      b         = new ExtendedBlock(bpid, 1, 5, 0);

            try
            {
                NUnit.Framework.Assert.IsTrue(fsdataset.GetMetaDataInputStream(b) == null);
                NUnit.Framework.Assert.IsTrue("Expected an IO exception", false);
            }
            catch (IOException)
            {
                // ok - as expected
            }
            AddSomeBlocks(fsdataset);  // Only need to add one but ....
            b = new ExtendedBlock(bpid, 1, 0, 0);
            InputStream     metaInput     = fsdataset.GetMetaDataInputStream(b);
            DataInputStream metaDataInput = new DataInputStream(metaInput);
            short           version       = metaDataInput.ReadShort();

            NUnit.Framework.Assert.AreEqual(BlockMetadataHeader.Version, version);
            DataChecksum checksum = DataChecksum.NewDataChecksum(metaDataInput);

            NUnit.Framework.Assert.AreEqual(DataChecksum.Type.Null, checksum.GetChecksumType());
            NUnit.Framework.Assert.AreEqual(0, checksum.GetChecksumSize());
        }
Code Example #10
 /// <exception cref="System.IO.IOException"/>
 private void CorruptBlock(MiniDFSCluster cluster, FileSystem fs, Path fileName,
                           int dnIndex, ExtendedBlock block)
 {
     // Corrupt the block on datanode dnIndex. The indexes change once the
     // nodes are restarted, but the data directory will not change.
     NUnit.Framework.Assert.IsTrue(cluster.CorruptReplica(dnIndex, block));
     MiniDFSCluster.DataNodeProperties dnProps = cluster.StopDataNode(0);
     // Each datanode has multiple data dirs, check each
     for (int dirIndex = 0; dirIndex < 2; dirIndex++)
     {
         string   bpid        = cluster.GetNamesystem().GetBlockPoolId();
         FilePath storageDir  = cluster.GetStorageDir(dnIndex, dirIndex);
         FilePath dataDir     = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
         FilePath scanLogFile = new FilePath(dataDir, "dncp_block_verification.log.curr");
         if (scanLogFile.Exists())
         {
             // wait for one minute for deletion to succeed;
             for (int i = 0; !scanLogFile.Delete(); i++)
             {
                 NUnit.Framework.Assert.IsTrue("Could not delete log file in one minute", i < 60);
                 try
                 {
                     Sharpen.Thread.Sleep(1000);
                 }
                 catch (Exception)
                 {
                 }
             }
         }
     }
     // restart the datanode so the corrupt replica will be detected
     cluster.RestartDataNode(dnProps);
 }
Code Example #11
        public virtual void TestSufficientlySingleReplBlockUsesNewRack()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 1;
            Path          filePath          = new Path("/testFile");

            string[]       racks   = new string[] { "/rack1", "/rack1", "/rack1", "/rack2" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                     .NumDataNodes(racks.Length).Racks(racks).Build();
            FSNamesystem ns = cluster.GetNameNode().GetNamesystem();

            try
            {
                // Create a file with one block with a replication factor of 1
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
                DFSTestUtil.WaitForReplication(cluster, b, 1, ReplicationFactor, 0);
                ReplicationFactor = 2;
                NameNodeAdapter.SetReplication(ns, "/testFile", ReplicationFactor);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Code Example #12
        public virtual void CheckInvalidBlock(ExtendedBlock b)
        {
            SimulatedFSDataset fsdataset = GetSimulatedFSDataset();

            NUnit.Framework.Assert.IsFalse(fsdataset.IsValidBlock(b));
            try
            {
                fsdataset.GetLength(b);
                NUnit.Framework.Assert.IsTrue("Expected an IO exception", false);
            }
            catch (IOException)
            {
                // ok - as expected
            }
            try
            {
                fsdataset.GetBlockInputStream(b);
                NUnit.Framework.Assert.IsTrue("Expected an IO exception", false);
            }
            catch (IOException)
            {
                // ok - as expected
            }
            try
            {
                fsdataset.FinalizeBlock(b);
                NUnit.Framework.Assert.IsTrue("Expected an IO exception", false);
            }
            catch (IOException)
            {
            }
        }
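Each try/catch above asserts that a call throws IOException by failing the test if control ever reaches the Assert.IsTrue(..., false) line. With a recent NUnit the same checks collapse to Assert.Throws; a sketch, assuming the same fsdataset and block b as in the method above:

            // Equivalent expected-exception assertions in modern NUnit style.
            Assert.Throws<IOException>(() => fsdataset.GetLength(b));
            Assert.Throws<IOException>(() => fsdataset.GetBlockInputStream(b));
            Assert.Throws<IOException>(() => fsdataset.FinalizeBlock(b));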
Code Example #13
        public virtual void TestSufficientlyReplBlocksUsesNewRack()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 3;
            Path          filePath          = new Path("/testFile");

            // All datanodes are on the same rack
            string[]       racks   = new string[] { "/rack1", "/rack1", "/rack1" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                     .NumDataNodes(racks.Length).Racks(racks).Build();

            try
            {
                // Create a file with one block with a replication factor of 3
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
                DFSTestUtil.WaitForReplication(cluster, b, 1, ReplicationFactor, 0);
                // Add a new datanode on a different rack
                string[] newRacks = new string[] { "/rack2" };
                cluster.StartDataNodes(conf, 1, true, null, newRacks);
                cluster.WaitActive();
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Code Example #14
        public virtual void TestReduceReplFactorRespectsRackPolicy()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 3;
            Path          filePath          = new Path("/testFile");

            string[]       racks   = new string[] { "/rack1", "/rack1", "/rack2", "/rack2" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                     .NumDataNodes(racks.Length).Racks(racks).Build();
            FSNamesystem ns = cluster.GetNameNode().GetNamesystem();

            try
            {
                // Create a file with one block
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
                // Decrease the replication factor, make sure the deleted replica
                // was not the one that lived on the rack with only one replica,
                // i.e. we should still have 2 racks after reducing the repl factor.
                ReplicationFactor = 2;
                NameNodeAdapter.SetReplication(ns, "/testFile", ReplicationFactor);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Code Example #15
        public virtual void TestArrayOutOfBoundsException()
        {
            MiniDFSCluster cluster = null;

            try
            {
                Configuration conf = new HdfsConfiguration();
                cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
                cluster.WaitActive();
                FileSystem fs       = cluster.GetFileSystem();
                Path       FilePath = new Path("/tmp.txt");
                long       FileLen  = 1L;
                DFSTestUtil.CreateFile(fs, FilePath, FileLen, (short)2, 1L);
                // get the block
                string   bpid       = cluster.GetNamesystem().GetBlockPoolId();
                FilePath storageDir = cluster.GetInstanceStorageDir(0, 0);
                FilePath dataDir    = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                NUnit.Framework.Assert.IsTrue("Data directory does not exist", dataDir.Exists());
                ExtendedBlock blk = GetBlock(bpid, dataDir);
                if (blk == null)
                {
                    storageDir = cluster.GetInstanceStorageDir(0, 1);
                    dataDir    = MiniDFSCluster.GetFinalizedDir(storageDir, bpid);
                    blk        = GetBlock(bpid, dataDir);
                }
                NUnit.Framework.Assert.IsFalse("Data directory does not contain any blocks or there was an "
                                               + "IO error", blk == null);
                // start a third datanode
                cluster.StartDataNodes(conf, 1, true, null, null);
                AList <DataNode> datanodes = cluster.GetDataNodes();
                NUnit.Framework.Assert.AreEqual(3, datanodes.Count);
                DataNode dataNode = datanodes[2];
                // report corrupted block by the third datanode
                DatanodeRegistration dnR = DataNodeTestUtils.GetDNRegistrationForBP(
                    dataNode, blk.GetBlockPoolId());
                FSNamesystem ns = cluster.GetNamesystem();
                ns.WriteLock();
                try
                {
                    cluster.GetNamesystem().GetBlockManager().FindAndMarkBlockAsCorrupt(
                        blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
                }
                finally
                {
                    ns.WriteUnlock();
                }
                // open the file
                fs.Open(FilePath);
                //clean up
                fs.Delete(FilePath, false);
            }
            finally
            {
                if (cluster != null)
                {
                    cluster.Shutdown();
                }
            }
        }
Code Example #16
        public virtual void TestBlockTokenRpcLeak()
        {
            Configuration conf = new Configuration();

            conf.Set(CommonConfigurationKeysPublic.HadoopSecurityAuthentication, "kerberos");
            UserGroupInformation.SetConfiguration(conf);
            Assume.AssumeTrue(FdDir.Exists());
            BlockTokenSecretManager sm = new BlockTokenSecretManager(
                blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);

            Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token =
                sm.GenerateToken(block3, EnumSet.AllOf<BlockTokenSecretManager.AccessMode>());
            Server server = CreateMockDatanode(sm, token, conf);

            server.Start();
            IPEndPoint    addr     = NetUtils.GetConnectAddress(server);
            DatanodeID    fakeDnId = DFSTestUtil.GetLocalDatanodeID(addr.Port);
            ExtendedBlock b = new ExtendedBlock("fake-pool",
                                                new Org.Apache.Hadoop.Hdfs.Protocol.Block(12345L));
            LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

            fakeBlock.SetBlockToken(token);
            // Create another RPC proxy with the same configuration - this will never
            // attempt to connect anywhere -- but it causes the refcount on the
            // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
            // actually close the TCP connections to the real target DN.
            ClientDatanodeProtocol proxyToNoWhere = RPC.GetProxy<ClientDatanodeProtocol>(
                ClientDatanodeProtocol.versionID, new IPEndPoint("1.1.1.1", 1),
                UserGroupInformation.CreateRemoteUser("junk"), conf,
                NetUtils.GetDefaultSocketFactory(conf));
            ClientDatanodeProtocol proxy = null;
            int fdsAtStart = CountOpenFileDescriptors();

            try
            {
                long endTime = Time.Now() + 3000;
                while (Time.Now() < endTime)
                {
                    proxy = DFSUtil.CreateClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
                                                                      false, fakeBlock);
                    NUnit.Framework.Assert.AreEqual(block3.GetBlockId(),
                                                    proxy.GetReplicaVisibleLength(block3));
                    if (proxy != null)
                    {
                        RPC.StopProxy(proxy);
                    }
                    Log.Info("Num open fds:" + CountOpenFileDescriptors());
                }
                int fdsAtEnd = CountOpenFileDescriptors();
                if (fdsAtEnd - fdsAtStart > 50)
                {
                    NUnit.Framework.Assert.Fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
                }
            }
            finally
            {
                server.Stop();
            }
            RPC.StopProxy(proxyToNoWhere);
        }
Code Example #17
        /// <exception cref="System.IO.IOException"/>
        public static void CheckMetaInfo(ExtendedBlock b, DataNode dn)
        {
            Block metainfo = DataNodeTestUtils.GetFSDataset(dn).GetStoredBlock(
                b.GetBlockPoolId(), b.GetBlockId());

            NUnit.Framework.Assert.AreEqual(b.GetBlockId(), metainfo.GetBlockId());
            NUnit.Framework.Assert.AreEqual(b.GetNumBytes(), metainfo.GetNumBytes());
        }
Code Example #18
        /// <summary>Generate a block token for the current user</summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> GenerateToken(
            ExtendedBlock block, EnumSet<BlockTokenSecretManager.AccessMode> modes)
        {
            UserGroupInformation ugi = UserGroupInformation.GetCurrentUser();
            string userID            = (ugi == null ? null : ugi.GetShortUserName());

            return(GenerateToken(userID, block, modes));
        }
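A hedged call-site sketch for this overload, borrowing the EnumSet.AllOf idiom from the TestBlockTokenRpcLeak example above; sm is assumed to be a configured BlockTokenSecretManager:

        // Illustrative: token for the current user covering all access modes.
        Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token =
            sm.GenerateToken(block, EnumSet.AllOf<BlockTokenSecretManager.AccessMode>());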
Code Example #19
        /// <exception cref="System.IO.IOException"/>
        public static bool UnlinkBlock<_T0>(FsDatasetSpi<_T0> fsd, ExtendedBlock block,
                                            int numLinks)
            where _T0 : FsVolumeSpi
        {
            ReplicaInfo info = ((FsDatasetImpl)fsd).GetReplicaInfo(block);

            return(info.UnlinkBlock(numLinks));
        }
Code Example #20
File: Sender.cs Project: orf53975/hadoop.net
 /// <exception cref="System.IO.IOException"/>
 public override void BlockChecksum(ExtendedBlock blk,
                                    Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> blockToken)
 {
     DataTransferProtos.OpBlockChecksumProto proto =
         (DataTransferProtos.OpBlockChecksumProto)DataTransferProtos.OpBlockChecksumProto
         .NewBuilder()
         .SetHeader(DataTransferProtoUtil.BuildBaseHeader(blk, blockToken))
         .Build();
     Send(@out, OP.BlockChecksum, proto);
 }
Code Example #21
 /// <exception cref="System.IO.IOException"/>
 internal virtual void WriteBlock(ExtendedBlock block, BlockConstructionStage stage,
                                  long newGS, DataChecksum checksum)
 {
     sender.WriteBlock(block, StorageType.Default, BlockTokenSecretManager.DummyToken,
                       "cl", new DatanodeInfo[1], new StorageType[1], null, stage, 0,
                       block.GetNumBytes(), block.GetNumBytes(), newGS, checksum,
                       CachingStrategy.NewDefaultStrategy(), false, false, null);
 }
Code Example #22
 public _Supplier_594(TestDNFencing _enclosing, MiniDFSCluster cluster,
                      ExtendedBlock block, int waitFor)
 {
     this._enclosing = _enclosing;
     this.cluster    = cluster;
     this.block      = block;
     this.waitFor    = waitFor;
 }
Code Example #23
 public _CacheVisitor_687(int expectedOutstandingMmaps, ExtendedBlock block,
                          bool expectedIsAnchorable, bool expectedIsAnchored,
                          MutableBoolean result)
 {
     this.expectedOutstandingMmaps = expectedOutstandingMmaps;
     this.block = block;
     this.expectedIsAnchorable = expectedIsAnchorable;
     this.expectedIsAnchored   = expectedIsAnchored;
     this.result = result;
 }
Code Example #24
File: Sender.cs Project: orf53975/hadoop.net
 /// <exception cref="System.IO.IOException"/>
 public override void ReplaceBlock(ExtendedBlock blk, StorageType storageType,
                                   Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> blockToken,
                                   string delHint, DatanodeInfo source)
 {
     DataTransferProtos.OpReplaceBlockProto proto =
         (DataTransferProtos.OpReplaceBlockProto)DataTransferProtos.OpReplaceBlockProto
         .NewBuilder()
         .SetHeader(DataTransferProtoUtil.BuildBaseHeader(blk, blockToken))
         .SetStorageType(PBHelper.ConvertStorageType(storageType))
         .SetDelHint(delHint)
         .SetSource(PBHelper.ConvertDatanodeInfo(source))
         .Build();
     Send(@out, OP.ReplaceBlock, proto);
 }
Code Example #25
        /// <summary>Scan a block.</summary>
        /// <param name="cblock">The block to scan.</param>
        /// <param name="bytesPerSec">The bytes per second to scan at.</param>
        /// <returns>
        /// The length of the block that was scanned, or
        /// -1 if the block could not be scanned.
        /// </returns>
        private long ScanBlock(ExtendedBlock cblock, long bytesPerSec)
        {
            // 'cblock' has a valid blockId and block pool id, but we don't yet know the
            // genstamp the block is supposed to have.  Ask the FsDatasetImpl for this
            // information.
            ExtendedBlock block = null;

            try
            {
                Block b = volume.GetDataset().GetStoredBlock(cblock.GetBlockPoolId(),
                                                             cblock.GetBlockId());
                if (b == null)
                {
                    Log.Info("FileNotFound while finding block {} on volume {}",
                             cblock, volume.GetBasePath());
                }
                else
                {
                    block = new ExtendedBlock(cblock.GetBlockPoolId(), b);
                }
            }
            catch (FileNotFoundException)
            {
                Log.Info("FileNotFoundException while finding block {} on volume {}",
                         cblock, volume.GetBasePath());
            }
            catch (IOException)
            {
                Log.Warn("I/O error while finding block {} on volume {}",
                         cblock, volume.GetBasePath());
            }
            if (block == null)
            {
                // block not found
                return(-1);
            }
            BlockSender blockSender = null;

            try
            {
                blockSender = new BlockSender(block, 0, -1, false, true, true, datanode,
                                              null, CachingStrategy.NewDropBehind());
                throttler.SetBandwidth(bytesPerSec);
                long bytesRead = blockSender.SendBlock(nullStream, null, throttler);
                resultHandler.Handle(block, null);
                return(bytesRead);
            }
            catch (IOException e)
            {
                resultHandler.Handle(block, e);
            }
            finally
            {
                IOUtils.Cleanup(null, blockSender);
            }
            return(-1);
        }
Code Example #26
        public virtual void TestNodeDecomissionWithOverreplicationRespectsRackPolicy()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 5;
            Path          filePath          = new Path("/testFile");
            // Configure an excludes file
            FileSystem localFileSys = FileSystem.GetLocal(conf);
            Path       workingDir   = localFileSys.GetWorkingDirectory();
            Path       dir          = new Path(workingDir, "build/test/data/temp/decommission");
            Path       excludeFile  = new Path(dir, "exclude");
            Path       includeFile  = new Path(dir, "include");

            NUnit.Framework.Assert.IsTrue(localFileSys.Mkdirs(dir));
            DFSTestUtil.WriteFile(localFileSys, excludeFile, string.Empty);
            DFSTestUtil.WriteFile(localFileSys, includeFile, string.Empty);
            conf.Set(DFSConfigKeys.DfsHosts, includeFile.ToUri().GetPath());
            conf.Set(DFSConfigKeys.DfsHostsExclude, excludeFile.ToUri().GetPath());
            // All hosts are on two racks, only one host on /rack2
            string[]       racks   = new string[] { "/rack1", "/rack2", "/rack1", "/rack1", "/rack1" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                     .NumDataNodes(racks.Length).Racks(racks).Build();
            FSNamesystem ns = cluster.GetNameNode().GetNamesystem();

            try
            {
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
                // Lower the replication factor so the blocks are over replicated
                ReplicationFactor = 2;
                fs.SetReplication(filePath, ReplicationFactor);
                // Decommission one of the hosts with the block that is not on
                // the lone host on rack2 (if we decommission that host it would
                // be impossible to respect the rack policy).
                BlockLocation[] locs = fs.GetFileBlockLocations(fs.GetFileStatus(filePath),
                                                                0, long.MaxValue);
                foreach (string top in locs[0].GetTopologyPaths())
                {
                    if (!top.StartsWith("/rack2"))
                    {
                        string name = Sharpen.Runtime.Substring(top, "/rack1".Length + 1);
                        DFSTestUtil.WriteFile(localFileSys, excludeFile, name);
                        ns.GetBlockManager().GetDatanodeManager().RefreshNodes(conf);
                        DFSTestUtil.WaitForDecommission(fs, name);
                        break;
                    }
                }
                // Check the block still has sufficient # replicas across racks,
                // ie we didn't remove the replica on the host on /rack1.
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
            }
            finally
            {
                cluster.Shutdown();
            }
        }
Code Example #27
 /// <summary>
 /// Override createRbw to verify that the block length that is passed
 /// is correct.
 /// </summary>
 /// <remarks>
 /// Override createRbw to verify that the block length that is passed
 /// is correct. This requires both DFSOutputStream and BlockReceiver to
 /// correctly propagate the hint to FsDatasetSpi.
 /// </remarks>
 /// <exception cref="System.IO.IOException"/>
 public override ReplicaHandler CreateRbw(StorageType storageType, ExtendedBlock b,
                                          bool allowLazyPersist)
 {
     lock (this)
     {
         Assert.AssertThat(b.GetLocalBlock().GetNumBytes(), IS.Is(ExpectedBlockLength));
         return(base.CreateRbw(storageType, b, allowLazyPersist));
     }
 }
Code Example #28
 /// <summary>
 /// Delete the block file and meta file from the disk asynchronously, adjust
 /// dfsUsed statistics accordingly.
 /// </summary>
 internal virtual void DeleteAsync(FsVolumeReference volumeRef, FilePath blockFile,
                                   FilePath metaFile, ExtendedBlock block, string trashDirectory)
 {
     Log.Info("Scheduling " + block.GetLocalBlock() + " file " + blockFile + " for deletion");
     FsDatasetAsyncDiskService.ReplicaFileDeleteTask deletionTask =
         new FsDatasetAsyncDiskService.ReplicaFileDeleteTask(this, volumeRef, blockFile,
                                                             metaFile, block, trashDirectory);
     Execute(((FsVolumeImpl)volumeRef.GetVolume()).GetCurrentDir(), deletionTask);
 }
Code Example #29
        /// <summary>Generate a block token for a specified user</summary>
        /// <exception cref="System.IO.IOException"/>
        public virtual Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> GenerateToken(
            string userId, ExtendedBlock block, EnumSet<BlockTokenSecretManager.AccessMode> modes)
        {
            BlockTokenIdentifier id = new BlockTokenIdentifier(userId, block.GetBlockPoolId(),
                                                               block.GetBlockId(), modes);

            return(new Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier>(id, this));
        }
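The current-user overload shown earlier resolves the short user name and then delegates to this one. A hedged sketch of calling it directly with an explicit user; "alice" and the block coordinates are made up, and EnumSet.AllOf is the idiom used in the TestBlockTokenRpcLeak example:

        // Illustrative: mint a token for a named user covering all access modes.
        Org.Apache.Hadoop.Security.Token.Token<BlockTokenIdentifier> token =
            sm.GenerateToken("alice", new ExtendedBlock("bpid", 1, 1, 1),
                             EnumSet.AllOf<BlockTokenSecretManager.AccessMode>());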
Code Example #30
File: TestHostsFiles.cs Project: orf53975/hadoop.net
        public virtual void TestHostsExcludeInUI()
        {
            Configuration conf = GetConf();
            short         ReplicationFactor = 2;
            Path          filePath          = new Path("/testFile");
            // Configure an excludes file
            FileSystem localFileSys = FileSystem.GetLocal(conf);
            Path       workingDir   = localFileSys.GetWorkingDirectory();
            Path       dir          = new Path(workingDir, "build/test/data/temp/decommission");
            Path       excludeFile  = new Path(dir, "exclude");
            Path       includeFile  = new Path(dir, "include");

            NUnit.Framework.Assert.IsTrue(localFileSys.Mkdirs(dir));
            DFSTestUtil.WriteFile(localFileSys, excludeFile, string.Empty);
            DFSTestUtil.WriteFile(localFileSys, includeFile, string.Empty);
            conf.Set(DFSConfigKeys.DfsHostsExclude, excludeFile.ToUri().GetPath());
            conf.Set(DFSConfigKeys.DfsHosts, includeFile.ToUri().GetPath());
            // Two blocks and four racks
            string[]       racks   = new string[] { "/rack1", "/rack1", "/rack2", "/rack2" };
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                     .NumDataNodes(racks.Length).Racks(racks).Build();
            FSNamesystem ns = cluster.GetNameNode().GetNamesystem();

            try
            {
                // Create a file with one block
                FileSystem fs = cluster.GetFileSystem();
                DFSTestUtil.CreateFile(fs, filePath, 1L, ReplicationFactor, 1L);
                ExtendedBlock b = DFSTestUtil.GetFirstBlock(fs, filePath);
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
                // Decommission one of the hosts with the block, this should cause
                // the block to get replicated to another host on the same rack,
                // otherwise the rack policy is violated.
                BlockLocation[] locs = fs.GetFileBlockLocations(fs.GetFileStatus(filePath),
                                                                0, long.MaxValue);
                string name  = locs[0].GetNames()[0];
                string names = name + "\n" + "localhost:42\n";
                Log.Info("adding '" + names + "' to exclude file " + excludeFile.ToUri().GetPath(
                             ));
                DFSTestUtil.WriteFile(localFileSys, excludeFile, name);
                ns.GetBlockManager().GetDatanodeManager().RefreshNodes(conf);
                DFSTestUtil.WaitForDecommission(fs, name);
                // Check the block still has sufficient # replicas across racks
                DFSTestUtil.WaitForReplication(cluster, b, 2, ReplicationFactor, 0);
                MBeanServer mbs = ManagementFactory.GetPlatformMBeanServer();
                ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
                string nodes = (string)mbs.GetAttribute(mxbeanName, "LiveNodes");
                NUnit.Framework.Assert.IsTrue("Live nodes should contain the decommissioned node"
                                              , nodes.Contains("Decommissioned"));
            }
            finally
            {
                cluster.Shutdown();
            }
        }