public virtual void TestAbandonBlock() {
    string src = FileNamePrefix + "foo";
    // Start writing a file but do not close it
    FSDataOutputStream fout = fs.Create(new Path(src), true, 4096, (short)1, 512L);
    for (int i = 0; i < 1024; i++) {
        fout.Write(123);
    }
    fout.Hflush();
    long fileId = ((DFSOutputStream)fout.GetWrappedStream()).GetFileId();
    // Now abandon the last block
    DFSClient dfsclient = DFSClientAdapter.GetDFSClient(fs);
    LocatedBlocks blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
    int originalNumBlocks = blocks.LocatedBlockCount();
    LocatedBlock b = blocks.GetLastLocatedBlock();
    dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
    // call abandonBlock again to make sure the operation is idempotent
    dfsclient.GetNamenode().AbandonBlock(b.GetBlock(), fileId, src, dfsclient.clientName);
    // And close the file
    fout.Close();
    // Close cluster and check the block has been abandoned after restart
    cluster.RestartNameNode();
    blocks = dfsclient.GetNamenode().GetBlockLocations(src, 0, int.MaxValue);
    NUnit.Framework.Assert.AreEqual("Block " + b + " has not been abandoned.", originalNumBlocks, blocks.LocatedBlockCount() + 1);
}
/// <exception cref="System.IO.IOException"/> private Block FindBlock(Path path, long size) { Block ret; IList <LocatedBlock> lbs = cluster.GetNameNodeRpc().GetBlockLocations(path.ToString (), FileStart, size).GetLocatedBlocks(); LocatedBlock lb = lbs[lbs.Count - 1]; // Get block from the first DN ret = cluster.GetDataNodes()[DnN0].data.GetStoredBlock(lb.GetBlock().GetBlockPoolId (), lb.GetBlock().GetBlockId()); return(ret); }
/// <summary>Test to verify the race between finalizeBlock and Lease recovery</summary>
/// <exception cref="System.Exception"/>
public virtual void TestRaceBetweenReplicaRecoveryAndFinalizeBlock() {
    TearDown(); // Stop the Mocked DN started in startup()
    Configuration conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsDatanodeXceiverStopTimeoutMillisKey, "1000");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build();
    try {
        cluster.WaitClusterUp();
        DistributedFileSystem fs = cluster.GetFileSystem();
        Path path = new Path("/test");
        FSDataOutputStream @out = fs.Create(path);
        @out.WriteBytes("data");
        @out.Hsync();
        IList<LocatedBlock> blocks = DFSTestUtil.GetAllBlocks(fs.Open(path));
        LocatedBlock block = blocks[0];
        DataNode dataNode = cluster.GetDataNodes()[0];
        AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
        Sharpen.Thread recoveryThread = new _Thread_612(block, dataNode, recoveryInitResult);
        recoveryThread.Start();
        try {
            @out.Close();
        } catch (IOException e) {
            NUnit.Framework.Assert.IsTrue("Writing should fail", e.Message.Contains("are bad. Aborting..."));
        } finally {
            recoveryThread.Join();
        }
        NUnit.Framework.Assert.IsTrue("Recovery should be initiated successfully", recoveryInitResult.Get());
        dataNode.UpdateReplicaUnderRecovery(block.GetBlock(), block.GetBlock().GetGenerationStamp() + 1, block.GetBlock().GetBlockId(), block.GetBlockSize());
    } finally {
        if (null != cluster) {
            cluster.Shutdown();
            cluster = null;
        }
    }
}
public virtual void TestMoverFailedRetry() {
    // HDFS-8147
    Configuration conf = new HdfsConfiguration();
    conf.Set(DFSConfigKeys.DfsMoverRetryMaxAttemptsKey, "2");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).StorageTypes(new StorageType[][] {
        new StorageType[] { StorageType.Disk, StorageType.Archive },
        new StorageType[] { StorageType.Disk, StorageType.Archive },
        new StorageType[] { StorageType.Disk, StorageType.Archive } }).Build();
    try {
        cluster.WaitActive();
        DistributedFileSystem dfs = cluster.GetFileSystem();
        string file = "/testMoverFailedRetry";
        // write to DISK
        FSDataOutputStream @out = dfs.Create(new Path(file), (short)2);
        @out.WriteChars("testMoverFailedRetry");
        @out.Close();
        // Delete the block file so the block move will fail with FileNotFoundException
        LocatedBlock lb = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
        cluster.CorruptBlockOnDataNodesByDeletingBlockFile(lb.GetBlock());
        // move to ARCHIVE
        dfs.SetStoragePolicy(new Path(file), "COLD");
        int rc = ToolRunner.Run(conf, new Mover.Cli(), new string[] { "-p", file });
        NUnit.Framework.Assert.AreEqual("Movement should fail after some retry", ExitStatus.IoException.GetExitCode(), rc);
    } finally {
        cluster.Shutdown();
    }
}
public virtual void TestScheduleSameBlock() {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(4).Build();
    try {
        cluster.WaitActive();
        DistributedFileSystem dfs = cluster.GetFileSystem();
        string file = "/testScheduleSameBlock/file";
        {
            FSDataOutputStream @out = dfs.Create(new Path(file));
            @out.WriteChars("testScheduleSameBlock");
            @out.Close();
        }
        Org.Apache.Hadoop.Hdfs.Server.Mover.Mover mover = NewMover(conf);
        mover.Init();
        Mover.Processor processor = new Mover.Processor(this);
        LocatedBlock lb = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
        IList<Mover.MLocation> locations = Mover.MLocation.ToLocations(lb);
        Mover.MLocation ml = locations[0];
        Dispatcher.DBlock db = mover.NewDBlock(lb.GetBlock().GetLocalBlock(), locations);
        IList<StorageType> storageTypes = new AList<StorageType>(Arrays.AsList(StorageType.Default, StorageType.Default));
        // Scheduling a move for the same replica twice: only the first attempt may succeed
        NUnit.Framework.Assert.IsTrue(processor.ScheduleMoveReplica(db, ml, storageTypes));
        NUnit.Framework.Assert.IsFalse(processor.ScheduleMoveReplica(db, ml, storageTypes));
    } finally {
        cluster.Shutdown();
    }
}
/// <summary>Create a file with one block and corrupt some/all of the block replicas.</summary>
/// <exception cref="System.IO.IOException"/>
/// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
/// <exception cref="System.IO.FileNotFoundException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="System.Exception"/>
/// <exception cref="Sharpen.TimeoutException"/>
private void CreateAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount) {
    DFSTestUtil.CreateFile(dfs, filePath, BlockSize, repl, 0);
    DFSTestUtil.WaitReplication(dfs, filePath, repl);
    // Locate the file blocks by asking the name node
    LocatedBlocks locatedblocks = dfs.dfs.GetNamenode().GetBlockLocations(filePath.ToString(), 0L, BlockSize);
    NUnit.Framework.Assert.AreEqual(repl, locatedblocks.Get(0).GetLocations().Length);
    // The file only has one block
    LocatedBlock lblock = locatedblocks.Get(0);
    DatanodeInfo[] datanodeinfos = lblock.GetLocations();
    ExtendedBlock block = lblock.GetBlock();
    // corrupt some/all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++) {
        DatanodeInfo dninfo = datanodeinfos[i];
        DataNode dn = cluster.GetDataNode(dninfo.GetIpcPort());
        CorruptBlock(block, dn);
        Log.Debug("Corrupted block " + block.GetBlockName() + " on data node " + dninfo);
    }
}
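A hedged example of driving CreateAFileWithCorruptedBlockReplicas; the path and counts are hypothetical and the surrounding fixture (`dfs`, `cluster`) is assumed to be up:

    // Corrupt 2 of the 3 replicas of a freshly written single-block file.
    Path filePath = new Path("/testCorruptReplicas");
    CreateAFileWithCorruptedBlockReplicas(filePath, (short)3, 2);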
public virtual void TestAddBlockRetryShouldReturnBlockWithLocations() {
    string src = "/testAddBlockRetryShouldReturnBlockWithLocations";
    NamenodeProtocols nameNodeRpc = cluster.GetNameNodeRpc();
    // create file
    nameNodeRpc.Create(src, FsPermission.GetFileDefault(), "clientName", new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Create)), true, (short)3, 1024, null);
    // start first addBlock()
    Log.Info("Starting first addBlock for " + src);
    LocatedBlock lb1 = nameNodeRpc.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId, null);
    NUnit.Framework.Assert.IsTrue("Block locations should be present", lb1.GetLocations().Length > 0);
    cluster.RestartNameNode();
    nameNodeRpc = cluster.GetNameNodeRpc();
    // retry addBlock() after the NameNode restart
    LocatedBlock lb2 = nameNodeRpc.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId, null);
    NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb1.GetBlock(), lb2.GetBlock());
    NUnit.Framework.Assert.IsTrue("Wrong locations with retry", lb2.GetLocations().Length > 0);
}
public virtual void TestMissingBlock() {
    // Create a file with a single block with replication factor 1
    Path file = GetTestPath("testMissingBlocks");
    CreateFile(file, 100, (short)1);
    // Corrupt the only replica of the block to result in a missing block
    LocatedBlock block = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), file.ToString(), 0, 1).Get(0);
    cluster.GetNamesystem().WriteLock();
    try {
        bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0], "STORAGE_ID", "TEST");
    } finally {
        cluster.GetNamesystem().WriteUnlock();
    }
    UpdateMetrics();
    MetricsRecordBuilder rb = MetricsAsserts.GetMetrics(NsMetrics);
    MetricsAsserts.AssertGauge("UnderReplicatedBlocks", 1L, rb);
    MetricsAsserts.AssertGauge("MissingBlocks", 1L, rb);
    MetricsAsserts.AssertGauge("MissingReplOneBlocks", 1L, rb);
    fs.Delete(file, true);
    WaitForDnMetricValue(NsMetrics, "UnderReplicatedBlocks", 0L);
}
internal virtual bool ScheduleMoves4Block(Mover.StorageTypeDiff diff, LocatedBlock lb) {
    IList<Mover.MLocation> locations = Mover.MLocation.ToLocations(lb);
    Sharpen.Collections.Shuffle(locations);
    Dispatcher.DBlock db = this._enclosing.NewDBlock(lb.GetBlock().GetLocalBlock(), locations);
    foreach (StorageType t in diff.existing) {
        foreach (Mover.MLocation ml in locations) {
            Dispatcher.Source source = this._enclosing.storages.GetSource(ml);
            if (ml.storageType == t && source != null) {
                // try to schedule one replica move.
                if (this.ScheduleMoveReplica(db, source, diff.expected)) {
                    return true;
                }
            }
        }
    }
    return false;
}
/// <exception cref="System.IO.IOException"/> internal static ClientDatanodeProtocolPB CreateClientDatanodeProtocolProxy(DatanodeID datanodeid, Configuration conf, int socketTimeout, bool connectToDnViaHostname, LocatedBlock locatedBlock) { string dnAddr = datanodeid.GetIpcAddr(connectToDnViaHostname); IPEndPoint addr = NetUtils.CreateSocketAddr(dnAddr); if (Log.IsDebugEnabled()) { Log.Debug("Connecting to datanode " + dnAddr + " addr=" + addr); } // Since we're creating a new UserGroupInformation here, we know that no // future RPC proxies will be able to re-use the same connection. And // usages of this proxy tend to be one-off calls. // // This is a temporary fix: callers should really achieve this by using // RPC.stopProxy() on the resulting object, but this is currently not // working in trunk. See the discussion on HDFS-1965. Configuration confWithNoIpcIdle = new Configuration(conf); confWithNoIpcIdle.SetInt(CommonConfigurationKeysPublic.IpcClientConnectionMaxidletimeKey , 0); UserGroupInformation ticket = UserGroupInformation.CreateRemoteUser(locatedBlock. GetBlock().GetLocalBlock().ToString()); ticket.AddToken(locatedBlock.GetBlockToken()); return(CreateClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle, NetUtils .GetDefaultSocketFactory(conf), socketTimeout)); }
public virtual void TestRetryAddBlockWhileInChooseTarget() {
    string src = "/testRetryAddBlockWhileInChooseTarget";
    FSNamesystem ns = cluster.GetNamesystem();
    NamenodeProtocols nn = cluster.GetNameNodeRpc();
    // create file
    nn.Create(src, FsPermission.GetFileDefault(), "clientName", new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Create)), true, (short)3, 1024, null);
    // start first addBlock()
    Log.Info("Starting first addBlock for " + src);
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    DatanodeStorageInfo[] targets = ns.GetNewBlockTargets(src, INodeId.GrandfatherInodeId, "clientName", null, null, null, onRetryBlock);
    NUnit.Framework.Assert.IsNotNull("Targets must be generated", targets);
    // run second addBlock()
    Log.Info("Starting second addBlock for " + src);
    nn.AddBlock(src, "clientName", null, null, INodeId.GrandfatherInodeId, null);
    NUnit.Framework.Assert.IsTrue("Penultimate block must be complete", CheckFileProgress(src, false));
    LocatedBlocks lbs = nn.GetBlockLocations(src, 0, long.MaxValue);
    NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count);
    LocatedBlock lb2 = lbs.Get(0);
    NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb2.GetLocations().Length);
    // continue first addBlock()
    LocatedBlock newBlock = ns.StoreAllocatedBlock(src, INodeId.GrandfatherInodeId, "clientName", null, targets);
    NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb2.GetBlock(), newBlock.GetBlock());
    // check locations
    lbs = nn.GetBlockLocations(src, 0, long.MaxValue);
    NUnit.Framework.Assert.AreEqual("Must be one block", 1, lbs.GetLocatedBlocks().Count);
    LocatedBlock lb1 = lbs.Get(0);
    NUnit.Framework.Assert.AreEqual("Wrong replication", Replication, lb1.GetLocations().Length);
    NUnit.Framework.Assert.AreEqual("Blocks are not equal", lb1.GetBlock(), lb2.GetBlock());
}
public virtual void TestReplicationError() {
    // create a file with a replication factor of 1
    Path fileName = new Path("/test.txt");
    int fileLen = 1;
    DFSTestUtil.CreateFile(fs, fileName, 1, (short)1, 1L);
    DFSTestUtil.WaitReplication(fs, fileName, (short)1);
    // get the block belonging to the created file
    LocatedBlocks blocks = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), fileName.ToString(), 0, (long)fileLen);
    NUnit.Framework.Assert.AreEqual("Should only find 1 block", blocks.LocatedBlockCount(), 1);
    LocatedBlock block = blocks.Get(0);
    // bring up a second datanode
    cluster.StartDataNodes(conf, 1, true, null, null);
    cluster.WaitActive();
    int sndNode = 1;
    DataNode datanode = cluster.GetDataNodes()[sndNode];
    // replicate the block to the second datanode
    IPEndPoint target = datanode.GetXferAddress();
    Socket s = Sharpen.Extensions.CreateSocket(target.Address, target.Port);
    // write the header
    DataOutputStream @out = new DataOutputStream(s.GetOutputStream());
    DataChecksum checksum = DataChecksum.NewDataChecksum(DataChecksum.Type.Crc32, 512);
    new Sender(@out).WriteBlock(block.GetBlock(), StorageType.Default, BlockTokenSecretManager.DummyToken, string.Empty, new DatanodeInfo[0], new StorageType[0], null, BlockConstructionStage.PipelineSetupCreate, 1, 0L, 0L, 0L, checksum, CachingStrategy.NewDefaultStrategy(), false, false, null);
    @out.Flush();
    // close the connection before sending the content of the block
    @out.Close();
    // the temporary block & meta files should be deleted
    string bpid = cluster.GetNamesystem().GetBlockPoolId();
    FilePath storageDir = cluster.GetInstanceStorageDir(sndNode, 0);
    FilePath dir1 = MiniDFSCluster.GetRbwDir(storageDir, bpid);
    storageDir = cluster.GetInstanceStorageDir(sndNode, 1);
    FilePath dir2 = MiniDFSCluster.GetRbwDir(storageDir, bpid);
    while (dir1.ListFiles().Length != 0 || dir2.ListFiles().Length != 0) {
        Sharpen.Thread.Sleep(100);
    }
    // then increase the file's replication factor
    fs.SetReplication(fileName, (short)2);
    // replication should succeed
    DFSTestUtil.WaitReplication(fs, fileName, (short)2);
    // clean up the file
    fs.Delete(fileName, false);
}
/// <exception cref="System.Exception"/> public virtual void TestBlockReaderLocalLegacyWithAppend() { short ReplFactor = 1; HdfsConfiguration conf = GetConfiguration(null); conf.SetBoolean(DFSConfigKeys.DfsClientUseLegacyBlockreaderlocal, true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).Build(); cluster.WaitActive(); DistributedFileSystem dfs = cluster.GetFileSystem(); Path path = new Path("/testBlockReaderLocalLegacy"); DFSTestUtil.CreateFile(dfs, path, 10, ReplFactor, 0); DFSTestUtil.WaitReplication(dfs, path, ReplFactor); ClientDatanodeProtocol proxy; Org.Apache.Hadoop.Security.Token.Token <BlockTokenIdentifier> token; ExtendedBlock originalBlock; long originalGS; { LocatedBlock lb = cluster.GetNameNode().GetRpcServer().GetBlockLocations(path.ToString (), 0, 1).Get(0); proxy = DFSUtil.CreateClientDatanodeProtocolProxy(lb.GetLocations()[0], conf, 60000 , false); token = lb.GetBlockToken(); // get block and generation stamp ExtendedBlock blk = new ExtendedBlock(lb.GetBlock()); originalBlock = new ExtendedBlock(blk); originalGS = originalBlock.GetGenerationStamp(); // test getBlockLocalPathInfo BlockLocalPathInfo info = proxy.GetBlockLocalPathInfo(blk, token); NUnit.Framework.Assert.AreEqual(originalGS, info.GetBlock().GetGenerationStamp()); } { // append one byte FSDataOutputStream @out = dfs.Append(path); @out.Write(1); @out.Close(); } { // get new generation stamp LocatedBlock lb = cluster.GetNameNode().GetRpcServer().GetBlockLocations(path.ToString (), 0, 1).Get(0); long newGS = lb.GetBlock().GetGenerationStamp(); NUnit.Framework.Assert.IsTrue(newGS > originalGS); // getBlockLocalPathInfo using the original block. NUnit.Framework.Assert.AreEqual(originalGS, originalBlock.GetGenerationStamp()); BlockLocalPathInfo info = proxy.GetBlockLocalPathInfo(originalBlock, token); NUnit.Framework.Assert.AreEqual(newGS, info.GetBlock().GetGenerationStamp()); } cluster.Shutdown(); }
// try reading a block using a BlockReader directly
private static void TryRead(Configuration conf, LocatedBlock lblock, bool shouldSucceed) {
    IPEndPoint targetAddr = null;
    IOException ioe = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.GetBlock();
    try {
        DatanodeInfo[] nodes = lblock.GetLocations();
        targetAddr = NetUtils.CreateSocketAddr(nodes[0].GetXferAddr());
        blockReader = new BlockReaderFactory(new DFSClient.Conf(conf))
            .SetFileName(BlockReaderFactory.GetFileName(targetAddr, "test-blockpoolid", block.GetBlockId()))
            .SetBlock(block)
            .SetBlockToken(lblock.GetBlockToken())
            .SetInetSocketAddress(targetAddr)
            .SetStartOffset(0)
            .SetLength(-1)
            .SetVerifyChecksum(true)
            .SetClientName("TestBlockTokenWithDFS")
            .SetDatanodeInfo(nodes[0])
            .SetCachingStrategy(CachingStrategy.NewDefaultStrategy())
            .SetClientCacheContext(ClientContext.GetFromConf(conf))
            .SetConfiguration(conf)
            .SetRemotePeerFactory(new _RemotePeerFactory_162(conf))
            .Build();
    } catch (IOException ex) {
        ioe = ex;
    } finally {
        if (blockReader != null) {
            try {
                blockReader.Close();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }
    if (shouldSucceed) {
        NUnit.Framework.Assert.IsNotNull("OP_READ_BLOCK: access token is invalid, when it is expected to be valid", blockReader);
    } else {
        NUnit.Framework.Assert.IsNotNull("OP_READ_BLOCK: access token is valid, when it is expected to be invalid", ioe);
        NUnit.Framework.Assert.IsTrue("OP_READ_BLOCK failed due to reasons other than access token: ", ioe is InvalidBlockTokenException);
    }
}
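An illustrative pairing of TryRead calls, sketched under the assumption that `lblock` carries a valid block token on the first call and an invalidated or expired one on the second (the token manipulation itself is outside this sketch):

    TryRead(conf, lblock, true);   // valid token: the BlockReader must be built
    // ... invalidate or expire lblock's block token here ...
    TryRead(conf, lblock, false);  // invalid token: an InvalidBlockTokenException is expected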
/// <summary>TC7: Corrupted replicas are present.</summary>
/// <exception cref="System.IO.IOException">an exception might be thrown</exception>
/// <exception cref="System.Exception"/>
private void TestTC7(bool appendToNewBlock) {
    short repl = 2;
    Path p = new Path("/TC7/foo" + (appendToNewBlock ? "0" : "1"));
    System.Console.Out.WriteLine("p=" + p);
    //a. Create file with replication factor of 2. Write half block of data. Close file.
    int len1 = (int)(BlockSize / 2);
    {
        FSDataOutputStream @out = fs.Create(p, false, buffersize, repl, BlockSize);
        AppendTestUtil.Write(@out, 0, len1);
        @out.Close();
    }
    DFSTestUtil.WaitReplication(fs, p, repl);
    //b. Log into one datanode that has one replica of this block.
    //   Find the block file on this datanode and truncate it to zero size.
    LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(p.ToString(), 0L, len1);
    NUnit.Framework.Assert.AreEqual(1, locatedblocks.LocatedBlockCount());
    LocatedBlock lb = locatedblocks.Get(0);
    ExtendedBlock blk = lb.GetBlock();
    NUnit.Framework.Assert.AreEqual(len1, lb.GetBlockSize());
    DatanodeInfo[] datanodeinfos = lb.GetLocations();
    NUnit.Framework.Assert.AreEqual(repl, datanodeinfos.Length);
    DataNode dn = cluster.GetDataNode(datanodeinfos[0].GetIpcPort());
    FilePath f = DataNodeTestUtils.GetBlockFile(dn, blk.GetBlockPoolId(), blk.GetLocalBlock());
    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    AppendTestUtil.Log.Info("dn=" + dn + ", blk=" + blk + " (length=" + blk.GetNumBytes() + ")");
    NUnit.Framework.Assert.AreEqual(len1, raf.Length());
    raf.SetLength(0);
    raf.Close();
    //c. Open file in "append mode". Append a new block worth of data. Close file.
    int len2 = (int)BlockSize;
    {
        FSDataOutputStream @out = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null) : fs.Append(p);
        AppendTestUtil.Write(@out, len1, len2);
        @out.Close();
    }
    //d. Reopen file and read two blocks worth of data.
    AppendTestUtil.Check(fs, p, len1 + len2);
}
/// <summary>TC11: Racing rename</summary>
/// <exception cref="System.Exception"/>
private void TestTC11(bool appendToNewBlock) {
    Path p = new Path("/TC11/foo" + (appendToNewBlock ? "0" : "1"));
    System.Console.Out.WriteLine("p=" + p);
    //a. Create file and write one block of data. Close file.
    int len1 = (int)BlockSize;
    {
        FSDataOutputStream @out = fs.Create(p, false, buffersize, Replication, BlockSize);
        AppendTestUtil.Write(@out, 0, len1);
        @out.Close();
    }
    //b. Reopen file in "append" mode. Append half block of data.
    FSDataOutputStream out_1 = appendToNewBlock ? fs.Append(p, EnumSet.Of(CreateFlag.Append, CreateFlag.NewBlock), 4096, null) : fs.Append(p);
    int len2 = (int)BlockSize / 2;
    AppendTestUtil.Write(out_1, len1, len2);
    out_1.Hflush();
    //c. Rename file to file.new.
    Path pnew = new Path(p + ".new");
    NUnit.Framework.Assert.IsTrue(fs.Rename(p, pnew));
    //d. Close file handle that was opened in (b).
    out_1.Close();
    // check block sizes
    long len = fs.GetFileStatus(pnew).GetLen();
    LocatedBlocks locatedblocks = fs.dfs.GetNamenode().GetBlockLocations(pnew.ToString(), 0L, len);
    int numblock = locatedblocks.LocatedBlockCount();
    for (int i = 0; i < numblock; i++) {
        LocatedBlock lb = locatedblocks.Get(i);
        ExtendedBlock blk = lb.GetBlock();
        long size = lb.GetBlockSize();
        if (i < numblock - 1) {
            NUnit.Framework.Assert.AreEqual(BlockSize, size);
        }
        foreach (DatanodeInfo datanodeinfo in lb.GetLocations()) {
            DataNode dn = cluster.GetDataNode(datanodeinfo.GetIpcPort());
            Block metainfo = DataNodeTestUtils.GetFSDataset(dn).GetStoredBlock(blk.GetBlockPoolId(), blk.GetBlockId());
            NUnit.Framework.Assert.AreEqual(size, metainfo.GetNumBytes());
        }
    }
}
private void Compare(LocatedBlock expected, LocatedBlock actual) {
    NUnit.Framework.Assert.AreEqual(expected.GetBlock(), actual.GetBlock());
    Compare(expected.GetBlockToken(), actual.GetBlockToken());
    NUnit.Framework.Assert.AreEqual(expected.GetStartOffset(), actual.GetStartOffset());
    NUnit.Framework.Assert.AreEqual(expected.IsCorrupt(), actual.IsCorrupt());
    DatanodeInfo[] ei = expected.GetLocations();
    DatanodeInfo[] ai = actual.GetLocations();
    NUnit.Framework.Assert.AreEqual(ei.Length, ai.Length);
    for (int i = 0; i < ei.Length; i++) {
        Compare(ei[i], ai[i]);
    }
}
/// <summary>Convert a LocatedBlock to a Json map.</summary>
/// <exception cref="System.IO.IOException"/>
private static IDictionary<string, object> ToJsonMap(LocatedBlock locatedblock) {
    if (locatedblock == null) {
        return null;
    }
    IDictionary<string, object> m = new SortedDictionary<string, object>();
    m["blockToken"] = ToJsonMap(locatedblock.GetBlockToken());
    m["isCorrupt"] = locatedblock.IsCorrupt();
    m["startOffset"] = locatedblock.GetStartOffset();
    m["block"] = ToJsonMap(locatedblock.GetBlock());
    m["locations"] = ToJsonArray(locatedblock.GetLocations());
    m["cachedLocations"] = ToJsonArray(locatedblock.GetCachedLocations());
    return m;
}
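For illustration, a sketch of how the resulting map could be checked; the keys listed are exactly the ones populated above:

    IDictionary<string, object> m = ToJsonMap(locatedblock);
    foreach (string key in new string[] { "block", "blockToken", "cachedLocations", "isCorrupt", "locations", "startOffset" }) {
        NUnit.Framework.Assert.IsTrue("Missing key: " + key, m.ContainsKey(key));
    }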
/// <summary>Try to access a block on a data node.</summary>
/// <remarks>Try to access a block on a data node. If it fails, an exception is thrown.</remarks>
/// <param name="datanode"/>
/// <param name="lblock"/>
/// <exception cref="System.IO.IOException"/>
private void AccessBlock(DatanodeInfo datanode, LocatedBlock lblock) {
    IPEndPoint targetAddr = null;
    ExtendedBlock block = lblock.GetBlock();
    targetAddr = NetUtils.CreateSocketAddr(datanode.GetXferAddr());
    BlockReader blockReader = new BlockReaderFactory(new DFSClient.Conf(conf))
        .SetInetSocketAddress(targetAddr)
        .SetBlock(block)
        .SetFileName(BlockReaderFactory.GetFileName(targetAddr, "test-blockpoolid", block.GetBlockId()))
        .SetBlockToken(lblock.GetBlockToken())
        .SetStartOffset(0)
        .SetLength(-1)
        .SetVerifyChecksum(true)
        .SetClientName("TestDataNodeVolumeFailure")
        .SetDatanodeInfo(datanode)
        .SetCachingStrategy(CachingStrategy.NewDefaultStrategy())
        .SetClientCacheContext(ClientContext.GetFromConf(conf))
        .SetConfiguration(conf)
        .SetRemotePeerFactory(new _RemotePeerFactory_422(this))
        .Build();
    blockReader.Close();
}
public virtual void Setup() {
    conf = new HdfsConfiguration();
    SimulatedFSDataset.SetFactory(conf);
    Configuration[] overlays = new Configuration[NumDatanodes];
    for (int i = 0; i < overlays.Length; i++) {
        overlays[i] = new Configuration();
        if (i == RoNodeIndex) {
            overlays[i].SetEnum(SimulatedFSDataset.ConfigPropertyState, i == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
        }
    }
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumDatanodes).DataNodeConfOverlays(overlays).Build();
    fs = cluster.GetFileSystem();
    blockManager = cluster.GetNameNode().GetNamesystem().GetBlockManager();
    datanodeManager = blockManager.GetDatanodeManager();
    client = new DFSClient(new IPEndPoint("localhost", cluster.GetNameNodePort()), cluster.GetConfiguration(0));
    for (int i_1 = 0; i_1 < NumDatanodes; i_1++) {
        DataNode dataNode = cluster.GetDataNodes()[i_1];
        ValidateStorageState(BlockManagerTestUtil.GetStorageReportsForDatanode(datanodeManager.GetDatanode(dataNode.GetDatanodeId())), i_1 == RoNodeIndex ? DatanodeStorage.State.ReadOnlyShared : DatanodeStorage.State.Normal);
    }
    // Create a 1 block file
    DFSTestUtil.CreateFile(fs, Path, BlockSize, BlockSize, BlockSize, (short)1, seed);
    LocatedBlock locatedBlock = GetLocatedBlock();
    extendedBlock = locatedBlock.GetBlock();
    block = extendedBlock.GetLocalBlock();
    Assert.AssertThat(locatedBlock.GetLocations().Length, CoreMatchers.Is(1));
    normalDataNode = locatedBlock.GetLocations()[0];
    readOnlyDataNode = datanodeManager.GetDatanode(cluster.GetDataNodes()[RoNodeIndex].GetDatanodeId());
    Assert.AssertThat(normalDataNode, CoreMatchers.Is(CoreMatchers.Not(readOnlyDataNode)));
    ValidateNumberReplicas(1);
    // Inject the block into the datanode with READ_ONLY_SHARED storage
    cluster.InjectBlocks(0, RoNodeIndex, Collections.Singleton(block));
    // There should now be 2 *locations* for the block
    // Must wait until the NameNode has processed the block report for the injected blocks
    WaitForLocations(2);
}
/// <summary>Get a BlockReader for the given block.</summary>
/// <exception cref="System.IO.IOException"/>
public static BlockReader GetBlockReader(MiniDFSCluster cluster, LocatedBlock testBlock, int offset, int lenToRead) {
    IPEndPoint targetAddr = null;
    ExtendedBlock block = testBlock.GetBlock();
    DatanodeInfo[] nodes = testBlock.GetLocations();
    targetAddr = NetUtils.CreateSocketAddr(nodes[0].GetXferAddr());
    DistributedFileSystem fs = cluster.GetFileSystem();
    return new BlockReaderFactory(fs.GetClient().GetConf())
        .SetInetSocketAddress(targetAddr)
        .SetBlock(block)
        .SetFileName(targetAddr.ToString() + ":" + block.GetBlockId())
        .SetBlockToken(testBlock.GetBlockToken())
        .SetStartOffset(offset)
        .SetLength(lenToRead)
        .SetVerifyChecksum(true)
        .SetClientName("BlockReaderTestUtil")
        .SetDatanodeInfo(nodes[0])
        .SetClientCacheContext(ClientContext.GetFromConf(fs.GetConf()))
        .SetCachingStrategy(CachingStrategy.NewDefaultStrategy())
        .SetConfiguration(fs.GetConf())
        .SetAllowShortCircuitLocalReads(true)
        .SetRemotePeerFactory(new _RemotePeerFactory_196(fs))
        .Build();
}
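A hedged usage sketch for GetBlockReader: the LocatedBlock is fetched the way the other tests here do, and the Read(byte[], int, int) overload on BlockReader is an assumption of this sketch rather than something shown elsewhere in this file:

    LocatedBlock testBlock = dfs.GetClient().GetLocatedBlocks(file, 0).Get(0);
    BlockReader reader = GetBlockReader(cluster, testBlock, 0, 1024);
    byte[] buf = new byte[1024];
    reader.Read(buf, 0, buf.Length);  // assumed overload, see note above
    reader.Close();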
public virtual void TestBlockMoveAcrossStorageInSameNode() {
    Configuration conf = new HdfsConfiguration();
    // create only one datanode in the cluster to verify movement within the datanode
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(1).StorageTypes(new StorageType[] { StorageType.Disk, StorageType.Archive }).Build();
    try {
        cluster.WaitActive();
        DistributedFileSystem dfs = cluster.GetFileSystem();
        Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
        DFSTestUtil.CreateFile(dfs, file, 1024, (short)1, 1024);
        LocatedBlocks locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
        // get the current block and its locations
        LocatedBlock locatedBlock = locatedBlocks.Get(0);
        ExtendedBlock block = locatedBlock.GetBlock();
        DatanodeInfo[] locations = locatedBlock.GetLocations();
        NUnit.Framework.Assert.AreEqual(1, locations.Length);
        StorageType[] storageTypes = locatedBlock.GetStorageTypes();
        // the current block should be written to DISK
        NUnit.Framework.Assert.IsTrue(storageTypes[0] == StorageType.Disk);
        DatanodeInfo source = locations[0];
        // move block to ARCHIVE by using the same DatanodeInfo for source, proxy and
        // destination so that the movement happens within the datanode
        NUnit.Framework.Assert.IsTrue(ReplaceBlock(block, source, source, source, StorageType.Archive));
        // wait till the namenode is notified
        Sharpen.Thread.Sleep(3000);
        locatedBlocks = dfs.GetClient().GetLocatedBlocks(file.ToString(), 0);
        // re-fetch the current block
        locatedBlock = locatedBlocks.Get(0);
        NUnit.Framework.Assert.AreEqual("Storage should be only one", 1, locatedBlock.GetLocations().Length);
        NUnit.Framework.Assert.IsTrue("Block should be moved to ARCHIVE", locatedBlock.GetStorageTypes()[0] == StorageType.Archive);
    } finally {
        cluster.Shutdown();
    }
}
// Waits for all of the blocks to have expected replication
/// <exception cref="System.IO.IOException"/>
private void WaitForBlockReplication(string filename, ClientProtocol namenode, int expected, long maxWaitSec) {
    long start = Time.MonotonicNow();
    // wait for all the blocks to be replicated
    Log.Info("Checking for block replication for " + filename);
    LocatedBlocks blocks = namenode.GetBlockLocations(filename, 0, long.MaxValue);
    NUnit.Framework.Assert.AreEqual(numBlocks, blocks.LocatedBlockCount());
    for (int i = 0; i < numBlocks; ++i) {
        Log.Info("Checking for block:" + (i + 1));
        while (true) {
            // Loop to check for block i (usually when block 0 is done, all will be done)
            blocks = namenode.GetBlockLocations(filename, 0, long.MaxValue);
            NUnit.Framework.Assert.AreEqual(numBlocks, blocks.LocatedBlockCount());
            LocatedBlock block = blocks.Get(i);
            int actual = block.GetLocations().Length;
            if (actual == expected) {
                Log.Info("Got enough replicas for " + (i + 1) + "th block " + block.GetBlock() + ", got " + actual + ".");
                break;
            }
            Log.Info("Not enough replicas for " + (i + 1) + "th block " + block.GetBlock() + " yet. Expecting " + expected + ", got " + actual + ".");
            if (maxWaitSec > 0 && (Time.MonotonicNow() - start) > (maxWaitSec * 1000)) {
                throw new IOException("Timed out while waiting for all blocks to be replicated for " + filename);
            }
            try {
                Sharpen.Thread.Sleep(500);
            } catch (Exception) {
            }
        }
    }
}
// Waits for all of the blocks to have expected replication
/// <exception cref="System.IO.IOException"/>
private void WaitForBlockReplication(string filename, ClientProtocol namenode, int expected, long maxWaitSec) {
    long start = Time.MonotonicNow();
    // wait for all the blocks to be replicated
    Log.Info("Checking for block replication for " + filename);
    while (true) {
        bool replOk = true;
        LocatedBlocks blocks = namenode.GetBlockLocations(filename, 0, long.MaxValue);
        for (IEnumerator<LocatedBlock> iter = blocks.GetLocatedBlocks().GetEnumerator(); iter.HasNext();) {
            LocatedBlock block = iter.Next();
            int actual = block.GetLocations().Length;
            if (actual < expected) {
                Log.Info("Not enough replicas for " + block.GetBlock() + " yet. Expecting " + expected + ", got " + actual + ".");
                replOk = false;
                break;
            }
        }
        if (replOk) {
            return;
        }
        if (maxWaitSec > 0 && (Time.MonotonicNow() - start) > (maxWaitSec * 1000)) {
            throw new IOException("Timed out while waiting for all blocks to be replicated for " + filename);
        }
        try {
            Sharpen.Thread.Sleep(500);
        } catch (Exception) {
        }
    }
}
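Sketched usage of WaitForBlockReplication, mirroring the replication tests above; the file name and timeout are hypothetical:

    // Bump replication, then block until every block reports 2 replicas or 30s elapse.
    fs.SetReplication(fileName, (short)2);
    WaitForBlockReplication(fileName.ToString(), cluster.GetNameNodeRpc(), 2, 30);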
/// <exception cref="System.IO.IOException"/> private void TestPlacement(string clientMachine, string clientRack) { // write 5 files and check whether all times block placed for (int i = 0; i < 5; i++) { string src = "/test-" + i; // Create the file with client machine HdfsFileStatus fileStatus = namesystem.StartFile(src, perm, clientMachine, clientMachine , EnumSet.Of(CreateFlag.Create), true, ReplicationFactor, DefaultBlockSize, null , false); LocatedBlock locatedBlock = nameNodeRpc.AddBlock(src, clientMachine, null, null, fileStatus.GetFileId(), null); NUnit.Framework.Assert.AreEqual("Block should be allocated sufficient locations", ReplicationFactor, locatedBlock.GetLocations().Length); if (clientRack != null) { NUnit.Framework.Assert.AreEqual("First datanode should be rack local", clientRack , locatedBlock.GetLocations()[0].GetNetworkLocation()); } nameNodeRpc.AbandonBlock(locatedBlock.GetBlock(), fileStatus.GetFileId(), src, clientMachine ); } }
public virtual void TestUpdatePipelineAfterDelete() {
    Configuration conf = new HdfsConfiguration();
    Path file = new Path("/test-file");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).Build();
    try {
        FileSystem fs = cluster.GetFileSystem();
        NamenodeProtocols namenode = cluster.GetNameNodeRpc();
        DFSOutputStream @out = null;
        try {
            // Create a file and make sure a block is allocated for it.
            @out = (DFSOutputStream)(fs.Create(file).GetWrappedStream());
            @out.Write(1);
            @out.Hflush();
            // Create a snapshot that includes the file.
            SnapshotTestHelper.CreateSnapshot((DistributedFileSystem)fs, new Path("/"), "s1");
            // Grab the block info of this file for later use.
            FSDataInputStream @in = null;
            ExtendedBlock oldBlock = null;
            try {
                @in = fs.Open(file);
                oldBlock = DFSTestUtil.GetAllBlocks(@in)[0].GetBlock();
            } finally {
                IOUtils.CloseStream(@in);
            }
            // Allocate a new block ID/gen stamp so we can simulate pipeline
            // recovery.
            string clientName = ((DistributedFileSystem)fs).GetClient().GetClientName();
            LocatedBlock newLocatedBlock = namenode.UpdateBlockForPipeline(oldBlock, clientName);
            ExtendedBlock newBlock = new ExtendedBlock(oldBlock.GetBlockPoolId(), oldBlock.GetBlockId(), oldBlock.GetNumBytes(), newLocatedBlock.GetBlock().GetGenerationStamp());
            // Delete the file from the present FS. It will still exist in the
            // previously-created snapshot. This will log an OP_DELETE for the
            // file in question.
            fs.Delete(file, true);
            // Simulate a pipeline recovery, wherein a new block is allocated
            // for the existing block, resulting in an OP_UPDATE_BLOCKS being
            // logged for the file in question.
            try {
                namenode.UpdatePipeline(clientName, oldBlock, newBlock, newLocatedBlock.GetLocations(), newLocatedBlock.GetStorageIDs());
            } catch (IOException ioe) {
                // normal
                GenericTestUtils.AssertExceptionContains("does not exist or it is not under construction", ioe);
            }
            // Make sure the NN can restart with the edit logs as we have them now.
            cluster.RestartNameNode(true);
        } finally {
            IOUtils.CloseStream(@out);
        }
    } finally {
        cluster.Shutdown();
    }
}
public virtual void TestPendingAndInvalidate() {
    Configuration Conf = new HdfsConfiguration();
    Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, 1024);
    Conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, DfsReplicationInterval);
    Conf.SetInt(DFSConfigKeys.DfsNamenodeReplicationIntervalKey, DfsReplicationInterval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(DatanodeCount).Build();
    cluster.WaitActive();
    FSNamesystem namesystem = cluster.GetNamesystem();
    BlockManager bm = namesystem.GetBlockManager();
    DistributedFileSystem fs = cluster.GetFileSystem();
    try {
        // 1. create a file
        Path filePath = new Path("/tmp.txt");
        DFSTestUtil.CreateFile(fs, filePath, 1024, (short)3, 0L);
        // 2. disable the heartbeats
        foreach (DataNode dn in cluster.GetDataNodes()) {
            DataNodeTestUtils.SetHeartbeatsDisabledForTests(dn, true);
        }
        // 3. mark a couple of blocks as corrupt
        LocatedBlock block = NameNodeAdapter.GetBlockLocations(cluster.GetNameNode(), filePath.ToString(), 0, 1).Get(0);
        cluster.GetNamesystem().WriteLock();
        try {
            bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[0], "STORAGE_ID", "TEST");
            bm.FindAndMarkBlockAsCorrupt(block.GetBlock(), block.GetLocations()[1], "STORAGE_ID", "TEST");
        } finally {
            cluster.GetNamesystem().WriteUnlock();
        }
        BlockManagerTestUtil.ComputeAllPendingWork(bm);
        BlockManagerTestUtil.UpdateState(bm);
        NUnit.Framework.Assert.AreEqual(bm.GetPendingReplicationBlocksCount(), 1L);
        NUnit.Framework.Assert.AreEqual(bm.pendingReplications.GetNumReplicas(block.GetBlock().GetLocalBlock()), 2);
        // 4. delete the file
        fs.Delete(filePath, true);
        // retry at most 10 times, each time sleeping for 1s. Note that 10s is much
        // less than the default pending record timeout (5~10min)
        int retries = 10;
        long pendingNum = bm.GetPendingReplicationBlocksCount();
        while (pendingNum != 0 && retries-- > 0) {
            // let the NN do the deletion
            Sharpen.Thread.Sleep(1000);
            BlockManagerTestUtil.UpdateState(bm);
            pendingNum = bm.GetPendingReplicationBlocksCount();
        }
        NUnit.Framework.Assert.AreEqual(pendingNum, 0L);
    } finally {
        cluster.Shutdown();
    }
}
/// <summary>The following test first creates a file.</summary>
/// <remarks>
/// The following test first creates a file.
/// It verifies the block information from a datanode.
/// Then, it updates the block with new information and verifies again.
/// </remarks>
/// <param name="useDnHostname">whether DNs should connect to other DNs by hostname</param>
/// <exception cref="System.Exception"/>
private void CheckBlockMetaDataInfo(bool useDnHostname) {
    MiniDFSCluster cluster = null;
    conf.SetBoolean(DFSConfigKeys.DfsDatanodeUseDnHostname, useDnHostname);
    if (useDnHostname) {
        // Since the mini cluster only listens on the loopback we have to
        // ensure the hostname used to access DNs maps to the loopback. We
        // do this by telling the DN to advertise localhost as its hostname
        // instead of the default hostname.
        conf.Set(DFSConfigKeys.DfsDatanodeHostNameKey, "localhost");
    }
    try {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).CheckDataNodeHostConfig(true).Build();
        cluster.WaitActive();
        // create a file
        DistributedFileSystem dfs = cluster.GetFileSystem();
        string filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.CreateFile(dfs, filepath, 1024L, (short)3, 0L);
        NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
        // get block info
        LocatedBlock locatedblock = GetLastLocatedBlock(DFSClientAdapter.GetDFSClient(dfs).GetNamenode(), filestr);
        DatanodeInfo[] datanodeinfo = locatedblock.GetLocations();
        NUnit.Framework.Assert.IsTrue(datanodeinfo.Length > 0);
        // connect to a data node
        DataNode datanode = cluster.GetDataNode(datanodeinfo[0].GetIpcPort());
        InterDatanodeProtocol idp = DataNodeTestUtils.CreateInterDatanodeProtocolProxy(datanode, datanodeinfo[0], conf, useDnHostname);
        // Stop the block scanners.
        datanode.GetBlockScanner().RemoveAllVolumeScanners();
        // verify BlockMetaDataInfo
        ExtendedBlock b = locatedblock.GetBlock();
        InterDatanodeProtocol.Log.Info("b=" + b + ", " + b.GetType());
        CheckMetaInfo(b, datanode);
        long recoveryId = b.GetGenerationStamp() + 1;
        idp.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock(b, locatedblock.GetLocations(), recoveryId));
        // verify updateBlock
        ExtendedBlock newblock = new ExtendedBlock(b.GetBlockPoolId(), b.GetBlockId(), b.GetNumBytes() / 2, b.GetGenerationStamp() + 1);
        idp.UpdateReplicaUnderRecovery(b, recoveryId, b.GetBlockId(), newblock.GetNumBytes());
        CheckMetaInfo(newblock, datanode);
        // Verify correct null response trying to init recovery for a missing block
        ExtendedBlock badBlock = new ExtendedBlock("fake-pool", b.GetBlockId(), 0, 0);
        NUnit.Framework.Assert.IsNull(idp.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock(badBlock, locatedblock.GetLocations(), recoveryId)));
    } finally {
        if (cluster != null) {
            cluster.Shutdown();
        }
    }
}
public virtual void TestBlockSynchronization() {
    int OrgFileSize = 3000;
    Configuration conf = new HdfsConfiguration();
    conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, BlockSize);
    cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(5).Build();
    cluster.WaitActive();
    // create a file
    DistributedFileSystem dfs = cluster.GetFileSystem();
    string filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.CreateFile(dfs, filepath, OrgFileSize, ReplicationNum, 0L);
    NUnit.Framework.Assert.IsTrue(dfs.Exists(filepath));
    DFSTestUtil.WaitReplication(dfs, filepath, ReplicationNum);
    // get block info for the last block
    LocatedBlock locatedblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(), filestr);
    DatanodeInfo[] datanodeinfos = locatedblock.GetLocations();
    NUnit.Framework.Assert.AreEqual(ReplicationNum, datanodeinfos.Length);
    // connect to data nodes
    DataNode[] datanodes = new DataNode[ReplicationNum];
    for (int i = 0; i < ReplicationNum; i++) {
        datanodes[i] = cluster.GetDataNode(datanodeinfos[i].GetIpcPort());
        NUnit.Framework.Assert.IsTrue(datanodes[i] != null);
    }
    // verify block info
    ExtendedBlock lastblock = locatedblock.GetBlock();
    DataNode.Log.Info("newblocks=" + lastblock);
    for (int i_1 = 0; i_1 < ReplicationNum; i_1++) {
        CheckMetaInfo(lastblock, datanodes[i_1]);
    }
    DataNode.Log.Info("dfs.dfs.clientName=" + dfs.dfs.clientName);
    cluster.GetNameNodeRpc().Append(filestr, dfs.dfs.clientName, new EnumSetWritable<CreateFlag>(EnumSet.Of(CreateFlag.Append)));
    // expire lease to trigger block recovery.
    WaitLeaseRecovery(cluster);
    Block[] updatedmetainfo = new Block[ReplicationNum];
    long oldSize = lastblock.GetNumBytes();
    lastblock = TestInterDatanodeProtocol.GetLastLocatedBlock(dfs.dfs.GetNamenode(), filestr).GetBlock();
    long currentGS = lastblock.GetGenerationStamp();
    for (int i_2 = 0; i_2 < ReplicationNum; i_2++) {
        updatedmetainfo[i_2] = DataNodeTestUtils.GetFSDataset(datanodes[i_2]).GetStoredBlock(lastblock.GetBlockPoolId(), lastblock.GetBlockId());
        NUnit.Framework.Assert.AreEqual(lastblock.GetBlockId(), updatedmetainfo[i_2].GetBlockId());
        NUnit.Framework.Assert.AreEqual(oldSize, updatedmetainfo[i_2].GetNumBytes());
        NUnit.Framework.Assert.AreEqual(currentGS, updatedmetainfo[i_2].GetGenerationStamp());
    }
    // verify that lease recovery does not occur when the namenode is in safemode
    System.Console.Out.WriteLine("Testing that lease recovery cannot happen during safemode.");
    filestr = "/foo.safemode";
    filepath = new Path(filestr);
    dfs.Create(filepath, (short)1);
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter, false);
    NUnit.Framework.Assert.IsTrue(dfs.dfs.Exists(filestr));
    DFSTestUtil.WaitReplication(dfs, filepath, (short)1);
    WaitLeaseRecovery(cluster);
    // verify that we still cannot recover the lease
    LeaseManager lm = NameNodeAdapter.GetLeaseManager(cluster.GetNamesystem());
    NUnit.Framework.Assert.IsTrue("Found " + lm.CountLease() + " lease, expected 1", lm.CountLease() == 1);
    cluster.GetNameNodeRpc().SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave, false);
}
public virtual void TestUpdateReplicaUnderRecovery() {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        cluster.WaitActive();
        string bpid = cluster.GetNamesystem().GetBlockPoolId();
        // create a file
        DistributedFileSystem dfs = cluster.GetFileSystem();
        string filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.CreateFile(dfs, filepath, 1024L, (short)3, 0L);
        // get block info
        LocatedBlock locatedblock = GetLastLocatedBlock(DFSClientAdapter.GetDFSClient(dfs).GetNamenode(), filestr);
        DatanodeInfo[] datanodeinfo = locatedblock.GetLocations();
        NUnit.Framework.Assert.IsTrue(datanodeinfo.Length > 0);
        // get DataNode and FSDataset objects
        DataNode datanode = cluster.GetDataNode(datanodeinfo[0].GetIpcPort());
        NUnit.Framework.Assert.IsTrue(datanode != null);
        // initReplicaRecovery
        ExtendedBlock b = locatedblock.GetBlock();
        long recoveryid = b.GetGenerationStamp() + 1;
        long newlength = b.GetNumBytes() - 1;
        FsDatasetSpi<object> fsdataset = DataNodeTestUtils.GetFSDataset(datanode);
        ReplicaRecoveryInfo rri = fsdataset.InitReplicaRecovery(new BlockRecoveryCommand.RecoveringBlock(b, null, recoveryid));
        // check replica
        ReplicaInfo replica = FsDatasetTestUtil.FetchReplicaInfo(fsdataset, bpid, b.GetBlockId());
        NUnit.Framework.Assert.AreEqual(HdfsServerConstants.ReplicaState.Rur, replica.GetState());
        // check meta data before update
        FsDatasetImpl.CheckReplicaFiles(replica);
        {
            // case "THIS IS NOT SUPPOSED TO HAPPEN"
            // with (block length) != (stored replica's on disk length):
            // create a block with the same id and gs but a different length.
            ExtendedBlock tmp = new ExtendedBlock(b.GetBlockPoolId(), rri.GetBlockId(), rri.GetNumBytes() - 1, rri.GetGenerationStamp());
            try {
                // the update should fail
                fsdataset.UpdateReplicaUnderRecovery(tmp, recoveryid, tmp.GetBlockId(), newlength);
                NUnit.Framework.Assert.Fail();
            } catch (IOException ioe) {
                System.Console.Out.WriteLine("GOOD: getting " + ioe);
            }
        }
        // update
        string storageID = fsdataset.UpdateReplicaUnderRecovery(new ExtendedBlock(b.GetBlockPoolId(), rri), recoveryid, rri.GetBlockId(), newlength);
        NUnit.Framework.Assert.IsTrue(storageID != null);
    } finally {
        if (cluster != null) {
            cluster.Shutdown();
        }
    }
}