public virtual void TestDatanodeReport()
{
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 500); // 0.5s
    conf.SetLong(DFSConfigKeys.DfsHeartbeatIntervalKey, 1L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(NumOfDatanodes).Build();
    try
    {
        // wait until the cluster is up
        cluster.WaitActive();
        string bpid = cluster.GetNamesystem().GetBlockPoolId();
        IList<DataNode> datanodes = cluster.GetDataNodes();
        DFSClient client = cluster.GetFileSystem().dfs;
        AssertReports(NumOfDatanodes, HdfsConstants.DatanodeReportType.All, client, datanodes, bpid);
        AssertReports(NumOfDatanodes, HdfsConstants.DatanodeReportType.Live, client, datanodes, bpid);
        AssertReports(0, HdfsConstants.DatanodeReportType.Dead, client, datanodes, bpid);
        // bring down one datanode
        DataNode last = datanodes[datanodes.Count - 1];
        Log.Info("XXX shutdown datanode " + last.GetDatanodeUuid());
        last.Shutdown();
        DatanodeInfo[] nodeInfo = client.DatanodeReport(HdfsConstants.DatanodeReportType.Dead);
        while (nodeInfo.Length != 1)
        {
            try
            {
                Sharpen.Thread.Sleep(500);
            }
            catch (Exception)
            {
            }
            nodeInfo = client.DatanodeReport(HdfsConstants.DatanodeReportType.Dead);
        }
        AssertReports(NumOfDatanodes, HdfsConstants.DatanodeReportType.All, client, datanodes, null);
        AssertReports(NumOfDatanodes - 1, HdfsConstants.DatanodeReportType.Live, client, datanodes, null);
        AssertReports(1, HdfsConstants.DatanodeReportType.Dead, client, datanodes, null);
        Sharpen.Thread.Sleep(5000);
        MetricsAsserts.AssertGauge("ExpiredHeartbeats", 1, MetricsAsserts.GetMetrics("FSNamesystem"));
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestChangeStorageID()
{
    string DnIpAddr = "127.0.0.1";
    string DnHostname = "localhost";
    int DnXferPort = 12345;
    int DnInfoPort = 12346;
    int DnInfoSecurePort = 12347;
    int DnIpcPort = 12348;
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(0).Build();
        IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
        DFSClient client = new DFSClient(addr, conf);
        NamenodeProtocols rpcServer = cluster.GetNameNodeRpc();
        // register a datanode
        DatanodeID dnId = new DatanodeID(DnIpAddr, DnHostname, "fake-datanode-id", DnXferPort, DnInfoPort, DnInfoSecurePort, DnIpcPort);
        long nnCTime = cluster.GetNamesystem().GetFSImage().GetStorage().GetCTime();
        StorageInfo mockStorageInfo = Org.Mockito.Mockito.Mock<StorageInfo>();
        Org.Mockito.Mockito.DoReturn(nnCTime).When(mockStorageInfo).GetCTime();
        Org.Mockito.Mockito.DoReturn(HdfsConstants.DatanodeLayoutVersion).When(mockStorageInfo).GetLayoutVersion();
        DatanodeRegistration dnReg = new DatanodeRegistration(dnId, mockStorageInfo, null, VersionInfo.GetVersion());
        rpcServer.RegisterDatanode(dnReg);
        DatanodeInfo[] report = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
        NUnit.Framework.Assert.AreEqual("Expected a registered datanode", 1, report.Length);
        // register the same datanode again with a different storage ID
        dnId = new DatanodeID(DnIpAddr, DnHostname, "changed-fake-datanode-id", DnXferPort, DnInfoPort, DnInfoSecurePort, DnIpcPort);
        dnReg = new DatanodeRegistration(dnId, mockStorageInfo, null, VersionInfo.GetVersion());
        rpcServer.RegisterDatanode(dnReg);
        report = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
        NUnit.Framework.Assert.AreEqual("Datanode with changed storage ID not recognized", 1, report.Length);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <exception cref="System.IO.IOException"/>
internal static void AssertReports(int numDatanodes, HdfsConstants.DatanodeReportType type, DFSClient client, IList<DataNode> datanodes, string bpid)
{
    DatanodeInfo[] infos = client.DatanodeReport(type);
    NUnit.Framework.Assert.AreEqual(numDatanodes, infos.Length);
    DatanodeStorageReport[] reports = client.GetDatanodeStorageReport(type);
    NUnit.Framework.Assert.AreEqual(numDatanodes, reports.Length);
    for (int i = 0; i < infos.Length; i++)
    {
        NUnit.Framework.Assert.AreEqual(infos[i], reports[i].GetDatanodeInfo());
        DataNode d = FindDatanode(infos[i].GetDatanodeUuid(), datanodes);
        if (bpid != null)
        {
            // check storage
            StorageReport[] computed = reports[i].GetStorageReports();
            Arrays.Sort(computed, Cmp);
            StorageReport[] expected = d.GetFSDataset().GetStorageReports(bpid);
            Arrays.Sort(expected, Cmp);
            NUnit.Framework.Assert.AreEqual(expected.Length, computed.Length);
            for (int j = 0; j < expected.Length; j++)
            {
                NUnit.Framework.Assert.AreEqual(expected[j].GetStorage().GetStorageID(), computed[j].GetStorage().GetStorageID());
            }
        }
    }
}
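// AssertReports above uses two members that are not shown in this excerpt: the
// comparer `Cmp` used to sort storage reports, and the `FindDatanode` lookup.
// The following is a minimal sketch of plausible definitions (assumed, not
// taken verbatim from the original source):
private sealed class StorageIdComparer : IComparer<StorageReport>
{
    // order storage reports by storage ID so the expected and computed
    // arrays can be compared element by element
    public int Compare(StorageReport left, StorageReport right)
    {
        return string.CompareOrdinal(left.GetStorage().GetStorageID(), right.GetStorage().GetStorageID());
    }
}

private static readonly IComparer<StorageReport> Cmp = new StorageIdComparer();

private static DataNode FindDatanode(string id, IList<DataNode> datanodes)
{
    // a linear scan is fine for the handful of datanodes in a MiniDFSCluster
    foreach (DataNode d in datanodes)
    {
        if (d.GetDatanodeUuid().Equals(id))
        {
            return d;
        }
    }
    throw new InvalidOperationException("Datanode " + id + " not found");
}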
public virtual void TestChangeIpcPort()
{
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).Build();
        IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
        DFSClient client = new DFSClient(addr, conf);
        // Restart datanodes
        cluster.RestartDataNodes();
        // Wait until we get a heartbeat from the new datanode
        DatanodeInfo[] report = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
        long firstUpdateAfterRestart = report[0].GetLastUpdate();
        bool gotHeartbeat = false;
        for (int i = 0; i < 10 && !gotHeartbeat; i++)
        {
            try
            {
                Sharpen.Thread.Sleep(i * 1000);
            }
            catch (Exception)
            {
            }
            report = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
            gotHeartbeat = (report[0].GetLastUpdate() > firstUpdateAfterRestart);
        }
        if (!gotHeartbeat)
        {
            NUnit.Framework.Assert.Fail("Never got a heartbeat from restarted datanode.");
        }
        int realIpcPort = cluster.GetDataNodes()[0].GetIpcPort();
        // Now make sure the reported IPC port is the correct one.
        NUnit.Framework.Assert.AreEqual(realIpcPort, report[0].GetIpcPort());
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/*
 * Decommissions the node at the given index
 */
/// <exception cref="System.IO.IOException"/>
private string DecommissionNode(FSNamesystem namesystem, DFSClient client, FileSystem localFileSys, int nodeIndex)
{
    DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live);
    string nodename = info[nodeIndex].GetXferAddr();
    DecommissionNode(namesystem, localFileSys, nodename);
    return nodename;
}
/// <summary>Tests replication in DFS.</summary>
/// <exception cref="System.IO.IOException"/>
public virtual void RunReplication(bool simulated)
{
    Configuration conf = new HdfsConfiguration();
    conf.SetBoolean(DFSConfigKeys.DfsNamenodeReplicationConsiderloadKey, false);
    if (simulated)
    {
        SimulatedFSDataset.SetFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Racks(racks).Build();
    cluster.WaitActive();
    IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live);
    NUnit.Framework.Assert.AreEqual("Number of Datanodes ", numDatanodes, info.Length);
    FileSystem fileSys = cluster.GetFileSystem();
    try
    {
        // write and verify the same file with varying replication factors
        Path file1 = new Path("/smallblocktest.dat");
        WriteFile(fileSys, file1, 3);
        CheckFile(fileSys, file1, 3);
        CleanupFile(fileSys, file1);
        WriteFile(fileSys, file1, 10);
        CheckFile(fileSys, file1, 10);
        CleanupFile(fileSys, file1);
        WriteFile(fileSys, file1, 4);
        CheckFile(fileSys, file1, 4);
        CleanupFile(fileSys, file1);
        WriteFile(fileSys, file1, 1);
        CheckFile(fileSys, file1, 1);
        CleanupFile(fileSys, file1);
        WriteFile(fileSys, file1, 2);
        CheckFile(fileSys, file1, 2);
        CleanupFile(fileSys, file1);
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
    }
}
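// WriteFile, CheckFile, and CleanupFile are helpers defined elsewhere in the
// test class (note that other classes excerpted below define their own
// variants, e.g. a WriteFile that returns the open FSDataOutputStream). A
// minimal sketch of the write/cleanup pair, assuming the third argument is the
// replication factor; CheckFile's block-placement verification is omitted:
private static void WriteFile(FileSystem fileSys, Path name, int repl)
{
    // create a small single-block file with the requested replication factor
    FSDataOutputStream stm = fileSys.Create(name, true, 4096, (short)repl, 1024L);
    byte[] buffer = new byte[1024];
    new Random().NextBytes(buffer);
    stm.Write(buffer, 0, buffer.Length);
    stm.Close();
}

private static void CleanupFile(FileSystem fileSys, Path name)
{
    // delete the file and make sure it is really gone
    NUnit.Framework.Assert.IsTrue(fileSys.Exists(name));
    fileSys.Delete(name, true);
    NUnit.Framework.Assert.IsFalse(fileSys.Exists(name));
}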
public virtual void TestBlockReplacement()
{
    Configuration Conf = new HdfsConfiguration();
    string[] InitialRacks = new string[] { "/RACK0", "/RACK1", "/RACK2" };
    string[] NewRacks = new string[] { "/RACK2" };
    short ReplicationFactor = (short)3;
    int DefaultBlockSize = 1024;
    Random r = new Random();
    Conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
    Conf.SetInt(DFSConfigKeys.DfsBytesPerChecksumKey, DefaultBlockSize / 2);
    Conf.SetLong(DFSConfigKeys.DfsBlockreportIntervalMsecKey, 500);
    cluster = new MiniDFSCluster.Builder(Conf).NumDataNodes(ReplicationFactor).Racks(InitialRacks).Build();
    try
    {
        cluster.WaitActive();
        FileSystem fs = cluster.GetFileSystem();
        Path fileName = new Path("/tmp.txt");
        // create a file with one block
        DFSTestUtil.CreateFile(fs, fileName, DefaultBlockSize, ReplicationFactor, r.NextLong());
        DFSTestUtil.WaitReplication(fs, fileName, ReplicationFactor);
        // get all datanodes
        IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
        DFSClient client = new DFSClient(addr, Conf);
        IList<LocatedBlock> locatedBlocks = client.GetNamenode().GetBlockLocations("/tmp.txt", 0, DefaultBlockSize).GetLocatedBlocks();
        NUnit.Framework.Assert.AreEqual(1, locatedBlocks.Count);
        LocatedBlock block = locatedBlocks[0];
        DatanodeInfo[] oldNodes = block.GetLocations();
        NUnit.Framework.Assert.AreEqual(oldNodes.Length, 3);
        ExtendedBlock b = block.GetBlock();
        // add a fourth datanode to the cluster
        cluster.StartDataNodes(Conf, 1, true, null, NewRacks);
        cluster.WaitActive();
        DatanodeInfo[] datanodes = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
        // find out the new node
        DatanodeInfo newNode = null;
        foreach (DatanodeInfo node in datanodes)
        {
            bool isNewNode = true;
            foreach (DatanodeInfo oldNode in oldNodes)
            {
                if (node.Equals(oldNode))
                {
                    isNewNode = false;
                    break;
                }
            }
            if (isNewNode)
            {
                newNode = node;
                break;
            }
        }
        NUnit.Framework.Assert.IsTrue(newNode != null);
        DatanodeInfo source = null;
        AList<DatanodeInfo> proxies = new AList<DatanodeInfo>(2);
        foreach (DatanodeInfo node_1 in datanodes)
        {
            if (node_1 != newNode)
            {
                if (node_1.GetNetworkLocation().Equals(newNode.GetNetworkLocation()))
                {
                    source = node_1;
                }
                else
                {
                    proxies.AddItem(node_1);
                }
            }
        }
        // current state: the newNode is on RACK2, and "source" is the other dn on RACK2.
        // the two datanodes on RACK0 and RACK1 are in "proxies".
        // "source" and both "proxies" all contain the block, while newNode doesn't yet.
        NUnit.Framework.Assert.IsTrue(source != null && proxies.Count == 2);
        // start to replace the block
        // case 1: proxySource does not contain the block
        Log.Info("Testcase 1: Proxy " + newNode + " does not contain the block " + b);
        NUnit.Framework.Assert.IsFalse(ReplaceBlock(b, source, newNode, proxies[0]));
        // case 2: destination already contains the block
        Log.Info("Testcase 2: Destination " + proxies[1] + " contains the block " + b);
        NUnit.Framework.Assert.IsFalse(ReplaceBlock(b, source, proxies[0], proxies[1]));
        // case 3: correct case
        Log.Info("Testcase 3: Source=" + source + " Proxy=" + proxies[0] + " Destination=" + newNode);
        NUnit.Framework.Assert.IsTrue(ReplaceBlock(b, source, proxies[0], newNode));
        // after the cluster has had time to resolve the over-replication,
        // block locations should contain the two proxies and newNode,
        // but not source
        CheckBlocks(new DatanodeInfo[] { newNode, proxies[0], proxies[1] }, fileName.ToString(), DefaultBlockSize, ReplicationFactor, client);
        // case 4: proxies.get(0) is not a valid del hint;
        // expect either the source or the newNode replica to be deleted instead
        Log.Info("Testcase 4: invalid del hint " + proxies[0]);
        NUnit.Framework.Assert.IsTrue(ReplaceBlock(b, proxies[0], proxies[1], source));
        // after the cluster has had time to resolve the over-replication,
        // block locations should contain the two proxies,
        // and either source or newNode, but not both.
        CheckBlocks(Sharpen.Collections.ToArray(proxies, new DatanodeInfo[proxies.Count]), fileName.ToString(), DefaultBlockSize, ReplicationFactor, client);
    }
    finally
    {
        cluster.Shutdown();
    }
}
public virtual void TestDecommissionStatus()
{
    IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live);
    NUnit.Framework.Assert.AreEqual("Number of Datanodes ", 2, info.Length);
    DistributedFileSystem fileSys = cluster.GetFileSystem();
    DFSAdmin admin = new DFSAdmin(cluster.GetConfiguration(0));
    short replicas = numDatanodes;
    //
    // Decommission one node. Verify the decommission status
    //
    Path file1 = new Path("decommission.dat");
    WriteFile(fileSys, file1, replicas);
    Path file2 = new Path("decommission1.dat");
    FSDataOutputStream st1 = WriteIncompleteFile(fileSys, file2, replicas);
    foreach (DataNode d in cluster.GetDataNodes())
    {
        DataNodeTestUtils.TriggerBlockReport(d);
    }
    FSNamesystem fsn = cluster.GetNamesystem();
    DatanodeManager dm = fsn.GetBlockManager().GetDatanodeManager();
    for (int iteration = 0; iteration < numDatanodes; iteration++)
    {
        string downnode = DecommissionNode(fsn, client, localFileSys, iteration);
        dm.RefreshNodes(conf);
        decommissionedNodes.AddItem(downnode);
        BlockManagerTestUtil.RecheckDecommissionState(dm);
        IList<DatanodeDescriptor> decommissioningNodes = dm.GetDecommissioningNodes();
        if (iteration == 0)
        {
            NUnit.Framework.Assert.AreEqual(decommissioningNodes.Count, 1);
            DatanodeDescriptor decommNode = decommissioningNodes[0];
            CheckDecommissionStatus(decommNode, 3, 0, 1);
            CheckDFSAdminDecommissionStatus(decommissioningNodes.SubList(0, 1), fileSys, admin);
        }
        else
        {
            NUnit.Framework.Assert.AreEqual(decommissioningNodes.Count, 2);
            DatanodeDescriptor decommNode1 = decommissioningNodes[0];
            DatanodeDescriptor decommNode2 = decommissioningNodes[1];
            // This one is still 3,3,1 since it passed over the UC block
            // earlier, before node 2 was decommed
            CheckDecommissionStatus(decommNode1, 3, 3, 1);
            // This one is 4,4,2 since it has the full state
            CheckDecommissionStatus(decommNode2, 4, 4, 2);
            CheckDFSAdminDecommissionStatus(decommissioningNodes.SubList(0, 2), fileSys, admin);
        }
    }
    // Call refreshNodes on FSNamesystem with an empty exclude file.
    // This will remove the datanodes from the decommissioning list and
    // make them available again.
    WriteConfigFile(localFileSys, excludeFile, null);
    dm.RefreshNodes(conf);
    st1.Close();
    CleanupFile(fileSys, file1);
    CleanupFile(fileSys, file2);
}
public virtual void TestTimes()
{
    Configuration conf = new HdfsConfiguration();
    int MaxIdleTime = 2000; // 2s
    conf.SetInt("ipc.client.connection.maxidletime", MaxIdleTime);
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
    conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Build();
    cluster.WaitActive();
    int nnport = cluster.GetNameNodePort();
    IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live);
    NUnit.Framework.Assert.AreEqual("Number of Datanodes ", numDatanodes, info.Length);
    FileSystem fileSys = cluster.GetFileSystem();
    int replicas = 1;
    NUnit.Framework.Assert.IsTrue(fileSys is DistributedFileSystem);
    try
    {
        //
        // create file and record atime/mtime
        //
        System.Console.Out.WriteLine("Creating testdir1 and testdir1/test1.dat.");
        Path dir1 = new Path("testdir1");
        Path file1 = new Path(dir1, "test1.dat");
        FSDataOutputStream stm = WriteFile(fileSys, file1, replicas);
        FileStatus stat = fileSys.GetFileStatus(file1);
        long atimeBeforeClose = stat.GetAccessTime();
        string adate = dateForm.Format(Sharpen.Extensions.CreateDate(atimeBeforeClose));
        System.Console.Out.WriteLine("atime on " + file1 + " before close is " + adate + " (" + atimeBeforeClose + ")");
        NUnit.Framework.Assert.IsTrue(atimeBeforeClose != 0);
        stm.Close();
        stat = fileSys.GetFileStatus(file1);
        long atime1 = stat.GetAccessTime();
        long mtime1 = stat.GetModificationTime();
        adate = dateForm.Format(Sharpen.Extensions.CreateDate(atime1));
        string mdate = dateForm.Format(Sharpen.Extensions.CreateDate(mtime1));
        System.Console.Out.WriteLine("atime on " + file1 + " is " + adate + " (" + atime1 + ")");
        System.Console.Out.WriteLine("mtime on " + file1 + " is " + mdate + " (" + mtime1 + ")");
        NUnit.Framework.Assert.IsTrue(atime1 != 0);
        //
        // record dir times; directories carry no access time, so it stays 0
        //
        stat = fileSys.GetFileStatus(dir1);
        long mdir1 = stat.GetAccessTime();
        NUnit.Framework.Assert.IsTrue(mdir1 == 0);
        // set the access time to be one day in the past
        long atime2 = atime1 - (24L * 3600L * 1000L);
        fileSys.SetTimes(file1, -1, atime2);
        // check new access time on file
        stat = fileSys.GetFileStatus(file1);
        long atime3 = stat.GetAccessTime();
        string adate3 = dateForm.Format(Sharpen.Extensions.CreateDate(atime3));
        System.Console.Out.WriteLine("new atime on " + file1 + " is " + adate3 + " (" + atime3 + ")");
        NUnit.Framework.Assert.IsTrue(atime2 == atime3);
        NUnit.Framework.Assert.IsTrue(mtime1 == stat.GetModificationTime());
        // set the modification time to be 1 hour in the past
        long mtime2 = mtime1 - (3600L * 1000L);
        fileSys.SetTimes(file1, mtime2, -1);
        // check new modification time on file
        stat = fileSys.GetFileStatus(file1);
        long mtime3 = stat.GetModificationTime();
        string mdate3 = dateForm.Format(Sharpen.Extensions.CreateDate(mtime3));
        System.Console.Out.WriteLine("new mtime on " + file1 + " is " + mdate3 + " (" + mtime3 + ")");
        NUnit.Framework.Assert.IsTrue(atime2 == stat.GetAccessTime());
        NUnit.Framework.Assert.IsTrue(mtime2 == mtime3);
        // set both times on the directory
        long mtime4 = Time.Now() - (3600L * 1000L);
        long atime4 = Time.Now();
        fileSys.SetTimes(dir1, mtime4, atime4);
        // check new modification and access times on the directory
        stat = fileSys.GetFileStatus(dir1);
        NUnit.Framework.Assert.IsTrue("Not matching the modification times", mtime4 == stat.GetModificationTime());
        NUnit.Framework.Assert.IsTrue("Not matching the access times", atime4 == stat.GetAccessTime());
        // setting times on a non-existent path must fail
        Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
        try
        {
            fileSys.SetTimes(nonExistingDir, mtime4, atime4);
            NUnit.Framework.Assert.Fail("Expecting FileNotFoundException");
        }
        catch (FileNotFoundException e)
        {
            NUnit.Framework.Assert.IsTrue(e.Message.Contains("File/Directory " + nonExistingDir.ToString() + " does not exist."));
        }
        // shutdown cluster and restart
        cluster.Shutdown();
        try
        {
            Sharpen.Thread.Sleep(2 * MaxIdleTime);
        }
        catch (Exception)
        {
        }
        cluster = new MiniDFSCluster.Builder(conf).NameNodePort(nnport).Format(false).Build();
        cluster.WaitActive();
        fileSys = cluster.GetFileSystem();
        // verify that access times and modification times persist after a
        // cluster restart.
        System.Console.Out.WriteLine("Verifying times after cluster restart");
        stat = fileSys.GetFileStatus(file1);
        NUnit.Framework.Assert.IsTrue(atime2 == stat.GetAccessTime());
        NUnit.Framework.Assert.IsTrue(mtime3 == stat.GetModificationTime());
        CleanupFile(fileSys, file1);
        CleanupFile(fileSys, dir1);
    }
    catch (IOException)
    {
        info = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
        PrintDatanodeReport(info);
        throw;
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
    }
}
public virtual void TestTimesAtClose()
{
    Configuration conf = new HdfsConfiguration();
    int MaxIdleTime = 2000; // 2s
    int replicas = 1;
    // parameter initialization
    conf.SetInt("ipc.client.connection.maxidletime", MaxIdleTime);
    conf.SetInt(DFSConfigKeys.DfsNamenodeHeartbeatRecheckIntervalKey, 1000);
    conf.SetInt(DFSConfigKeys.DfsHeartbeatIntervalKey, 1);
    conf.SetInt(DFSConfigKeys.DfsDatanodeHandlerCountKey, 50);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Build();
    cluster.WaitActive();
    IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live);
    NUnit.Framework.Assert.AreEqual("Number of Datanodes ", numDatanodes, info.Length);
    FileSystem fileSys = cluster.GetFileSystem();
    NUnit.Framework.Assert.IsTrue(fileSys is DistributedFileSystem);
    try
    {
        // create a new file and write to it
        Path file1 = new Path("/simple.dat");
        FSDataOutputStream stm = WriteFile(fileSys, file1, replicas);
        System.Console.Out.WriteLine("Created and wrote file simple.dat");
        FileStatus statBeforeClose = fileSys.GetFileStatus(file1);
        long mtimeBeforeClose = statBeforeClose.GetModificationTime();
        string mdateBeforeClose = dateForm.Format(Sharpen.Extensions.CreateDate(mtimeBeforeClose));
        System.Console.Out.WriteLine("mtime on " + file1 + " before close is " + mdateBeforeClose + " (" + mtimeBeforeClose + ")");
        NUnit.Framework.Assert.IsTrue(mtimeBeforeClose != 0);
        // close file after writing
        stm.Close();
        System.Console.Out.WriteLine("Closed file.");
        FileStatus statAfterClose = fileSys.GetFileStatus(file1);
        long mtimeAfterClose = statAfterClose.GetModificationTime();
        string mdateAfterClose = dateForm.Format(Sharpen.Extensions.CreateDate(mtimeAfterClose));
        System.Console.Out.WriteLine("mtime on " + file1 + " after close is " + mdateAfterClose + " (" + mtimeAfterClose + ")");
        NUnit.Framework.Assert.IsTrue(mtimeAfterClose != 0);
        NUnit.Framework.Assert.IsTrue(mtimeBeforeClose != mtimeAfterClose);
        CleanupFile(fileSys, file1);
    }
    catch (IOException)
    {
        info = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
        PrintDatanodeReport(info);
        throw;
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
    }
}
public virtual void TestModTime()
{
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(numDatanodes).Build();
    cluster.WaitActive();
    IPEndPoint addr = new IPEndPoint("localhost", cluster.GetNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.DatanodeReport(HdfsConstants.DatanodeReportType.Live);
    NUnit.Framework.Assert.AreEqual("Number of Datanodes ", numDatanodes, info.Length);
    FileSystem fileSys = cluster.GetFileSystem();
    int replicas = numDatanodes - 1;
    NUnit.Framework.Assert.IsTrue(fileSys is DistributedFileSystem);
    try
    {
        //
        // create file and record mtime of test file
        //
        System.Console.Out.WriteLine("Creating testdir1 and testdir1/test1.dat.");
        Path dir1 = new Path("testdir1");
        Path file1 = new Path(dir1, "test1.dat");
        WriteFile(fileSys, file1, replicas);
        FileStatus stat = fileSys.GetFileStatus(file1);
        long mtime1 = stat.GetModificationTime();
        NUnit.Framework.Assert.IsTrue(mtime1 != 0);
        //
        // record dir times
        //
        stat = fileSys.GetFileStatus(dir1);
        long mdir1 = stat.GetModificationTime();
        //
        // create second test file
        //
        System.Console.Out.WriteLine("Creating testdir1/test2.dat.");
        Path file2 = new Path(dir1, "test2.dat");
        WriteFile(fileSys, file2, replicas);
        stat = fileSys.GetFileStatus(file2);
        //
        // verify that the modification time of the directory did not
        // decrease; creating the second file may have increased it.
        //
        stat = fileSys.GetFileStatus(dir1);
        NUnit.Framework.Assert.IsTrue(stat.GetModificationTime() >= mdir1);
        mdir1 = stat.GetModificationTime();
        //
        // create another directory
        //
        Path dir2 = fileSys.MakeQualified(new Path("testdir2/"));
        System.Console.Out.WriteLine("Creating testdir2 " + dir2);
        NUnit.Framework.Assert.IsTrue(fileSys.Mkdirs(dir2));
        stat = fileSys.GetFileStatus(dir2);
        long mdir2 = stat.GetModificationTime();
        //
        // rename file1 from testdir1 into testdir2
        //
        Path newfile = new Path(dir2, "testnew.dat");
        System.Console.Out.WriteLine("Moving " + file1 + " to " + newfile);
        fileSys.Rename(file1, newfile);
        //
        // verify that modification time of file1 did not change.
        //
        stat = fileSys.GetFileStatus(newfile);
        NUnit.Framework.Assert.IsTrue(stat.GetModificationTime() == mtime1);
        //
        // verify that modification times of testdir1 and testdir2
        // were changed.
        //
        stat = fileSys.GetFileStatus(dir1);
        NUnit.Framework.Assert.IsTrue(stat.GetModificationTime() != mdir1);
        mdir1 = stat.GetModificationTime();
        stat = fileSys.GetFileStatus(dir2);
        NUnit.Framework.Assert.IsTrue(stat.GetModificationTime() != mdir2);
        mdir2 = stat.GetModificationTime();
        //
        // delete newfile
        //
        System.Console.Out.WriteLine("Deleting testdir2/testnew.dat.");
        NUnit.Framework.Assert.IsTrue(fileSys.Delete(newfile, true));
        //
        // verify that modification time of testdir1 has not changed.
        //
        stat = fileSys.GetFileStatus(dir1);
        NUnit.Framework.Assert.IsTrue(stat.GetModificationTime() == mdir1);
        //
        // verify that modification time of testdir2 has changed.
        //
        stat = fileSys.GetFileStatus(dir2);
        NUnit.Framework.Assert.IsTrue(stat.GetModificationTime() != mdir2);
        mdir2 = stat.GetModificationTime();
        CleanupFile(fileSys, file2);
        CleanupFile(fileSys, dir1);
        CleanupFile(fileSys, dir2);
    }
    catch (IOException)
    {
        info = client.DatanodeReport(HdfsConstants.DatanodeReportType.All);
        PrintDatanodeReport(info);
        throw;
    }
    finally
    {
        fileSys.Close();
        cluster.Shutdown();
    }
}
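// PrintDatanodeReport is called from the IOException handlers above but is not
// shown in this excerpt. A minimal sketch of what it plausibly does (assumed,
// not taken from the original source): dump each datanode's self-description so
// a failing run leaves the cluster state in the test output.
private static void PrintDatanodeReport(DatanodeInfo[] info)
{
    System.Console.Out.WriteLine("-------------------------------------------------");
    foreach (DatanodeInfo dn in info)
    {
        // GetDatanodeReport() renders name, capacity, and last-contact details
        System.Console.Out.WriteLine(dn.GetDatanodeReport());
        System.Console.Out.WriteLine();
    }
}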