/// <summary>
/// Verifies HA NameNode startup semantics: both nodes boot into standby with no
/// edit files anywhere; edits appear only after a transition to active; and a
/// restarted standby neither finalizes the shared edits segment nor applies
/// in-progress logs at start-up.
/// </summary>
public virtual void TestStartup()
{
    Configuration conf = new Configuration();
    HAUtil.SetAllowStandbyReads(conf, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .NnTopology(MiniDFSNNTopology.SimpleHATopology())
        .NumDataNodes(0)
        .Build();
    try
    {
        // Immediately after HA startup both NNs are standby, so no edits
        // files may exist in either name dir or in the shared dir.
        IList<URI> editDirs = Lists.NewArrayList();
        Sharpen.Collections.AddAll(editDirs, cluster.GetNameDirs(0));
        Sharpen.Collections.AddAll(editDirs, cluster.GetNameDirs(1));
        editDirs.AddItem(cluster.GetSharedEditsDir(0, 1));
        AssertNoEditFiles(editDirs);
        // Activating NN0 must create an in-progress segment in its own dirs
        // and in the shared dir, while NN1 (still standby) stays empty.
        cluster.TransitionToActive(0);
        AssertEditFiles(cluster.GetNameDirs(0), NNStorage.GetInProgressEditsFileName(1));
        AssertEditFiles(Sharpen.Collections.SingletonList(cluster.GetSharedEditsDir(0, 1)),
            NNStorage.GetInProgressEditsFileName(1));
        AssertNoEditFiles(cluster.GetNameDirs(1));
        cluster.GetNameNode(0).GetRpcServer().Mkdirs("/test",
            FsPermission.CreateImmutable((short)0x1ed), true);
        // Restarting the standby must not finalize anything in the shared
        // directory when it comes back up.
        cluster.RestartNameNode(1);
        AssertEditFiles(cluster.GetNameDirs(0), NNStorage.GetInProgressEditsFileName(1));
        AssertEditFiles(Sharpen.Collections.SingletonList(cluster.GetSharedEditsDir(0, 1)),
            NNStorage.GetInProgressEditsFileName(1));
        AssertNoEditFiles(cluster.GetNameDirs(1));
        // Nor may it have applied any in-progress edits at start-up; otherwise
        // the next roll would force a mid-segment replay (not allowed) or a
        // double replay of the same edits (incorrect).
        NUnit.Framework.Assert.IsNull(
            NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test", true));
        cluster.GetNameNode(0).GetRpcServer().Mkdirs("/test2",
            FsPermission.CreateImmutable((short)0x1ed), true);
        // Bounce NN0 back to standby and promote NN1; NN1 must then see the
        // edits from both before and after its own restart.
        cluster.RestartNameNode(0);
        cluster.TransitionToActive(1);
        NUnit.Framework.Assert.IsNotNull(
            NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test", true));
        NUnit.Framework.Assert.IsNotNull(
            NameNodeAdapter.GetFileInfo(cluster.GetNameNode(1), "/test2", true));
    }
    finally
    {
        cluster.Shutdown();
    }
}
/// <summary>Tests modifying xattrs on a directory that has been snapshotted</summary>
/// <exception cref="System.Exception"/>
public virtual void TestModifyReadsCurrentState()
{
    // Init: create the directory, snapshot it, then add xattrs to the live copy.
    FileSystem.Mkdirs(hdfs, path, FsPermission.CreateImmutable((short)0x1c0));
    SnapshotTestHelper.CreateSnapshot(hdfs, path, snapshotName);
    hdfs.SetXAttr(path, name1, value1);
    hdfs.SetXAttr(path, name2, value2);
    // Verify the current path reflects the xattrs while the snapshot (taken
    // before the SetXAttr calls) has none.
    // FIX: NUnit's Assert.AreEqual signature is (expected, actual); the
    // original swapped the arguments, producing misleading failure messages.
    IDictionary<string, byte[]> xattrs = hdfs.GetXAttrs(path);
    NUnit.Framework.Assert.AreEqual(2, xattrs.Count);
    Assert.AssertArrayEquals(value1, xattrs[name1]);
    Assert.AssertArrayEquals(value2, xattrs[name2]);
    xattrs = hdfs.GetXAttrs(snapshotPath);
    NUnit.Framework.Assert.AreEqual(0, xattrs.Count);
    // Modify each xattr and make sure the change is reflected on the live path.
    hdfs.SetXAttr(path, name1, value2, EnumSet.Of(XAttrSetFlag.Replace));
    xattrs = hdfs.GetXAttrs(path);
    NUnit.Framework.Assert.AreEqual(2, xattrs.Count);
    Assert.AssertArrayEquals(value2, xattrs[name1]);
    Assert.AssertArrayEquals(value2, xattrs[name2]);
    hdfs.SetXAttr(path, name2, value1, EnumSet.Of(XAttrSetFlag.Replace));
    xattrs = hdfs.GetXAttrs(path);
    NUnit.Framework.Assert.AreEqual(2, xattrs.Count);
    Assert.AssertArrayEquals(value2, xattrs[name1]);
    Assert.AssertArrayEquals(value1, xattrs[name2]);
    // Paranoia checks: the snapshot must still be empty, and removing the
    // xattrs from the live directory must leave it empty too.
    xattrs = hdfs.GetXAttrs(snapshotPath);
    NUnit.Framework.Assert.AreEqual(0, xattrs.Count);
    hdfs.RemoveXAttr(path, name1);
    hdfs.RemoveXAttr(path, name2);
    xattrs = hdfs.GetXAttrs(path);
    NUnit.Framework.Assert.AreEqual(0, xattrs.Count);
}
/// <summary>
/// Setting an xattr directly on a snapshot path must be rejected with a
/// <see cref="SnapshotAccessControlException"/> (snapshots are read-only).
/// </summary>
public virtual void TestSetXAttrSnapshotPath()
{
    FileSystem.Mkdirs(hdfs, path, FsPermission.CreateImmutable((short)0x1c0));
    SnapshotTestHelper.CreateSnapshot(hdfs, path, snapshotName);
    // The write against the snapshot path below is expected to throw.
    exception.Expect(typeof(SnapshotAccessControlException));
    hdfs.SetXAttr(snapshotPath, name1, value1);
}
/// <summary>
/// After removing xattrs from the live directory, the snapshot must retain the
/// old xattrs — including across NameNode restarts (with and without a fresh
/// checkpoint, via <c>Restart(false)</c>/<c>Restart(true)</c>).
/// </summary>
public virtual void TestXAttrForSnapshotRootAfterRemove()
{
    FileSystem.Mkdirs(hdfs, path, FsPermission.CreateImmutable((short)0x1c0));
    hdfs.SetXAttr(path, name1, value1);
    hdfs.SetXAttr(path, name2, value2);
    SnapshotTestHelper.CreateSnapshot(hdfs, path, snapshotName);
    // Both original and snapshot have the same XAttrs.
    // FIX: NUnit's Assert.AreEqual signature is (expected, actual); the
    // original swapped the arguments, producing misleading failure messages.
    IDictionary<string, byte[]> xattrs = hdfs.GetXAttrs(path);
    NUnit.Framework.Assert.AreEqual(2, xattrs.Count);
    Assert.AssertArrayEquals(value1, xattrs[name1]);
    Assert.AssertArrayEquals(value2, xattrs[name2]);
    xattrs = hdfs.GetXAttrs(snapshotPath);
    NUnit.Framework.Assert.AreEqual(2, xattrs.Count);
    Assert.AssertArrayEquals(value1, xattrs[name1]);
    Assert.AssertArrayEquals(value2, xattrs[name2]);
    // Remove the original XAttrs; the snapshot must still hold the old ones,
    // both now and after each kind of restart.
    hdfs.RemoveXAttr(path, name1);
    hdfs.RemoveXAttr(path, name2);
    DoSnapshotRootRemovalAssertions(path, snapshotPath);
    Restart(false);
    DoSnapshotRootRemovalAssertions(path, snapshotPath);
    Restart(true);
    DoSnapshotRootRemovalAssertions(path, snapshotPath);
}
/// <summary>
/// Per-test setup: allocates a fresh, uniquely numbered directory ("/p1",
/// "/p2", ...) plus a "file" path under it, creates the directory with mode
/// 0700, then runs the base-class setup.
/// </summary>
public override void SetUp()
{
    ++pathCount;
    path = new Path("/p" + pathCount);
    file = new Path(path, "file");
    // 0x1c0 == octal 0700: owner-only rwx.
    FileSystem.Mkdirs(fs, path, FsPermission.CreateImmutable((short)0x1c0));
    base.SetUp();
}
/// <summary>
/// Takes three successive snapshots while mutating xattrs between them, and
/// verifies each snapshot freezes the xattr state at its creation time —
/// including after an intermediate snapshot is deleted.
/// </summary>
public virtual void TestSuccessiveSnapshotXAttrChanges()
{
    // First snapshot: single xattr name1=value1.
    FileSystem.Mkdirs(hdfs, path, FsPermission.CreateImmutable((short)0x1c0));
    hdfs.SetXAttr(path, name1, value1);
    SnapshotTestHelper.CreateSnapshot(hdfs, path, snapshotName);
    IDictionary<string, byte[]> attrs = hdfs.GetXAttrs(snapshotPath);
    NUnit.Framework.Assert.AreEqual(1, attrs.Count);
    Assert.AssertArrayEquals(value1, attrs[name1]);
    // Second snapshot: name1 replaced with newValue1, name2 added.
    hdfs.SetXAttr(path, name1, newValue1);
    hdfs.SetXAttr(path, name2, value2);
    SnapshotTestHelper.CreateSnapshot(hdfs, path, snapshotName2);
    attrs = hdfs.GetXAttrs(snapshotPath2);
    NUnit.Framework.Assert.AreEqual(2, attrs.Count);
    Assert.AssertArrayEquals(newValue1, attrs[name1]);
    Assert.AssertArrayEquals(value2, attrs[name2]);
    // Third snapshot: name1 restored to value1, name2 removed.
    hdfs.SetXAttr(path, name1, value1);
    hdfs.RemoveXAttr(path, name2);
    SnapshotTestHelper.CreateSnapshot(hdfs, path, snapshotName3);
    attrs = hdfs.GetXAttrs(snapshotPath3);
    NUnit.Framework.Assert.AreEqual(1, attrs.Count);
    Assert.AssertArrayEquals(value1, attrs[name1]);
    // The first and second snapshots' XAttrs must have stayed constant.
    attrs = hdfs.GetXAttrs(snapshotPath);
    NUnit.Framework.Assert.AreEqual(1, attrs.Count);
    Assert.AssertArrayEquals(value1, attrs[name1]);
    attrs = hdfs.GetXAttrs(snapshotPath2);
    NUnit.Framework.Assert.AreEqual(2, attrs.Count);
    Assert.AssertArrayEquals(newValue1, attrs[name1]);
    Assert.AssertArrayEquals(value2, attrs[name2]);
    // Deleting the middle snapshot must not disturb the first and third.
    hdfs.DeleteSnapshot(path, snapshotName2);
    attrs = hdfs.GetXAttrs(snapshotPath);
    NUnit.Framework.Assert.AreEqual(1, attrs.Count);
    Assert.AssertArrayEquals(value1, attrs[name1]);
    attrs = hdfs.GetXAttrs(snapshotPath3);
    NUnit.Framework.Assert.AreEqual(1, attrs.Count);
    Assert.AssertArrayEquals(value1, attrs[name1]);
    // Cleanup.
    hdfs.DeleteSnapshot(path, snapshotName);
    hdfs.DeleteSnapshot(path, snapshotName3);
}
/// <summary>Test that users can copy a snapshot while preserving its xattrs.</summary>
/// <exception cref="System.Exception"/>
public virtual void TestCopySnapshotShouldPreserveXAttrs()
{
    FileSystem.Mkdirs(hdfs, path, FsPermission.CreateImmutable((short)0x1c0));
    hdfs.SetXAttr(path, name1, value1);
    hdfs.SetXAttr(path, name2, value2);
    SnapshotTestHelper.CreateSnapshot(hdfs, path, snapshotName);
    // Copy the snapshot with "-px" (preserve permissions + xattrs) via FsShell.
    Path snapshotCopy = new Path(path.ToString() + "-copy");
    string[] shellArgs = new string[]
    {
        "-cp", "-px", snapshotPath.ToUri().ToString(), snapshotCopy.ToUri().ToString()
    };
    int exitCode = ToolRunner.Run(new FsShell(conf), shellArgs);
    NUnit.Framework.Assert.AreEqual("cp -px is not working on a snapshot", Success, exitCode);
    // The copy must carry both xattrs from the snapshot.
    IDictionary<string, byte[]> attrs = hdfs.GetXAttrs(snapshotCopy);
    Assert.AssertArrayEquals(value1, attrs[name1]);
    Assert.AssertArrayEquals(value2, attrs[name2]);
}
/// <summary>
/// Create an aborted in-progress log in the given directory, containing
/// only a specified number of "mkdirs" operations.
/// </summary>
/// <exception cref="System.IO.IOException"/>
public static void CreateAbortedLogWithMkdirs(FilePath editsLogDir, int numDirs,
    long firstTxId, long newInodeId)
{
    FSEditLog log = FSImageTestUtil.CreateStandaloneEditLog(editsLogDir);
    log.SetNextTxId(firstTxId);
    log.OpenForWrite();
    // All directories share one fake owner/group with mode 0x1ed (octal 0755).
    PermissionStatus fakePerms = PermissionStatus.CreateImmutable("fakeuser",
        "fakegroup", FsPermission.CreateImmutable((short)0x1ed));
    for (int idx = 0; idx < numDirs; idx++)
    {
        // Directories are named dir1..dirN with consecutive inode ids
        // starting at newInodeId.
        string dirName = "dir" + (idx + 1);
        INodeDirectory dir = new INodeDirectory(newInodeId + idx,
            DFSUtil.String2Bytes(dirName), fakePerms, 0L);
        log.LogMkDir("/" + dirName, dir);
    }
    // Sync the ops to disk, then abort so the segment stays in-progress.
    log.LogSync();
    log.AbortCurrentLogSegment();
}
/// <summary>
/// Builds a lightweight FSNamesystem backed by mocked FSImage/FSEditLog and
/// adds a single one-block file under "/" for access-time tests.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private static FSNamesystem SetupFileSystem()
{
    Configuration conf = new Configuration();
    // 1 ms access-time precision so atime updates are observable in tests.
    conf.SetLong(DFSConfigKeys.DfsNamenodeAccesstimePrecisionKey, 1L);
    FSEditLog mockEditLog = Org.Mockito.Mockito.Mock<FSEditLog>();
    FSImage mockImage = Org.Mockito.Mockito.Mock<FSImage>();
    Org.Mockito.Mockito.When(mockImage.GetEditLog()).ThenReturn(mockEditLog);
    FSNamesystem namesystem = new FSNamesystem(conf, mockImage, true);
    FSDirectory fsd = namesystem.GetFSDirectory();
    INodesInPath rootIip = fsd.GetINodesInPath("/", true);
    // 0x1ff == octal 0777.
    PermissionStatus perm = new PermissionStatus("hdfs", "supergroup",
        FsPermission.CreateImmutable((short)0x1ff));
    INodeFile inode = new INodeFile(MockInodeId,
        Sharpen.Runtime.GetBytesForString(FileName, Charsets.Utf8), perm,
        1, 1, new BlockInfoContiguous[] { }, (short)1,
        DFSConfigKeys.DfsBlockSizeDefault);
    fsd.AddINode(rootIip, inode);
    return namesystem;
}
/// <summary>Tests removing xattrs on a directory that has been snapshotted</summary>
/// <exception cref="System.Exception"/>
public virtual void TestRemoveReadsCurrentState()
{
    // Init: create the directory, snapshot it, then add xattrs to the live copy.
    FileSystem.Mkdirs(hdfs, path, FsPermission.CreateImmutable((short)0x1c0));
    SnapshotTestHelper.CreateSnapshot(hdfs, path, snapshotName);
    hdfs.SetXAttr(path, name1, value1);
    hdfs.SetXAttr(path, name2, value2);
    // Verify the current path reflects the xattrs while the snapshot (taken
    // before the SetXAttr calls) has none.
    // FIX: NUnit's Assert.AreEqual signature is (expected, actual); the
    // original swapped the arguments, producing misleading failure messages.
    IDictionary<string, byte[]> xattrs = hdfs.GetXAttrs(path);
    NUnit.Framework.Assert.AreEqual(2, xattrs.Count);
    Assert.AssertArrayEquals(value1, xattrs[name1]);
    Assert.AssertArrayEquals(value2, xattrs[name2]);
    xattrs = hdfs.GetXAttrs(snapshotPath);
    NUnit.Framework.Assert.AreEqual(0, xattrs.Count);
    // Remove xattrs one by one and verify each removal takes effect.
    hdfs.RemoveXAttr(path, name2);
    xattrs = hdfs.GetXAttrs(path);
    NUnit.Framework.Assert.AreEqual(1, xattrs.Count);
    Assert.AssertArrayEquals(value1, xattrs[name1]);
    hdfs.RemoveXAttr(path, name1);
    xattrs = hdfs.GetXAttrs(path);
    NUnit.Framework.Assert.AreEqual(0, xattrs.Count);
}
/// <summary>
/// End-to-end check that mkdirs/create honor the explicitly requested modes and
/// that intermediate (implicitly created) directories pick up the inherited
/// permission (requested mode OR'd with 0xc0 == octal 0300, i.e. owner wx) —
/// first with umask 000, then with umask 022.
/// NOTE(review): comments below assume CheckPermission(fs, path, expected)
/// asserts and returns the path's permission — confirm against the helper.
/// </summary>
public virtual void TestCreate()
{
    Configuration conf = new HdfsConfiguration();
    conf.SetBoolean(DFSConfigKeys.DfsPermissionsEnabledKey, true);
    // umask 000 so requested modes are applied verbatim.
    conf.Set(FsPermission.UmaskLabel, "000");
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(3).Build();
        cluster.WaitActive();
        fs = FileSystem.Get(conf);
        FsPermission rootPerm = CheckPermission(fs, "/", null);
        // Intermediate dirs get the parent-derived mode OR'd with 0xc0 (0300).
        FsPermission inheritPerm = FsPermission.CreateImmutable((short)(rootPerm.ToShort(
        ) | 0xc0));
        // 0x1ff == 0777: all components of /a1/a2/a3 keep the requested mode.
        FsPermission dirPerm = new FsPermission((short)0x1ff);
        fs.Mkdirs(new Path("/a1/a2/a3"), dirPerm);
        CheckPermission(fs, "/a1", dirPerm);
        CheckPermission(fs, "/a1/a2", dirPerm);
        CheckPermission(fs, "/a1/a2/a3", dirPerm);
        // 0x53 == 0123: intermediate dirs get 0123|0300, only the leaf keeps 0123.
        dirPerm = new FsPermission((short)0x53);
        FsPermission permission = FsPermission.CreateImmutable((short)(dirPerm.ToShort()
             | 0xc0));
        fs.Mkdirs(new Path("/aa/1/aa/2/aa/3"), dirPerm);
        CheckPermission(fs, "/aa/1", permission);
        CheckPermission(fs, "/aa/1/aa/2", permission);
        CheckPermission(fs, "/aa/1/aa/2/aa/3", dirPerm);
        // File create: parents of /b1/b2/b3.txt inherit, the file keeps 0x124 (0444).
        FsPermission filePerm = new FsPermission((short)0x124);
        Path p = new Path("/b1/b2/b3.txt");
        FSDataOutputStream @out = fs.Create(p, filePerm, true, conf.GetInt(CommonConfigurationKeys
            .IoFileBufferSizeKey, 4096), fs.GetDefaultReplication(p), fs.GetDefaultBlockSize
            (p), null);
        @out.Write(123);
        @out.Close();
        CheckPermission(fs, "/b1", inheritPerm);
        CheckPermission(fs, "/b1/b2", inheritPerm);
        CheckPermission(fs, "/b1/b2/b3.txt", filePerm);
        // With umask 022, the static Mkdirs/Create helpers still apply the
        // requested 0x1b6 (0666) mode exactly.
        conf.Set(FsPermission.UmaskLabel, "022");
        permission = FsPermission.CreateImmutable((short)0x1b6);
        FileSystem.Mkdirs(fs, new Path("/c1"), new FsPermission(permission));
        FileSystem.Create(fs, new Path("/c1/c2.txt"), new FsPermission(permission));
        CheckPermission(fs, "/c1", permission);
        CheckPermission(fs, "/c1/c2.txt", permission);
    }
    finally
    {
        // Best-effort teardown: log (don't rethrow) so a close failure can't
        // mask the test result or prevent cluster shutdown.
        try
        {
            if (fs != null)
            {
                fs.Close();
            }
        }
        catch (Exception e)
        {
            Log.Error(StringUtils.StringifyException(e));
        }
        try
        {
            if (cluster != null)
            {
                cluster.Shutdown();
            }
        }
        catch (Exception e)
        {
            Log.Error(StringUtils.StringifyException(e));
        }
    }
}
/// <summary>
/// Test
/// <see cref="Snapshot.IdComparator"/>
/// : null sorts after every snapshot, null equals null, and for non-null pairs
/// the id ordering agrees in sign with ordinal comparison of the root names.
/// </summary>
public virtual void TestIdCmp()
{
    PermissionStatus perm = PermissionStatus.CreateImmutable("user", "group",
        FsPermission.CreateImmutable((short)0));
    INodeDirectory snapshottable = new INodeDirectory(0, DFSUtil.String2Bytes("foo"),
        perm, 0L);
    snapshottable.AddSnapshottableFeature();
    // Duplicate (id, name) pairs on purpose so the equality branch is exercised.
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot[] snapshots =
        new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot[]
    {
        new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(1, "s1", snapshottable),
        new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(1, "s1", snapshottable),
        new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(2, "s2", snapshottable),
        new Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot(2, "s2", snapshottable)
    };
    NUnit.Framework.Assert.AreEqual(0,
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdComparator.Compare(null, null));
    foreach (Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot a in snapshots)
    {
        // null compares greater than any snapshot, from either side.
        NUnit.Framework.Assert.IsTrue(
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdComparator.Compare(null, a) > 0);
        NUnit.Framework.Assert.IsTrue(
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdComparator.Compare(a, null) < 0);
        foreach (Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot b in snapshots)
        {
            // The id order must agree in sign with the ordinal name order,
            // since these names were generated in id order.
            int byName = string.CompareOrdinal(a.GetRoot().GetLocalName(),
                b.GetRoot().GetLocalName());
            int byId = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdComparator
                .Compare(a, b);
            NUnit.Framework.Assert.AreEqual(byName > 0, byId > 0);
            NUnit.Framework.Assert.AreEqual(byName == 0, byId == 0);
            NUnit.Framework.Assert.AreEqual(byName < 0, byId < 0);
        }
    }
}
/// <summary>
/// Writes aggregated container logs (stdout/stderr plus a nested "logs" file),
/// forces the stderr source to fail on open via a Mockito spy, then reads the
/// aggregate back and verifies that only stdout was aggregated — down to the
/// exact byte length of the rendered output.
/// </summary>
/// <param name="logUploadedTime">whether the reader should emit an upload-time header</param>
/// <exception cref="System.Exception"/>
private void TestReadAcontainerLog(bool logUploadedTime)
{
    Configuration conf = new Configuration();
    FilePath workDir = new FilePath(testWorkDir, "testReadAcontainerLogs1");
    Path remoteAppLogFile = new Path(workDir.GetAbsolutePath(), "aggregatedLogFile");
    Path srcFileRoot = new Path(workDir.GetAbsolutePath(), "srcFiles");
    ContainerId testContainerId = TestContainerId.NewContainerId(1, 1, 1, 1);
    // Source layout mirrors the NM convention: <root>/<appId>/<containerId>/.
    Path t = new Path(srcFileRoot, testContainerId.GetApplicationAttemptId().GetApplicationId
        ().ToString());
    Path srcFilePath = new Path(t, testContainerId.ToString());
    int numChars = 80000;
    // Create a sub-folder under srcFilePath and put a log file in it.
    // Aggregation only picks up top-level files, so this one must be ignored.
    Path subDir = new Path(srcFilePath, "subDir");
    fs.Mkdirs(subDir);
    WriteSrcFile(subDir, "logs", numChars);
    // Create stderr and stdout in the container log dir.
    WriteSrcFile(srcFilePath, "stderr", numChars);
    WriteSrcFile(srcFilePath, "stdout", numChars);
    UserGroupInformation ugi = UserGroupInformation.GetCurrentUser();
    AggregatedLogFormat.LogWriter logWriter = new AggregatedLogFormat.LogWriter(conf,
        remoteAppLogFile, ugi);
    AggregatedLogFormat.LogKey logKey = new AggregatedLogFormat.LogKey(testContainerId
        );
    AggregatedLogFormat.LogValue logValue = new AggregatedLogFormat.LogValue(Collections
        .SingletonList(srcFileRoot.ToString()), testContainerId, ugi.GetShortUserName());
    // Spy the LogValue so opening the stderr stream throws an IOException;
    // the writer must then skip stderr rather than fail the aggregation.
    AggregatedLogFormat.LogValue spyLogValue = Org.Mockito.Mockito.Spy(logValue);
    FilePath errorFile = new FilePath((new Path(srcFilePath, "stderr")).ToString());
    Org.Mockito.Mockito.DoThrow(new IOException("Mock can not open FileInputStream"))
        .When(spyLogValue).SecureOpenFile(errorFile);
    logWriter.Append(logKey, spyLogValue);
    logWriter.Close();
    // The aggregated file must carry the expected restrictive permissions
    // (0x1a0 == octal 0640).
    FileStatus fsStatus = fs.GetFileStatus(remoteAppLogFile);
    NUnit.Framework.Assert.AreEqual("permissions on log aggregation file are wrong",
        FsPermission.CreateImmutable((short)0x1a0), fsStatus.GetPermission());
    AggregatedLogFormat.LogReader logReader = new AggregatedLogFormat.LogReader(conf,
        remoteAppLogFile);
    AggregatedLogFormat.LogKey rLogKey = new AggregatedLogFormat.LogKey();
    DataInputStream dis = logReader.Next(rLogKey);
    TextWriter writer = new StringWriter();
    if (logUploadedTime)
    {
        AggregatedLogFormat.LogReader.ReadAcontainerLogs(dis, writer, Runtime.CurrentTimeMillis
            ());
    }
    else
    {
        AggregatedLogFormat.LogReader.ReadAcontainerLogs(dis, writer);
    }
    // Only stdout should have been aggregated: stderr failed to open and
    // "logs" lives in a sub-folder.
    string s = writer.ToString();
    // Exact rendered length: header + optional upload-time line + length line
    // + contents header + the payload itself + trailing newline + footer.
    // NOTE(review): assumes Times.Format output length is stable between the
    // write above and this call (same-length timestamps) — confirm.
    int expectedLength = "LogType:stdout".Length + (logUploadedTime ?
        ("\nLog Upload Time:" + Times.Format(Runtime.CurrentTimeMillis())).Length : 0) +
        ("\nLogLength:" + numChars).Length + "\nLog Contents:\n".Length + numChars +
        "\n".Length + "End of LogType:stdout\n".Length;
    NUnit.Framework.Assert.IsTrue("LogType not matched", s.Contains("LogType:stdout")
        );
    NUnit.Framework.Assert.IsTrue("log file:stderr should not be aggregated.", !s.Contains
        ("LogType:stderr"));
    NUnit.Framework.Assert.IsTrue("log file:logs should not be aggregated.", !s.Contains
        ("LogType:logs"));
    NUnit.Framework.Assert.IsTrue("LogLength not matched", s.Contains("LogLength:" +
        numChars));
    NUnit.Framework.Assert.IsTrue("Log Contents not matched", s.Contains("Log Contents"
        ));
    // The payload is numChars repetitions of the test filler character.
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < numChars; i++)
    {
        sb.Append(filler);
    }
    string expectedContent = sb.ToString();
    NUnit.Framework.Assert.IsTrue("Log content incorrect", s.Contains(expectedContent
        ));
    NUnit.Framework.Assert.AreEqual(expectedLength, s.Length);
}
/// <summary>
/// Builds an INodeFile with the given owner/group/mode, class-level replication
/// and preferred block size, and attaches it as a child of <paramref name="parent"/>.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private static INodeFile CreateINodeFile(INodeDirectory parent, string name, string
    owner, string group, short perm)
{
    PermissionStatus status = PermissionStatus.CreateImmutable(owner, group,
        FsPermission.CreateImmutable(perm));
    // Grandfather inode id: pre-allocation-era placeholder; zero mtime/atime,
    // no blocks, storage policy byte 0.
    INodeFile file = new INodeFile(INodeId.GrandfatherInodeId,
        Sharpen.Runtime.GetBytesForString(name, "UTF-8"), status, 0L, 0L, null,
        Replication, PreferredBlockSize, unchecked((byte)0));
    parent.AddChild(file);
    return file;
}
/// <summary>
/// Builds an INodeDirectory with the given owner/group/mode and attaches it as
/// a child of <paramref name="parent"/>.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private static INodeDirectory CreateINodeDirectory(INodeDirectory parent, string
    name, string owner, string group, short perm)
{
    PermissionStatus status = PermissionStatus.CreateImmutable(owner, group,
        FsPermission.CreateImmutable(perm));
    // Grandfather inode id: pre-allocation-era placeholder; zero mtime.
    INodeDirectory dir = new INodeDirectory(INodeId.GrandfatherInodeId,
        Sharpen.Runtime.GetBytesForString(name, "UTF-8"), status, 0L);
    parent.AddChild(dir);
    return dir;
}