/// <summary>Create a new encryption zone rooted at <paramref name="src"/>.</summary>
/// <remarks>
/// The zone is persisted as an xattr on the directory; setting the xattr
/// triggers addEncryptionZone, so edit-log loading takes the same code path.
/// <p/>
/// Called while holding the FSDirectory lock.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
internal virtual XAttr CreateEncryptionZone(string src, CipherSuite suite, CryptoProtocolVersion version, string keyName)
{
    System.Diagnostics.Debug.Assert(dir.HasWriteLock());
    INodesInPath srcIIP = dir.GetINodesInPath4Write(src, false);
    // A zone may only be rooted at an empty (or not-yet-existing) directory.
    if (dir.IsNonEmptyDirectory(srcIIP))
    {
        throw new IOException("Attempt to create an encryption zone for a non-empty directory.");
    }
    INode lastINode = srcIIP == null ? null : srcIIP.GetLastINode();
    if (lastINode != null && !lastINode.IsDirectory())
    {
        throw new IOException("Attempt to create an encryption zone for a file.");
    }
    // Nested encryption zones are not allowed.
    EncryptionZoneManager.EncryptionZoneInt existingZone = GetEncryptionZoneForPath(srcIIP);
    if (existingZone != null)
    {
        throw new IOException("Directory " + src + " is already in an " + "encryption zone. (" + GetFullPathName(existingZone) + ")");
    }
    HdfsProtos.ZoneEncryptionInfoProto zoneProto = PBHelper.Convert(suite, version, keyName);
    XAttr ezXAttr = XAttrHelper.BuildXAttr(HdfsServerConstants.CryptoXattrEncryptionZone, zoneProto.ToByteArray());
    IList<XAttr> xattrs = Lists.NewArrayListWithCapacity(1);
    xattrs.AddItem(ezXAttr);
    // Updating the xattr will call addEncryptionZone;
    // done this way to handle edit log loading.
    FSDirXAttrOp.UnprotectedSetXAttrs(dir, src, xattrs, EnumSet.Of(XAttrSetFlag.Create));
    return ezXAttr;
}
/// <summary>
/// For a given absolute path, create all missing ancestors as directories.
/// </summary>
/// <remarks>
/// All created ancestors inherit their parent's permission plus an implicit
/// u+wx permission, so the caller can traverse the path. This is used by
/// create() and addSymlink() for implicitly creating all directories along
/// the path. For example, for path="/foo/bar/spam" where "/foo" is an
/// existing directory and "/foo/bar" is not, directory bar is created.
/// </remarks>
/// <returns>
/// a tuple of the INodesInPath containing all existing and newly created
/// directories and the last component of the relative path, or null if
/// there are errors.
/// </returns>
/// <exception cref="System.IO.IOException"/>
internal static KeyValuePair<INodesInPath, string> CreateAncestorDirectories(FSDirectory fsd, INodesInPath iip, PermissionStatus permission)
{
    string lastComponent = new string(iip.GetLastLocalName(), Charsets.Utf8);
    INodesInPath existing = iip.GetExistingINodes();
    IList<string> missing = iip.GetPath(existing.Length(), iip.Length() - existing.Length());
    int missingCount = missing.Count;
    // If only the final component is missing, every ancestor already exists.
    if (missingCount > 1)
    {
        IList<string> ancestors = missing.SubList(0, missingCount - 1);
        INode parentINode = existing.GetLastINode();
        // Grant implicit u+wx on the created ancestors so the user can
        // traverse the path being built.
        existing = CreateChildrenDirectories(fsd, existing, ancestors, AddImplicitUwx(parentINode.GetPermissionStatus(), permission));
        if (existing == null)
        {
            return null;
        }
    }
    return new AbstractMap.SimpleImmutableEntry<INodesInPath, string>(existing, lastComponent);
}
/// <summary>Path resolution for a normal (non-snapshot) file.</summary>
/// <remarks>
/// Resolves file1's path and verifies the resulting INodesInPath has one
/// INode per path component, carries no snapshot information, and that the
/// trailing INodes correspond to file1, sub1 and dir. A second resolve
/// verifies the result is stable.
/// FIX: NUnit's Assert.AreEqual takes (expected, actual); the original
/// passed them reversed, producing misleading failure messages.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestNonSnapshotPathINodes()
{
    // Get the inodes by resolving the path of a normal file
    string[] names = INode.GetPathNames(file1.ToString());
    byte[][] components = INode.GetPathComponents(names);
    INodesInPath nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);
    // The number of inodes should be equal to components.length
    NUnit.Framework.Assert.AreEqual(components.Length, nodesInPath.Length());
    // The returned nodesInPath should be non-snapshot
    AssertSnapshot(nodesInPath, false, null, -1);
    // The last INode should be associated with file1
    NUnit.Framework.Assert.IsTrue("file1=" + file1 + ", nodesInPath=" + nodesInPath, nodesInPath.GetINode(components.Length - 1) != null);
    NUnit.Framework.Assert.AreEqual(file1.ToString(), nodesInPath.GetINode(components.Length - 1).GetFullPathName());
    NUnit.Framework.Assert.AreEqual(sub1.ToString(), nodesInPath.GetINode(components.Length - 2).GetFullPathName());
    NUnit.Framework.Assert.AreEqual(dir.ToString(), nodesInPath.GetINode(components.Length - 3).GetFullPathName());
    // Resolving the same path again must give the same non-snapshot result.
    nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);
    NUnit.Framework.Assert.AreEqual(components.Length, nodesInPath.Length());
    AssertSnapshot(nodesInPath, false, null, -1);
    NUnit.Framework.Assert.AreEqual(file1.ToString(), nodesInPath.GetLastINode().GetFullPathName());
}
/// <summary>Compute the content summary of the last inode of the path.</summary>
/// <exception cref="System.IO.IOException"/>
private static ContentSummary GetContentSummaryInt(FSDirectory fsd, INodesInPath iip)
{
    fsd.ReadLock();
    try
    {
        INode targetNode = iip.GetLastINode();
        if (targetNode == null)
        {
            throw new FileNotFoundException("File does not exist: " + iip.GetPath());
        }
        // Make it relinquish locks everytime contentCountLimit entries are
        // processed. 0 means disabled. I.e. blocking for the entire duration.
        ContentSummaryComputationContext context = new ContentSummaryComputationContext(fsd, fsd.GetFSNamesystem(), fsd.GetContentCountLimit(), fsd.GetContentSleepMicroSec());
        ContentSummary summary = targetNode.ComputeAndConvertContentSummary(context);
        fsd.AddYieldCount(context.GetYieldCount());
        return summary;
    }
    finally
    {
        fsd.ReadUnlock();
    }
}
/// <summary>Delete the target directory and collect the blocks under it.</summary>
/// <param name="iip">the INodesInPath instance containing all the INodes for the path</param>
/// <param name="collectedBlocks">Blocks under the deleted directory</param>
/// <param name="removedINodes">INodes that should be removed from inodeMap</param>
/// <param name="mtime">the time the inode is removed</param>
/// <returns>the number of files that have been removed</returns>
/// <exception cref="System.IO.IOException"/>
internal static long Delete(FSDirectory fsd, INodesInPath iip, INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes, long mtime)
{
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* FSDirectory.delete: " + iip.GetPath());
    }
    long removedFileCount;
    fsd.WriteLock();
    try
    {
        if (!DeleteAllowed(iip, iip.GetPath()))
        {
            removedFileCount = -1;
        }
        else
        {
            // Reject subtrees that contain snapshottable dirs with snapshots.
            IList<INodeDirectory> snapshottableDirs = new AList<INodeDirectory>();
            FSDirSnapshotOp.CheckSnapshot(iip.GetLastINode(), snapshottableDirs);
            removedFileCount = UnprotectedDelete(fsd, iip, collectedBlocks, removedINodes, mtime);
            fsd.GetFSNamesystem().RemoveSnapshottableDirs(snapshottableDirs);
        }
    }
    finally
    {
        fsd.WriteUnlock();
    }
    return removedFileCount;
}
/// <summary>
/// Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
/// dstInodes[dstInodes.length-1]
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
private static void VerifyQuotaForRename(FSDirectory fsd, INodesInPath src, INodesInPath dst)
{
    if (!fsd.GetFSNamesystem().IsImageLoaded() || fsd.ShouldSkipQuotaChecks())
    {
        // Do not check quota if edits log is still being processed
        return;
    }
    // Walk down from the root while the two paths reference the very same
    // INode (reference comparison); the loop stops at the first divergence.
    int i = 0;
    while (src.GetINode(i) == dst.GetINode(i))
    {
        i++;
    }
    // src[i - 1] is the last common ancestor.
    BlockStoragePolicySuite bsps = fsd.GetBlockStoragePolicySuite();
    // The quota required at the destination is the full usage of the
    // subtree being moved.
    QuotaCounts delta = src.GetLastINode().ComputeQuotaUsage(bsps);
    // Reduce the required quota by dst that is being removed
    INode dstINode = dst.GetLastINode();
    if (dstINode != null)
    {
        delta.Subtract(dstINode.ComputeQuotaUsage(bsps));
    }
    // Verify the delta from the last common ancestor down to dst's parent;
    // ancestors above it see no net change.
    FSDirectory.VerifyQuota(dst, dst.Length() - 1, delta, src.GetINode(i - 1));
}
/// <summary>
/// Delete a path from the name space and update the count at each ancestor
/// directory with quota.
/// <br />
/// Note: This is to be used by
/// <see cref="FSEditLog"/>
/// only.
/// <br />
/// </summary>
/// <param name="src">a string representation of a path to an inode</param>
/// <param name="mtime">the time the inode is removed</param>
/// <exception cref="System.IO.IOException"/>
internal static void DeleteForEditLog(FSDirectory fsd, string src, long mtime)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    FSNamesystem namesystem = fsd.GetFSNamesystem();
    INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
    IList<INode> removedINodes = new ChunkedArrayList<INode>();
    INodesInPath iip = fsd.GetINodesInPath4Write(FSDirectory.NormalizePath(src), false);
    if (!DeleteAllowed(iip, src))
    {
        return;
    }
    IList<INodeDirectory> snapshottableDirs = new AList<INodeDirectory>();
    FSDirSnapshotOp.CheckSnapshot(iip.GetLastINode(), snapshottableDirs);
    long removedCount = UnprotectedDelete(fsd, iip, collectedBlocks, removedINodes, mtime);
    namesystem.RemoveSnapshottableDirs(snapshottableDirs);
    // Only update leases, the inode map and safemode block totals when
    // something was actually removed.
    if (removedCount >= 0)
    {
        namesystem.RemoveLeasesAndINodes(src, removedINodes, false);
        namesystem.RemoveBlocksAndUpdateSafemodeTotal(collectedBlocks);
    }
}
/// <summary>Set mtime/atime on the last inode of the resolved path (no permission check).</summary>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
internal static bool UnprotectedSetTimes(FSDirectory fsd, string src, long mtime, long atime, bool force)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    INodesInPath resolved = fsd.GetINodesInPath(src, true);
    // Delegate to the INode-based overload, passing the latest snapshot id.
    return UnprotectedSetTimes(fsd, resolved.GetLastINode(), mtime, atime, force, resolved.GetLatestSnapshotId());
}
/// <summary>Get a partial listing of the directory (or file) at src.</summary>
/// <remarks>
/// Resolves srcArg (including /.reserved paths), checks permissions, and
/// returns entries that come after <paramref name="startAfter"/>.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
internal static DirectoryListing GetListingInt(FSDirectory fsd, string srcArg, byte[] startAfter, bool needLocation)
{
    FSPermissionChecker pc = fsd.GetPermissionChecker();
    byte[][] pathComponents = FSDirectory.GetPathComponentsForReservedPath(srcArg);
    string startAfterString = new string(startAfter, Charsets.Utf8);
    string src = fsd.ResolvePath(pc, srcArg, pathComponents);
    INodesInPath iip = fsd.GetINodesInPath(src, true);
    // Get file name when startAfter is an INodePath
    if (FSDirectory.IsReservedName(startAfterString))
    {
        byte[][] startAfterComponents = FSDirectory.GetPathComponentsForReservedPath(startAfterString);
        try
        {
            string tmp = FSDirectory.ResolvePath(src, startAfterComponents, fsd);
            byte[][] regularPath = INode.GetPathComponents(tmp);
            // Only the final component is used as the resume cursor.
            startAfter = regularPath[regularPath.Length - 1];
        }
        catch (IOException)
        {
            // Possibly the inode is deleted
            throw new DirectoryListingStartAfterNotFoundException("Can't find startAfter " + startAfterString);
        }
    }
    bool isSuperUser = true;
    if (fsd.IsPermissionEnabled())
    {
        // Listing a directory requires READ_EXECUTE on it; for anything else
        // only the ability to traverse to the inode is required.
        if (iip.GetLastINode() != null && iip.GetLastINode().IsDirectory())
        {
            fsd.CheckPathAccess(pc, iip, FsAction.ReadExecute);
        }
        else
        {
            fsd.CheckTraverse(pc, iip);
        }
        isSuperUser = pc.IsSuperUser();
    }
    return(GetListing(fsd, iip, src, startAfter, needLocation, isSuperUser));
}
/// <summary>Attach the source inode (or a reference to it) under the destination parent.</summary>
/// <remarks>
/// When <c>withCount</c> is null the source child itself is renamed and
/// reparented; otherwise a DstReference wrapping the shared WithCount node
/// is added instead (presumably the snapshot-reference case — confirm
/// against the enclosing rename operation). Uses the no-quota-check
/// variant of add.
/// </remarks>
/// <returns>true if the child was successfully added under the destination parent</returns>
internal virtual bool AddSourceToDestination()
{
    INode dstParent = dstParentIIP.GetLastINode();
    byte[] dstChildName = dstIIP.GetLastLocalName();
    INode toDst;
    if (withCount == null)
    {
        // Plain move: give the source inode the destination's local name.
        srcChild.SetLocalName(dstChildName);
        toDst = srcChild;
    }
    else
    {
        // Keep the WithCount node shared; add a new DstReference pointing
        // at it from the destination parent.
        withCount.GetReferredINode().SetLocalName(dstChildName);
        toDst = new INodeReference.DstReference(dstParent.AsDirectory(), withCount, dstIIP.GetLatestSnapshotId());
    }
    return(fsd.AddLastINodeNoQuotaCheck(dstParentIIP, toDst) != null);
}
/// <summary>Create a directory at the path specified by parent.</summary>
/// <param name="fsd">the directory tree</param>
/// <param name="inodeId">id to assign to the new directory inode</param>
/// <param name="parent">resolved path whose last inode is the parent directory</param>
/// <param name="name">local name of the new directory</param>
/// <param name="permission">permission status for the new directory</param>
/// <param name="aclEntries">ACL entries to apply, may be null</param>
/// <param name="timestamp">modification time for the new directory</param>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.AclException"/>
/// <exception cref="Org.Apache.Hadoop.FS.FileAlreadyExistsException"/>
private static INodesInPath UnprotectedMkdir(FSDirectory fsd, long inodeId, INodesInPath parent, byte[] name, PermissionStatus permission, IList<AclEntry> aclEntries, long timestamp)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    System.Diagnostics.Debug.Assert(parent.GetLastINode() != null);
    if (!parent.GetLastINode().IsDirectory())
    {
        throw new FileAlreadyExistsException("Parent path is not a directory: " + parent.GetPath() + " " + DFSUtil.Bytes2String(name));
    }
    INodeDirectory newDir = new INodeDirectory(inodeId, name, permission, timestamp);
    INodesInPath result = fsd.AddLastINode(parent, newDir, true);
    // Apply ACLs only when the directory was actually added.
    if (result != null && aclEntries != null)
    {
        AclStorage.UpdateINodeAcl(newDir, aclEntries, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
    }
    return result;
}
/// <summary>Create a directory while replaying the edit log (no permission checks).</summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.AclException"/>
/// <exception cref="Org.Apache.Hadoop.FS.FileAlreadyExistsException"/>
internal static void MkdirForEditLog(FSDirectory fsd, long inodeId, string src, PermissionStatus permissions, IList<AclEntry> aclEntries, long timestamp)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    INodesInPath iip = fsd.GetINodesInPath(src, false);
    byte[] localName = iip.GetLastLocalName();
    INodesInPath parentPath = iip.GetParentINodesInPath();
    // The parent must already exist when replaying a mkdir record.
    Preconditions.CheckState(parentPath.GetLastINode() != null);
    UnprotectedMkdir(fsd, inodeId, parentPath, localName, permissions, aclEntries, timestamp);
}
/// <summary>Remove the existing destination inode (rename-with-overwrite).</summary>
/// <remarks>
/// On success the removed inode is remembered in <c>oldDstChild</c>, the
/// quota counts are updated for the deletion, and the last slot of
/// <c>dstIIP</c> is cleared.
/// </remarks>
/// <returns>the value returned by <c>RemoveLastINode</c>; -1 when nothing was removed</returns>
internal virtual long RemoveDst()
{
    long removedNum = fsd.RemoveLastINode(dstIIP);
    if (removedNum != -1)
    {
        // NOTE(review): dstIIP still holds the old child in its last slot
        // here, so GetLastINode() must run before the Replace below.
        oldDstChild = dstIIP.GetLastINode();
        // update the quota count if necessary
        fsd.UpdateCountForDelete(oldDstChild, dstIIP);
        dstIIP = INodesInPath.Replace(dstIIP, dstIIP.Length() - 1, null);
    }
    return(removedNum);
}
/// <summary>Path resolution for a snapshot file after deleting the original file.</summary>
/// <remarks>
/// Verifies that after deleting /TestSnapshot/sub1/file1:
/// (1) the snapshot path .snapshot/s2/file1 still resolves to the file, and
/// (2) the regular path resolves with a null last INode.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestSnapshotPathINodesAfterDeletion()
{
    // Create a snapshot for the dir, and check the inodes for the path
    // pointing to a snapshot file
    hdfs.AllowSnapshot(sub1);
    hdfs.CreateSnapshot(sub1, "s2");
    // Delete the original file /TestSnapshot/sub1/file1
    hdfs.Delete(file1, false);
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot;
    {
        // Resolve the path for the snapshot file
        // /TestSnapshot/sub1/.snapshot/s2/file1
        string snapshotPath = sub1.ToString() + "/.snapshot/s2/file1";
        string[] names = INode.GetPathNames(snapshotPath);
        byte[][] components = INode.GetPathComponents(names);
        INodesInPath nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);
        // Length of inodes should be (components.length - 1), since we will ignore
        // ".snapshot"
        NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length - 1);
        // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s2, file1}
        snapshot = GetSnapshot(nodesInPath, "s2", 3);
        AssertSnapshot(nodesInPath, true, snapshot, 3);
        // Check the INode for file1 (snapshot file)
        INode inode = nodesInPath.GetLastINode();
        NUnit.Framework.Assert.AreEqual(file1.GetName(), inode.GetLocalName());
        NUnit.Framework.Assert.IsTrue(inode.AsFile().IsWithSnapshot());
    }
    // Check the INodes for path /TestSnapshot/sub1/file1
    string[] names_1 = INode.GetPathNames(file1.ToString());
    byte[][] components_1 = INode.GetPathComponents(names_1);
    INodesInPath nodesInPath_1 = INodesInPath.Resolve(fsdir.rootDir, components_1, false);
    // The length of inodes should be equal to components.length
    NUnit.Framework.Assert.AreEqual(nodesInPath_1.Length(), components_1.Length);
    // The number of non-null elements should be components.length - 1 since
    // file1 has been deleted
    NUnit.Framework.Assert.AreEqual(GetNumNonNull(nodesInPath_1), components_1.Length - 1);
    // The returned nodesInPath should be non-snapshot
    AssertSnapshot(nodesInPath_1, false, snapshot, -1);
    // The last INode should be null, and the one before should be associated
    // with sub1
    NUnit.Framework.Assert.IsNull(nodesInPath_1.GetINode(components_1.Length - 1));
    NUnit.Framework.Assert.AreEqual(nodesInPath_1.GetINode(components_1.Length - 2).GetFullPathName(), sub1.ToString());
    NUnit.Framework.Assert.AreEqual(nodesInPath_1.GetINode(components_1.Length - 3).GetFullPathName(), dir.ToString());
    // Clean up the snapshot state created by this test.
    hdfs.DeleteSnapshot(sub1, "s2");
    hdfs.DisallowSnapshot(sub1);
}
/// <summary>
/// Delete a path from the name space
/// Update the count at each ancestor directory with quota
/// </summary>
/// <param name="iip">the inodes resolved from the path</param>
/// <param name="collectedBlocks">blocks collected from the deleted path</param>
/// <param name="removedINodes">inodes that should be removed from inodeMap</param>
/// <param name="mtime">the time the inode is removed</param>
/// <returns>the number of inodes deleted; 0 if no inodes are deleted.</returns>
private static long UnprotectedDelete(FSDirectory fsd, INodesInPath iip, INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes, long mtime)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    // check if target node exists
    INode targetNode = iip.GetLastINode();
    if (targetNode == null)
    {
        return(-1);
    }
    // record modification (must happen before the node is detached)
    int latestSnapshot = iip.GetLatestSnapshotId();
    targetNode.RecordModification(latestSnapshot);
    // Remove the node from the namespace
    long removed = fsd.RemoveLastINode(iip);
    if (removed == -1)
    {
        return(-1);
    }
    // set the parent's modification time
    INodeDirectory parent = targetNode.GetParent();
    parent.UpdateModificationTime(mtime, latestSnapshot);
    fsd.UpdateCountForDelete(targetNode, iip);
    // Nothing further to collect when RemoveLastINode reports 0.
    if (removed == 0)
    {
        return(0);
    }
    // collect block and update quota
    if (!targetNode.IsInLatestSnapshot(latestSnapshot))
    {
        // Not in the latest snapshot: destroy the subtree outright and
        // collect its blocks.
        targetNode.DestroyAndCollectBlocks(fsd.GetBlockStoragePolicySuite(), collectedBlocks, removedINodes);
    }
    else
    {
        // In the latest snapshot: clean only the current state and adjust
        // ancestor quota by the counts actually released.
        QuotaCounts counts = targetNode.CleanSubtree(fsd.GetBlockStoragePolicySuite(), Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId, latestSnapshot, collectedBlocks, removedINodes);
        removed = counts.GetNameSpace();
        fsd.UpdateCountNoQuotaCheck(iip, iip.Length() - 1, counts.Negation());
    }
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* FSDirectory.unprotectedDelete: " + iip.GetPath() + " is removed");
    }
    return(removed);
}
/// <summary>Returns true if the file is closed.</summary>
/// <exception cref="System.IO.IOException"/>
internal static bool IsFileClosed(FSDirectory fsd, string src)
{
    FSPermissionChecker pc = fsd.GetPermissionChecker();
    byte[][] components = FSDirectory.GetPathComponentsForReservedPath(src);
    src = fsd.ResolvePath(pc, src, components);
    INodesInPath iip = fsd.GetINodesInPath(src, true);
    if (fsd.IsPermissionEnabled())
    {
        fsd.CheckTraverse(pc, iip);
    }
    // A file is considered closed when it is not under construction.
    INodeFile file = INodeFile.ValueOf(iip.GetLastINode(), src);
    return !file.IsUnderConstruction();
}
/// <summary>Set the permission of the inode at <paramref name="src"/> (no permission check).</summary>
/// <exception cref="System.IO.FileNotFoundException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotAccessControlException"/>
internal static void UnprotectedSetPermission(FSDirectory fsd, string src, FsPermission permissions)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    INodesInPath iip = fsd.GetINodesInPath4Write(src, true);
    INode target = iip.GetLastINode();
    if (target == null)
    {
        throw new FileNotFoundException("File does not exist: " + src);
    }
    target.SetPermission(permissions, iip.GetLatestSnapshotId());
}
/// <summary>Get the file info for a specific file.</summary>
/// <param name="fsd">FSDirectory</param>
/// <param name="path">the string representation of the path to the file</param>
/// <param name="src">the resolved INodesInPath for the file</param>
/// <param name="isRawPath">true if a /.reserved/raw pathname was passed by the user</param>
/// <param name="includeStoragePolicy">whether to include storage policy</param>
/// <returns>
/// object containing information regarding the file,
/// or null if file not found
/// </returns>
/// <exception cref="System.IO.IOException"/>
internal static HdfsFileStatus GetFileInfo(FSDirectory fsd, string path, INodesInPath src, bool isRawPath, bool includeStoragePolicy)
{
    fsd.ReadLock();
    try
    {
        INode node = src.GetLastINode();
        if (node == null)
        {
            return null;
        }
        // Symlinks are not given a storage policy of their own.
        byte policyId = includeStoragePolicy && !node.IsSymlink() ? node.GetStoragePolicyID() : BlockStoragePolicySuite.IdUnspecified;
        return CreateFileStatus(fsd, path, HdfsFileStatus.EmptyName, node, policyId, src.GetPathSnapshotId(), isRawPath, src);
    }
    finally
    {
        fsd.ReadUnlock();
    }
}
/// <summary>Sanity-check the target file of a concat operation.</summary>
/// <exception cref="System.IO.IOException"/>
private static void VerifyTargetFile(FSDirectory fsd, string target, INodesInPath targetIIP)
{
    // concat is not supported for files inside an encryption zone.
    if (fsd.GetEZForPath(targetIIP) != null)
    {
        throw new HadoopIllegalArgumentException("concat can not be called for files in an encryption zone.");
    }
    INodeFile targetFile = INodeFile.ValueOf(targetIIP.GetLastINode(), target);
    // The target must already be closed.
    if (targetFile.IsUnderConstruction())
    {
        throw new HadoopIllegalArgumentException("concat: target file " + target + " is under construction");
    }
}
/// <summary>
/// Throws an exception if the provided path cannot be renamed into the
/// destination because of differing encryption zones.
/// </summary>
/// <remarks>
/// Throws an exception if the provided path cannot be renamed into the
/// destination because of differing encryption zones.
/// <p/>
/// Called while holding the FSDirectory lock.
/// </remarks>
/// <param name="srcIIP">source IIP</param>
/// <param name="dstIIP">destination IIP</param>
/// <param name="src">source path, used for debugging</param>
/// <exception cref="System.IO.IOException">if the src cannot be renamed to the dst</exception>
internal virtual void CheckMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, string src)
{
    System.Diagnostics.Debug.Assert(dir.HasReadLock());
    EncryptionZoneManager.EncryptionZoneInt srcEZI = GetEncryptionZoneForPath(srcIIP);
    EncryptionZoneManager.EncryptionZoneInt dstEZI = GetEncryptionZoneForPath(dstIIP);
    bool srcInEZ = (srcEZI != null);
    bool dstInEZ = (dstEZI != null);
    if (srcInEZ)
    {
        // Moving out of a zone is only allowed when src is the zone root.
        if (!dstInEZ)
        {
            if (srcEZI.GetINodeId() == srcIIP.GetLastINode().GetId())
            {
                // src is ez root and dest is not in an ez. Allow the rename.
                return;
            }
            throw new IOException(src + " can't be moved from an encryption zone.");
        }
    }
    else
    {
        // Moving a non-zone path into a zone is never allowed.
        if (dstInEZ)
        {
            throw new IOException(src + " can't be moved into an encryption zone.");
        }
    }
    if (srcInEZ)
    {
        // At this point both ends are in zones; they must be the same zone.
        if (srcEZI != dstEZI)
        {
            string srcEZPath = GetFullPathName(srcEZI);
            string dstEZPath = GetFullPathName(dstEZI);
            StringBuilder sb = new StringBuilder(src);
            sb.Append(" can't be moved from encryption zone ");
            sb.Append(srcEZPath);
            sb.Append(" to encryption zone ");
            sb.Append(dstEZPath);
            sb.Append(".");
            throw new IOException(sb.ToString());
        }
    }
}
/// <summary>Set the storage policy of a file or directory (no permission check).</summary>
/// <exception cref="System.IO.IOException"/>
internal static void UnprotectedSetStoragePolicy(FSDirectory fsd, BlockManager bm, INodesInPath iip, byte policyId)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    INode inode = iip.GetLastINode();
    if (inode == null)
    {
        throw new FileNotFoundException("File/Directory does not exist: " + iip.GetPath());
    }
    int snapshotId = iip.GetLatestSnapshotId();
    if (inode.IsFile())
    {
        // Copy-on-create policies can neither be set on nor replaced for an
        // existing file.
        BlockStoragePolicy newPolicy = bm.GetStoragePolicy(policyId);
        if (newPolicy.IsCopyOnCreateFile())
        {
            throw new HadoopIllegalArgumentException("Policy " + newPolicy + " cannot be set after file creation.");
        }
        BlockStoragePolicy currentPolicy = bm.GetStoragePolicy(inode.GetLocalStoragePolicyID());
        if (currentPolicy != null && currentPolicy.IsCopyOnCreateFile())
        {
            throw new HadoopIllegalArgumentException("Existing policy " + currentPolicy.GetName() + " cannot be changed after file creation.");
        }
        inode.AsFile().SetStoragePolicyID(policyId, snapshotId);
    }
    else if (inode.IsDirectory())
    {
        SetDirStoragePolicy(fsd, inode.AsDirectory(), policyId, snapshotId);
    }
    else
    {
        throw new FileNotFoundException(iip.GetPath() + " is not a file or directory");
    }
}
/// <summary>Check access for changing a user-namespace xattr.</summary>
/// <remarks>
/// On sticky-bit directories only the owner (or a superuser) may change
/// user xattrs; otherwise WRITE access on the path is required. Non-user
/// namespaces and disabled permissions skip the check entirely.
/// </remarks>
/// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
private static void CheckXAttrChangeAccess(FSDirectory fsd, INodesInPath iip, XAttr xAttr, FSPermissionChecker pc)
{
    if (!fsd.IsPermissionEnabled() || xAttr.GetNameSpace() != XAttr.NameSpace.User)
    {
        return;
    }
    INode inode = iip.GetLastINode();
    bool stickyDir = inode != null && inode.IsDirectory() && inode.GetFsPermission().GetStickyBit();
    if (stickyDir)
    {
        if (!pc.IsSuperUser())
        {
            fsd.CheckOwner(pc, iip);
        }
    }
    else
    {
        fsd.CheckPathAccess(pc, iip, FsAction.Write);
    }
}
/// <summary>Set owner and/or group of an inode (no permission check).</summary>
/// <exception cref="System.IO.FileNotFoundException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotAccessControlException"/>
internal static void UnprotectedSetOwner(FSDirectory fsd, string src, string username, string groupname)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    INodesInPath iip = fsd.GetINodesInPath4Write(src, true);
    INode inode = iip.GetLastINode();
    if (inode == null)
    {
        throw new FileNotFoundException("File does not exist: " + src);
    }
    if (username != null)
    {
        // SetUser returns the (possibly replaced) inode; the group change
        // below must go through that returned reference.
        inode = inode.SetUser(username, iip.GetLatestSnapshotId());
    }
    if (groupname != null)
    {
        inode.SetGroup(groupname, iip.GetLatestSnapshotId());
    }
}
/// <summary>Cursor-based listing of encryption zones.</summary>
/// <remarks>
/// Cursor-based listing of encryption zones: returns up to
/// maxListEncryptionZonesResponses zones whose inode id is greater than
/// <paramref name="prevId"/>.
/// <p/>
/// Called while holding the FSDirectory lock.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
internal virtual BatchedRemoteIterator.BatchedListEntries<EncryptionZone> ListEncryptionZones(long prevId)
{
    System.Diagnostics.Debug.Assert(dir.HasReadLock());
    // Resume strictly after prevId (exclusive tail map).
    NavigableMap<long, EncryptionZoneManager.EncryptionZoneInt> tailMap = encryptionZones.TailMap(prevId, false);
    int numResponses = Math.Min(maxListEncryptionZonesResponses, tailMap.Count);
    IList<EncryptionZone> zones = Lists.NewArrayListWithExpectedSize(numResponses);
    int count = 0;
    foreach (EncryptionZoneManager.EncryptionZoneInt ezi in tailMap.Values)
    {
        /*
        * Skip EZs that are only present in snapshots. Re-resolve the path to
        * see if the path's current inode ID matches EZ map's INode ID.
        *
        * INode#getFullPathName simply calls getParent recursively, so will return
        * the INode's parents at the time it was snapshotted. It will not
        * contain a reference INode.
        */
        string pathName = GetFullPathName(ezi);
        INodesInPath iip = dir.GetINodesInPath(pathName, false);
        INode lastINode = iip.GetLastINode();
        if (lastINode == null || lastINode.GetId() != ezi.GetINodeId())
        {
            continue;
        }
        // Add the EZ to the result list
        zones.AddItem(new EncryptionZone(ezi.GetINodeId(), pathName, ezi.GetSuite(), ezi.GetVersion(), ezi.GetKeyName()));
        count++;
        if (count >= numResponses)
        {
            break;
        }
    }
    // More zones remain whenever the tail map held more than one batch.
    bool hasMore = (numResponses < tailMap.Count);
    return(new BatchedRemoteIterator.BatchedListEntries<EncryptionZone>(zones, hasMore));
}
/// <summary>Get the preferred block size of the file at <paramref name="src"/>.</summary>
/// <exception cref="System.IO.IOException"/>
internal static long GetPreferredBlockSize(FSDirectory fsd, string src)
{
    FSPermissionChecker pc = fsd.GetPermissionChecker();
    byte[][] components = FSDirectory.GetPathComponentsForReservedPath(src);
    fsd.ReadLock();
    try
    {
        src = fsd.ResolvePath(pc, src, components);
        INodesInPath iip = fsd.GetINodesInPath(src, false);
        if (fsd.IsPermissionEnabled())
        {
            fsd.CheckTraverse(pc, iip);
        }
        // ValueOf throws if the path does not name a file.
        return INodeFile.ValueOf(iip.GetLastINode(), src).GetPreferredBlockSize();
    }
    finally
    {
        fsd.ReadUnlock();
    }
}
/// <summary>Validate the source of a rename operation.</summary>
/// <exception cref="System.IO.IOException"/>
private static void ValidateRenameSource(INodesInPath srcIIP)
{
    INode srcInode = srcIIP.GetLastINode();
    // The source must exist.
    if (srcInode == null)
    {
        string error = "rename source " + srcIIP.GetPath() + " is not found.";
        NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new FileNotFoundException(error);
    }
    // The root itself cannot be renamed.
    if (srcIIP.Length() == 1)
    {
        string error = "rename source cannot be the root";
        NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new IOException(error);
    }
    // srcInode and its subtree cannot contain snapshottable directories with
    // snapshots
    FSDirSnapshotOp.CheckSnapshot(srcInode, null);
}
/// <summary>Set the replication factor of a file (no permission check).</summary>
/// <returns>
/// the file's blocks, or null if the path does not name a file;
/// when non-null, blockRepls receives {oldBR, newBR}
/// </returns>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotAccessControlException"/>
internal static Block[] UnprotectedSetReplication(FSDirectory fsd, string src, short replication, short[] blockRepls)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    INodesInPath iip = fsd.GetINodesInPath4Write(src, true);
    INode inode = iip.GetLastINode();
    if (inode == null || !inode.IsFile())
    {
        return(null);
    }
    INodeFile file = inode.AsFile();
    short oldBR = file.GetBlockReplication();
    // before setFileReplication, check for increasing block replication.
    // if replication > oldBR, then newBR == replication.
    // if replication < oldBR, we don't know newBR yet.
    if (replication > oldBR)
    {
        // Quota-checked update: increasing replication may exceed the quota.
        long dsDelta = file.StoragespaceConsumed() / oldBR;
        fsd.UpdateCount(iip, 0L, dsDelta, oldBR, replication, true);
    }
    file.SetFileReplication(replication, iip.GetLatestSnapshotId());
    // newBR is only known after SetFileReplication has been applied.
    short newBR = file.GetBlockReplication();
    // check newBR < oldBR case.
    if (newBR < oldBR)
    {
        long dsDelta = file.StoragespaceConsumed() / newBR;
        fsd.UpdateCount(iip, 0L, dsDelta, oldBR, newBR, true);
    }
    if (blockRepls != null)
    {
        blockRepls[0] = oldBR;
        blockRepls[1] = newBR;
    }
    return(file.GetBlocks());
}
/// <summary>Whether a delete of <paramref name="src"/> may proceed.</summary>
/// <returns>false when the path does not exist or names the root</returns>
private static bool DeleteAllowed(INodesInPath iip, string src)
{
    // A non-existent target is logged at debug level only.
    if (iip.Length() < 1 || iip.GetLastINode() == null)
    {
        if (NameNode.stateChangeLog.IsDebugEnabled())
        {
            NameNode.stateChangeLog.Debug("DIR* FSDirectory.unprotectedDelete: failed to remove " + src + " because it does not exist");
        }
        return false;
    }
    // src is the root: refuse and warn.
    if (iip.Length() == 1)
    {
        NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedDelete: failed to remove " + src + " because the root is not allowed to be deleted");
        return false;
    }
    return true;
}
/// <summary>Truncate the test file and verify quota accounting.</summary>
/// <remarks>
/// First checks that ComputeQuotaDeltaForTruncate predicts expectedDiff,
/// then performs the truncation and compares the directory's consumed
/// storage space (total and DISK type) against expectedUsage.
/// </remarks>
/// <exception cref="System.Exception"/>
private void TestTruncate(long newLength, long expectedDiff, long expectedUsage)
{
    // before doing the real truncation, make sure the computation is correct
    INodesInPath iip = fsdir.GetINodesInPath4Write(file.ToString());
    INodeFile fileNode = iip.GetLastINode().AsFile();
    fileNode.RecordModification(iip.GetLatestSnapshotId(), true);
    long diff = fileNode.ComputeQuotaDeltaForTruncate(newLength);
    NUnit.Framework.Assert.AreEqual(expectedDiff, diff);
    // do the real truncation and wait for it to finish
    dfs.Truncate(file, newLength);
    TestFileTruncate.CheckBlockRecovery(file, dfs);
    INodeDirectory dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    long spaceUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetStorageSpace();
    long diskUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Disk);
    NUnit.Framework.Assert.AreEqual(expectedUsage, spaceUsed);
    NUnit.Framework.Assert.AreEqual(expectedUsage, diskUsed);
}
/// <summary>Create one directory as the child of <paramref name="existing"/>'s last inode.</summary>
/// <exception cref="System.IO.IOException"/>
private static INodesInPath CreateSingleDirectory(FSDirectory fsd, INodesInPath existing, string localName, PermissionStatus perm)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    existing = UnprotectedMkdir(fsd, fsd.AllocateNewInodeId(), existing, Sharpen.Runtime.GetBytesForString(localName, Charsets.Utf8), perm, null, Time.Now());
    if (existing == null)
    {
        return null;
    }
    INode created = existing.GetLastINode();
    // Directory creation also counts towards FilesCreated
    // to match the count of the FilesDeleted metric.
    NameNode.GetNameNodeMetrics().IncrFilesCreated();
    string createdPath = existing.GetPath();
    fsd.GetEditLog().LogMkDir(createdPath, created);
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("mkdirs: created directory " + createdPath);
    }
    return existing;
}