/// <summary>Delete the target directory and collect the blocks under it</summary>
/// <param name="fsd">the directory tree; its write lock is held for the duration</param>
/// <param name="iip">the INodesInPath instance containing all the INodes for the path</param>
/// <param name="collectedBlocks">Blocks under the deleted directory</param>
/// <param name="removedINodes">INodes that should be removed from inodeMap</param>
/// <param name="mtime">modification time recorded for the removal</param>
/// <returns>the number of files that have been removed, or -1 when deletion is not allowed</returns>
/// <exception cref="System.IO.IOException"/>
internal static long Delete(FSDirectory fsd, INodesInPath iip, INode.BlocksMapUpdateInfo
    collectedBlocks, IList<INode> removedINodes, long mtime)
{
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* FSDirectory.delete: " + iip.GetPath());
    }
    fsd.WriteLock();
    try
    {
        if (!DeleteAllowed(iip, iip.GetPath()))
        {
            // Deletion refused; the finally block still releases the lock.
            return -1;
        }
        // Collect snapshottable directories under the target while verifying
        // that none of them still holds snapshots.
        IList<INodeDirectory> snapshottableDirs = new AList<INodeDirectory>();
        FSDirSnapshotOp.CheckSnapshot(iip.GetLastINode(), snapshottableDirs);
        long deleted = UnprotectedDelete(fsd, iip, collectedBlocks, removedINodes, mtime);
        // The collected snapshottable directories were removed with the
        // subtree; unregister them from the namesystem.
        fsd.GetFSNamesystem().RemoveSnapshottableDirs(snapshottableDirs);
        return deleted;
    }
    finally
    {
        fsd.WriteUnlock();
    }
}
/// <summary>
/// Compute the content summary of the last inode in <paramref name="iip"/>
/// while holding the directory read lock.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private static ContentSummary GetContentSummaryInt(FSDirectory fsd, INodesInPath iip)
{
    fsd.ReadLock();
    try
    {
        INode node = iip.GetLastINode();
        if (node == null)
        {
            throw new FileNotFoundException("File does not exist: " + iip.GetPath());
        }
        // Make it relinquish locks everytime contentCountLimit entries are
        // processed. 0 means disabled. I.e. blocking for the entire duration.
        ContentSummaryComputationContext context = new ContentSummaryComputationContext(
            fsd, fsd.GetFSNamesystem(), fsd.GetContentCountLimit(),
            fsd.GetContentSleepMicroSec());
        ContentSummary summary = node.ComputeAndConvertContentSummary(context);
        // Record how often the computation yielded the lock, for metrics.
        fsd.AddYieldCount(context.GetYieldCount());
        return summary;
    }
    finally
    {
        fsd.ReadUnlock();
    }
}
/// <summary>
/// For a given absolute path, create all ancestors as directories along the
/// path.
/// </summary>
/// <remarks>
/// For a given absolute path, create all ancestors as directories along the
/// path. All ancestors inherit their parent's permission plus an implicit
/// u+wx permission. This is used by create() and addSymlink() for
/// implicitly creating all directories along the path.
/// For example, path="/foo/bar/spam", "/foo" is an existing directory,
/// "/foo/bar" is not existing yet, the function will create directory bar.
/// </remarks>
/// <returns>
/// a tuple which contains both the new INodesInPath (with all the
/// existing and newly created directories) and the last component in the
/// relative path. Or return null if there are errors.
/// </returns>
/// <exception cref="System.IO.IOException"/>
internal static KeyValuePair<INodesInPath, string> CreateAncestorDirectories(FSDirectory
    fsd, INodesInPath iip, PermissionStatus permission)
{
    string lastComponent = new string(iip.GetLastLocalName(), Charsets.Utf8);
    INodesInPath existing = iip.GetExistingINodes();
    // The path components beyond the deepest existing inode.
    IList<string> missing = iip.GetPath(existing.Length(),
        iip.Length() - existing.Length());
    int missingCount = missing.Count;
    // More than one missing component means there are ancestors to create
    // (the final component itself is created by the caller).
    if (missingCount > 1)
    {
        IList<string> ancestors = missing.SubList(0, missingCount - 1);
        INode parentINode = existing.GetLastINode();
        // Ensure that the user can traverse the path by adding implicit
        // u+wx permission to all ancestor directories.
        existing = CreateChildrenDirectories(fsd, existing, ancestors,
            AddImplicitUwx(parentINode.GetPermissionStatus(), permission));
        if (existing == null)
        {
            return null;
        }
    }
    return new AbstractMap.SimpleImmutableEntry<INodesInPath, string>(existing,
        lastComponent);
}
/// <summary>Check whether current user have permissions to access the path.</summary>
/// <remarks>
/// Check whether current user have permissions to access the path.
/// Traverse is always checked.
/// Parent path means the parent directory for the path.
/// Ancestor path means the last (the closest) existing ancestor directory
/// of the path.
/// Note that if the parent path exists,
/// then the parent path and the ancestor path are the same.
/// For example, suppose the path is "/foo/bar/baz".
/// No matter baz is a file or a directory,
/// the parent path is "/foo/bar".
/// If bar exists, then the ancestor path is also "/foo/bar".
/// If bar does not exist and foo exists,
/// then the ancestor path is "/foo".
/// Further, if both foo and bar do not exist,
/// then the ancestor path is "/".
/// </remarks>
/// <param name="doCheckOwner">Require user to be the owner of the path?</param>
/// <param name="ancestorAccess">The access required by the ancestor of the path.</param>
/// <param name="parentAccess">The access required by the parent of the path.</param>
/// <param name="access">The access required by the path.</param>
/// <param name="subAccess">
/// If path is a directory,
/// it is the access required of the path and all the sub-directories.
/// If path is not a directory, there is no effect.
/// </param>
/// <param name="ignoreEmptyDir">Ignore permission checking for empty directory?</param>
/// <exception cref="Org.Apache.Hadoop.Security.AccessControlException">
/// Guarded by
/// <see cref="FSNamesystem.ReadLock()"/>
/// Caller of this method must hold that lock.
/// </exception>
internal virtual void CheckPermission(INodesInPath inodesInPath, bool doCheckOwner
    , FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess
    , bool ignoreEmptyDir)
{
    if (Log.IsDebugEnabled())
    {
        Log.Debug("ACCESS CHECK: " + this + ", doCheckOwner=" + doCheckOwner +
            ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess +
            ", access=" + access + ", subAccess=" + subAccess + ", ignoreEmptyDir=" +
            ignoreEmptyDir);
    }
    // check if (parentAccess != null) && file exists, then check sb
    // If resolveLink, the check is performed on the link target.
    int snapshotId = inodesInPath.GetPathSnapshotId();
    INode[] inodes = inodesInPath.GetINodesArray();
    INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.Length];
    byte[][] pathByNameArr = new byte[inodes.Length][];
    // Resolve attributes for the leading non-null prefix of the path. The
    // loop condition already guarantees inodes[i] != null inside the body,
    // so the redundant inner null check has been removed.
    for (int i = 0; i < inodes.Length && inodes[i] != null; i++)
    {
        pathByNameArr[i] = inodes[i].GetLocalNameBytes();
        inodeAttrs[i] = GetINodeAttrs(pathByNameArr, i, inodes[i], snapshotId);
    }
    string path = inodesInPath.GetPath();
    int ancestorIndex = inodes.Length - 2;
    // Delegate the actual enforcement, possibly to an external provider.
    INodeAttributeProvider.AccessControlEnforcer enforcer =
        GetAttributesProvider().GetExternalAccessControlEnforcer(this);
    enforcer.CheckPermission(fsOwner, supergroup, callerUgi, inodeAttrs, inodes,
        pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner, ancestorAccess,
        parentAccess, access, subAccess, ignoreEmptyDir);
}
/// <summary>
/// Set the storage policy of the last inode in <paramref name="iip"/> to
/// <paramref name="policyId"/>; caller must hold the directory write lock.
/// </summary>
/// <exception cref="System.IO.IOException"/>
internal static void UnprotectedSetStoragePolicy(FSDirectory fsd, BlockManager bm
    , INodesInPath iip, byte policyId)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    INode target = iip.GetLastINode();
    if (target == null)
    {
        throw new FileNotFoundException("File/Directory does not exist: " + iip.GetPath()
            );
    }
    int snapshotId = iip.GetLatestSnapshotId();
    if (target.IsFile())
    {
        // Policies that are copy-on-create cannot be applied to, or replaced
        // on, an already-created file.
        BlockStoragePolicy requested = bm.GetStoragePolicy(policyId);
        if (requested.IsCopyOnCreateFile())
        {
            throw new HadoopIllegalArgumentException("Policy " + requested + " cannot be set after file creation."
                );
        }
        BlockStoragePolicy current = bm.GetStoragePolicy(target.GetLocalStoragePolicyID());
        if (current != null && current.IsCopyOnCreateFile())
        {
            throw new HadoopIllegalArgumentException("Existing policy " + current.GetName
                () + " cannot be changed after file creation.");
        }
        target.AsFile().SetStoragePolicyID(policyId, snapshotId);
    }
    else if (target.IsDirectory())
    {
        SetDirStoragePolicy(fsd, target.AsDirectory(), policyId, snapshotId);
    }
    else
    {
        // Neither a file nor a directory (e.g. a symlink).
        throw new FileNotFoundException(iip.GetPath() + " is not a file or directory");
    }
}
/// <summary>
/// Validate that the rename source exists, is not the root, and contains no
/// snapshottable directories with snapshots.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private static void ValidateRenameSource(INodesInPath srcIIP)
{
    INode srcInode = srcIIP.GetLastINode();
    // validate source
    if (srcInode == null)
    {
        string error = "rename source " + srcIIP.GetPath() + " is not found.";
        NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new FileNotFoundException(error);
    }
    // A length-1 path is the root itself, which cannot be renamed.
    if (srcIIP.Length() == 1)
    {
        string error = "rename source cannot be the root";
        NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
        throw new IOException(error);
    }
    // srcInode and its subtree cannot contain snapshottable directories with
    // snapshots
    FSDirSnapshotOp.CheckSnapshot(srcInode, null);
}
/// <summary>
/// Create one directory named <paramref name="localName"/> under the last
/// inode of <paramref name="existing"/>, update metrics and the edit log.
/// </summary>
/// <returns>the extended INodesInPath, or null if creation failed</returns>
/// <exception cref="System.IO.IOException"/>
private static INodesInPath CreateSingleDirectory(FSDirectory fsd, INodesInPath existing
    , string localName, PermissionStatus perm)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    INodesInPath updated = UnprotectedMkdir(fsd, fsd.AllocateNewInodeId(), existing,
        Sharpen.Runtime.GetBytesForString(localName, Charsets.Utf8), perm, null,
        Time.Now());
    if (updated == null)
    {
        return null;
    }
    // Directory creation also count towards FilesCreated
    // to match count of FilesDeleted metric.
    NameNode.GetNameNodeMetrics().IncrFilesCreated();
    INode created = updated.GetLastINode();
    string path = updated.GetPath();
    fsd.GetEditLog().LogMkDir(path, created);
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("mkdirs: created directory " + path);
    }
    return updated;
}
/// <summary>Concat all the blocks from srcs to trg and delete the srcs files</summary>
/// <param name="fsd">FSDirectory</param>
/// <param name="targetIIP">resolved path of the target file that receives the blocks</param>
/// <param name="srcList">source files whose blocks are moved into the target</param>
/// <param name="timestamp">modification time stamped on the target and its parent</param>
/// <exception cref="System.IO.IOException"/>
internal static void UnprotectedConcat(FSDirectory fsd, INodesInPath targetIIP, INodeFile
    [] srcList, long timestamp)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* FSNamesystem.concat to " + targetIIP.GetPath(
            ));
    }
    INodeFile trgInode = targetIIP.GetLastINode().AsFile();
    // Verify the quota impact of the concat before mutating anything.
    QuotaCounts deltas = ComputeQuotaDeltas(fsd, trgInode, srcList);
    VerifyQuota(fsd, targetIIP, deltas);
    // the target file can be included in a snapshot
    trgInode.RecordModification(targetIIP.GetLatestSnapshotId());
    INodeDirectory trgParent = targetIIP.GetINode(-2).AsDirectory();
    trgInode.ConcatBlocks(srcList);
    // since we are in the same dir - we can use same parent to remove files
    // (the original kept a `count` of removed files here, but it was never
    // read, so the dead counter has been removed)
    foreach (INodeFile nodeToRemove in srcList)
    {
        if (nodeToRemove != null)
        {
            // The blocks now belong to the target; detach the source inode
            // from its parent and from the inode map.
            nodeToRemove.SetBlocks(null);
            nodeToRemove.GetParent().RemoveChild(nodeToRemove);
            fsd.GetINodeMap().Remove(nodeToRemove);
        }
    }
    trgInode.SetModificationTime(timestamp, targetIIP.GetLatestSnapshotId());
    trgParent.UpdateModificationTime(timestamp, targetIIP.GetLatestSnapshotId());
    // update quota on the parent directory with deltas
    FSDirectory.UnprotectedUpdateCount(targetIIP, targetIIP.Length() - 1, deltas);
}
/// <summary>
/// Delete a path from the name space
/// Update the count at each ancestor directory with quota
/// </summary>
/// <param name="fsd">the directory tree; caller must hold the write lock</param>
/// <param name="iip">the inodes resolved from the path</param>
/// <param name="collectedBlocks">blocks collected from the deleted path</param>
/// <param name="removedINodes">inodes that should be removed from inodeMap</param>
/// <param name="mtime">the time the inode is removed</param>
/// <returns>the number of inodes deleted; 0 if no inodes are deleted.</returns>
private static long UnprotectedDelete(FSDirectory fsd, INodesInPath iip, INode.BlocksMapUpdateInfo
    collectedBlocks, IList<INode> removedINodes, long mtime)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    // check if target node exists
    INode targetNode = iip.GetLastINode();
    if (targetNode == null)
    {
        return (-1);
    }
    // record modification
    int latestSnapshot = iip.GetLatestSnapshotId();
    targetNode.RecordModification(latestSnapshot);
    // Remove the node from the namespace
    long removed = fsd.RemoveLastINode(iip);
    if (removed == -1)
    {
        // removal failed; propagate the failure to the caller
        return (-1);
    }
    // set the parent's modification time
    INodeDirectory parent = targetNode.GetParent();
    parent.UpdateModificationTime(mtime, latestSnapshot);
    fsd.UpdateCountForDelete(targetNode, iip);
    if (removed == 0)
    {
        // nothing removed from the current state, so no blocks to collect
        return (0);
    }
    // collect block and update quota
    if (!targetNode.IsInLatestSnapshot(latestSnapshot))
    {
        // not captured by the latest snapshot: destroy the whole subtree and
        // collect its blocks for deletion
        targetNode.DestroyAndCollectBlocks(fsd.GetBlockStoragePolicySuite(), collectedBlocks
            , removedINodes);
    }
    else
    {
        // the subtree is captured by a snapshot: clean only the current state
        // and charge the freed quota back to the ancestors (no quota check,
        // since usage only decreases here)
        QuotaCounts counts = targetNode.CleanSubtree(fsd.GetBlockStoragePolicySuite(), Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot
            .CurrentStateId, latestSnapshot, collectedBlocks, removedINodes);
        removed = counts.GetNameSpace();
        fsd.UpdateCountNoQuotaCheck(iip, iip.Length() - 1, counts.Negation());
    }
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* FSDirectory.unprotectedDelete: " + iip.GetPath
            () + " is removed");
    }
    return (removed);
}
/// <summary>create a directory at path specified by parent</summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.AclException"/>
/// <exception cref="Org.Apache.Hadoop.FS.FileAlreadyExistsException"/>
private static INodesInPath UnprotectedMkdir(FSDirectory fsd, long inodeId, INodesInPath
    parent, byte[] name, PermissionStatus permission, IList<AclEntry> aclEntries,
    long timestamp)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    System.Diagnostics.Debug.Assert(parent.GetLastINode() != null);
    if (!parent.GetLastINode().IsDirectory())
    {
        throw new FileAlreadyExistsException("Parent path is not a directory: " + parent.
            GetPath() + " " + DFSUtil.Bytes2String(name));
    }
    INodeDirectory newDir = new INodeDirectory(inodeId, name, permission, timestamp);
    INodesInPath result = fsd.AddLastINode(parent, newDir, true);
    // Apply the supplied ACL entries only when the child was actually added.
    bool applyAcl = result != null && aclEntries != null;
    if (applyAcl)
    {
        AclStorage.UpdateINodeAcl(newDir, aclEntries, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot
            .CurrentStateId);
    }
    return result;
}
/// <summary>
/// Create the directory src, optionally creating all missing ancestor
/// directories, and return its audit file status.
/// </summary>
/// <param name="fsn">the namesystem that owns the directory tree</param>
/// <param name="src">absolute path of the directory to create</param>
/// <param name="permissions">permissions for the new directory; ancestors get
/// these plus an implicit u+wx</param>
/// <param name="createParent">if false, fail when the parent does not exist</param>
/// <exception cref="System.IO.IOException"/>
internal static HdfsFileStatus Mkdirs(FSNamesystem fsn, string src, PermissionStatus
    permissions, bool createParent)
{
    FSDirectory fsd = fsn.GetFSDirectory();
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* NameSystem.mkdirs: " + src);
    }
    if (!DFSUtil.IsValidName(src))
    {
        throw new InvalidPathException(src);
    }
    FSPermissionChecker pc = fsd.GetPermissionChecker();
    byte[][] pathComponents = FSDirectory.GetPathComponentsForReservedPath(src);
    fsd.WriteLock();
    try
    {
        // Resolve any reserved-path components before looking up the inodes.
        src = fsd.ResolvePath(pc, src, pathComponents);
        INodesInPath iip = fsd.GetINodesInPath4Write(src);
        if (fsd.IsPermissionEnabled())
        {
            fsd.CheckTraverse(pc, iip);
        }
        INode lastINode = iip.GetLastINode();
        // An existing file at src is an error; an existing directory is a no-op.
        if (lastINode != null && lastINode.IsFile())
        {
            throw new FileAlreadyExistsException("Path is not a directory: " + src);
        }
        INodesInPath existing = lastINode != null ? iip : iip.GetExistingINodes();
        if (lastINode == null)
        {
            if (fsd.IsPermissionEnabled())
            {
                fsd.CheckAncestorAccess(pc, iip, FsAction.Write);
            }
            if (!createParent)
            {
                fsd.VerifyParentDir(iip, src);
            }
            // validate that we have enough inodes. This is, at best, a
            // heuristic because the mkdirs() operation might need to
            // create multiple inodes.
            fsn.CheckFsObjectLimit();
            // Path components that do not exist yet, deepest-existing onward.
            IList<string> nonExisting = iip.GetPath(existing.Length(), iip.Length() - existing
                .Length());
            int length = nonExisting.Count;
            if (length > 1)
            {
                IList<string> ancestors = nonExisting.SubList(0, length - 1);
                // Ensure that the user can traversal the path by adding implicit
                // u+wx permission to all ancestor directories
                existing = CreateChildrenDirectories(fsd, existing, ancestors, AddImplicitUwx(permissions
                    , permissions));
                if (existing == null)
                {
                    throw new IOException("Failed to create directory: " + src);
                }
            }
            // Finally create the last component with the exact requested
            // permissions (no implicit u+wx here).
            if ((existing = CreateChildrenDirectories(fsd, existing, nonExisting.SubList(length
                - 1, length), permissions)) == null)
            {
                throw new IOException("Failed to create directory: " + src);
            }
        }
        return (fsd.GetAuditFileInfo(existing));
    }
    finally
    {
        fsd.WriteUnlock();
    }
}
/// <summary>
/// Validate the concat source paths against the target: each source must be
/// a readable, deletable, non-snapshot, non-empty, completed file in the same
/// directory as the target, distinct from the target and from each other.
/// </summary>
/// <param name="pc">permission checker; null disables the permission checks</param>
/// <returns>the resolved source INodeFiles, in input order</returns>
/// <exception cref="System.IO.IOException"/>
private static INodeFile[] VerifySrcFiles(FSDirectory fsd, string[] srcs, INodesInPath
    targetIIP, FSPermissionChecker pc)
{
    // to make sure no two files are the same
    ICollection<INodeFile> si = new LinkedHashSet<INodeFile>();
    INodeFile targetINode = targetIIP.GetLastINode().AsFile();
    INodeDirectory targetParent = targetINode.GetParent();
    // now check the srcs
    foreach (string src in srcs)
    {
        INodesInPath iip = fsd.GetINodesInPath4Write(src);
        // permission check for srcs
        if (pc != null)
        {
            fsd.CheckPathAccess(pc, iip, FsAction.Read);
            // read the file
            fsd.CheckParentAccess(pc, iip, FsAction.Write);
        }
        // for delete
        INode srcINode = iip.GetLastINode();
        INodeFile srcINodeFile = INodeFile.ValueOf(srcINode, src);
        // make sure the src file and the target file are in the same dir
        if (srcINodeFile.GetParent() != targetParent)
        {
            throw new HadoopIllegalArgumentException("Source file " + src +
                " is not in the same directory with the target " + targetIIP.GetPath());
        }
        // make sure all the source files are not in snapshot
        if (srcINode.IsInLatestSnapshot(iip.GetLatestSnapshotId()))
        {
            throw new SnapshotException("Concat: the source file " + src + " is in snapshot");
        }
        // check if the file has other references.
        if (srcINode.IsReference() && ((INodeReference.WithCount)srcINode.AsReference().GetReferredINode
            ()).GetReferenceCount() > 1)
        {
            throw new SnapshotException("Concat: the source file " + src + " is referred by some other reference in some snapshot."
                );
        }
        // source file cannot be the same with the target file
        if (srcINode == targetINode)
        {
            throw new HadoopIllegalArgumentException("concat: the src file " + src +
                " is the same with the target file " + targetIIP.GetPath());
        }
        // source file cannot be under construction or empty
        if (srcINodeFile.IsUnderConstruction() || srcINodeFile.NumBlocks() == 0)
        {
            throw new HadoopIllegalArgumentException("concat: source file " + src +
                " is invalid or empty or underConstruction");
        }
        // source file's preferred block size cannot be greater than the target
        // file
        if (srcINodeFile.GetPreferredBlockSize() > targetINode.GetPreferredBlockSize())
        {
            throw new HadoopIllegalArgumentException("concat: source file " + src +
                " has preferred block size " + srcINodeFile.GetPreferredBlockSize() +
                " which is greater than the target file's preferred block size " +
                targetINode.GetPreferredBlockSize());
        }
        si.AddItem(srcINodeFile);
    }
    // make sure no two files are the same
    // (the LinkedHashSet deduplicates, so a smaller count means duplicates)
    if (si.Count < srcs.Length)
    {
        // it means at least two files are the same
        throw new HadoopIllegalArgumentException("concat: at least two of the source files are the same"
            );
    }
    return (Sharpen.Collections.ToArray(si, new INodeFile[si.Count]));
}