/// <summary>
/// Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
/// to dstInodes[dstInodes.length-1].
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
private static void VerifyQuotaForRename(FSDirectory fsd, INodesInPath src, INodesInPath dst)
{
    if (!fsd.GetFSNamesystem().IsImageLoaded() || fsd.ShouldSkipQuotaChecks())
    {
        // Do not check quota if the edit log is still being processed
        return;
    }
    int i = 0;
    while (src.GetINode(i) == dst.GetINode(i))
    {
        i++;
    }
    // src[i - 1] is the last common ancestor.
    BlockStoragePolicySuite bsps = fsd.GetBlockStoragePolicySuite();
    QuotaCounts delta = src.GetLastINode().ComputeQuotaUsage(bsps);
    // Reduce the required quota by dst that is being removed
    INode dstINode = dst.GetLastINode();
    if (dstINode != null)
    {
        delta.Subtract(dstINode.ComputeQuotaUsage(bsps));
    }
    FSDirectory.VerifyQuota(dst, dst.Length() - 1, delta, src.GetINode(i - 1));
}
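// A minimal, self-contained sketch (hypothetical helper, not part of the HDFS source)
// of the "last common ancestor" scan above: walk the two resolved paths in parallel
// until their components diverge; the previous index is the deepest shared directory,
// which is where the quota delta stops propagating.
private static int LastCommonAncestorIndex(string[] srcComponents, string[] dstComponents)
{
    int i = 0;
    while (i < srcComponents.Length && i < dstComponents.Length
        && srcComponents[i] == dstComponents[i])
    {
        i++;
    }
    return i - 1;  // e.g. /a/b/c vs /a/b/d share components up to "b"
}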
internal static FSDirRenameOp.RenameOldResult RenameToInt(FSDirectory fsd, string srcArg,
    string dstArg, bool logRetryCache)
{
    string src = srcArg;
    string dst = dstArg;
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* NameSystem.renameTo: " + src + " to " + dst);
    }
    if (!DFSUtil.IsValidName(dst))
    {
        throw new IOException("Invalid name: " + dst);
    }
    FSPermissionChecker pc = fsd.GetPermissionChecker();
    byte[][] srcComponents = FSDirectory.GetPathComponentsForReservedPath(src);
    byte[][] dstComponents = FSDirectory.GetPathComponentsForReservedPath(dst);
    HdfsFileStatus resultingStat = null;
    src = fsd.ResolvePath(pc, src, srcComponents);
    dst = fsd.ResolvePath(pc, dst, dstComponents);
    bool status = RenameTo(fsd, pc, src, dst, logRetryCache);
    if (status)
    {
        INodesInPath dstIIP = fsd.GetINodesInPath(dst, false);
        resultingStat = fsd.GetAuditFileInfo(dstIIP);
    }
    return(new FSDirRenameOp.RenameOldResult(status, resultingStat));
}
/// <summary>The new rename which has the POSIX semantic.</summary>
/// <exception cref="System.IO.IOException"/>
internal static KeyValuePair<INode.BlocksMapUpdateInfo, HdfsFileStatus> RenameToInt(
    FSDirectory fsd, string srcArg, string dstArg, bool logRetryCache,
    params Options.Rename[] options)
{
    string src = srcArg;
    string dst = dstArg;
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* NameSystem.renameTo: with options -" + " "
            + src + " to " + dst);
    }
    if (!DFSUtil.IsValidName(dst))
    {
        throw new InvalidPathException("Invalid name: " + dst);
    }
    FSPermissionChecker pc = fsd.GetPermissionChecker();
    byte[][] srcComponents = FSDirectory.GetPathComponentsForReservedPath(src);
    byte[][] dstComponents = FSDirectory.GetPathComponentsForReservedPath(dst);
    INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
    src = fsd.ResolvePath(pc, src, srcComponents);
    dst = fsd.ResolvePath(pc, dst, dstComponents);
    RenameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
    INodesInPath dstIIP = fsd.GetINodesInPath(dst, false);
    HdfsFileStatus resultingStat = fsd.GetAuditFileInfo(dstIIP);
    return(new AbstractMap.SimpleImmutableEntry<INode.BlocksMapUpdateInfo, HdfsFileStatus>(
        collectedBlocks, resultingStat));
}
/// <summary>Check whether the current user has permission to access the path.</summary>
/// <remarks>
/// Check whether the current user has permission to access the path.
/// Traverse is always checked.
/// The parent path is the parent directory of the path.
/// The ancestor path is the last (closest) existing ancestor directory of the path.
/// Note that if the parent path exists, the parent path and the ancestor path
/// are the same.
/// For example, suppose the path is "/foo/bar/baz".
/// Whether baz is a file or a directory, the parent path is "/foo/bar".
/// If bar exists, then the ancestor path is also "/foo/bar".
/// If bar does not exist and foo exists, then the ancestor path is "/foo".
/// Further, if both foo and bar do not exist, then the ancestor path is "/".
/// </remarks>
/// <param name="doCheckOwner">Require the user to be the owner of the path?</param>
/// <param name="ancestorAccess">The access required by the ancestor of the path.</param>
/// <param name="parentAccess">The access required by the parent of the path.</param>
/// <param name="access">The access required by the path.</param>
/// <param name="subAccess">
/// If the path is a directory, the access required of the path and all its
/// sub-directories. If the path is not a directory, this has no effect.
/// </param>
/// <param name="ignoreEmptyDir">Ignore permission checking for empty directories?</param>
/// <exception cref="Org.Apache.Hadoop.Security.AccessControlException">
/// Guarded by <see cref="FSNamesystem.ReadLock()"/>.
/// The caller of this method must hold that lock.
/// </exception>
internal virtual void CheckPermission(INodesInPath inodesInPath, bool doCheckOwner,
    FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess,
    bool ignoreEmptyDir)
{
    if (Log.IsDebugEnabled())
    {
        Log.Debug("ACCESS CHECK: " + this + ", doCheckOwner=" + doCheckOwner
            + ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess
            + ", access=" + access + ", subAccess=" + subAccess
            + ", ignoreEmptyDir=" + ignoreEmptyDir);
    }
    // check if (parentAccess != null) && file exists, then check sb
    // If resolveLink, the check is performed on the link target.
    int snapshotId = inodesInPath.GetPathSnapshotId();
    INode[] inodes = inodesInPath.GetINodesArray();
    INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.Length];
    byte[][] pathByNameArr = new byte[inodes.Length][];
    for (int i = 0; i < inodes.Length && inodes[i] != null; i++)
    {
        if (inodes[i] != null)
        {
            pathByNameArr[i] = inodes[i].GetLocalNameBytes();
            inodeAttrs[i] = GetINodeAttrs(pathByNameArr, i, inodes[i], snapshotId);
        }
    }
    string path = inodesInPath.GetPath();
    int ancestorIndex = inodes.Length - 2;
    INodeAttributeProvider.AccessControlEnforcer enforcer =
        GetAttributesProvider().GetExternalAccessControlEnforcer(this);
    enforcer.CheckPermission(fsOwner, supergroup, callerUgi, inodeAttrs, inodes,
        pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner, ancestorAccess,
        parentAccess, access, subAccess, ignoreEmptyDir);
}
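// A hedged sketch (hypothetical helper, not part of this class) of how an enforcer
// typically locates the "ancestor" documented above: starting at the parent position
// (inodes.Length - 2), walk upward past path components that do not exist yet until
// an existing inode is found.
private static int ClosestExistingAncestorIndex(INode[] inodes, int ancestorIndex)
{
    // inodes[i] is null for path components that have not been created yet.
    while (ancestorIndex >= 0 && inodes[ancestorIndex] == null)
    {
        ancestorIndex--;
    }
    return ancestorIndex;
}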
/// <summary>Create a new encryption zone.</summary>
/// <remarks>
/// Create a new encryption zone.
/// <p/>
/// Called while holding the FSDirectory lock.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
internal virtual XAttr CreateEncryptionZone(string src, CipherSuite suite,
    CryptoProtocolVersion version, string keyName)
{
    System.Diagnostics.Debug.Assert(dir.HasWriteLock());
    INodesInPath srcIIP = dir.GetINodesInPath4Write(src, false);
    if (dir.IsNonEmptyDirectory(srcIIP))
    {
        throw new IOException("Attempt to create an encryption zone for a non-empty directory.");
    }
    if (srcIIP != null && srcIIP.GetLastINode() != null && !srcIIP.GetLastINode().IsDirectory())
    {
        throw new IOException("Attempt to create an encryption zone for a file.");
    }
    EncryptionZoneManager.EncryptionZoneInt ezi = GetEncryptionZoneForPath(srcIIP);
    if (ezi != null)
    {
        throw new IOException("Directory " + src + " is already in an "
            + "encryption zone. (" + GetFullPathName(ezi) + ")");
    }
    HdfsProtos.ZoneEncryptionInfoProto proto = PBHelper.Convert(suite, version, keyName);
    XAttr ezXAttr = XAttrHelper.BuildXAttr(HdfsServerConstants.CryptoXattrEncryptionZone,
        proto.ToByteArray());
    IList<XAttr> xattrs = Lists.NewArrayListWithCapacity(1);
    xattrs.AddItem(ezXAttr);
    // Updating the xattr will call addEncryptionZone; done this way to handle
    // edit log loading.
    FSDirXAttrOp.UnprotectedSetXAttrs(dir, src, xattrs, EnumSet.Of(XAttrSetFlag.Create));
    return(ezXAttr);
}
/// <summary>Create FileStatus with location info by file INode.</summary>
/// <exception cref="System.IO.IOException"/>
private static HdfsLocatedFileStatus CreateLocatedFileStatus(FSDirectory fsd,
    string fullPath, byte[] path, INode node, byte storagePolicy, int snapshot,
    bool isRawPath, INodesInPath iip)
{
    System.Diagnostics.Debug.Assert(fsd.HasReadLock());
    long size = 0;
    // length is zero for directories
    short replication = 0;
    long blocksize = 0;
    LocatedBlocks loc = null;
    bool isEncrypted;
    FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node, snapshot, iip);
    if (node.IsFile())
    {
        INodeFile fileNode = node.AsFile();
        size = fileNode.ComputeFileSize(snapshot);
        replication = fileNode.GetFileReplication(snapshot);
        blocksize = fileNode.GetPreferredBlockSize();
        bool inSnapshot = snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
        bool isUc = !inSnapshot && fileNode.IsUnderConstruction();
        long fileSize = !inSnapshot && isUc
            ? fileNode.ComputeFileSizeNotIncludingLastUcBlock() : size;
        loc = fsd.GetFSNamesystem().GetBlockManager().CreateLocatedBlocks(
            fileNode.GetBlocks(snapshot), fileSize, isUc, 0L, size, false, inSnapshot, feInfo);
        if (loc == null)
        {
            loc = new LocatedBlocks();
        }
        isEncrypted = (feInfo != null)
            || (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode(node)));
    }
    else
    {
        isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
    }
    int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot) : 0;
    INodeAttributes nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);
    HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.IsDirectory(),
        replication, blocksize, node.GetModificationTime(snapshot),
        node.GetAccessTime(snapshot), GetPermissionForFileStatus(nodeAttrs, isEncrypted),
        nodeAttrs.GetUserName(), nodeAttrs.GetGroupName(),
        node.IsSymlink() ? node.AsSymlink().GetSymlink() : null, path, node.GetId(),
        loc, childrenNum, feInfo, storagePolicy);
    // Set caching information for the located blocks.
    if (loc != null)
    {
        CacheManager cacheManager = fsd.GetFSNamesystem().GetCacheManager();
        foreach (LocatedBlock lb in loc.GetLocatedBlocks())
        {
            cacheManager.SetCachedLocations(lb);
        }
    }
    return(status);
}
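// Hedged illustration (hypothetical helper, not in the source): for a file that is
// still under construction, the length reported to clients excludes the last,
// still-unfinalized block, which is what ComputeFileSizeNotIncludingLastUcBlock()
// provides above.
private static long VisibleLength(bool inSnapshot, bool isUnderConstruction,
    long fullSize, long lastUcBlockSize)
{
    // Assumption: lastUcBlockSize is the current length of the unfinalized last block.
    return (!inSnapshot && isUnderConstruction) ? fullSize - lastUcBlockSize : fullSize;
}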
/// <exception cref="System.IO.IOException"/> private void AssertPermissionGranted(UserGroupInformation user, string path, FsAction access) { INodesInPath iip = dir.GetINodesInPath(path, true); dir.GetPermissionChecker(Superuser, Supergroup, user).CheckPermission(iip, false, null, null, access, null, false); }
/// <exception cref="System.IO.IOException"/> private static void CheckSubtreeReadPermission(FSDirectory fsd, FSPermissionChecker pc, string snapshottablePath, string snapshot) { string fromPath = snapshot == null ? snapshottablePath : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot .GetSnapshotPath(snapshottablePath, snapshot); INodesInPath iip = fsd.GetINodesInPath(fromPath, true); fsd.CheckPermission(pc, iip, false, null, null, FsAction.Read, FsAction.Read); }
/// <exception cref="System.Exception"/> public Void Answer(InvocationOnMock invocation) { INodesInPath iip = fsd.GetINodesInPath(TestGetBlockLocations.FilePath, true); FSDirDeleteOp.Delete(fsd, iip, new INode.BlocksMapUpdateInfo(), new AList <INode>( ), Time.Now()); invocation.CallRealMethod(); return(null); }
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/> /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/> internal static bool UnprotectedSetTimes(FSDirectory fsd, string src, long mtime, long atime, bool force) { System.Diagnostics.Debug.Assert(fsd.HasWriteLock()); INodesInPath i = fsd.GetINodesInPath(src, true); return(UnprotectedSetTimes(fsd, i.GetLastINode(), mtime, atime, force, i.GetLatestSnapshotId ())); }
/// <exception cref="System.IO.IOException"/> internal static IList <XAttr> GetXAttrs(FSDirectory fsd, string srcArg, IList <XAttr > xAttrs) { string src = srcArg; CheckXAttrsConfigFlag(fsd); FSPermissionChecker pc = fsd.GetPermissionChecker(); bool isRawPath = FSDirectory.IsReservedRawName(src); bool getAll = xAttrs == null || xAttrs.IsEmpty(); if (!getAll) { XAttrPermissionFilter.CheckPermissionForApi(pc, xAttrs, isRawPath); } byte[][] pathComponents = FSDirectory.GetPathComponentsForReservedPath(src); src = fsd.ResolvePath(pc, src, pathComponents); INodesInPath iip = fsd.GetINodesInPath(src, true); if (fsd.IsPermissionEnabled()) { fsd.CheckPathAccess(pc, iip, FsAction.Read); } IList <XAttr> all = FSDirXAttrOp.GetXAttrs(fsd, src); IList <XAttr> filteredAll = XAttrPermissionFilter.FilterXAttrsForApi(pc, all, isRawPath ); if (getAll) { return(filteredAll); } if (filteredAll == null || filteredAll.IsEmpty()) { return(null); } IList <XAttr> toGet = Lists.NewArrayListWithCapacity(xAttrs.Count); foreach (XAttr xAttr in xAttrs) { bool foundIt = false; foreach (XAttr a in filteredAll) { if (xAttr.GetNameSpace() == a.GetNameSpace() && xAttr.GetName().Equals(a.GetName( ))) { toGet.AddItem(a); foundIt = true; break; } } if (!foundIt) { throw new IOException("At least one of the attributes provided was not found."); } } return(toGet); }
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/> private static void VerifyQuota(FSDirectory fsd, INodesInPath targetIIP, QuotaCounts deltas) { if (!fsd.GetFSNamesystem().IsImageLoaded() || fsd.ShouldSkipQuotaChecks()) { // Do not check quota if editlog is still being processed return; } FSDirectory.VerifyQuota(targetIIP, targetIIP.Length() - 1, deltas, null); }
/// <summary>Get the key name for an encryption zone.</summary>
/// <remarks>
/// Get the key name for an encryption zone. Returns null if <tt>iip</tt> is
/// not within an encryption zone.
/// <p/>
/// Called while holding the FSDirectory lock.
/// </remarks>
internal virtual string GetKeyName(INodesInPath iip)
{
    System.Diagnostics.Debug.Assert(dir.HasReadLock());
    EncryptionZoneManager.EncryptionZoneInt ezi = GetEncryptionZoneForPath(iip);
    if (ezi == null)
    {
        return(null);
    }
    return(ezi.GetKeyName());
}
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/> /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/> internal static INodeSymlink UnprotectedAddSymlink(FSDirectory fsd, INodesInPath iip, byte[] localName, long id, string target, long mtime, long atime, PermissionStatus perm) { System.Diagnostics.Debug.Assert(fsd.HasWriteLock()); INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime, target); symlink.SetLocalName(localName); return(fsd.AddINode(iip, symlink) != null ? symlink : null); }
internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot GetSnapshot(
    INodesInPath inodesInPath, string name, int index)
{
    if (name == null)
    {
        return(null);
    }
    INode inode = inodesInPath.GetINode(index - 1);
    return(inode.AsDirectory().GetSnapshot(DFSUtil.String2Bytes(name)));
}
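// Index convention (illustrative note, consistent with the tests further below): for
// a snapshot path such as /TestSnapshot/sub1/.snapshot/s2/file1, the ".snapshot"
// component is dropped during resolution, so with index 3 pointing at the snapshot
// root position, GetINode(index - 1) returns the snapshottable directory (sub1)
// whose AsDirectory().GetSnapshot(...) lookup resolves "s2".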
internal static bool RenameForEditLog(FSDirectory fsd, string src, string dst, long timestamp)
{
    if (fsd.IsDir(dst))
    {
        dst += Path.Separator + new Path(src).GetName();
    }
    INodesInPath srcIIP = fsd.GetINodesInPath4Write(src, false);
    INodesInPath dstIIP = fsd.GetINodesInPath4Write(dst, false);
    return(UnprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp));
}
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/> internal RenameOperation(FSDirectory fsd, string src, string dst, INodesInPath srcIIP , INodesInPath dstIIP) { this.fsd = fsd; this.src = src; this.dst = dst; this.srcIIP = srcIIP; this.dstIIP = dstIIP; this.srcParentIIP = srcIIP.GetParentINodesInPath(); this.dstParentIIP = dstIIP.GetParentINodesInPath(); BlockStoragePolicySuite bsps = fsd.GetBlockStoragePolicySuite(); srcChild = this.srcIIP.GetLastINode(); srcChildName = srcChild.GetLocalNameBytes(); int srcLatestSnapshotId = srcIIP.GetLatestSnapshotId(); isSrcInSnapshot = srcChild.IsInLatestSnapshot(srcLatestSnapshotId); srcChildIsReference = srcChild.IsReference(); srcParent = this.srcIIP.GetINode(-2).AsDirectory(); // Record the snapshot on srcChild. After the rename, before any new // snapshot is taken on the dst tree, changes will be recorded in the // latest snapshot of the src tree. if (isSrcInSnapshot) { srcChild.RecordModification(srcLatestSnapshotId); } // check srcChild for reference srcRefDstSnapshot = srcChildIsReference ? srcChild.AsReference().GetDstSnapshotId () : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId; oldSrcCounts = new QuotaCounts.Builder().Build(); if (isSrcInSnapshot) { INodeReference.WithName withName = srcParent.ReplaceChild4ReferenceWithName(srcChild , srcLatestSnapshotId); withCount = (INodeReference.WithCount)withName.GetReferredINode(); srcChild = withName; this.srcIIP = INodesInPath.Replace(srcIIP, srcIIP.Length() - 1, srcChild); // get the counts before rename withCount.GetReferredINode().ComputeQuotaUsage(bsps, oldSrcCounts, true); } else { if (srcChildIsReference) { // srcChild is reference but srcChild is not in latest snapshot withCount = (INodeReference.WithCount)srcChild.AsReference().GetReferredINode(); } else { withCount = null; } } }
internal virtual long RemoveDst()
{
    long removedNum = fsd.RemoveLastINode(dstIIP);
    if (removedNum != -1)
    {
        oldDstChild = dstIIP.GetLastINode();
        // update the quota count if necessary
        fsd.UpdateCountForDelete(oldDstChild, dstIIP);
        dstIIP = INodesInPath.Replace(dstIIP, dstIIP.Length() - 1, null);
    }
    return(removedNum);
}
/// <summary>Returns an EncryptionZone representing the ez for a given path.</summary>
/// <remarks>
/// Returns an EncryptionZone representing the ez for a given path.
/// Returns null if the path is not in an ez.
/// </remarks>
/// <param name="iip">The INodesInPath of the path to check</param>
/// <returns>the EncryptionZone representing the ez for the path, or null.</returns>
internal virtual EncryptionZone GetEZINodeForPath(INodesInPath iip)
{
    EncryptionZoneManager.EncryptionZoneInt ezi = GetEncryptionZoneForPath(iip);
    if (ezi == null)
    {
        return(null);
    }
    else
    {
        return(new EncryptionZone(ezi.GetINodeId(), GetFullPathName(ezi), ezi.GetSuite(),
            ezi.GetVersion(), ezi.GetKeyName()));
    }
}
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/> /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/> /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.AclException"/> /// <exception cref="Org.Apache.Hadoop.FS.FileAlreadyExistsException"/> internal static void MkdirForEditLog(FSDirectory fsd, long inodeId, string src, PermissionStatus permissions, IList <AclEntry> aclEntries, long timestamp) { System.Diagnostics.Debug.Assert(fsd.HasWriteLock()); INodesInPath iip = fsd.GetINodesInPath(src, false); byte[] localName = iip.GetLastLocalName(); INodesInPath existing = iip.GetParentINodesInPath(); Preconditions.CheckState(existing.GetLastINode() != null); UnprotectedMkdir(fsd, inodeId, existing, localName, permissions, aclEntries, timestamp ); }
private int GetNumNonNull(INodesInPath iip)
{
    IList<INode> inodes = iip.GetReadOnlyINodes();
    for (int i = inodes.Count - 1; i >= 0; i--)
    {
        if (inodes[i] != null)
        {
            return(i + 1);
        }
    }
    return(0);
}
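// Standalone illustration (hypothetical, not part of the tests) of the
// scan-from-the-end above: the count is one past the deepest resolved inode.
private static int NumNonNullSuffixDemo()
{
    object[] resolved = { "root", "sub1", null, null };  // trailing components unresolved
    for (int i = resolved.Length - 1; i >= 0; i--)
    {
        if (resolved[i] != null)
        {
            return i + 1;  // returns 2 for this example
        }
    }
    return 0;
}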
/// <summary>
/// Checks file system limits (max component length and max directory items)
/// during a rename operation.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.FSLimitException.PathComponentTooLongException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.FSLimitException.MaxDirectoryItemsExceededException"/>
internal static void VerifyFsLimitsForRename(FSDirectory fsd, INodesInPath srcIIP,
    INodesInPath dstIIP)
{
    byte[] dstChildName = dstIIP.GetLastLocalName();
    string parentPath = dstIIP.GetParentPath();
    fsd.VerifyMaxComponentLength(dstChildName, parentPath);
    // Do not enforce max directory items if renaming within the same directory.
    if (srcIIP.GetINode(-2) != dstIIP.GetINode(-2))
    {
        fsd.VerifyMaxDirItems(dstIIP.GetINode(-2).AsDirectory(), parentPath);
    }
}
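// Note (added for clarity): GetINode(-2) uses this codebase's negative-index
// convention, counting back from the end of the resolved path, so it returns the
// parent of the last path component. The comparison above therefore detects a
// rename within a single directory, where the item count of that directory
// cannot grow.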
/// <summary>
/// Delete a path from the name space.
/// Update the count at each ancestor directory with quota.
/// </summary>
/// <param name="iip">the inodes resolved from the path</param>
/// <param name="collectedBlocks">blocks collected from the deleted path</param>
/// <param name="removedINodes">inodes that should be removed from inodeMap</param>
/// <param name="mtime">the time the inode is removed</param>
/// <returns>
/// the number of inodes deleted; 0 if no inodes are deleted; -1 if the target
/// does not exist or could not be removed.
/// </returns>
private static long UnprotectedDelete(FSDirectory fsd, INodesInPath iip,
    INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes, long mtime)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    // check if target node exists
    INode targetNode = iip.GetLastINode();
    if (targetNode == null)
    {
        return(-1);
    }
    // record modification
    int latestSnapshot = iip.GetLatestSnapshotId();
    targetNode.RecordModification(latestSnapshot);
    // Remove the node from the namespace
    long removed = fsd.RemoveLastINode(iip);
    if (removed == -1)
    {
        return(-1);
    }
    // set the parent's modification time
    INodeDirectory parent = targetNode.GetParent();
    parent.UpdateModificationTime(mtime, latestSnapshot);
    fsd.UpdateCountForDelete(targetNode, iip);
    if (removed == 0)
    {
        return(0);
    }
    // collect blocks and update quota
    if (!targetNode.IsInLatestSnapshot(latestSnapshot))
    {
        targetNode.DestroyAndCollectBlocks(fsd.GetBlockStoragePolicySuite(),
            collectedBlocks, removedINodes);
    }
    else
    {
        QuotaCounts counts = targetNode.CleanSubtree(fsd.GetBlockStoragePolicySuite(),
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId,
            latestSnapshot, collectedBlocks, removedINodes);
        removed = counts.GetNameSpace();
        fsd.UpdateCountNoQuotaCheck(iip, iip.Length() - 1, counts.Negation());
    }
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* FSDirectory.unprotectedDelete: "
            + iip.GetPath() + " is removed");
    }
    return(removed);
}
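// Hedged summary of the branch above (restating, not extending, the behavior):
//  - if no snapshot still references the target, it is destroyed outright and its
//    blocks are queued for deletion;
//  - if the latest snapshot references it, only the changes made after that snapshot
//    are cleaned, and the reclaimed namespace usage is subtracted from the ancestors
//    without re-checking quota limits.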
/// <summary>for snapshot file after deleting the original file.</summary>
/// <exception cref="System.Exception"/>
public virtual void TestSnapshotPathINodesAfterDeletion()
{
    // Create a snapshot for the dir, and check the inodes for the path
    // pointing to a snapshot file
    hdfs.AllowSnapshot(sub1);
    hdfs.CreateSnapshot(sub1, "s2");
    // Delete the original file /TestSnapshot/sub1/file1
    hdfs.Delete(file1, false);
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot snapshot;
    {
        // Resolve the path for the snapshot file
        // /TestSnapshot/sub1/.snapshot/s2/file1
        string snapshotPath = sub1.ToString() + "/.snapshot/s2/file1";
        string[] names = INode.GetPathNames(snapshotPath);
        byte[][] components = INode.GetPathComponents(names);
        INodesInPath nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);
        // Length of inodes should be (components.length - 1), since we will ignore
        // ".snapshot"
        NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length - 1);
        // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s2, file1}
        snapshot = GetSnapshot(nodesInPath, "s2", 3);
        AssertSnapshot(nodesInPath, true, snapshot, 3);
        // Check the INode for file1 (snapshot file)
        INode inode = nodesInPath.GetLastINode();
        NUnit.Framework.Assert.AreEqual(file1.GetName(), inode.GetLocalName());
        NUnit.Framework.Assert.IsTrue(inode.AsFile().IsWithSnapshot());
    }
    // Check the INodes for path /TestSnapshot/sub1/file1
    string[] names_1 = INode.GetPathNames(file1.ToString());
    byte[][] components_1 = INode.GetPathComponents(names_1);
    INodesInPath nodesInPath_1 = INodesInPath.Resolve(fsdir.rootDir, components_1, false);
    // The length of inodes should be equal to components.length
    NUnit.Framework.Assert.AreEqual(nodesInPath_1.Length(), components_1.Length);
    // The number of non-null elements should be components.length - 1 since
    // file1 has been deleted
    NUnit.Framework.Assert.AreEqual(GetNumNonNull(nodesInPath_1), components_1.Length - 1);
    // The returned nodesInPath should be non-snapshot
    AssertSnapshot(nodesInPath_1, false, snapshot, -1);
    // The last INode should be null, and the one before should be associated
    // with sub1
    NUnit.Framework.Assert.IsNull(nodesInPath_1.GetINode(components_1.Length - 1));
    NUnit.Framework.Assert.AreEqual(
        nodesInPath_1.GetINode(components_1.Length - 2).GetFullPathName(), sub1.ToString());
    NUnit.Framework.Assert.AreEqual(
        nodesInPath_1.GetINode(components_1.Length - 3).GetFullPathName(), dir.ToString());
    hdfs.DeleteSnapshot(sub1, "s2");
    hdfs.DisallowSnapshot(sub1);
}
/// <summary>Returns true if the file is closed.</summary>
/// <exception cref="System.IO.IOException"/>
internal static bool IsFileClosed(FSDirectory fsd, string src)
{
    FSPermissionChecker pc = fsd.GetPermissionChecker();
    byte[][] pathComponents = FSDirectory.GetPathComponentsForReservedPath(src);
    src = fsd.ResolvePath(pc, src, pathComponents);
    INodesInPath iip = fsd.GetINodesInPath(src, true);
    if (fsd.IsPermissionEnabled())
    {
        fsd.CheckTraverse(pc, iip);
    }
    return(!INodeFile.ValueOf(iip.GetLastINode(), src).IsUnderConstruction());
}
/// <summary>for snapshot file while adding a new file after snapshot.</summary>
/// <exception cref="System.Exception"/>
public virtual void TestSnapshotPathINodesWithAddedFile()
{
    // Create a snapshot for the dir, and check the inodes for the path
    // pointing to a snapshot file
    hdfs.AllowSnapshot(sub1);
    hdfs.CreateSnapshot(sub1, "s4");
    // Add a new file /TestSnapshot/sub1/file3
    Path file3 = new Path(sub1, "file3");
    DFSTestUtil.CreateFile(hdfs, file3, 1024, Replication, seed);
    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s4;
    {
        // Check the inodes for /TestSnapshot/sub1/.snapshot/s4/file3
        string snapshotPath = sub1.ToString() + "/.snapshot/s4/file3";
        string[] names = INode.GetPathNames(snapshotPath);
        byte[][] components = INode.GetPathComponents(names);
        INodesInPath nodesInPath = INodesInPath.Resolve(fsdir.rootDir, components, false);
        // Length of inodes should be (components.length - 1), since we will ignore
        // ".snapshot"
        NUnit.Framework.Assert.AreEqual(nodesInPath.Length(), components.Length - 1);
        // The number of non-null inodes should be components.length - 2, since
        // snapshot of file3 does not exist
        NUnit.Framework.Assert.AreEqual(GetNumNonNull(nodesInPath), components.Length - 2);
        s4 = GetSnapshot(nodesInPath, "s4", 3);
        // SnapshotRootIndex should still be 3: {root, Testsnapshot, sub1, s4, null}
        AssertSnapshot(nodesInPath, true, s4, 3);
        // Check the last INode in inodes, which should be null
        NUnit.Framework.Assert.IsNull(nodesInPath.GetINode(nodesInPath.Length() - 1));
    }
    // Check the inodes for /TestSnapshot/sub1/file3
    string[] names_1 = INode.GetPathNames(file3.ToString());
    byte[][] components_1 = INode.GetPathComponents(names_1);
    INodesInPath nodesInPath_1 = INodesInPath.Resolve(fsdir.rootDir, components_1, false);
    // The number of inodes should be equal to components.length
    NUnit.Framework.Assert.AreEqual(nodesInPath_1.Length(), components_1.Length);
    // The returned nodesInPath should be non-snapshot
    AssertSnapshot(nodesInPath_1, false, s4, -1);
    // The last INode should be associated with file3
    NUnit.Framework.Assert.AreEqual(
        nodesInPath_1.GetINode(components_1.Length - 1).GetFullPathName(), file3.ToString());
    NUnit.Framework.Assert.AreEqual(
        nodesInPath_1.GetINode(components_1.Length - 2).GetFullPathName(), sub1.ToString());
    NUnit.Framework.Assert.AreEqual(
        nodesInPath_1.GetINode(components_1.Length - 3).GetFullPathName(), dir.ToString());
    hdfs.DeleteSnapshot(sub1, "s4");
    hdfs.DisallowSnapshot(sub1);
}
/// <exception cref="System.IO.IOException"/> internal static ContentSummary GetContentSummary(FSDirectory fsd, string src) { byte[][] pathComponents = FSDirectory.GetPathComponentsForReservedPath(src); FSPermissionChecker pc = fsd.GetPermissionChecker(); src = fsd.ResolvePath(pc, src, pathComponents); INodesInPath iip = fsd.GetINodesInPath(src, false); if (fsd.IsPermissionEnabled()) { fsd.CheckPermission(pc, iip, false, null, null, null, FsAction.ReadExecute); } return(GetContentSummaryInt(fsd, iip)); }
/// <summary>
/// Create the directory <c>parent</c>/<paramref name="children"/> and all
/// ancestors along the path.
/// </summary>
/// <param name="fsd">FSDirectory</param>
/// <param name="existing">
/// The INodesInPath instance containing all the existing ancestral INodes
/// </param>
/// <param name="children">
/// The relative path from the parent towards children, starting with "/"
/// </param>
/// <param name="perm">
/// the permission of the directory. Note that all ancestors created along
/// the path have implicit <c>u+wx</c> permissions.
/// </param>
/// <returns>
/// an <see cref="INodesInPath"/> containing all the inodes to the target
/// directory, or null if the operation failed.
/// </returns>
/// <exception cref="System.IO.IOException"/>
private static INodesInPath CreateChildrenDirectories(FSDirectory fsd,
    INodesInPath existing, IList<string> children, PermissionStatus perm)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    foreach (string component in children)
    {
        existing = CreateSingleDirectory(fsd, existing, component, perm);
        if (existing == null)
        {
            return(null);
        }
    }
    return(existing);
}
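// Usage note (illustrative, hypothetical values): for existing = /a and
// children = ["b", "c"], the loop first creates /a/b, then /a/b/c, re-pointing
// `existing` at each newly created level. A failure partway through returns null
// but leaves the already-created ancestors in place.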
/// <summary>create an hdfs file status from an inode</summary>
/// <param name="fsd">FSDirectory</param>
/// <param name="path">the local name</param>
/// <param name="node">inode</param>
/// <param name="needLocation">if block locations need to be included or not</param>
/// <param name="isRawPath">
/// true if this is being called on behalf of a path in /.reserved/raw
/// </param>
/// <returns>a file status</returns>
/// <exception cref="System.IO.IOException">if any error occurs</exception>
internal static HdfsFileStatus CreateFileStatus(FSDirectory fsd, string fullPath,
    byte[] path, INode node, bool needLocation, byte storagePolicy, int snapshot,
    bool isRawPath, INodesInPath iip)
{
    if (needLocation)
    {
        return(CreateLocatedFileStatus(fsd, fullPath, path, node, storagePolicy,
            snapshot, isRawPath, iip));
    }
    else
    {
        return(CreateFileStatus(fsd, fullPath, path, node, storagePolicy, snapshot,
            isRawPath, iip));
    }
}
/// <exception cref="System.IO.FileNotFoundException"/> /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/> /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/> /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotAccessControlException"/ /// > internal static void UnprotectedSetPermission(FSDirectory fsd, string src, FsPermission permissions) { System.Diagnostics.Debug.Assert(fsd.HasWriteLock()); INodesInPath inodesInPath = fsd.GetINodesInPath4Write(src, true); INode inode = inodesInPath.GetLastINode(); if (inode == null) { throw new FileNotFoundException("File does not exist: " + src); } int snapshotId = inodesInPath.GetLatestSnapshotId(); inode.SetPermission(permissions, snapshotId); }