/// <summary>
        /// Both the traditional space quota and the storage type quota for SSD are set,
        /// and neither is exceeded.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithTraditionalQuota()
        {
            Path foo = new Path(dir, "foo");

            dfs.Mkdirs(foo);
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 10);
            dfs.SetQuota(foo, long.MaxValue - 1, Replication * Blocksize * 10);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            Path createdFile = new Path(foo, "created_file.data");
            long fileLen     = Blocksize * 2 + Blocksize / 2;

            DFSTestUtil.CreateFile(dfs, createdFile, Blocksize / 16, fileLen, Blocksize,
                                   Replication, seed);
            QuotaCounts cnt = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();

            NUnit.Framework.Assert.AreEqual(2, cnt.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(fileLen * Replication, cnt.GetStorageSpace());
            dfs.Delete(createdFile, true);
            QuotaCounts cntAfterDelete = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();

            NUnit.Framework.Assert.AreEqual(1, cntAfterDelete.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(0, cntAfterDelete.GetStorageSpace());
            // Validate the computeQuotaUsage()
            QuotaCounts counts = new QuotaCounts.Builder().Build();

            fnode.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts, true);
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 1, counts.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 0, counts.GetStorageSpace());
        }
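The quota numbers asserted above follow from simple arithmetic: the directory and the file account for two namespace items, and every byte written is charged once per replica. The stand-alone sketch below reproduces that math; the Blocksize and Replication values are illustrative assumptions rather than the fixture's actual constants.

// Stand-alone sketch of the quota math asserted in the test above.
// Assumes Blocksize = 1 MB and Replication = 3, which mirror typical test settings.
using System;

public static class QuotaMathSketch
{
    public static void Main()
    {
        const long  Blocksize   = 1024 * 1024;
        const short Replication = 3;

        // A 2.5-block file, as created in the test.
        long fileLen = Blocksize * 2 + Blocksize / 2;

        // Namespace usage: the directory itself plus the created file.
        long nameSpace = 2;

        // Storage space usage counts every replica of every byte.
        long storageSpace = fileLen * Replication;

        Console.WriteLine($"namespace={nameSpace}, storagespace={storageSpace}");
        // After the file is deleted, only the directory remains and storagespace drops to 0.
    }
}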
Example #2
 /// <summary>
 /// When destroying a reference node (WithName or DstReference), this
 /// method identifies the latest snapshot taken before the reference
 /// node's creation.
 /// </summary>
 internal static int GetPriorSnapshot(Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeReference @ref)
 {
     INodeReference.WithCount wc = (INodeReference.WithCount)@ref.GetReferredINode();
     INodeReference.WithName  wn = null;
     if (@ref is INodeReference.DstReference)
     {
         wn = wc.GetLastWithName();
     }
     else
     {
         if (@ref is INodeReference.WithName)
         {
             wn = wc.GetPriorWithName((INodeReference.WithName)@ref);
         }
     }
     if (wn != null)
     {
         INode referred = wc.GetReferredINode();
         if (referred.IsFile() && referred.AsFile().IsWithSnapshot())
         {
             return(referred.AsFile().GetDiffs().GetPrior(wn.lastSnapshotId));
         }
         else
         {
             if (referred.IsDirectory())
             {
                 DirectoryWithSnapshotFeature sf = referred.AsDirectory().GetDirectoryWithSnapshotFeature();
                 if (sf != null)
                 {
                     return(sf.GetDiffs().GetPrior(wn.lastSnapshotId));
                 }
             }
         }
     }
     return(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId);
 }
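Conceptually, GetDiffs().GetPrior(id) walks the snapshot diff list and returns the latest snapshot recorded strictly before the given one. The sketch below illustrates that lookup over a plain sorted list of snapshot IDs; the list type and the NoSnapshotId constant are local stand-ins, not the HDFS classes.

// Minimal sketch of the "prior snapshot" lookup performed by GetDiffs().GetPrior(id):
// given snapshot IDs in ascending order, return the largest ID strictly smaller
// than the anchor, or NoSnapshotId when no earlier snapshot exists.
using System;
using System.Collections.Generic;

public static class PriorSnapshotSketch
{
    public const int NoSnapshotId = -1;

    public static int GetPrior(IReadOnlyList<int> snapshotIdsAscending, int anchorId)
    {
        int prior = NoSnapshotId;
        foreach (int id in snapshotIdsAscending)
        {
            if (id >= anchorId)
            {
                break;
            }
            prior = id;
        }
        return prior;
    }

    public static void Main()
    {
        var ids = new List<int> { 2, 5, 9 };
        Console.WriteLine(GetPrior(ids, 9)); // 5
        Console.WriteLine(GetPrior(ids, 2)); // -1: no snapshot before the anchor
    }
}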
Example #3
            private int GetSelfSnapshot()
            {
                INode referred = GetReferredINode().AsReference().GetReferredINode();
                int   snapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId;

                if (referred.IsFile() && referred.AsFile().IsWithSnapshot())
                {
                    snapshot = referred.AsFile().GetDiffs().GetPrior(lastSnapshotId);
                }
                else
                {
                    if (referred.IsDirectory())
                    {
                        DirectoryWithSnapshotFeature sf = referred.AsDirectory().GetDirectoryWithSnapshotFeature();
                        if (sf != null)
                        {
                            snapshot = sf.GetDiffs().GetPrior(lastSnapshotId);
                        }
                    }
                }
                return(snapshot);
            }
Example #4
        /// <summary>Updates an inode with a new ACL.</summary>
        /// <remarks>
        /// Updates an inode with a new ACL.  This method takes a full logical ACL and
        /// stores the entries to the inode's
        /// <see cref="Org.Apache.Hadoop.FS.Permission.FsPermission"/>
        /// and
        /// <see cref="AclFeature"/>
        /// .
        /// </remarks>
        /// <param name="inode">INode to update</param>
        /// <param name="newAcl">list of AclEntry objects containing the new ACL entries</param>
        /// <param name="snapshotId">int latest snapshot ID of inode</param>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.AclException">if the ACL is invalid for the given inode
        ///     </exception>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException">if quota limit is exceeded
        ///     </exception>
        public static void UpdateINodeAcl(INode inode, IList <AclEntry> newAcl, int snapshotId)
        {
            System.Diagnostics.Debug.Assert(newAcl.Count >= 3);
            FsPermission perm = inode.GetFsPermission();
            FsPermission newPerm;

            if (!AclUtil.IsMinimalAcl(newAcl))
            {
                // This is an extended ACL.  Split entries into access vs. default.
                ScopedAclEntries scoped         = new ScopedAclEntries(newAcl);
                IList <AclEntry> accessEntries  = scoped.GetAccessEntries();
                IList <AclEntry> defaultEntries = scoped.GetDefaultEntries();
                // Only directories may have a default ACL.
                if (!defaultEntries.IsEmpty() && !inode.IsDirectory())
                {
                    throw new AclException("Invalid ACL: only directories may have a default ACL.");
                }
                // Attach entries to the feature.
                if (inode.GetAclFeature() != null)
                {
                    inode.RemoveAclFeature(snapshotId);
                }
                inode.AddAclFeature(CreateAclFeature(accessEntries, defaultEntries), snapshotId);
                newPerm = CreateFsPermissionForExtendedAcl(accessEntries, perm);
            }
            else
            {
                // This is a minimal ACL.  Remove the ACL feature if it previously had one.
                if (inode.GetAclFeature() != null)
                {
                    inode.RemoveAclFeature(snapshotId);
                }
                newPerm = CreateFsPermissionForMinimalAcl(newAcl, perm);
            }
            inode.SetPermission(newPerm, snapshotId);
        }
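The branch taken above hinges on whether the new ACL is minimal (exactly the three unnamed owner, group, and other entries) or extended (anything more, which requires attaching an AclFeature). The sketch below mimics that check with a simplified local entry type; AclEntrySketch and SketchAclType are hypothetical stand-ins for the Org.Apache.Hadoop.FS.Permission types, not the real API.

// Illustrative sketch of the minimal-vs-extended ACL distinction driving UpdateINodeAcl.
// A minimal ACL is assumed to be exactly the three unnamed access entries
// (owner, owning group, other), which is what AclUtil.IsMinimalAcl tests for.
using System;
using System.Collections.Generic;
using System.Linq;

public enum SketchAclType { User, Group, Mask, Other }

public sealed class AclEntrySketch
{
    public SketchAclType Type { get; set; }
    public string Name { get; set; }          // null for unnamed (owner/group/other) entries
    public bool IsDefaultScope { get; set; }  // true for "default:" scoped entries
}

public static class AclSketch
{
    public static bool IsMinimalAcl(IList<AclEntrySketch> acl)
    {
        // Exactly three access-scoped, unnamed entries: user::, group::, other::
        return acl.Count == 3
               && acl.All(e => !e.IsDefaultScope && e.Name == null)
               && acl.Any(e => e.Type == SketchAclType.User)
               && acl.Any(e => e.Type == SketchAclType.Group)
               && acl.Any(e => e.Type == SketchAclType.Other);
    }

    public static void Main()
    {
        var extended = new List<AclEntrySketch>
        {
            new AclEntrySketch { Type = SketchAclType.User },
            new AclEntrySketch { Type = SketchAclType.User, Name = "bruce" }, // named entry => extended
            new AclEntrySketch { Type = SketchAclType.Group },
            new AclEntrySketch { Type = SketchAclType.Mask },
            new AclEntrySketch { Type = SketchAclType.Other }
        };
        Console.WriteLine(IsMinimalAcl(extended)); // False: an ACL feature must be attached
    }
}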
Example #5
        /// <exception cref="System.Exception"/>
        private void TestQuotaByStorageTypeOrTraditionalQuotaExceededCase(long storageSpaceQuotaInBlocks,
                                                                          long ssdQuotaInBlocks, long testFileLenInBlocks, short replication)
        {
            string MethodName = GenericTestUtils.GetMethodName();
            Path   testDir    = new Path(dir, MethodName);

            dfs.Mkdirs(testDir);
            dfs.SetStoragePolicy(testDir, HdfsConstants.OnessdStoragePolicyName);
            long ssdQuota          = Blocksize * ssdQuotaInBlocks;
            long storageSpaceQuota = Blocksize * storageSpaceQuotaInBlocks;

            dfs.SetQuota(testDir, long.MaxValue - 1, storageSpaceQuota);
            dfs.SetQuotaByStorageType(testDir, StorageType.Ssd, ssdQuota);
            INode testDirNode = fsdir.GetINode4Write(testDir.ToString());

            NUnit.Framework.Assert.IsTrue(testDirNode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(testDirNode.IsQuotaSet());
            Path createdFile = new Path(testDir, "created_file.data");
            long fileLen     = testFileLenInBlocks * Blocksize;

            try
            {
                DFSTestUtil.CreateFile(dfs, createdFile, Blocksize / 16, fileLen, Blocksize,
                                       replication, seed);
                NUnit.Framework.Assert.Fail("Should have failed with DSQuotaExceededException or "
                                            + "QuotaByStorageTypeExceededException ");
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
                long currentSSDConsumed = testDirNode.AsDirectory().GetDirectoryWithQuotaFeature()
                                          .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
                NUnit.Framework.Assert.AreEqual(Math.Min(ssdQuota, storageSpaceQuota / replication),
                                                currentSSDConsumed);
            }
        }
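The value asserted in the catch block is the amount of SSD space that could be consumed before either quota was hit: with a ONE_SSD policy each block charges one block of SSD against the type quota but replication blocks against the traditional space quota, so consumption is capped at Min(ssdQuota, storageSpaceQuota / replication). A quick arithmetic sketch, using illustrative numbers rather than the test fixture's constants:

// Stand-alone sketch of the bound asserted above.
using System;

public static class QuotaExceededSketch
{
    public static void Main()
    {
        const long Blocksize = 1024 * 1024;
        short replication    = 3;

        long ssdQuota          = Blocksize * 2;               // storage type quota on SSD
        long storageSpaceQuota = Blocksize * 3 * replication;  // traditional space quota

        // ONE_SSD stores one replica per block on SSD, so the traditional quota
        // translates to storageSpaceQuota / replication bytes of SSD capacity.
        long ssdAllowedByTraditionalQuota = storageSpaceQuota / replication;

        long maxSsdConsumed = Math.Min(ssdQuota, ssdAllowedByTraditionalQuota);
        Console.WriteLine(maxSsdConsumed / Blocksize); // 2 blocks: here the SSD quota bites first
    }
}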
Example #6
        /// <summary>
        /// If a default ACL is defined on a parent directory, copies that default
        /// ACL to a newly created child file or directory.
        /// </summary>
        /// <param name="child">INode newly created child</param>
        public static void CopyINodeDefaultAcl(INode child)
        {
            INodeDirectory parent           = child.GetParent();
            AclFeature     parentAclFeature = parent.GetAclFeature();

            if (parentAclFeature == null || !(child.IsFile() || child.IsDirectory()))
            {
                return;
            }
            // Split parent's entries into access vs. default.
            IList <AclEntry> featureEntries       = GetEntriesFromAclFeature(parent.GetAclFeature());
            ScopedAclEntries scopedEntries        = new ScopedAclEntries(featureEntries);
            IList <AclEntry> parentDefaultEntries = scopedEntries.GetDefaultEntries();

            // The parent may have an access ACL but no default ACL.  If so, exit.
            if (parentDefaultEntries.IsEmpty())
            {
                return;
            }
            // Pre-allocate list size for access entries to copy from parent.
            IList <AclEntry> accessEntries = Lists.NewArrayListWithCapacity(parentDefaultEntries.Count);
            FsPermission childPerm = child.GetFsPermission();
            // Copy each default ACL entry from parent to new child's access ACL.
            bool parentDefaultIsMinimal = AclUtil.IsMinimalAcl(parentDefaultEntries);

            foreach (AclEntry entry in parentDefaultEntries)
            {
                AclEntryType     type    = entry.GetType();
                string           name    = entry.GetName();
                AclEntry.Builder builder = new AclEntry.Builder().SetScope(AclEntryScope.Access)
                                           .SetType(type).SetName(name);
                // The child's initial permission bits are treated as the mode parameter,
                // which can filter copied permission values for owner, mask and other.
                FsAction permission;
                if (type == AclEntryType.User && name == null)
                {
                    permission = entry.GetPermission().And(childPerm.GetUserAction());
                }
                else
                {
                    if (type == AclEntryType.Group && parentDefaultIsMinimal)
                    {
                        // This only happens if the default ACL is a minimal ACL: exactly 3
                        // entries corresponding to owner, group and other.  In this case,
                        // filter the group permissions.
                        permission = entry.GetPermission().And(childPerm.GetGroupAction());
                    }
                    else
                    {
                        if (type == AclEntryType.Mask)
                        {
                            // Group bits from mode parameter filter permission of mask entry.
                            permission = entry.GetPermission().And(childPerm.GetGroupAction());
                        }
                        else
                        {
                            if (type == AclEntryType.Other)
                            {
                                permission = entry.GetPermission().And(childPerm.GetOtherAction());
                            }
                            else
                            {
                                permission = entry.GetPermission();
                            }
                        }
                    }
                }
                builder.SetPermission(permission);
                accessEntries.AddItem(builder.Build());
            }
            // A new directory also receives a copy of the parent's default ACL.
            IList <AclEntry> defaultEntries = child.IsDirectory() ? parentDefaultEntries
                                              : Sharpen.Collections.EmptyList <AclEntry>();
            FsPermission newPerm;

            if (!AclUtil.IsMinimalAcl(accessEntries) || !defaultEntries.IsEmpty())
            {
                // Save the new ACL to the child.
                child.AddAclFeature(CreateAclFeature(accessEntries, defaultEntries));
                newPerm = CreateFsPermissionForExtendedAcl(accessEntries, childPerm);
            }
            else
            {
                // The child is receiving a minimal ACL.
                newPerm = CreateFsPermissionForMinimalAcl(accessEntries, childPerm);
            }
            child.SetPermission(newPerm);
        }
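The per-entry filtering in the loop above ANDs each copied default entry with the relevant bits of the child's creation mode: owner entries against the user bits, mask and (minimal) group entries against the group bits, other entries against the other bits. The sketch below shows that masking with a simplified flags enum; FsActionBits is a hypothetical stand-in for Org.Apache.Hadoop.FS.Permission.FsAction, whose And method the real code uses.

// Sketch of the mode-bit filtering applied while copying a parent's default ACL to a child.
using System;

[Flags]
public enum FsActionBits { None = 0, Execute = 1, Write = 2, Read = 4, All = 7 }

public static class DefaultAclCopySketch
{
    public static FsActionBits FilterOwner(FsActionBits defaultEntryPerm, FsActionBits childUserBits)
        => defaultEntryPerm & childUserBits;

    public static FsActionBits FilterMask(FsActionBits defaultEntryPerm, FsActionBits childGroupBits)
        => defaultEntryPerm & childGroupBits;

    public static FsActionBits FilterOther(FsActionBits defaultEntryPerm, FsActionBits childOtherBits)
        => defaultEntryPerm & childOtherBits;

    public static void Main()
    {
        // The parent's default mask grants rwx, but the child was created with
        // mode 0644, so its group bits (r--) clamp the copied mask entry.
        FsActionBits maskFromParent = FsActionBits.All;
        FsActionBits childGroupBits = FsActionBits.Read;
        Console.WriteLine(FilterMask(maskFromParent, childGroupBits)); // Read
    }
}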
Example #7
        /// <summary>Retrieve existing INodes from a path.</summary>
        /// <remarks>
        /// Retrieve existing INodes from a path. For non-snapshot path,
        /// the number of INodes is equal to the number of path components. For
        /// snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
        /// (number_of_path_components - 1).
        /// An UnresolvedPathException is always thrown when an intermediate path
        /// component refers to a symbolic link. If the final path component refers
        /// to a symbolic link then an UnresolvedPathException is only thrown if
        /// resolveLink is true.
        /// <p>
        /// Example: <br />
        /// Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
        /// following path components: ["","c1","c2","c3"]
        /// <p>
        /// <code>getExistingPathINodes(["","c1","c2"])</code> should fill
        /// the array with [rootINode,c1,c2], <br />
        /// <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
        /// fill the array with [rootINode,c1,c2,null]
        /// </remarks>
        /// <param name="startingDir">the starting directory</param>
        /// <param name="components">array of path component name</param>
        /// <param name="resolveLink">
        /// indicates whether UnresolvedLinkException should
        /// be thrown when the path refers to a symbolic link.
        /// </param>
        /// <returns>the specified number of existing INodes in the path</returns>
        /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
        internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath Resolve(INodeDirectory startingDir,
                                                                                     byte[][] components, bool resolveLink)
        {
            Preconditions.CheckArgument(startingDir.CompareTo(components[0]) == 0);
            INode curNode  = startingDir;
            int   count    = 0;
            int   inodeNum = 0;

            INode[] inodes     = new INode[components.Length];
            bool    isSnapshot = false;
            int     snapshotId = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;

            while (count < components.Length && curNode != null)
            {
                bool lastComp = (count == components.Length - 1);
                inodes[inodeNum++] = curNode;
                bool           isRef = curNode.IsReference();
                bool           isDir = curNode.IsDirectory();
                INodeDirectory dir   = isDir ? curNode.AsDirectory() : null;
                if (!isRef && isDir && dir.IsWithSnapshot())
                {
                    //if the path is a non-snapshot path, update the latest snapshot.
                    if (!isSnapshot && ShouldUpdateLatestId(dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId(),
                                                            snapshotId))
                    {
                        snapshotId = dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId();
                    }
                }
                else
                {
                    if (isRef && isDir && !lastComp)
                    {
                        // If the curNode is a reference node, need to check its dstSnapshot:
                        // 1. if the existing snapshot is no later than the dstSnapshot (which
                        // is the latest snapshot in dst before the rename), the changes
                        // should be recorded in previous snapshots (belonging to src).
                        // 2. however, if the ref node is already the last component, we still
                        // need to know the latest snapshot among the ref node's ancestors,
                        // in case of processing a deletion operation. Thus we do not overwrite
                        // the latest snapshot if lastComp is true. In case of the operation is
                        // a modification operation, we do a similar check in corresponding
                        // recordModification method.
                        if (!isSnapshot)
                        {
                            int dstSnapshotId = curNode.AsReference().GetDstSnapshotId();
                            if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId ||
                                (dstSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId &&
                                 dstSnapshotId >= snapshotId))
                            {
                                // no snapshot in dst tree of rename
                                // the above scenario
                                int lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                                DirectoryWithSnapshotFeature sf;
                                if (curNode.IsDirectory() &&
                                    (sf = curNode.AsDirectory().GetDirectoryWithSnapshotFeature()) != null)
                                {
                                    lastSnapshot = sf.GetLastSnapshotId();
                                }
                                snapshotId = lastSnapshot;
                            }
                        }
                    }
                }
                if (curNode.IsSymlink() && (!lastComp || resolveLink))
                {
                    string path      = ConstructPath(components, 0, components.Length);
                    string preceding = ConstructPath(components, 0, count);
                    string remainder = ConstructPath(components, count + 1, components.Length);
                    string link      = DFSUtil.Bytes2String(components[count]);
                    string target    = curNode.AsSymlink().GetSymlinkString();
                    if (Log.IsDebugEnabled())
                    {
                        Log.Debug("UnresolvedPathException " + " path: " + path + " preceding: " + preceding
                                  + " count: " + count + " link: " + link + " target: " + target + " remainder: "
                                  + remainder);
                    }
                    throw new UnresolvedPathException(path, preceding, remainder, target);
                }
                if (lastComp || !isDir)
                {
                    break;
                }
                byte[] childName = components[count + 1];
                // check if the next byte[] in components is for ".snapshot"
                if (IsDotSnapshotDir(childName) && dir.IsSnapshottable())
                {
                    // skip the ".snapshot" in components
                    count++;
                    isSnapshot = true;
                    // check if ".snapshot" is the last element of components
                    if (count == components.Length - 1)
                    {
                        break;
                    }
                    // Resolve snapshot root
                    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = dir.GetSnapshot(components[count + 1]);
                    if (s == null)
                    {
                        // snapshot not found
                        curNode = null;
                    }
                    else
                    {
                        curNode    = s.GetRoot();
                        snapshotId = s.GetId();
                    }
                }
                else
                {
                    // normal case, and also for resolving file/dir under snapshot root
                    curNode = dir.GetChild(childName, isSnapshot ? snapshotId
                                           : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
                }
                count++;
            }
            if (isSnapshot && !IsDotSnapshotDir(components[components.Length - 1]))
            {
                // for snapshot path shrink the inode array. however, for path ending with
                // .snapshot, still keep last the null inode in the array
                INode[] newNodes = new INode[components.Length - 1];
                System.Array.Copy(inodes, 0, newNodes, 0, newNodes.Length);
                inodes = newNodes;
            }
            return(new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath(inodes, components,
                                                                            isSnapshot, snapshotId));
        }
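The remarks above describe how many INode slots a resolved path yields: one per component for a normal path (with nulls for missing components), and one fewer for a snapshot path because ".snapshot" is skipped, unless the path ends in ".snapshot". The small sketch below only models that counting rule; it is not a path resolver and the helper is hypothetical.

// Sketch of the array-shape rule from the Resolve() remarks.
using System;

public static class PathShapeSketch
{
    // Returns the expected length of the INode array Resolve() produces.
    public static int ExpectedInodeSlots(string[] components)
    {
        bool isSnapshotPath      = Array.IndexOf(components, ".snapshot") >= 0;
        bool endsWithDotSnapshot = components[components.Length - 1] == ".snapshot";
        // A snapshot path drops the slot for ".snapshot" itself, except when the
        // path ends in ".snapshot", where the trailing (possibly null) slot is kept.
        return isSnapshotPath && !endsWithDotSnapshot ? components.Length - 1 : components.Length;
    }

    public static void Main()
    {
        // ["", "c1", "c2", "c3"] with only /c1/c2 existing resolves to
        // [rootINode, c1, c2, null]: four slots, the last one null.
        Console.WriteLine(ExpectedInodeSlots(new[] { "", "c1", "c2", "c3" }));                // 4

        // /foo/.snapshot/s1/bar has five components but only four INode slots,
        // because ".snapshot" does not occupy a slot of its own.
        Console.WriteLine(ExpectedInodeSlots(new[] { "", "foo", ".snapshot", "s1", "bar" })); // 4
    }
}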
Example #8
        /// <summary>
        /// Get a partial listing of the indicated directory.
        /// We will stop when any of the following conditions is met:
        /// 1) this.lsLimit files have been added
        /// 2) needLocation is true AND enough files have been added such
        /// that at least this.lsLimit block locations are in the response
        /// </summary>
        /// <param name="fsd">FSDirectory</param>
        /// <param name="iip">
        /// the INodesInPath instance containing all the INodes along the
        /// path
        /// </param>
        /// <param name="src">the directory name</param>
        /// <param name="startAfter">the name to start listing after</param>
        /// <param name="needLocation">if block locations are returned</param>
        /// <returns>a partial listing starting after startAfter</returns>
        /// <exception cref="System.IO.IOException"/>
        private static DirectoryListing GetListing(FSDirectory fsd, INodesInPath iip, string src,
                                                   byte[] startAfter, bool needLocation, bool isSuperUser)
        {
            string srcs      = FSDirectory.NormalizePath(src);
            bool   isRawPath = FSDirectory.IsReservedRawName(src);

            fsd.ReadLock();
            try
            {
                if (srcs.EndsWith(HdfsConstants.SeparatorDotSnapshotDir))
                {
                    return(GetSnapshotsListing(fsd, srcs, startAfter));
                }
                int   snapshot   = iip.GetPathSnapshotId();
                INode targetNode = iip.GetLastINode();
                if (targetNode == null)
                {
                    return(null);
                }
                byte parentStoragePolicy = isSuperUser ? targetNode.GetStoragePolicyID()
                                           : BlockStoragePolicySuite.IdUnspecified;
                if (!targetNode.IsDirectory())
                {
                    return(new DirectoryListing(new HdfsFileStatus[] {
                        CreateFileStatus(fsd, src, HdfsFileStatus.EmptyName, targetNode, needLocation,
                                         parentStoragePolicy, snapshot, isRawPath, iip)
                    }, 0));
                }
                INodeDirectory       dirInode = targetNode.AsDirectory();
                ReadOnlyList <INode> contents = dirInode.GetChildrenList(snapshot);
                int startChild           = INodeDirectory.NextChild(contents, startAfter);
                int totalNumChildren     = contents.Size();
                int numOfListing         = Math.Min(totalNumChildren - startChild, fsd.GetLsLimit());
                int locationBudget       = fsd.GetLsLimit();
                int listingCnt           = 0;
                HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
                for (int i = 0; i < numOfListing && locationBudget > 0; i++)
                {
                    INode cur       = contents.Get(startChild + i);
                    byte  curPolicy = isSuperUser && !cur.IsSymlink()
                                      ? cur.GetLocalStoragePolicyID() : BlockStoragePolicySuite.IdUnspecified;
                    listing[i] = CreateFileStatus(fsd, src, cur.GetLocalNameBytes(), cur, needLocation,
                                                  GetStoragePolicyID(curPolicy, parentStoragePolicy), snapshot, isRawPath, iip);
                    listingCnt++;
                    if (needLocation)
                    {
                        // Once we hit lsLimit locations, stop.
                        // This helps to prevent excessively large response payloads.
                        // Approximate #locations with locatedBlockCount() * repl_factor
                        LocatedBlocks blks = ((HdfsLocatedFileStatus)listing[i]).GetBlockLocations();
                        locationBudget -= (blks == null) ? 0 : blks.LocatedBlockCount() * listing[i].GetReplication();
                    }
                }
                // truncate return array if necessary
                if (listingCnt < numOfListing)
                {
                    listing = Arrays.CopyOf(listing, listingCnt);
                }
                return(new DirectoryListing(listing, totalNumChildren - startChild - listingCnt));
            }
            finally
            {
                fsd.ReadUnlock();
            }
        }
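The early-exit behaviour described in the summary comes from the location budget: it starts at lsLimit and each returned entry is charged locatedBlockCount * replication, so the listing is truncated once the budget is spent. A stand-alone sketch of that loop, with illustrative numbers rather than values taken from an FSDirectory:

// Sketch of the listing cut-off arithmetic in GetListing().
using System;

public static class ListingBudgetSketch
{
    public static int EntriesReturned(int lsLimit, int[] blockCountsPerFile, short replication)
    {
        int locationBudget = lsLimit;
        int listingCnt     = 0;
        for (int i = 0; i < blockCountsPerFile.Length && locationBudget > 0; i++)
        {
            listingCnt++;
            // Approximate #locations with located block count * replication factor.
            locationBudget -= blockCountsPerFile[i] * replication;
        }
        return listingCnt;
    }

    public static void Main()
    {
        // With lsLimit = 10, replication = 3, and 2-block files, the second entry
        // exhausts the budget and the listing is truncated there.
        Console.WriteLine(EntriesReturned(10, new[] { 2, 2, 2, 2 }, 3)); // 2
    }
}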
Example #9
        /// <summary>Rename src to dst.</summary>
        /// <remarks>
        /// Rename src to dst.
        /// See
        /// <see cref="Org.Apache.Hadoop.Hdfs.DistributedFileSystem.Rename(Org.Apache.Hadoop.FS.Path, Org.Apache.Hadoop.FS.Path, Org.Apache.Hadoop.FS.Options.Rename[])
        ///     "/>
        /// for details related to rename semantics and exceptions.
        /// </remarks>
        /// <param name="fsd">FSDirectory</param>
        /// <param name="src">source path</param>
        /// <param name="dst">destination path</param>
        /// <param name="timestamp">modification time</param>
        /// <param name="collectedBlocks">blocks to be removed</param>
        /// <param name="options">Rename options</param>
        /// <returns>whether a file/directory gets overwritten in the dst path</returns>
        /// <exception cref="System.IO.IOException"/>
        internal static bool UnprotectedRenameTo(FSDirectory fsd, string src, string dst,
                                                 INodesInPath srcIIP, INodesInPath dstIIP, long timestamp,
                                                 INode.BlocksMapUpdateInfo collectedBlocks, params Options.Rename[] options)
        {
            System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
            bool overwrite = options != null && Arrays.AsList(options).Contains(Options.Rename.Overwrite);
            string error;
            INode  srcInode = srcIIP.GetLastINode();

            ValidateRenameSource(srcIIP);
            // validate the destination
            if (dst.Equals(src))
            {
                throw new FileAlreadyExistsException("The source " + src + " and destination " +
                                                     dst + " are the same");
            }
            ValidateDestination(src, dst, srcInode);
            if (dstIIP.Length() == 1)
            {
                error = "rename destination cannot be the root";
                NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
                throw new IOException(error);
            }
            BlockStoragePolicySuite bsps = fsd.GetBlockStoragePolicySuite();

            fsd.ezManager.CheckMoveValidity(srcIIP, dstIIP, src);
            INode dstInode = dstIIP.GetLastINode();
            IList <INodeDirectory> snapshottableDirs = new AList <INodeDirectory>();

            if (dstInode != null)
            {
                // Destination exists
                ValidateOverwrite(src, dst, overwrite, srcInode, dstInode);
                FSDirSnapshotOp.CheckSnapshot(dstInode, snapshottableDirs);
            }
            INode dstParent = dstIIP.GetINode(-2);

            if (dstParent == null)
            {
                error = "rename destination parent " + dst + " not found.";
                NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
                throw new FileNotFoundException(error);
            }
            if (!dstParent.IsDirectory())
            {
                error = "rename destination parent " + dst + " is a file.";
                NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
                throw new ParentNotDirectoryException(error);
            }
            // Ensure dst has quota to accommodate rename
            VerifyFsLimitsForRename(fsd, srcIIP, dstIIP);
            VerifyQuotaForRename(fsd, srcIIP, dstIIP);
            FSDirRenameOp.RenameOperation tx = new FSDirRenameOp.RenameOperation(fsd, src, dst,
                                                                                 srcIIP, dstIIP);
            bool undoRemoveSrc = true;

            tx.RemoveSrc();
            bool undoRemoveDst = false;
            long removedNum    = 0;

            try
            {
                if (dstInode != null)
                {
                    // dst exists, remove it
                    removedNum = tx.RemoveDst();
                    if (removedNum != -1)
                    {
                        undoRemoveDst = true;
                    }
                }
                // add src as dst to complete rename
                if (tx.AddSourceToDestination())
                {
                    undoRemoveSrc = false;
                    if (NameNode.stateChangeLog.IsDebugEnabled())
                    {
                        NameNode.stateChangeLog.Debug("DIR* FSDirectory.unprotectedRenameTo: " + src + " is renamed to "
                                                      + dst);
                    }
                    tx.UpdateMtimeAndLease(timestamp);
                    // Collect the blocks and remove the lease for previous dst
                    bool filesDeleted = false;
                    if (undoRemoveDst)
                    {
                        undoRemoveDst = false;
                        if (removedNum > 0)
                        {
                            filesDeleted = tx.CleanDst(bsps, collectedBlocks);
                        }
                    }
                    if (snapshottableDirs.Count > 0)
                    {
                        // There are snapshottable directories (without snapshots) to be
                        // deleted. Need to update the SnapshotManager.
                        fsd.GetFSNamesystem().RemoveSnapshottableDirs(snapshottableDirs);
                    }
                    tx.UpdateQuotasInSourceTree(bsps);
                    return(filesDeleted);
                }
            }
            finally
            {
                if (undoRemoveSrc)
                {
                    tx.RestoreSource();
                }
                if (undoRemoveDst)
                {
                    // Rename failed - restore dst
                    tx.RestoreDst(bsps);
                }
            }
            NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename "
                                         + src + " to " + dst);
            throw new IOException("rename from " + src + " to " + dst + " failed.");
        }