/// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithSnapshot()
        {
            Path sub1 = new Path(dir, "Sub1");

            dfs.Mkdirs(sub1);
            // Setup ONE_SSD policy and SSD quota of 4 * BLOCKSIZE on sub1
            dfs.SetStoragePolicy(sub1, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(sub1, StorageType.Ssd, 4 * Blocksize);
            INode sub1Node = fsdir.GetINode4Write(sub1.ToString());

            NUnit.Framework.Assert.IsTrue(sub1Node.IsDirectory());
            NUnit.Framework.Assert.IsTrue(sub1Node.IsQuotaSet());
            // Create file1 of size 2 * BLOCKSIZE under sub1
            Path file1    = new Path(sub1, "file1");
            long file1Len = 2 * Blocksize;

            DFSTestUtil.CreateFile(dfs, file1, file1Len, Replication, seed);
            // Create snapshot on sub1 named s1
            SnapshotTestHelper.CreateSnapshot(dfs, sub1, "s1");
            // Verify sub1 SSD usage is unchanged after creating snapshot s1
            long ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature()
                                   .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Delete file1
            dfs.Delete(file1, false);
            // Verify sub1 SSD usage is unchanged due to the existence of snapshot s1
            ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature()
                              .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            QuotaCounts counts1 = new QuotaCounts.Builder().Build();

            sub1Node.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts1, true);
            NUnit.Framework.Assert.AreEqual(sub1Node.DumpTreeRecursively().ToString(),
                                            file1Len, counts1.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs1 = dfs.GetContentSummary(sub1);

            NUnit.Framework.Assert.AreEqual(cs1.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs1.GetTypeConsumed(StorageType.Ssd), file1Len);
            NUnit.Framework.Assert.AreEqual(cs1.GetTypeConsumed(StorageType.Disk), file1Len * 2);
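            // With the ONE_SSD policy and REPLICATION == 3 (implied by the DISK
            // assertion above), one replica of each block lands on SSD and the
            // other two on DISK, hence the file1Len vs. file1Len * 2 split.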
            // Delete the snapshot s1
            dfs.DeleteSnapshot(sub1, "s1");
            // Verify sub1 SSD usage is fully reclaimed and changed to 0
            ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature()
                              .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(0, ssdConsumed);
            QuotaCounts counts2 = new QuotaCounts.Builder().Build();

            sub1Node.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts2, true);
            NUnit.Framework.Assert.AreEqual(sub1Node.DumpTreeRecursively().ToString(),
                                            0, counts2.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs2 = dfs.GetContentSummary(sub1);

            NUnit.Framework.Assert.AreEqual(cs2.GetSpaceConsumed(), 0);
            NUnit.Framework.Assert.AreEqual(cs2.GetTypeConsumed(StorageType.Ssd), 0);
            NUnit.Framework.Assert.AreEqual(cs2.GetTypeConsumed(StorageType.Disk), 0);
        }
        /// <summary>
        /// Test that the quota is correctly updated on file create even when a
        /// QuotaByStorageTypeExceededException is thrown.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeExceptionWithFileCreate()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 4);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create the 1st file of size 2 * BLOCKSIZE under directory "foo" and expect no exception
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            long currentSSDConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                          .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, currentSSDConsumed);
            // Create the 2nd file of size 1.5 * BLOCKSIZE under directory "foo" and expect no exception
            Path createdFile2 = new Path(foo, "created_file2.data");
            long file2Len     = Blocksize + Blocksize / 2;

            DFSTestUtil.CreateFile(dfs, createdFile2, bufLen, file2Len, Blocksize, Replication, seed);
            currentSSDConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                     .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(file1Len + file2Len, currentSSDConsumed);
            // Create the 3rd file of size BLOCKSIZE under directory "foo" and expect quota exceeded exception
            Path createdFile3 = new Path(foo, "created_file3.data");
            long file3Len     = Blocksize;

            try
            {
                DFSTestUtil.CreateFile(dfs, createdFile3, bufLen, file3Len, Blocksize, Replication, seed);
                NUnit.Framework.Assert.Fail("Should have failed with QuotaByStorageTypeExceededException");
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
                currentSSDConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                         .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
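                // The failed create must not leave partially accounted blocks
                // behind: SSD usage still reflects only file1 and file2.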
                NUnit.Framework.Assert.AreEqual(file1Len + file2Len, currentSSDConsumed);
            }
        }
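            /// <summary>
            /// Return the latest snapshot id recorded on the referred INode's own
            /// diff list, unless it is the current state or equals the given prior
            /// snapshot, in which case CurrentStateId is returned.
            /// </summary>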
            private int GetSelfSnapshot(int prior)
            {
                INodeReference.WithCount wc = (INodeReference.WithCount)GetReferredINode().AsReference();
                INode referred     = wc.GetReferredINode();
                int   lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;

                if (referred.IsFile() && referred.AsFile().IsWithSnapshot())
                {
                    lastSnapshot = referred.AsFile().GetDiffs().GetLastSnapshotId();
                }
                else
                {
                    if (referred.IsDirectory())
                    {
                        DirectoryWithSnapshotFeature sf = referred.AsDirectory().GetDirectoryWithSnapshotFeature();
                        if (sf != null)
                        {
                            lastSnapshot = sf.GetLastSnapshotId();
                        }
                    }
                }
                if (lastSnapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId &&
                    lastSnapshot != prior)
                {
                    return(lastSnapshot);
                }
                else
                {
                    return(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
                }
            }
 /// <summary>
 /// Check if the given INode (or one of its descendants) is snapshottable and
 /// already has snapshots.
 /// </summary>
 /// <param name="target">The given INode</param>
 /// <param name="snapshottableDirs">
 /// The list of directories that are snapshottable
 /// but do not have snapshots yet
 /// </param>
 /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotException"/>
 internal static void CheckSnapshot(INode target, IList <INodeDirectory> snapshottableDirs)
 {
     if (target.IsDirectory())
     {
         INodeDirectory targetDir         = target.AsDirectory();
         DirectorySnapshottableFeature sf = targetDir.GetDirectorySnapshottableFeature();
         if (sf != null)
         {
             if (sf.GetNumSnapshots() > 0)
             {
                 string fullPath = targetDir.GetFullPathName();
                 throw new SnapshotException("The directory " + fullPath + " cannot be deleted since "
                                             + fullPath + " is snapshottable and already has snapshots");
             }
             else
             {
                 if (snapshottableDirs != null)
                 {
                     snapshottableDirs.AddItem(targetDir);
                 }
             }
         }
          foreach (INode child in targetDir.GetChildrenList(
                       Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId))
         {
             CheckSnapshot(child, snapshottableDirs);
         }
     }
 }
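 // Minimal usage sketch (hypothetical caller): before deleting a subtree, pass
 // a list to collect snapshottable-but-snapshotless directories and let
 // CheckSnapshot throw if any descendant already has snapshots:
 //   IList<INodeDirectory> toUnset = new AList<INodeDirectory>();  // AList assumed from Sharpen
 //   CheckSnapshot(targetINode, toUnset);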
        /// <exception cref="System.Exception"/>
        internal virtual void TestQuotaByStorageTypeWithFileCreateCase(
            string storagePolicy, StorageType storageType, short replication)
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            // set storage policy on directory "foo" to storagePolicy
            dfs.SetStoragePolicy(foo, storagePolicy);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, storageType, Blocksize * 10);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 2.5 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2 + Blocksize / 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify space consumed and remaining quota
            long storageTypeConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                           .GetSpaceConsumed().GetTypeSpaces().Get(storageType);

            NUnit.Framework.Assert.AreEqual(file1Len * replication, storageTypeConsumed);
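            // Every replica that the policy maps to `storageType` counts against
            // the type quota, hence file1Len * replication (e.g. for an
            // ALL_SSD-style policy with replication > 1).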
        }
        /// <summary>Create FileStatus by file INode</summary>
        /// <exception cref="System.IO.IOException"/>
        internal static HdfsFileStatus CreateFileStatus(FSDirectory fsd, string fullPath,
                                                        byte[] path, INode node, byte storagePolicy,
                                                        int snapshot, bool isRawPath, INodesInPath iip)
        {
            long size = 0;    // length is zero for directories
            short replication = 0;
            long  blocksize   = 0;
            bool  isEncrypted;
            FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node, snapshot, iip);

            if (node.IsFile())
            {
                INodeFile fileNode = node.AsFile();
                size        = fileNode.ComputeFileSize(snapshot);
                replication = fileNode.GetFileReplication(snapshot);
                blocksize   = fileNode.GetPreferredBlockSize();
                isEncrypted = (feInfo != null) ||
                              (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode(node)));
            }
            else
            {
                isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
            }
            int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot) : 0;
            INodeAttributes nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);

            return(new HdfsFileStatus(size, node.IsDirectory(), replication, blocksize,
                                      node.GetModificationTime(snapshot), node.GetAccessTime(snapshot),
                                      GetPermissionForFileStatus(nodeAttrs, isEncrypted),
                                      nodeAttrs.GetUserName(), nodeAttrs.GetGroupName(),
                                      node.IsSymlink() ? node.AsSymlink().GetSymlink() : null,
                                      path, node.GetId(), childrenNum, feInfo, storagePolicy));
        }
        /// <exception cref="System.IO.IOException"/>
        private static void ValidateOverwrite(string src, string dst, bool overwrite,
                                              INode srcInode, INode dstInode)
        {
            string error;

            // It's OK to rename a file to a symlink and vice versa
            if (dstInode.IsDirectory() != srcInode.IsDirectory())
            {
                error = "Source " + src + " and destination " + dst + " must both be directories";
                NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
                throw new IOException(error);
            }
            if (!overwrite)
            {
                // If destination exists, overwrite flag must be true
                error = "rename destination " + dst + " already exists";
                NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
                throw new FileAlreadyExistsException(error);
            }
            if (dstInode.IsDirectory())
            {
                ReadOnlyList <INode> children = dstInode.AsDirectory().GetChildrenList(
                    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
                if (!children.IsEmpty())
                {
                    error = "rename destination directory is not empty: " + dst;
                    NameNode.stateChangeLog.Warn("DIR* FSDirectory.unprotectedRenameTo: " + error);
                    throw new IOException(error);
                }
            }
        }
        /// <summary>
        /// Guarded by
        /// <see cref="FSNamesystem.ReadLock()"/>
        /// </summary>
        /// <exception cref="Org.Apache.Hadoop.Security.AccessControlException"/>
        private void CheckSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
                                    int snapshotId, FsAction access, bool ignoreEmptyDir)
        {
            if (inode == null || !inode.IsDirectory())
            {
                return;
            }
            Stack <INodeDirectory> directories = new Stack <INodeDirectory>();
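            // Iterative depth-first walk: check `access` on this directory and
            // every subdirectory, skipping empty ones when ignoreEmptyDir is set.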

            for (directories.Push(inode.AsDirectory()); !directories.IsEmpty();)
            {
                INodeDirectory       d     = directories.Pop();
                ReadOnlyList <INode> cList = d.GetChildrenList(snapshotId);
                if (!(cList.IsEmpty() && ignoreEmptyDir))
                {
                    //TODO have to figure this out with inodeattribute provider
                    Check(GetINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
                          inode.GetFullPathName(), access);
                }
                foreach (INode child in cList)
                {
                    if (child.IsDirectory())
                    {
                        directories.Push(child.AsDirectory());
                    }
                }
            }
        }
 /// <summary>Save one inode's attributes to the image.</summary>
 /// <exception cref="System.IO.IOException"/>
 public static void SaveINode2Image(INode node, DataOutput @out, bool writeUnderConstruction,
                                    SnapshotFSImageFormat.ReferenceMap referenceMap)
 {
     if (node.IsReference())
     {
         WriteINodeReference(node.AsReference(), @out, writeUnderConstruction, referenceMap);
     }
     else
     {
         if (node.IsDirectory())
         {
             WriteINodeDirectory(node.AsDirectory(), @out);
         }
         else
         {
             if (node.IsSymlink())
             {
                 WriteINodeSymlink(node.AsSymlink(), @out);
             }
             else
             {
                 if (node.IsFile())
                 {
                     WriteINodeFile(node.AsFile(), @out, writeUnderConstruction);
                 }
             }
         }
     }
 }
        /// <summary>Create FileStatus with location info by file INode</summary>
        /// <exception cref="System.IO.IOException"/>
        private static HdfsLocatedFileStatus CreateLocatedFileStatus(FSDirectory fsd,
                                                                     string fullPath, byte[] path,
                                                                     INode node, byte storagePolicy,
                                                                     int snapshot, bool isRawPath,
                                                                     INodesInPath iip)
        {
            System.Diagnostics.Debug.Assert(fsd.HasReadLock());
            long size = 0;    // length is zero for directories
            short              replication = 0;
            long               blocksize   = 0;
            LocatedBlocks      loc         = null;
            bool               isEncrypted;
            FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node, snapshot, iip);

            if (node.IsFile())
            {
                INodeFile fileNode = node.AsFile();
                size        = fileNode.ComputeFileSize(snapshot);
                replication = fileNode.GetFileReplication(snapshot);
                blocksize   = fileNode.GetPreferredBlockSize();
                bool inSnapshot = snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                bool isUc     = !inSnapshot && fileNode.IsUnderConstruction();
                long fileSize = !inSnapshot && isUc
                                ? fileNode.ComputeFileSizeNotIncludingLastUcBlock() : size;

                loc = fsd.GetFSNamesystem().GetBlockManager().CreateLocatedBlocks(
                    fileNode.GetBlocks(snapshot), fileSize, isUc, 0L, size, false, inSnapshot, feInfo);
                if (loc == null)
                {
                    loc = new LocatedBlocks();
                }
                isEncrypted = (feInfo != null) ||
                              (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode(node)));
            }
            else
            {
                isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
            }
            int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot) : 0;
            INodeAttributes nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);
            HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.IsDirectory(),
                replication, blocksize, node.GetModificationTime(snapshot),
                node.GetAccessTime(snapshot), GetPermissionForFileStatus(nodeAttrs, isEncrypted),
                nodeAttrs.GetUserName(), nodeAttrs.GetGroupName(),
                node.IsSymlink() ? node.AsSymlink().GetSymlink() : null,
                path, node.GetId(), loc, childrenNum, feInfo, storagePolicy);

            // Set caching information for the located blocks.
            if (loc != null)
            {
                CacheManager cacheManager = fsd.GetFSNamesystem().GetCacheManager();
                foreach (LocatedBlock lb in loc.GetLocatedBlocks())
                {
                    cacheManager.SetCachedLocations(lb);
                }
            }
            return(status);
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeParentOnChildOff()
        {
            short replication = 1;
            Path  parent      = new Path(dir, "parent");
            Path  child       = new Path(parent, "child");

            dfs.Mkdirs(parent);
            dfs.Mkdirs(child);
            dfs.SetStoragePolicy(parent, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(parent, StorageType.Ssd, 3 * Blocksize);
            // Create file of size 2.5 * BLOCKSIZE under child directory
            // Verify parent Quota applies
            Path createdFile1 = new Path(child, "created_file1.data");
            long file1Len     = Blocksize * 2 + Blocksize / 2;
            int  bufLen       = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, replication, seed);
            INode fnode = fsdir.GetINode4Write(parent.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            long currentSSDConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                          .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, currentSSDConsumed);
            // Create the 2nd file of size BLOCKSIZE under child directory and expect quota exceeded exception
            Path createdFile2 = new Path(child, "created_file2.data");
            long file2Len     = Blocksize;
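            // file1 (2.5 blocks) plus file2 (1 block) would exceed the parent's
            // 3 * BLOCKSIZE SSD quota, even though "child" has no quota of its own.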

            try
            {
                DFSTestUtil.CreateFile(dfs, createdFile2, bufLen, file2Len, Blocksize, replication, seed);
                NUnit.Framework.Assert.Fail("Should have failed with QuotaByStorageTypeExceededException");
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
                currentSSDConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                         .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
                NUnit.Framework.Assert.AreEqual(file1Len, currentSSDConsumed);
            }
        }
        internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot GetSnapshot(
            INodesInPath inodesInPath, string name, int index)
        {
            if (name == null)
            {
                return(null);
            }
            INode inode = inodesInPath.GetINode(index - 1);

            return(inode.AsDirectory().GetSnapshot(DFSUtil.String2Bytes(name)));
        }
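        // Illustrative reading (hypothetical path): for "/foo/.snapshot/s1",
        // ".snapshot" is the component at `index`, so GetINode(index - 1) yields
        // the snapshottable directory "/foo", and snapshot "s1" is then looked
        // up by name.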
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateDelete()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 10);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 2.5 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2 + Blocksize / 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify space consumed and remaining quota
            long storageTypeConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                           .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, storageTypeConsumed);
            // Delete file and verify the consumed space of the storage type is updated
            dfs.Delete(createdFile1, false);
            storageTypeConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                      .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(0, storageTypeConsumed);
            QuotaCounts counts = new QuotaCounts.Builder().Build();

            fnode.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts, true);
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(),
                                            0, counts.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), 0);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), 0);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), 0);
        }
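        /// <exception cref="System.Exception"/>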
        public virtual void TestQuotaByStorageTypePersistenceInFsImage()
        {
            string MethodName   = GenericTestUtils.GetMethodName();
            Path   testDir      = new Path(dir, MethodName);
            Path   createdFile1 = new Path(testDir, "created_file1.data");

            dfs.Mkdirs(testDir);
            // set storage policy on testDir to ONESSD
            dfs.SetStoragePolicy(testDir, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on testDir
            long SsdQuota = Blocksize * 4;

            dfs.SetQuotaByStorageType(testDir, StorageType.Ssd, SsdQuota);
            INode testDirNode = fsdir.GetINode4Write(testDir.ToString());

            NUnit.Framework.Assert.IsTrue(testDirNode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(testDirNode.IsQuotaSet());
            // Create file of size 2 * BLOCKSIZE under testDir
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify SSD consumed before namenode restart
            long ssdConsumed = testDirNode.AsDirectory().GetDirectoryWithQuotaFeature()
                                   .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Restart the namenode with checkpoint to make sure fsImage is correct
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
            dfs.SaveNamespace();
            dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
            cluster.RestartNameNode(true);
            RefreshClusterState();
            INode testDirNodeAfterNNRestart = fsdir.GetINode4Write(testDir.ToString());

            // Verify against the freshly resolved INode: the pre-restart
            // reference is stale after the namenode restart.
            NUnit.Framework.Assert.IsTrue(testDirNodeAfterNNRestart.IsDirectory());
            NUnit.Framework.Assert.IsTrue(testDirNodeAfterNNRestart.IsQuotaSet());
            QuotaCounts qc = testDirNodeAfterNNRestart.GetQuotaCounts();

            NUnit.Framework.Assert.AreEqual(SsdQuota, qc.GetTypeSpace(StorageType.Ssd));
            foreach (StorageType t in StorageType.GetTypesSupportingQuota())
            {
                if (t != StorageType.Ssd)
                {
                    NUnit.Framework.Assert.AreEqual(HdfsConstants.QuotaReset, qc.GetTypeSpace(t));
                }
            }
            long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.AsDirectory()
                                                 .GetDirectoryWithQuotaFeature().GetSpaceConsumed()
                                                 .GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumedAfterNNRestart);
        }
 /// <summary>
 /// <inheritDoc/>
 /// <br/>
 /// To destroy a DstReference node, we first remove its link with the
 /// referred node. If the reference number of the referred node is &lt;= 0, we
 /// destroy the subtree of the referred node. Otherwise, we clean the
 /// referred node's subtree and delete everything created after the last
 /// rename operation, i.e., everything outside of the scope of the prior
 /// WithName nodes.
 /// </summary>
 public override void DestroyAndCollectBlocks(BlockStoragePolicySuite bsps,
                                              INode.BlocksMapUpdateInfo collectedBlocks,
                                              IList <INode> removedINodes)
 {
     if (RemoveReference(this) <= 0)
     {
         GetReferredINode().DestroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
     }
     else
     {
         // we will clean everything, including files, directories, and
         // snapshots, that were created after this prior snapshot
         int prior = GetPriorSnapshot(this);
         // prior must be non-null, otherwise we do not have any previous
         // WithName nodes, and the reference number will be 0.
          Preconditions.CheckState(
              prior != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId);
         // identify the snapshot created after prior
         int   snapshot = GetSelfSnapshot(prior);
         INode referred = GetReferredINode().AsReference().GetReferredINode();
         if (referred.IsFile())
         {
             // if referred is a file, it must be a file with snapshot since we did
             // recordModification before the rename
             INodeFile file = referred.AsFile();
             Preconditions.CheckState(file.IsWithSnapshot());
             // make sure we mark the file as deleted
             file.GetFileWithSnapshotFeature().DeleteCurrentFile();
             // when calling cleanSubtree of the referred node, since we
             // compute quota usage updates before calling this destroy
             // function, we use true for countDiffChange
             referred.CleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
         }
         else
         {
             if (referred.IsDirectory())
             {
                 // similarly, if referred is a directory, it must be an
                 // INodeDirectory with snapshot
                 INodeDirectory dir = referred.AsDirectory();
                 Preconditions.CheckState(dir.IsWithSnapshot());
                 try
                 {
                      DirectoryWithSnapshotFeature.DestroyDstSubtree(bsps, dir, snapshot, prior,
                                                                     collectedBlocks, removedINodes);
                 }
                 catch (QuotaExceededException e)
                 {
                     Log.Error("should not exceed quota while snapshot deletion", e);
                 }
             }
         }
     }
 }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateTruncate()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            // set storage policy on directory "foo" to ONESSD
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 4);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 2 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify SSD consumed before truncate
            long ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                   .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Truncate file to 1 * BLOCKSIZE
            int newFile1Len = Blocksize * 1;

            dfs.Truncate(createdFile1, newFile1Len);
            // Verify SSD consumed after truncate
            ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                              .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(newFile1Len, ssdConsumed);
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), newFile1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), newFile1Len);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), newFile1Len * 2);
        }
        /// <summary>
        /// Both traditional space quota and the storage type quota for SSD are set and
        /// not exceeded.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithTraditionalQuota()
        {
            Path foo = new Path(dir, "foo");

            dfs.Mkdirs(foo);
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 10);
            dfs.SetQuota(foo, long.MaxValue - 1, Replication * Blocksize * 10);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            Path createdFile = new Path(foo, "created_file.data");
            long fileLen     = Blocksize * 2 + Blocksize / 2;

            DFSTestUtil.CreateFile(dfs, createdFile, Blocksize / 16, fileLen, Blocksize, Replication, seed);
            QuotaCounts cnt = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();

            NUnit.Framework.Assert.AreEqual(2, cnt.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(fileLen * Replication, cnt.GetStorageSpace());
            dfs.Delete(createdFile, true);
            QuotaCounts cntAfterDelete = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();

            NUnit.Framework.Assert.AreEqual(1, cntAfterDelete.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(0, cntAfterDelete.GetStorageSpace());
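            // The remaining namespace count of 1 is the directory "foo" itself;
            // the deleted file's storage has been fully released.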
            // Validate the computeQuotaUsage()
            QuotaCounts counts = new QuotaCounts.Builder().Build();

            fnode.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts, true);
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 1,
                                            counts.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 0,
                                            counts.GetStorageSpace());
        }
 /// <summary>Cast INode to INodeDirectory.</summary>
 /// <exception cref="System.IO.FileNotFoundException"/>
 /// <exception cref="Org.Apache.Hadoop.FS.PathIsNotDirectoryException"/>
 public static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeDirectory ValueOf(INode inode, object path)
 {
     if (inode == null)
     {
         throw new FileNotFoundException("Directory does not exist: " + DFSUtil.Path2String(path));
     }
     if (!inode.IsDirectory())
     {
         throw new PathIsNotDirectoryException(DFSUtil.Path2String(path));
     }
     return(inode.AsDirectory());
 }
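 // Minimal usage sketch (hypothetical caller and path): resolve an INode to a
 // directory, letting ValueOf raise the documented exceptions for a missing or
 // non-directory node:
 //   INodeDirectory parent = INodeDirectory.ValueOf(fsdir.GetINode("/some/dir"), "/some/dir");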
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateRename()
        {
            Path foo = new Path(dir, "foo");

            dfs.Mkdirs(foo);
            Path createdFile1foo = new Path(foo, "created_file1.data");
            Path bar             = new Path(dir, "bar");

            dfs.Mkdirs(bar);
            Path createdFile1bar = new Path(bar, "created_file1.data");

            // set storage policy on directory "foo" and "bar" to ONESSD
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetStoragePolicy(bar, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 4);
            dfs.SetQuotaByStorageType(bar, StorageType.Ssd, Blocksize * 2);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 3 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 3;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1foo, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify space consumed and remaining quota
            long ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                   .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Move the file from "foo" to "bar"; its SSD usage (3 * BLOCKSIZE)
            // exceeds bar's SSD quota (2 * BLOCKSIZE), so the rename must fail
            try
            {
                dfs.Rename(createdFile1foo, createdFile1bar);
                NUnit.Framework.Assert.Fail("Should have failed with QuotaByStorageTypeExceededException");
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
            }
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), file1Len);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), file1Len * 2);
        }
        /// <exception cref="System.IO.IOException"/>
        internal static void UnprotectedSetStoragePolicy(FSDirectory fsd, BlockManager bm,
                                                         INodesInPath iip, byte policyId)
        {
            System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
            INode inode = iip.GetLastINode();

            if (inode == null)
            {
                throw new FileNotFoundException("File/Directory does not exist: " + iip.GetPath());
            }
            int snapshotId = iip.GetLatestSnapshotId();

            if (inode.IsFile())
            {
                BlockStoragePolicy newPolicy = bm.GetStoragePolicy(policyId);
                if (newPolicy.IsCopyOnCreateFile())
                {
                    throw new HadoopIllegalArgumentException("Policy " + newPolicy +
                                                             " cannot be set after file creation.");
                }
                BlockStoragePolicy currentPolicy = bm.GetStoragePolicy(inode.GetLocalStoragePolicyID());
                if (currentPolicy != null && currentPolicy.IsCopyOnCreateFile())
                {
                    throw new HadoopIllegalArgumentException("Existing policy " + currentPolicy.GetName() +
                                                             " cannot be changed after file creation.");
                }
                inode.AsFile().SetStoragePolicyID(policyId, snapshotId);
            }
            else
            {
                if (inode.IsDirectory())
                {
                    SetDirStoragePolicy(fsd, inode.AsDirectory(), policyId, snapshotId);
                }
                else
                {
                    throw new FileNotFoundException(iip.GetPath() + " is not a file or directory");
                }
            }
        }
            internal virtual bool AddSourceToDestination()
            {
                INode dstParent = dstParentIIP.GetLastINode();

                byte[] dstChildName = dstIIP.GetLastLocalName();
                INode  toDst;
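                // A non-null withCount means the source is still referenced from
                // a snapshot: wrap it in a DstReference under the destination
                // parent; otherwise the source INode is re-parented directly.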

                if (withCount == null)
                {
                    srcChild.SetLocalName(dstChildName);
                    toDst = srcChild;
                }
                else
                {
                    withCount.GetReferredINode().SetLocalName(dstChildName);
                    toDst = new INodeReference.DstReference(dstParent.AsDirectory(), withCount,
                                                            dstIIP.GetLatestSnapshotId());
                }
                return(fsd.AddLastINodeNoQuotaCheck(dstParentIIP, toDst) != null);
            }
 /// <exception cref="System.IO.IOException"/>
 private void Save(OutputStream @out, INode n)
 {
     if (n.IsDirectory())
     {
         Save(@out, n.AsDirectory());
     }
     else
     {
         if (n.IsFile())
         {
             Save(@out, n.AsFile());
         }
         else
         {
             if (n.IsSymlink())
             {
                 Save(@out, n.AsSymlink());
             }
         }
     }
 }
        /// <summary>Test allow-snapshot operation.</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestAllowSnapshot()
        {
            string pathStr = sub1.ToString();
            INode  before  = fsdir.GetINode(pathStr);

            // Before a directory is snapshottable
            NUnit.Framework.Assert.IsFalse(before.AsDirectory().IsSnapshottable());
            // After a directory is snapshottable
            Path path = new Path(pathStr);

            hdfs.AllowSnapshot(path);
            {
                INode after = fsdir.GetINode(pathStr);
                NUnit.Framework.Assert.IsTrue(after.AsDirectory().IsSnapshottable());
            }
            hdfs.DisallowSnapshot(path);
            {
                INode after = fsdir.GetINode(pathStr);
                NUnit.Framework.Assert.IsFalse(after.AsDirectory().IsSnapshottable());
            }
        }
        /// <summary>Test that the quota is correctly updated on file create</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaUpdateWithFileCreate()
        {
            Path foo         = new Path(dir, "foo");
            Path createdFile = new Path(foo, "created_file.data");

            dfs.Mkdirs(foo);
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            long fileLen = Blocksize * 2 + Blocksize / 2;

            DFSTestUtil.CreateFile(dfs, createdFile, Blocksize / 16, fileLen, Blocksize, Replication, seed);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            QuotaCounts cnt = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();
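            // Namespace consumption of 2 covers the directory "foo" plus the file;
            // storage consumption counts every replica of the 2.5-block file.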

            NUnit.Framework.Assert.AreEqual(2, cnt.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(fileLen * Replication, cnt.GetStorageSpace());
        }
 /// <summary>
 /// When destroying a reference node (WithName or DstReference), we call this
 /// method to identify the snapshot which is the latest snapshot before the
 /// reference node's creation.
 /// </summary>
 internal static int GetPriorSnapshot(Org.Apache.Hadoop.Hdfs.Server.Namenode.INodeReference @ref)
 {
     INodeReference.WithCount wc = (INodeReference.WithCount)@ref.GetReferredINode();
     INodeReference.WithName  wn = null;
     if (@ref is INodeReference.DstReference)
     {
         wn = wc.GetLastWithName();
     }
     else
     {
         if (@ref is INodeReference.WithName)
         {
             wn = wc.GetPriorWithName((INodeReference.WithName)@ref);
         }
     }
     if (wn != null)
     {
         INode referred = wc.GetReferredINode();
         if (referred.IsFile() && referred.AsFile().IsWithSnapshot())
         {
             return(referred.AsFile().GetDiffs().GetPrior(wn.lastSnapshotId));
         }
         else
         {
             if (referred.IsDirectory())
             {
                  DirectoryWithSnapshotFeature sf = referred.AsDirectory().GetDirectoryWithSnapshotFeature();
                 if (sf != null)
                 {
                     return(sf.GetDiffs().GetPrior(wn.lastSnapshotId));
                 }
             }
         }
     }
     return(Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId);
 }
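            /// <summary>
            /// Return the snapshot on the referred INode's diff list immediately
            /// prior to this reference's lastSnapshotId, or NoSnapshotId when the
            /// referred node keeps no snapshot diffs.
            /// </summary>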
            private int GetSelfSnapshot()
            {
                INode referred = GetReferredINode().AsReference().GetReferredINode();
                int   snapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId;

                if (referred.IsFile() && referred.AsFile().IsWithSnapshot())
                {
                    snapshot = referred.AsFile().GetDiffs().GetPrior(lastSnapshotId);
                }
                else
                {
                    if (referred.IsDirectory())
                    {
                        DirectoryWithSnapshotFeature sf = referred.AsDirectory().GetDirectoryWithSnapshotFeature();
                        if (sf != null)
                        {
                            snapshot = sf.GetDiffs().GetPrior(lastSnapshotId);
                        }
                    }
                }
                return(snapshot);
            }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeParentOffChildOff()
        {
            Path parent = new Path(dir, "parent");
            Path child  = new Path(parent, "child");

            dfs.Mkdirs(parent);
            dfs.Mkdirs(child);
            dfs.SetStoragePolicy(parent, HdfsConstants.OnessdStoragePolicyName);
            // Create file of size 2.5 * BLOCKSIZE under child directory.
            // Since both parent and child directory do not have SSD quota set,
            // expect succeed without exception
            Path createdFile1 = new Path(child, "created_file1.data");
            long file1Len     = Blocksize * 2 + Blocksize / 2;
            int  bufLen       = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify SSD usage at the root level as both parent/child don't have DirectoryWithQuotaFeature
            INode fnode       = fsdir.GetINode4Write("/");
            long  ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                    .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
        }
        /// <exception cref="System.Exception"/>
        private void TestQuotaByStorageTypeOrTraditionalQuotaExceededCase(
            long storageSpaceQuotaInBlocks, long ssdQuotaInBlocks,
            long testFileLenInBlocks, short replication)
        {
            string MethodName = GenericTestUtils.GetMethodName();
            Path   testDir    = new Path(dir, MethodName);

            dfs.Mkdirs(testDir);
            dfs.SetStoragePolicy(testDir, HdfsConstants.OnessdStoragePolicyName);
            long ssdQuota          = Blocksize * ssdQuotaInBlocks;
            long storageSpaceQuota = Blocksize * storageSpaceQuotaInBlocks;

            dfs.SetQuota(testDir, long.MaxValue - 1, storageSpaceQuota);
            dfs.SetQuotaByStorageType(testDir, StorageType.Ssd, ssdQuota);
            INode testDirNode = fsdir.GetINode4Write(testDir.ToString());

            NUnit.Framework.Assert.IsTrue(testDirNode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(testDirNode.IsQuotaSet());
            Path createdFile = new Path(testDir, "created_file.data");
            long fileLen     = testFileLenInBlocks * Blocksize;

            try
            {
                DFSTestUtil.CreateFile(dfs, createdFile, Blocksize / 16, fileLen, Blocksize, replication, seed);
                NUnit.Framework.Assert.Fail("Should have failed with DSQuotaExceededException or "
                                            + "QuotaByStorageTypeExceededException");
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
                long currentSSDConsumed = testDirNode.AsDirectory().GetDirectoryWithQuotaFeature()
                                              .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
                NUnit.Framework.Assert.AreEqual(Math.Min(ssdQuota, storageSpaceQuota / replication),
                                                currentSSDConsumed);
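                // Whichever limit binds first caps the SSD usage asserted above:
                // the SSD type quota itself, or the storage-space quota spread
                // across `replication` replicas.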
            }
        }
        /// <summary>
        /// Get a partial listing of the indicated directory.
        /// We will stop when any of the following conditions is met:
        /// 1) this.lsLimit files have been added
        /// 2) needLocation is true AND enough files have been added such
        /// that at least this.lsLimit block locations are in the response
        /// </summary>
        /// <param name="fsd">FSDirectory</param>
        /// <param name="iip">
        /// the INodesInPath instance containing all the INodes along the
        /// path
        /// </param>
        /// <param name="src">the directory name</param>
        /// <param name="startAfter">the name to start listing after</param>
        /// <param name="needLocation">if block locations are returned</param>
        /// <returns>a partial listing starting after startAfter</returns>
        /// <exception cref="System.IO.IOException"/>
        private static DirectoryListing GetListing(FSDirectory fsd, INodesInPath iip,
                                                   string src, byte[] startAfter,
                                                   bool needLocation, bool isSuperUser)
        {
            string srcs      = FSDirectory.NormalizePath(src);
            bool   isRawPath = FSDirectory.IsReservedRawName(src);

            fsd.ReadLock();
            try
            {
                if (srcs.EndsWith(HdfsConstants.SeparatorDotSnapshotDir))
                {
                    return(GetSnapshotsListing(fsd, srcs, startAfter));
                }
                int   snapshot   = iip.GetPathSnapshotId();
                INode targetNode = iip.GetLastINode();
                if (targetNode == null)
                {
                    return(null);
                }
                byte parentStoragePolicy = isSuperUser ? targetNode.GetStoragePolicyID()
                                           : BlockStoragePolicySuite.IdUnspecified;
                if (!targetNode.IsDirectory())
                {
                    return(new DirectoryListing(new HdfsFileStatus[] {
                        CreateFileStatus(fsd, src, HdfsFileStatus.EmptyName, targetNode, needLocation,
                                         parentStoragePolicy, snapshot, isRawPath, iip) }, 0));
                }
                INodeDirectory       dirInode = targetNode.AsDirectory();
                ReadOnlyList <INode> contents = dirInode.GetChildrenList(snapshot);
                int startChild           = INodeDirectory.NextChild(contents, startAfter);
                int totalNumChildren     = contents.Size();
                int numOfListing         = Math.Min(totalNumChildren - startChild, fsd.GetLsLimit());
                int locationBudget       = fsd.GetLsLimit();
                int listingCnt           = 0;
                HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
                for (int i = 0; i < numOfListing && locationBudget > 0; i++)
                {
                    INode cur       = contents.Get(startChild + i);
                    byte  curPolicy = isSuperUser && !cur.IsSymlink()
                                      ? cur.GetLocalStoragePolicyID() : BlockStoragePolicySuite.IdUnspecified;
                    listing[i] = CreateFileStatus(fsd, src, cur.GetLocalNameBytes(), cur, needLocation,
                                                  GetStoragePolicyID(curPolicy, parentStoragePolicy),
                                                  snapshot, isRawPath, iip);
                    listingCnt++;
                    if (needLocation)
                    {
                        // Once we hit lsLimit locations, stop.
                        // This helps to prevent excessively large response payloads.
                        // Approximate #locations with locatedBlockCount() * repl_factor
                        LocatedBlocks blks = ((HdfsLocatedFileStatus)listing[i]).GetBlockLocations();
                        locationBudget -= (blks == null) ? 0
                                          : blks.LocatedBlockCount() * listing[i].GetReplication();
                    }
                }
                // truncate return array if necessary
                if (listingCnt < numOfListing)
                {
                    listing = Arrays.CopyOf(listing, listingCnt);
                }
                return(new DirectoryListing(listing, totalNumChildren - startChild - listingCnt));
            }
            finally
            {
                fsd.ReadUnlock();
            }
        }
        /// <summary>Retrieve existing INodes from a path.</summary>
        /// <remarks>
        /// Retrieve existing INodes from a path. For non-snapshot path,
        /// the number of INodes is equal to the number of path components. For
        /// snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
        /// (number_of_path_components - 1).
        /// An UnresolvedPathException is always thrown when an intermediate path
        /// component refers to a symbolic link. If the final path component refers
        /// to a symbolic link then an UnresolvedPathException is only thrown if
        /// resolveLink is true.
        /// <p>
        /// Example: <br />
        /// Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
        /// following path components: ["","c1","c2","c3"]
        /// <p>
        /// <code>getExistingPathINodes(["","c1","c2"])</code> should fill
        /// the array with [rootINode,c1,c2], <br />
        /// <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
        /// fill the array with [rootINode,c1,c2,null]
        /// </remarks>
        /// <param name="startingDir">the starting directory</param>
        /// <param name="components">array of path component name</param>
        /// <param name="resolveLink">
        /// indicates whether UnresolvedLinkException should
        /// be thrown when the path refers to a symbolic link.
        /// </param>
        /// <returns>the specified number of existing INodes in the path</returns>
        /// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
        internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath Resolve(INodeDirectory
                                                                                    startingDir, byte[][] components, bool resolveLink)
        {
            Preconditions.CheckArgument(startingDir.CompareTo(components[0]) == 0);
            INode curNode  = startingDir;
            int   count    = 0;
            int   inodeNum = 0;

            INode[] inodes     = new INode[components.Length];
            bool    isSnapshot = false;
            int     snapshotId = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
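            // CurrentStateId means "no snapshot in effect yet"; it is replaced
            // below as snapshottable ancestors (or an explicit ".snapshot"
            // component) are encountered along the path.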

            while (count < components.Length && curNode != null)
            {
                bool lastComp = (count == components.Length - 1);
                inodes[inodeNum++] = curNode;
                bool           isRef = curNode.IsReference();
                bool           isDir = curNode.IsDirectory();
                INodeDirectory dir   = isDir ? curNode.AsDirectory() : null;
                if (!isRef && isDir && dir.IsWithSnapshot())
                {
                    // If the path is a non-snapshot path, update the latest snapshot id.
                    if (!isSnapshot && ShouldUpdateLatestId(dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId
                                                                (), snapshotId))
                    {
                        snapshotId = dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId();
                    }
                }
                else
                {
                    if (isRef && isDir && !lastComp)
                    {
                        // If the curNode is a reference node, need to check its dstSnapshot:
                        // 1. if the existing snapshot is no later than the dstSnapshot (which
                        // is the latest snapshot in dst before the rename), the changes
                        // should be recorded in previous snapshots (belonging to src).
                        // 2. however, if the ref node is already the last component, we still
                        // need to know the latest snapshot among the ref node's ancestors,
                        // in case of processing a deletion operation. Thus we do not overwrite
                        // the latest snapshot if lastComp is true. If the operation is
                        // a modification operation, we do a similar check in the
                        // corresponding recordModification method.
                        if (!isSnapshot)
                        {
                            int dstSnapshotId = curNode.AsReference().GetDstSnapshotId();
                            if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId ||
                                (dstSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId &&
                                 dstSnapshotId >= snapshotId))
                            {
                                // either no snapshot exists in the dst tree of the
                                // rename, or the existing snapshot is no later than
                                // dstSnapshot (scenario 1 above)
                                int lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                                DirectoryWithSnapshotFeature sf;
                                if (curNode.IsDirectory() && (sf = curNode.AsDirectory().GetDirectoryWithSnapshotFeature
                                                                       ()) != null)
                                {
                                    lastSnapshot = sf.GetLastSnapshotId();
                                }
                                snapshotId = lastSnapshot;
                            }
                        }
                    }
                }
                if (curNode.IsSymlink() && (!lastComp || resolveLink))
                {
                    string path      = ConstructPath(components, 0, components.Length);
                    string preceding = ConstructPath(components, 0, count);
                    string remainder = ConstructPath(components, count + 1, components.Length);
                    string link      = DFSUtil.Bytes2String(components[count]);
                    string target    = curNode.AsSymlink().GetSymlinkString();
                    if (Log.IsDebugEnabled())
                    {
                        Log.Debug("UnresolvedPathException " + " path: " + path + " preceding: " + preceding
                                  + " count: " + count + " link: " + link + " target: " + target + " remainder: "
                                  + remainder);
                    }
                    throw new UnresolvedPathException(path, preceding, remainder, target);
                }
                if (lastComp || !isDir)
                {
                    break;
                }
                byte[] childName = components[count + 1];
                // check if the next byte[] in components is for ".snapshot"
                if (IsDotSnapshotDir(childName) && dir.IsSnapshottable())
                {
                    // skip the ".snapshot" in components
                    count++;
                    isSnapshot = true;
                    // check if ".snapshot" is the last element of components
                    if (count == components.Length - 1)
                    {
                        break;
                    }
                    // Resolve snapshot root
                    Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = dir.GetSnapshot(components
                                                                                                 [count + 1]);
                    if (s == null)
                    {
                        // snapshot not found
                        curNode = null;
                    }
                    else
                    {
                        curNode    = s.GetRoot();
                        snapshotId = s.GetId();
                    }
                }
                else
                {
                    // normal case, and also for resolving file/dir under snapshot root
                    curNode = dir.GetChild(childName, isSnapshot ? snapshotId : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot
                                           .CurrentStateId);
                }
                count++;
            }
            if (isSnapshot && !IsDotSnapshotDir(components[components.Length - 1]))
            {
                // for a snapshot path, shrink the inode array; however, for a path
                // ending with ".snapshot", keep the trailing null inode in the array
                INode[] newNodes = new INode[components.Length - 1];
                System.Array.Copy(inodes, 0, newNodes, 0, newNodes.Length);
                inodes = newNodes;
            }
            return(new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath(inodes, components
                                                                           , isSnapshot, snapshotId));
        }
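        // A hedged sketch of the inode-count rule documented above: a normal
        // path gets one inode slot per component, while a snapshot path (where
        // ".snapshot" consumes a component without adding an inode) gets one
        // slot fewer -- unless the path ends in ".snapshot" itself, in which
        // case the trailing null slot is kept. ExpectedInodeCount is a
        // hypothetical helper for illustration, not part of this class.
        internal static int ExpectedInodeCount(string[] components)
        {
            bool hasDotSnapshot  = System.Array.IndexOf(components, ".snapshot") >= 0;
            bool endsDotSnapshot = components.Length > 0 &&
                                   components[components.Length - 1] == ".snapshot";
            // e.g. ["", "foo", ".snapshot", "s1", "bar"] -> 4 inodes
            // e.g. ["", "c1", "c2", "c3"]                -> 4 inodes
            // e.g. ["", "foo", ".snapshot"]              -> 3 inodes (null kept)
            return (hasDotSnapshot && !endsDotSnapshot) ? components.Length - 1
                                                        : components.Length;
        }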