/// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithSnapshot()
        {
            Path sub1 = new Path(dir, "Sub1");

            dfs.Mkdirs(sub1);
            // Setup ONE_SSD policy and SSD quota of 4 * BLOCKSIZE on sub1
            dfs.SetStoragePolicy(sub1, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(sub1, StorageType.Ssd, 4 * Blocksize);
            INode sub1Node = fsdir.GetINode4Write(sub1.ToString());

            NUnit.Framework.Assert.IsTrue(sub1Node.IsDirectory());
            NUnit.Framework.Assert.IsTrue(sub1Node.IsQuotaSet());
            // Create file1 of size 2 * BLOCKSIZE under sub1
            Path file1    = new Path(sub1, "file1");
            long file1Len = 2 * Blocksize;

            DFSTestUtil.CreateFile(dfs, file1, file1Len, Replication, seed);
            // Create snapshot on sub1 named s1
            SnapshotTestHelper.CreateSnapshot(dfs, sub1, "s1");
            // Verify sub1 SSD usage is unchanged after creating snapshot s1
            long ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature()
                                   .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Delete file1
            dfs.Delete(file1, false);
            // Verify sub1 SSD usage is unchanged due to the existence of snapshot s1
            ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature()
                              .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            QuotaCounts counts1 = new QuotaCounts.Builder().Build();

            sub1Node.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts1, true);
            NUnit.Framework.Assert.AreEqual(sub1Node.DumpTreeRecursively().ToString(), file1Len,
                                            counts1.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs1 = dfs.GetContentSummary(sub1);

            NUnit.Framework.Assert.AreEqual(cs1.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs1.GetTypeConsumed(StorageType.Ssd), file1Len);
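            // ONE_SSD keeps one replica of each block on SSD; the remaining Replication - 1
            // replicas (2 here) land on DISK, which is why DISK consumption is file1Len * 2.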
            NUnit.Framework.Assert.AreEqual(cs1.GetTypeConsumed(StorageType.Disk), file1Len * 2);
            // Delete the snapshot s1
            dfs.DeleteSnapshot(sub1, "s1");
            // Verify sub1 SSD usage is fully reclaimed and changed to 0
            ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature()
                              .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(0, ssdConsumed);
            QuotaCounts counts2 = new QuotaCounts.Builder().Build();

            sub1Node.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts2, true);
            NUnit.Framework.Assert.AreEqual(sub1Node.DumpTreeRecursively().ToString(), 0,
                                            counts2.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs2 = dfs.GetContentSummary(sub1);

            NUnit.Framework.Assert.AreEqual(cs2.GetSpaceConsumed(), 0);
            NUnit.Framework.Assert.AreEqual(cs2.GetTypeConsumed(StorageType.Ssd), 0);
            NUnit.Framework.Assert.AreEqual(cs2.GetTypeConsumed(StorageType.Disk), 0);
        }
Example #2
        /// <summary>
        /// Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
        /// to dstInodes[dstInodes.length-1].
        /// </summary>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
        private static void VerifyQuotaForRename(FSDirectory fsd, INodesInPath src,
                                                 INodesInPath dst)
        {
            if (!fsd.GetFSNamesystem().IsImageLoaded() || fsd.ShouldSkipQuotaChecks())
            {
                // Do not check quota if edits log is still being processed
                return;
            }
            int i = 0;

            while (src.GetINode(i) == dst.GetINode(i))
            {
                i++;
            }
            // src[i - 1] is the last common ancestor.
            BlockStoragePolicySuite bsps  = fsd.GetBlockStoragePolicySuite();
            QuotaCounts             delta = src.GetLastINode().ComputeQuotaUsage(bsps);
            // Reduce the required quota by dst that is being removed
            INode dstINode = dst.GetLastINode();

            if (dstINode != null)
            {
                delta.Subtract(dstINode.ComputeQuotaUsage(bsps));
            }
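            // Only ancestors on the dst path below the last common ancestor need to be
            // re-checked; usage at and above src[i - 1] is unchanged by the rename.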
            FSDirectory.VerifyQuota(dst, dst.Length() - 1, delta, src.GetINode(i - 1));
        }
Example #3
 /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
 internal virtual void UpdateQuotasInSourceTree(BlockStoragePolicySuite bsps)
 {
     // update the quota usage in src tree
     if (isSrcInSnapshot)
     {
         // get the counts after rename
         QuotaCounts newSrcCounts = srcChild.ComputeQuotaUsage(
             bsps, new QuotaCounts.Builder().Build(), false);
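         // Charge srcParent only with the delta caused by the rename: the usage
         // computed now minus the usage recorded before the rename (oldSrcCounts).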
         newSrcCounts.Subtract(oldSrcCounts);
         srcParent.AddSpaceConsumed(newSrcCounts, false);
     }
 }
Example #4
            public sealed override QuotaCounts ComputeQuotaUsage(BlockStoragePolicySuite bsps,
                                                                 byte blockStoragePolicyId, QuotaCounts counts,
                                                                 bool useCache, int lastSnapshotId)
            {
                // if this.lastSnapshotId < lastSnapshotId, the rename of the referred
                // node happened before the rename of its ancestor. This should be
                // impossible since for WithName node we only count its children at the
                // time of the rename.
                Preconditions.CheckState(lastSnapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                                         || this.lastSnapshotId >= lastSnapshotId);
                INode referred = this.GetReferredINode().AsReference().GetReferredINode();
                // We will continue the quota usage computation using the same snapshot id
                // as time line (if the given snapshot id is valid). Also, we cannot use
                // cache for the referred node since its cached quota may have already
                // been updated by changes in the current tree.
                int id = lastSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                         ? lastSnapshotId : this.lastSnapshotId;

                return referred.ComputeQuotaUsage(bsps, blockStoragePolicyId, counts, false, id);
            }
Example #5
        /// <summary>
        /// Undo the rename operation for the dst tree, i.e., if the rename operation
        /// (with OVERWRITE option) removes a file/dir from the dst tree, add it back
        /// and delete possible record in the deleted list.
        /// </summary>
        /// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
        public virtual void UndoRename4DstParent(BlockStoragePolicySuite bsps, INode deletedChild,
                                                 int latestSnapshotId)
        {
            DirectoryWithSnapshotFeature sf = GetDirectoryWithSnapshotFeature();

            Preconditions.CheckState(sf != null, "Directory does not have snapshot feature");
            bool removeDeletedChild = sf.GetDiffs().RemoveChild(Diff.ListType.Deleted, deletedChild);
            int sid = removeDeletedChild
                      ? Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                      : latestSnapshotId;
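            // Re-add the child: in the current state if it came out of the deleted list,
            // otherwise under the latest snapshot id.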
            bool added = AddChild(deletedChild, true, sid);

            // update quota usage if the child is added successfully and it has not
            // been stored in the deleted list before
            if (added && !removeDeletedChild)
            {
                QuotaCounts counts = deletedChild.ComputeQuotaUsage(bsps);
                AddSpaceConsumed(counts, false);
            }
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateDelete()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 10);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 2.5 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2 + Blocksize / 2;
            int  bufLen   = Blocksize / 16;
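            // Under ONE_SSD only one replica of each block counts against the SSD quota,
            // so the 2.5-block file created below fits well within the 10-block limit.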

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify space consumed and remaining quota
            long storageTypeConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                           .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, storageTypeConsumed);
            // Delete file and verify the consumed space of the storage type is updated
            dfs.Delete(createdFile1, false);
            storageTypeConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                      .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(0, storageTypeConsumed);
            QuotaCounts counts = new QuotaCounts.Builder().Build();

            fnode.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts, true);
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 0,
                                            counts.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), 0);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), 0);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), 0);
        }
        /// <summary>
        /// Both traditional space quota and the storage type quota for SSD are set and
        /// not exceeded.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithTraditionalQuota()
        {
            Path foo = new Path(dir, "foo");

            dfs.Mkdirs(foo);
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 10);
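            // Traditional quota: an effectively unlimited namespace quota plus a
            // storage-space quota sized for 10 full blocks across all replicas.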
            dfs.SetQuota(foo, long.MaxValue - 1, Replication * Blocksize * 10);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            Path createdFile = new Path(foo, "created_file.data");
            long fileLen     = Blocksize * 2 + Blocksize / 2;

            DFSTestUtil.CreateFile(dfs, createdFile, Blocksize / 16, fileLen, Blocksize, Replication, seed);
            QuotaCounts cnt = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();

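            // Namespace usage is 2 (the directory plus the file); storage space counts
            // every replica of the file.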
            NUnit.Framework.Assert.AreEqual(2, cnt.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(fileLen * Replication, cnt.GetStorageSpace());
            dfs.Delete(createdFile, true);
            QuotaCounts cntAfterDelete = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                             .GetSpaceConsumed();

            NUnit.Framework.Assert.AreEqual(1, cntAfterDelete.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(0, cntAfterDelete.GetStorageSpace());
            // Validate the computeQuotaUsage()
            QuotaCounts counts = new QuotaCounts.Builder().Build();

            fnode.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts, true);
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 1,
                                            counts.GetNameSpace());
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 0,
                                            counts.GetStorageSpace());
        }
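
The examples above all revolve around the same client-facing pattern: pick a storage policy, set a per-type quota, and read consumption back through ContentSummary. The lines below are a minimal sketch of that pattern, not taken from any example above; they assume an already-initialized DistributedFileSystem handle named dfs and a Blocksize constant like the ones used in the tests, and the "/quotaDemo" path is purely illustrative.

        // Minimal usage sketch (illustrative only): set ONE_SSD and an SSD quota on a
        // directory, then query per-type consumption through ContentSummary.
        Path quotaDir = new Path("/quotaDemo");                                // hypothetical path
        dfs.Mkdirs(quotaDir);
        dfs.SetStoragePolicy(quotaDir, HdfsConstants.OnessdStoragePolicyName); // one replica on SSD, rest on DISK
        dfs.SetQuotaByStorageType(quotaDir, StorageType.Ssd, 4 * Blocksize);   // cap SSD usage at 4 blocks

        ContentSummary summary = dfs.GetContentSummary(quotaDir);
        long ssdConsumed   = summary.GetTypeConsumed(StorageType.Ssd);         // bytes charged against the SSD quota
        long totalConsumed = summary.GetSpaceConsumed();                       // bytes across all replicas and types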