public virtual void TestConcatWithQuotaDecrease()
        {
            short srcRepl = 3; // note: this differs from ReplFactor
            int  srcNum = 10;
            Path foo    = new Path("/foo");

            Path[] srcs   = new Path[srcNum];
            Path   target = new Path(foo, "target");

            DFSTestUtil.CreateFile(dfs, target, blockSize, ReplFactor, 0L);
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            for (int i = 0; i < srcNum; i++)
            {
                srcs[i] = new Path(foo, "src" + i);
                DFSTestUtil.CreateFile(dfs, srcs[i], blockSize * 2, srcRepl, 0L);
            }
            ContentSummary summary = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(11, summary.GetFileCount());
            NUnit.Framework.Assert.AreEqual(blockSize * ReplFactor + blockSize * 2 * srcRepl * srcNum,
                                            summary.GetSpaceConsumed());
            dfs.Concat(target, srcs);
            summary = dfs.GetContentSummary(foo);
            NUnit.Framework.Assert.AreEqual(1, summary.GetFileCount());
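            // After concat, the source blocks are accounted at the target's replication (ReplFactor)
            // instead of srcRepl, which is why the expected space consumption below changes.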
            NUnit.Framework.Assert.AreEqual(blockSize * ReplFactor + blockSize * 2 * ReplFactor * srcNum,
                                            summary.GetSpaceConsumed());
        }
        /// <summary>Test if the quota can be correctly updated for append</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpdateQuotaForAppend()
        {
            Path foo            = new Path(dir, "foo");
            Path bar            = new Path(foo, "bar");
            long currentFileLen = Blocksize;

            DFSTestUtil.CreateFile(dfs, bar, currentFileLen, Replication, seed);
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            // append half of the block data, the previous file length is at block
            // boundary
            DFSTestUtil.AppendFile(dfs, bar, Blocksize / 2);
            currentFileLen += (Blocksize / 2);
            INodeDirectory fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();

            NUnit.Framework.Assert.IsTrue(fooNode.IsQuotaSet());
            QuotaCounts quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            long        ns    = quota.GetNameSpace();
            long        ds    = quota.GetStorageSpace();

            NUnit.Framework.Assert.AreEqual(2, ns); // foo and bar
            NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
            ContentSummary c = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
            // append another block, the previous file length is not at block boundary
            DFSTestUtil.AppendFile(dfs, bar, Blocksize);
            currentFileLen += Blocksize;
            quota           = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns              = quota.GetNameSpace();
            ds              = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns); // foo and bar
            NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
            c = dfs.GetContentSummary(foo);
            NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
            // append several blocks
            DFSTestUtil.AppendFile(dfs, bar, Blocksize * 3 + Blocksize / 8);
            currentFileLen += (Blocksize * 3 + Blocksize / 8);
            quota           = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns              = quota.GetNameSpace();
            ds              = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns); // foo and bar
            NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
            c = dfs.GetContentSummary(foo);
            NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestContentSummaryWithoutQuotaByStorageType()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            // set storage policy on directory "foo" to ONESSD
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(!fnode.IsQuotaSet());
            // Create file of size 2 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify getContentSummary without any quota set
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), file1Len);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), file1Len * 2);
        }
            /// <exception cref="System.Exception"/>
            public Void Run()
            {
                string       path = ServletUtil.GetDecodedPath(request, "/contentSummary");
                PrintWriter  @out = response.GetWriter();
                XMLOutputter xml  = new XMLOutputter(@out, "UTF-8");

                xml.Declaration();
                try
                {
                    ClientProtocol nnproxy = this._enclosing.CreateNameNodeProxy();
                    ContentSummary cs      = nnproxy.GetContentSummary(path);
                    xml.StartTag(typeof(ContentSummary).FullName);
                    if (cs != null)
                    {
                        xml.Attribute("length", string.Empty + cs.GetLength());
                        xml.Attribute("fileCount", string.Empty + cs.GetFileCount());
                        xml.Attribute("directoryCount", string.Empty + cs.GetDirectoryCount());
                        xml.Attribute("quota", string.Empty + cs.GetQuota());
                        xml.Attribute("spaceConsumed", string.Empty + cs.GetSpaceConsumed());
                        xml.Attribute("spaceQuota", string.Empty + cs.GetSpaceQuota());
                    }
                    xml.EndTag();
                }
                catch (IOException ioe)
                {
                    this._enclosing.WriteXml(ioe, path, xml);
                }
                xml.EndDocument();
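                // For reference, a successful response looks roughly like the following (values are
                // illustrative; the element name is whatever typeof(ContentSummary).FullName resolves to):
                //   <?xml version="1.0" encoding="UTF-8"?>
                //   <...ContentSummary length="1024" fileCount="1" directoryCount="1"
                //                      quota="-1" spaceConsumed="3072" spaceQuota="-1"/>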
                return(null);
            }
        /// <exception cref="System.Exception"/>
        private void TestContentSummary()
        {
            FileSystem   fs   = FileSystem.Get(GetProxiedFSConf());
            Path         path = new Path(GetProxiedFSTestDir(), "foo.txt");
            OutputStream os   = fs.Create(path);

            os.Write(1);
            os.Close();
            ContentSummary hdfsContentSummary = fs.GetContentSummary(path);

            fs.Close();
            fs = GetHttpFSFileSystem();
            ContentSummary httpContentSummary = fs.GetContentSummary(path);

            fs.Close();
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetDirectoryCount(),
                                            hdfsContentSummary.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetFileCount(),
                                            hdfsContentSummary.GetFileCount());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetLength(),
                                            hdfsContentSummary.GetLength());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetQuota(),
                                            hdfsContentSummary.GetQuota());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetSpaceConsumed(),
                                            hdfsContentSummary.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetSpaceQuota(),
                                            hdfsContentSummary.GetSpaceQuota());
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestContentSummaryWithoutStoragePolicy()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(!fnode.IsQuotaSet());
            // Create file of size 2 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify getContentSummary without any quota set
            // Expect no type quota and usage information available
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), file1Len * Replication);
            foreach (StorageType t in StorageType.Values())
            {
                NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(t), 0);
                NUnit.Framework.Assert.AreEqual(cs.GetTypeQuota(t), -1);
            }
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithSnapshot()
        {
            Path sub1 = new Path(dir, "Sub1");

            dfs.Mkdirs(sub1);
            // Setup ONE_SSD policy and SSD quota of 4 * BLOCKSIZE on sub1
            dfs.SetStoragePolicy(sub1, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(sub1, StorageType.Ssd, 4 * Blocksize);
            INode sub1Node = fsdir.GetINode4Write(sub1.ToString());

            NUnit.Framework.Assert.IsTrue(sub1Node.IsDirectory());
            NUnit.Framework.Assert.IsTrue(sub1Node.IsQuotaSet());
            // Create file1 of size 2 * BLOCKSIZE under sub1
            Path file1    = new Path(sub1, "file1");
            long file1Len = 2 * Blocksize;

            DFSTestUtil.CreateFile(dfs, file1, file1Len, Replication, seed);
            // Create snapshot on sub1 named s1
            SnapshotTestHelper.CreateSnapshot(dfs, sub1, "s1");
            // Verify sub1 SSD usage is unchanged after creating snapshot s1
            long ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature()
                               .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Delete file1
            dfs.Delete(file1, false);
            // Verify sub1 SSD usage is unchanged due to the existence of snapshot s1
            ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature()
                          .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            QuotaCounts counts1 = new QuotaCounts.Builder().Build();

            sub1Node.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts1, true);
            NUnit.Framework.Assert.AreEqual(sub1Node.DumpTreeRecursively().ToString(), file1Len,
                                            counts1.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs1 = dfs.GetContentSummary(sub1);

            NUnit.Framework.Assert.AreEqual(cs1.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs1.GetTypeConsumed(StorageType.Ssd), file1Len);
            NUnit.Framework.Assert.AreEqual(cs1.GetTypeConsumed(StorageType.Disk), file1Len * 2);
            // Delete the snapshot s1
            dfs.DeleteSnapshot(sub1, "s1");
            // Verify sub1 SSD usage is fully reclaimed and changed to 0
            ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature()
                          .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(0, ssdConsumed);
            QuotaCounts counts2 = new QuotaCounts.Builder().Build();

            sub1Node.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts2, true);
            NUnit.Framework.Assert.AreEqual(sub1Node.DumpTreeRecursively().ToString(), 0,
                                            counts2.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs2 = dfs.GetContentSummary(sub1);

            NUnit.Framework.Assert.AreEqual(cs2.GetSpaceConsumed(), 0);
            NUnit.Framework.Assert.AreEqual(cs2.GetTypeConsumed(StorageType.Ssd), 0);
            NUnit.Framework.Assert.AreEqual(cs2.GetTypeConsumed(StorageType.Disk), 0);
        }
        /// <summary>
        /// Test that quotas are properly tracked by the standby through
        /// create, append, delete.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotasTrackedOnStandby()
        {
            fs.Mkdirs(TestDir);
            DistributedFileSystem dfs = (DistributedFileSystem)fs;

            dfs.SetQuota(TestDir, NsQuota, DsQuota);
            long expectedSize = 3 * BlockSize + BlockSize / 2;

            DFSTestUtil.CreateFile(fs, TestFile, expectedSize, (short)1, 1L);
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            ContentSummary cs = nn1.GetRpcServer().GetContentSummary(TestDirStr);

            NUnit.Framework.Assert.AreEqual(NsQuota, cs.GetQuota());
            NUnit.Framework.Assert.AreEqual(DsQuota, cs.GetSpaceQuota());
            NUnit.Framework.Assert.AreEqual(expectedSize, cs.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(1, cs.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(1, cs.GetFileCount());
            // Append to the file and make sure quota is updated correctly.
            FSDataOutputStream stm = fs.Append(TestFile);

            try
            {
                byte[] data = new byte[(int)(BlockSize * 3 / 2)];
                stm.Write(data);
                expectedSize += data.Length;
            }
            finally
            {
                IOUtils.CloseStream(stm);
            }
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            cs = nn1.GetRpcServer().GetContentSummary(TestDirStr);
            NUnit.Framework.Assert.AreEqual(NsQuota, cs.GetQuota());
            NUnit.Framework.Assert.AreEqual(DsQuota, cs.GetSpaceQuota());
            NUnit.Framework.Assert.AreEqual(expectedSize, cs.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(1, cs.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(1, cs.GetFileCount());
            fs.Delete(TestFile, true);
            expectedSize = 0;
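            // Only the quota'd directory itself should remain: 1 directory, 0 files, 0 bytes consumed.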
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            cs = nn1.GetRpcServer().GetContentSummary(TestDirStr);
            NUnit.Framework.Assert.AreEqual(NsQuota, cs.GetQuota());
            NUnit.Framework.Assert.AreEqual(DsQuota, cs.GetSpaceQuota());
            NUnit.Framework.Assert.AreEqual(expectedSize, cs.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(1, cs.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(0, cs.GetFileCount());
        }
        public virtual void TestConcatWithQuotaIncrease()
        {
            short repl   = 3;
            int   srcNum = 10;
            Path  foo    = new Path("/foo");
            Path  bar    = new Path(foo, "bar");

            Path[] srcs   = new Path[srcNum];
            Path   target = new Path(bar, "target");

            DFSTestUtil.CreateFile(dfs, target, blockSize, repl, 0L);
            long dsQuota = blockSize * repl + blockSize * srcNum * ReplFactor;

            dfs.SetQuota(foo, long.MaxValue - 1, dsQuota);
            for (int i = 0; i < srcNum; i++)
            {
                srcs[i] = new Path(bar, "src" + i);
                DFSTestUtil.CreateFile(dfs, srcs[i], blockSize, ReplFactor, 0L);
            }
            ContentSummary summary = dfs.GetContentSummary(bar);

            NUnit.Framework.Assert.AreEqual(11, summary.GetFileCount());
            NUnit.Framework.Assert.AreEqual(dsQuota, summary.GetSpaceConsumed());
            try
            {
                dfs.Concat(target, srcs);
                NUnit.Framework.Assert.Fail("QuotaExceededException expected");
            }
            catch (RemoteException e)
            {
                NUnit.Framework.Assert.IsTrue(e.UnwrapRemoteException() is QuotaExceededException);
            }
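            // With the quota lifted, the same concat should now succeed.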
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            dfs.Concat(target, srcs);
            summary = dfs.GetContentSummary(bar);
            NUnit.Framework.Assert.AreEqual(1, summary.GetFileCount());
            NUnit.Framework.Assert.AreEqual(blockSize * repl * (srcNum + 1), summary.GetSpaceConsumed());
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateRename()
        {
            Path foo = new Path(dir, "foo");

            dfs.Mkdirs(foo);
            Path createdFile1foo = new Path(foo, "created_file1.data");
            Path bar             = new Path(dir, "bar");

            dfs.Mkdirs(bar);
            Path createdFile1bar = new Path(bar, "created_file1.data");

            // set storage policy on directory "foo" and "bar" to ONESSD
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetStoragePolicy(bar, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 4);
            dfs.SetQuotaByStorageType(bar, StorageType.Ssd, Blocksize * 2);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 3 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 3;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1foo, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify space consumed and remaining quota
            long ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                               .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // move file from foo to bar
            try
            {
                dfs.Rename(createdFile1foo, createdFile1bar);
                NUnit.Framework.Assert.Fail("Should have failed with QuotaByStorageTypeExceededException "
                                            );
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
            }
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), file1Len);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), file1Len * 2);
        }
        /// <summary>Convert a ContentSummary to a Json string.</summary>
        public static string ToJsonString(ContentSummary contentsummary)
        {
            if (contentsummary == null)
            {
                return(null);
            }
            IDictionary<string, object> m = new SortedDictionary<string, object>();

            m["length"]         = contentsummary.GetLength();
            m["fileCount"]      = contentsummary.GetFileCount();
            m["directoryCount"] = contentsummary.GetDirectoryCount();
            m["quota"]          = contentsummary.GetQuota();
            m["spaceConsumed"]  = contentsummary.GetSpaceConsumed();
            m["spaceQuota"]     = contentsummary.GetSpaceQuota();
            return(ToJsonString(typeof(ContentSummary), m));
        }
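
        // A minimal usage sketch, not part of the original class: it assumes "dfs" is a DistributedFileSystem
        // handle like the ones used in the tests above, and the method name ContentSummaryJsonFor is
        // hypothetical, for illustration only. It exercises only calls that already appear in this file.
        public static string ContentSummaryJsonFor(DistributedFileSystem dfs, Path path)
        {
            ContentSummary cs = dfs.GetContentSummary(path);
            // ToJsonString above serializes the six fields: length, fileCount, directoryCount,
            // quota, spaceConsumed and spaceQuota.
            return(ToJsonString(cs));
        }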
        /// <summary>
        /// Converts a <code>ContentSummary</code> object into a JSON object.
        /// </summary>
        /// <param name="contentSummary">the content summary</param>
        /// <returns>The JSON representation of the content summary.</returns>
        private static IDictionary ContentSummaryToJSON(ContentSummary contentSummary)
        {
            IDictionary json = new LinkedHashMap();

            json[HttpFSFileSystem.ContentSummaryDirectoryCountJson] = contentSummary.GetDirectoryCount();
            json[HttpFSFileSystem.ContentSummaryFileCountJson]      = contentSummary.GetFileCount();
            json[HttpFSFileSystem.ContentSummaryLengthJson]         = contentSummary.GetLength();
            json[HttpFSFileSystem.ContentSummaryQuotaJson]          = contentSummary.GetQuota();
            json[HttpFSFileSystem.ContentSummarySpaceConsumedJson]  = contentSummary.GetSpaceConsumed();
            json[HttpFSFileSystem.ContentSummarySpaceQuotaJson]     = contentSummary.GetSpaceQuota();
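            // Nest the summary under a single top-level key so callers receive one wrapped JSON object.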
            IDictionary response = new LinkedHashMap();

            response[HttpFSFileSystem.ContentSummaryJson] = json;
            return(response);
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateDelete()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 10);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 2.5 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2 + Blocksize / 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify space consumed and remaining quota
            long storageTypeConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                       .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, storageTypeConsumed);
            // Delete file and verify the consumed space of the storage type is updated
            dfs.Delete(createdFile1, false);
            storageTypeConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                                  .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(0, storageTypeConsumed);
            QuotaCounts counts = new QuotaCounts.Builder().Build();

            fnode.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts, true);
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 0,
                                            counts.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), 0);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), 0);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), 0);
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateTruncate()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            // set storage policy on directory "foo" to ONESSD
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 4);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 2 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify SSD consumed before truncate
            long ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                               .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Truncate file to 1 * BLOCKSIZE
            int newFile1Len = Blocksize * 1;

            dfs.Truncate(createdFile1, newFile1Len);
            // Verify SSD consumed after truncate
            ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature()
                          .GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(newFile1Len, ssdConsumed);
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), newFile1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), newFile1Len);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), newFile1Len * 2);
        }
        public virtual void TestTruncate()
        {
            short repl        = 3;
            int   blockSize   = 1024;
            int   numOfBlocks = 2;
            Path  dir         = GetTestRootPath(fSys, "test/hadoop");
            Path  file        = GetTestRootPath(fSys, "test/hadoop/file");

            byte[] data = GetFileData(numOfBlocks, blockSize);
            CreateFile(fSys, file, data, blockSize, repl);
            int  newLength = blockSize;
            bool isReady   = fSys.Truncate(file, newLength);

            NUnit.Framework.Assert.IsTrue("Recovery is not expected.", isReady);
            FileStatus fileStatus = fSys.GetFileStatus(file);

            NUnit.Framework.Assert.AreEqual(fileStatus.GetLen(), newLength);
            AppendTestUtil.CheckFullFile(fSys, file, newLength, data, file.ToString());
            ContentSummary cs = fSys.GetContentSummary(dir);

            NUnit.Framework.Assert.AreEqual("Bad disk space usage", cs.GetSpaceConsumed(), newLength
                                            * repl);
            NUnit.Framework.Assert.IsTrue("Deleted", fSys.Delete(dir, true));
        }
        public virtual void TestQuotaCommands()
        {
            Configuration conf = new HdfsConfiguration();
            // set a smaller block size so that we can test with smaller
            // space quotas
            int DefaultBlockSize = 512;

            conf.SetLong(DFSConfigKeys.DfsBlockSizeKey, DefaultBlockSize);
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            DistributedFileSystem dfs = (DistributedFileSystem)fs;
            DFSAdmin admin            = new DFSAdmin(conf);

            try
            {
                int   fileLen     = 1024;
                short replication = 5;
                long  spaceQuota  = fileLen * replication * 15 / 8;
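                // spaceQuota = 1024 * 5 * 15 / 8 = 9600 bytes: room for one replicated file (5120 bytes) but not two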
                // 1: create a directory /test and set its quota to be 3
                Path parent = new Path("/test");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(parent));
                string[] args = new string[] { "-setQuota", "3", parent.ToString() };
                RunCommand(admin, args, false);
                // try setting space quota with a 'binary prefix'
                RunCommand(admin, false, "-setSpaceQuota", "2t", parent.ToString());
                NUnit.Framework.Assert.AreEqual(2L << 40, dfs.GetContentSummary(parent).GetSpaceQuota());
                // set diskspace quota to spaceQuota (9600 bytes)
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota),
                           parent.ToString());
                // 2: create directory /test/data0
                Path childDir0 = new Path(parent, "data0");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir0));
                // 3: create a file /test/datafile0
                Path childFile0 = new Path(parent, "datafile0");
                DFSTestUtil.CreateFile(fs, childFile0, fileLen, replication, 0);
                // 4: count -q /test
                ContentSummary c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 3);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 5: count -q /test/data0
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetFileCount() + c.GetDirectoryCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // check disk space consumed
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileLen * replication);
                // 6: create a directory /test/data1
                Path childDir1    = new Path(parent, "data1");
                bool hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(childDir1));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                OutputStream fout;
                // 7: create a file /test/datafile1
                Path childFile1 = new Path(parent, "datafile1");
                hasException = false;
                try
                {
                    fout = dfs.Create(childFile1);
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // 8: clear quota /test
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), spaceQuota);
                // 9: clear quota /test/data0
                RunCommand(admin, new string[] { "-clrQuota", childDir0.ToString() }, false);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                // 10: create a file /test/datafile1
                fout = dfs.Create(childFile1, replication);
                // 10.s: but writing fileLen bytes should result in a quota exception
                try
                {
                    fout.Write(new byte[fileLen]);
                    fout.Close();
                    NUnit.Framework.Assert.Fail();
                }
                catch (QuotaExceededException)
                {
                    IOUtils.CloseStream(fout);
                }
                //delete the file
                dfs.Delete(childFile1, false);
                // 9.s: clear diskspace quota
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                c = dfs.GetContentSummary(parent);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), -1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), -1);
                // now creating childFile1 should succeed
                DFSTestUtil.CreateFile(dfs, childFile1, fileLen, replication, 0);
                // 11: set the quota of /test to be 1
                // HADOOP-5872 - we can set quota even if it is immediately violated
                args = new string[] { "-setQuota", "1", parent.ToString() };
                RunCommand(admin, args, false);
                RunCommand(admin, false, "-setSpaceQuota", Sharpen.Extensions.ToString(fileLen),
                           args[2]);
                // for space quota
                // 12: set the quota of /test/data0 to be 1
                args = new string[] { "-setQuota", "1", childDir0.ToString() };
                RunCommand(admin, args, false);
                // 13: not able create a directory under data0
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Mkdirs(new Path(childDir0, "in")));
                }
                catch (QuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                c = dfs.GetContentSummary(childDir0);
                NUnit.Framework.Assert.AreEqual(c.GetDirectoryCount() + c.GetFileCount(), 1);
                NUnit.Framework.Assert.AreEqual(c.GetQuota(), 1);
                // 14a: set quota on a non-existent directory
                Path nonExistentPath = new Path("/test1");
                NUnit.Framework.Assert.IsFalse(dfs.Exists(nonExistentPath));
                args = new string[] { "-setQuota", "1", nonExistentPath.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "1g", nonExistentPath.ToString());
                // for space quota
                // 14b: set quota on a file
                NUnit.Framework.Assert.IsTrue(dfs.IsFile(childFile0));
                args[1] = childFile0.ToString();
                RunCommand(admin, args, true);
                // same for space quota
                RunCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
                // 15a: clear quota on a file
                args[0] = "-clrQuota";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 15b: clear quota on a non-existent directory
                args[1] = nonExistentPath.ToString();
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-clrSpaceQuota", args[1]);
                // 16a: set the quota of /test to be 0
                args = new string[] { "-setQuota", "0", parent.ToString() };
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", "0", args[2]);
                // 16b: set the quota of /test to be -1
                args[1] = "-1";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16c: set the quota of /test to be Long.MAX_VALUE+1
                args[1] = unchecked(long.MaxValue + 1L).ToString(); // deliberately wraps to a negative value
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16d: set the quota of /test to be a non integer
                args[1] = "33aa1.5";
                RunCommand(admin, args, true);
                RunCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
                // 16e: set space quota with a value larger than Long.MAX_VALUE
                RunCommand(admin, true, "-setSpaceQuota", (long.MaxValue / 1024 / 1024 + 1024) +
                           "m", args[2]);
                // 17:  setQuota by a non-administrator
                string username          = "******";
                UserGroupInformation ugi =
                    UserGroupInformation.CreateUserForTesting(username, new string[] { "groupyy" });
                string[] args2 = (string[])args.Clone(); // separate copy for the doAs block
                ugi.DoAs(new _PrivilegedExceptionAction_275(this, username, conf, args2, parent));
                // 18: clrQuota by a non-administrator
                // 19: clrQuota on the root directory ("/") should fail
                RunCommand(admin, true, "-clrQuota", "/");
                // 20: setQuota on the root directory ("/") should succeed
                RunCommand(admin, false, "-setQuota", "1000000", "/");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                RunCommand(admin, new string[] { "-clrQuota", parent.ToString() }, false);
                RunCommand(admin, false, "-clrSpaceQuota", parent.ToString());
                // 2: create directory /test/data2
                Path childDir2 = new Path(parent, "data2");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(childDir2));
                Path childFile2  = new Path(childDir2, "datafile2");
                Path childFile3  = new Path(childDir2, "datafile3");
                long spaceQuota2 = DefaultBlockSize * replication;
                long fileLen2    = DefaultBlockSize;
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                // clear space quota
                RunCommand(admin, false, "-clrSpaceQuota", childDir2.ToString());
                // create a file that is greater than the size of space quota
                DFSTestUtil.CreateFile(fs, childFile2, fileLen2, replication, 0);
                // now set space quota again. This should succeed
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           childDir2.ToString());
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile3, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // now test the same for root
                Path childFile4 = new Path("/", "datafile2");
                Path childFile5 = new Path("/", "datafile3");
                RunCommand(admin, true, "-clrQuota", "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                // set space quota to a real low value
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                RunCommand(admin, false, "-clrSpaceQuota", "/");
                DFSTestUtil.CreateFile(fs, childFile4, fileLen2, replication, 0);
                RunCommand(admin, false, "-setSpaceQuota", System.Convert.ToString(spaceQuota2),
                           "/");
                hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(fs, childFile5, fileLen2, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                NUnit.Framework.Assert.AreEqual(4, cluster.GetNamesystem().GetFSDirectory().GetYieldCount());
            }
            finally
            {
                cluster.Shutdown();
            }
        }
        public virtual void TestSpaceCommands()
        {
            Configuration conf = new HdfsConfiguration();

            // set a smaller block size so that we can test with smaller
            // diskspace quotas
            conf.Set(DFSConfigKeys.DfsBlockSizeKey, "512");
            // Make it relinquish locks. When run serially, the result should
            // be identical.
            conf.SetInt(DFSConfigKeys.DfsContentSummaryLimitKey, 2);
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).NumDataNodes(2).Build();
            FileSystem     fs      = cluster.GetFileSystem();

            NUnit.Framework.Assert.IsTrue("Not a HDFS: " + fs.GetUri(), fs is DistributedFileSystem
                                          );
            DistributedFileSystem dfs = (DistributedFileSystem)fs;

            try
            {
                int   fileLen     = 1024;
                short replication = 3;
                int   fileSpace   = fileLen * replication;
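                // raw storage per file: 1024 * 3 = 3072 bytes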
                // create directory /nqdir0/qdir1/qdir20/nqdir30
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")
                                                         ));
                // set the quota of /nqdir0/qdir1 to 4 * fileSpace
                Path quotaDir1 = new Path("/nqdir0/qdir1");
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 4 * fileSpace);
                ContentSummary c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 4 * fileSpace);
                // set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace
                Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
                dfs.SetQuota(quotaDir20, HdfsConstants.QuotaDontSet, 6 * fileSpace);
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 6 * fileSpace);
                // Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
                Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir21));
                dfs.SetQuota(quotaDir21, HdfsConstants.QuotaDontSet, 2 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 2 * fileSpace);
                // 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
                Path tempPath = new Path(quotaDir21, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(tempPath));
                // create a file under nqdir32/fileDir
                DFSTestUtil.CreateFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen, replication, 0);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // Create a larger file /nqdir0/qdir1/qdir21/nqdir33/
                bool hasException = false;
                try
                {
                    DFSTestUtil.CreateFile(dfs, new Path(quotaDir21, "nqdir33/file2"),
                                           2 * fileLen, replication, 0);
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // delete nqdir33
                NUnit.Framework.Assert.IsTrue(dfs.Delete(new Path(quotaDir21, "nqdir33"), true));
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 2 * fileSpace);
                // Verify space before the move:
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
                Path dstPath = new Path(quotaDir20, "nqdir30");
                Path srcPath = new Path(quotaDir21, "nqdir32");
                NUnit.Framework.Assert.IsTrue(dfs.Rename(srcPath, dstPath));
                // verify space after the move
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // verify space for its parent
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), fileSpace);
                // verify space for source for the move
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                Path file2    = new Path(dstPath, "fileDir/file2");
                int  file2Len = 2 * fileLen;
                // create a larger file under /nqdir0/qdir1/qdir20/nqdir30
                DFSTestUtil.CreateFile(dfs, file2, file2Len, replication, 0);
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Reverse: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/
                hasException = false;
                try
                {
                    NUnit.Framework.Assert.IsFalse(dfs.Rename(dstPath, srcPath));
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // make sure no intermediate directories left by failed rename
                NUnit.Framework.Assert.IsFalse(dfs.Exists(srcPath));
                // directory should exist
                NUnit.Framework.Assert.IsTrue(dfs.Exists(dstPath));
                // verify space after the failed move
                c = dfs.GetContentSummary(quotaDir20);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                c = dfs.GetContentSummary(quotaDir21);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 0);
                // Test Append :
                // verify space quota
                c = dfs.GetContentSummary(quotaDir1);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), 4 * fileSpace);
                // verify space before append;
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 3 * fileSpace);
                OutputStream @out = dfs.Append(file2);
                // appending 1 fileLen should succeed
                @out.Write(new byte[fileLen]);
                @out.Close();
                file2Len += fileLen; // after append
                // verify space after append
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 4 * fileSpace);
                // now increase the quota for quotaDir1
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 5 * fileSpace);
                // Now, appending more than 1 fileLen should result in an error
                @out         = dfs.Append(file2);
                hasException = false;
                try
                {
                    @out.Write(new byte[fileLen + 1024]);
                    @out.Flush();
                    @out.Close();
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                    IOUtils.CloseStream(@out);
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                file2Len += fileLen; // after partial append
                // verify space after partial append
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace);
                // Test set replication :
                // first reduce the replication
                dfs.SetReplication(file2, (short)(replication - 1));
                // verify that space is reduced by file2Len
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace - file2Len);
                // now try to increase the replication and expect an error.
                hasException = false;
                try
                {
                    dfs.SetReplication(file2, (short)(replication + 1));
                }
                catch (DSQuotaExceededException)
                {
                    hasException = true;
                }
                NUnit.Framework.Assert.IsTrue(hasException);
                // verify space consumed remains unchanged.
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace - file2Len);
                // now increase the quota for quotaDir1 and quotaDir20
                dfs.SetQuota(quotaDir1, HdfsConstants.QuotaDontSet, 10 * fileSpace);
                dfs.SetQuota(quotaDir20, HdfsConstants.QuotaDontSet, 10 * fileSpace);
                // then increasing replication should be ok.
                dfs.SetReplication(file2, (short)(replication + 1));
                // verify increase in space
                c = dfs.GetContentSummary(dstPath);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), 5 * fileSpace + file2Len);
                // Test HDFS-2053 :
                // Create directory /hdfs-2053
                Path quotaDir2053 = new Path("/hdfs-2053");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053));
                // Create subdirectories /hdfs-2053/{A,B,C}
                Path quotaDir2053_A = new Path(quotaDir2053, "A");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_A));
                Path quotaDir2053_B = new Path(quotaDir2053, "B");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_B));
                Path quotaDir2053_C = new Path(quotaDir2053, "C");
                NUnit.Framework.Assert.IsTrue(dfs.Mkdirs(quotaDir2053_C));
                // Factors to vary the sizes of test files created in each subdir.
                // The actual factors are not really important but they allow us to create
                // identifiable file sizes per subdir, which helps during debugging.
                int sizeFactorA = 1;
                int sizeFactorB = 2;
                int sizeFactorC = 4;
                // Set space quota for subdirectory C
                dfs.SetQuota(quotaDir2053_C, HdfsConstants.QuotaDontSet, (sizeFactorC + 1) * fileSpace);
                c = dfs.GetContentSummary(quotaDir2053_C);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceQuota(), (sizeFactorC + 1) * fileSpace);
                // Create a file under subdirectory A
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_A, "fileA"),
                                       sizeFactorA * fileLen, replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_A);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorA * fileSpace);
                // Create a file under subdirectory B
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_B, "fileB"),
                                       sizeFactorB * fileLen, replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_B);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorB * fileSpace);
                // Create a file under subdirectory C (which has a space quota)
                DFSTestUtil.CreateFile(dfs, new Path(quotaDir2053_C, "fileC"),
                                       sizeFactorC * fileLen, replication, 0);
                c = dfs.GetContentSummary(quotaDir2053_C);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), sizeFactorC * fileSpace);
                // Check space consumed for /hdfs-2053
                c = dfs.GetContentSummary(quotaDir2053);
                NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(),
                                                (sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
                NUnit.Framework.Assert.AreEqual(20, cluster.GetNamesystem().GetFSDirectory().GetYieldCount());
            }
            finally
            {
                cluster.Shutdown();
            }
        }