/// <exception cref="System.IO.IOException"/>
 private static ContentSummary GetContentSummaryInt(FSDirectory fsd, INodesInPath
                                                    iip)
 {
     fsd.ReadLock();
     try
     {
         INode targetNode = iip.GetLastINode();
         if (targetNode == null)
         {
             throw new FileNotFoundException("File does not exist: " + iip.GetPath());
         }
         else
         {
              // Make it relinquish locks every time contentCountLimit entries are
              // processed. 0 means disabled, i.e. blocking for the entire duration.
              ContentSummaryComputationContext cscc = new ContentSummaryComputationContext(
                  fsd, fsd.GetFSNamesystem(), fsd.GetContentCountLimit(), fsd.GetContentSleepMicroSec());
             ContentSummary cs = targetNode.ComputeAndConvertContentSummary(cscc);
             fsd.AddYieldCount(cscc.GetYieldCount());
             return(cs);
         }
     }
     finally
     {
         fsd.ReadUnlock();
     }
 }
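The comment inside the method is the key design point: the computation context lets a long-running summary walk relinquish the directory read lock every contentCountLimit entries (0 disables yielding, i.e. the lock is held for the whole walk), sleep briefly, and then resume, while GetYieldCount() reports how many times that happened. Below is a minimal, hedged sketch of that yield pattern using a plain ReaderWriterLockSlim; the class and member names are illustrative and are not the API of this port.

using System.Collections.Generic;
using System.Threading;

// Hypothetical illustration of "relinquish the lock every N processed entries".
public sealed class YieldingWalker
{
    private readonly ReaderWriterLockSlim _lock = new ReaderWriterLockSlim();
    private readonly int _countLimit;   // 0 means never yield (block for the entire duration)
    private readonly int _sleepMillis;  // pause before re-acquiring the lock
    private long _yieldCount;

    public YieldingWalker(int countLimit, int sleepMillis)
    {
        _countLimit = countLimit;
        _sleepMillis = sleepMillis;
    }

    public long YieldCount { get { return _yieldCount; } }

    public int Walk(IEnumerable<string> entries)
    {
        int processed = 0;
        _lock.EnterReadLock();
        try
        {
            foreach (string entry in entries)
            {
                processed++;  // stand-in for accumulating length/file/directory counts
                if (_countLimit > 0 && processed % _countLimit == 0)
                {
                    // Yield: drop the read lock so writers can make progress, then resume.
                    _lock.ExitReadLock();
                    _yieldCount++;
                    Thread.Sleep(_sleepMillis);
                    _lock.EnterReadLock();
                }
            }
        }
        finally
        {
            _lock.ExitReadLock();
        }
        return processed;
    }
}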
        /// <exception cref="System.Exception"/>
        public virtual void TestContentSummaryWithoutQuotaByStorageType()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            // set storage policy on directory "foo" to ONESSD
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(!fnode.IsQuotaSet());
            // Create file of size 2 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify getContentSummary without any quota set
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), file1Len);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), file1Len * 2);
        }
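The three asserts follow directly from the ONE_SSD policy: one replica of each block is placed on SSD and the remaining Replication - 1 replicas on DISK, so total space consumed is file1Len * Replication. A tiny sketch of that arithmetic, assuming Replication == 3 and an illustrative Blocksize (neither constant is defined in this excerpt):

// Expected ONE_SSD accounting for the test above (illustrative constants).
const long Blocksize = 1024 * 1024;     // assumed block size
const short Replication = 3;            // implied by the "* 2" on the DISK assert

long file1Len = Blocksize * 2;
long expectedSsd   = file1Len;                       // one replica on SSD
long expectedDisk  = file1Len * (Replication - 1);   // remaining replicas on DISK
long expectedTotal = file1Len * Replication;         // == expectedSsd + expectedDisk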
        /// <exception cref="System.Exception"/>
        private void TestContentSummary()
        {
            FileSystem   fs   = FileSystem.Get(GetProxiedFSConf());
            Path         path = new Path(GetProxiedFSTestDir(), "foo.txt");
            OutputStream os   = fs.Create(path);

            os.Write(1);
            os.Close();
            ContentSummary hdfsContentSummary = fs.GetContentSummary(path);

            fs.Close();
            fs = GetHttpFSFileSystem();
            ContentSummary httpContentSummary = fs.GetContentSummary(path);

            fs.Close();
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetDirectoryCount(), hdfsContentSummary.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetFileCount(), hdfsContentSummary.GetFileCount());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetLength(), hdfsContentSummary.GetLength());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetQuota(), hdfsContentSummary.GetQuota());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetSpaceConsumed(), hdfsContentSummary.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(httpContentSummary.GetSpaceQuota(), hdfsContentSummary.GetSpaceQuota());
        }
        public async Task <OpenApiResult> GetContentSummary(IOpenApiContext context, string slug, string contentId, string ifNoneMatch)
        {
            IContentStore contentStore = await this.contentStoreFactory.GetContentStoreForTenantAsync(context.CurrentTenantId).ConfigureAwait(false);

            ContentSummary result = await contentStore.GetContentSummaryAsync(contentId, slug).ConfigureAwait(false);

            string etag = EtagHelper.BuildEtag(nameof(ContentSummary), result.ETag);

            // If the etag in the result matches ifNoneMatch then we return 304 Not Modified
            if (EtagHelper.IsMatch(ifNoneMatch, etag))
            {
                return(this.NotModifiedResult());
            }

            HalDocument resultDocument = this.contentSummaryMapper.Map(result, new ResponseMappingContext {
                TenantId = context.CurrentTenantId
            });

            OpenApiResult response = this.OkResult(resultDocument);

            response.Results.Add(HeaderNames.ETag, etag);

            // Since content is immutable we can allow clients to cache it indefinitely.
            response.Results.Add(HeaderNames.CacheControl, Constants.CacheControlHeaderOptions.NeverExpire);

            return(response);
        }
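The early 304 return hinges on comparing the caller's If-None-Match header with the ETag built from the stored ContentSummary. The sketch below shows what such a comparison can look like; it is not the real EtagHelper, and the weak-prefix/quote handling and the multi-value split are assumptions.

using System;

// Hypothetical If-None-Match comparison, for illustration only.
internal static class EtagComparisonSketch
{
    public static bool IsMatch(string ifNoneMatch, string currentEtag)
    {
        if (string.IsNullOrEmpty(ifNoneMatch) || string.IsNullOrEmpty(currentEtag))
        {
            return false;
        }

        // "*" matches any current representation.
        if (ifNoneMatch.Trim() == "*")
        {
            return true;
        }

        // If-None-Match may carry a comma-separated list of (possibly weak) ETags.
        string normalizedCurrent = Normalize(currentEtag);
        foreach (string candidate in ifNoneMatch.Split(','))
        {
            if (Normalize(candidate) == normalizedCurrent)
            {
                return true;
            }
        }
        return false;
    }

    private static string Normalize(string etag)
    {
        string value = etag.Trim();
        if (value.StartsWith("W/", StringComparison.Ordinal))
        {
            value = value.Substring(2);
        }
        return value.Trim('"');
    }
}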
 /// <summary>Creates instance of DataLakeStoreChildItemSummary</summary>
 public DataLakeStoreChildItemSummary(ContentSummary summary)
 {
     DirectoryCount = summary.DirectoryCount;
     FileCount      = summary.FileCount;
     Length         = summary.Length;
     SpaceConsumed  = summary.SpaceConsumed;
 }
            /// <exception cref="System.Exception"/>
            public Void Run()
            {
                string       path = ServletUtil.GetDecodedPath(request, "/contentSummary");
                PrintWriter  @out = response.GetWriter();
                XMLOutputter xml  = new XMLOutputter(@out, "UTF-8");

                xml.Declaration();
                try
                {
                    ClientProtocol nnproxy = this._enclosing.CreateNameNodeProxy();
                    ContentSummary cs      = nnproxy.GetContentSummary(path);
                    xml.StartTag(typeof(ContentSummary).FullName);
                    if (cs != null)
                    {
                        xml.Attribute("length", string.Empty + cs.GetLength());
                        xml.Attribute("fileCount", string.Empty + cs.GetFileCount());
                        xml.Attribute("directoryCount", string.Empty + cs.GetDirectoryCount());
                        xml.Attribute("quota", string.Empty + cs.GetQuota());
                        xml.Attribute("spaceConsumed", string.Empty + cs.GetSpaceConsumed());
                        xml.Attribute("spaceQuota", string.Empty + cs.GetSpaceQuota());
                    }
                    xml.EndTag();
                }
                catch (IOException ioe)
                {
                    this._enclosing.WriteXml(ioe, path, xml);
                }
                xml.EndDocument();
                return(null);
            }
        public virtual void TestConcatWithQuotaDecrease()
        {
            short srcRepl = 3;
            // note this is different from ReplFactor
            int  srcNum = 10;
            Path foo    = new Path("/foo");

            Path[] srcs   = new Path[srcNum];
            Path   target = new Path(foo, "target");

            DFSTestUtil.CreateFile(dfs, target, blockSize, ReplFactor, 0L);
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            for (int i = 0; i < srcNum; i++)
            {
                srcs[i] = new Path(foo, "src" + i);
                DFSTestUtil.CreateFile(dfs, srcs[i], blockSize * 2, srcRepl, 0L);
            }
            ContentSummary summary = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(11, summary.GetFileCount());
            NUnit.Framework.Assert.AreEqual(blockSize * ReplFactor + blockSize * 2 * srcRepl * srcNum, summary.GetSpaceConsumed());
            dfs.Concat(target, srcs);
            summary = dfs.GetContentSummary(foo);
            NUnit.Framework.Assert.AreEqual(1, summary.GetFileCount());
            NUnit.Framework.Assert.AreEqual(blockSize * ReplFactor + blockSize * 2 * ReplFactor * srcNum, summary.GetSpaceConsumed());
        }
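The "decrease" in the name comes from the replication change: before the concat each source file is charged at srcRepl, while afterwards all blocks belong to target and are charged at ReplFactor, so consumed space drops whenever ReplFactor < srcRepl. A small sketch of the before/after accounting the two asserts check, assuming ReplFactor == 1 (the excerpt only shows that it differs from srcRepl == 3):

// Before/after space accounting for the concat above (ReplFactor value assumed).
const long  blockSize  = 1024;
const short replFactor = 1;
const short srcRepl    = 3;
const int   srcNum     = 10;

long before = blockSize * replFactor + blockSize * 2 * srcRepl * srcNum;     // target + 10 sources at srcRepl
long after  = blockSize * replFactor + blockSize * 2 * replFactor * srcNum;  // everything at ReplFactor
// after < before whenever replFactor < srcRepl, hence the "quota decrease".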
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithSnapshot()
        {
            Path sub1 = new Path(dir, "Sub1");

            dfs.Mkdirs(sub1);
            // Setup ONE_SSD policy and SSD quota of 4 * BLOCKSIZE on sub1
            dfs.SetStoragePolicy(sub1, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetQuotaByStorageType(sub1, StorageType.Ssd, 4 * Blocksize);
            INode sub1Node = fsdir.GetINode4Write(sub1.ToString());

            NUnit.Framework.Assert.IsTrue(sub1Node.IsDirectory());
            NUnit.Framework.Assert.IsTrue(sub1Node.IsQuotaSet());
            // Create file1 of size 2 * BLOCKSIZE under sub1
            Path file1    = new Path(sub1, "file1");
            long file1Len = 2 * Blocksize;

            DFSTestUtil.CreateFile(dfs, file1, file1Len, Replication, seed);
            // Create snapshot on sub1 named s1
            SnapshotTestHelper.CreateSnapshot(dfs, sub1, "s1");
            // Verify sub1 SSD usage is unchanged after creating snapshot s1
            long ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Delete file1
            dfs.Delete(file1, false);
            // Verify sub1 SSD usage is unchanged due to the existence of snapshot s1
            ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            QuotaCounts counts1 = new QuotaCounts.Builder().Build();

            sub1Node.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts1, true);
            NUnit.Framework.Assert.AreEqual(sub1Node.DumpTreeRecursively().ToString(), file1Len, counts1.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs1 = dfs.GetContentSummary(sub1);

            NUnit.Framework.Assert.AreEqual(cs1.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs1.GetTypeConsumed(StorageType.Ssd), file1Len);
            NUnit.Framework.Assert.AreEqual(cs1.GetTypeConsumed(StorageType.Disk), file1Len * 2);
            // Delete the snapshot s1
            dfs.DeleteSnapshot(sub1, "s1");
            // Verify sub1 SSD usage is fully reclaimed and changed to 0
            ssdConsumed = sub1Node.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(0, ssdConsumed);
            QuotaCounts counts2 = new QuotaCounts.Builder().Build();

            sub1Node.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts2, true);
            NUnit.Framework.Assert.AreEqual(sub1Node.DumpTreeRecursively().ToString(), 0, counts2.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs2 = dfs.GetContentSummary(sub1);

            NUnit.Framework.Assert.AreEqual(cs2.GetSpaceConsumed(), 0);
            NUnit.Framework.Assert.AreEqual(cs2.GetTypeConsumed(StorageType.Ssd), 0);
            NUnit.Framework.Assert.AreEqual(cs2.GetTypeConsumed(StorageType.Disk), 0);
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestContentSummaryWithoutStoragePolicy()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(!fnode.IsQuotaSet());
            // Create file of size 2 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify getContentSummary without any quota set
            // Expect no type quota and usage information available
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), file1Len * Replication);
            foreach (StorageType t in StorageType.Values())
            {
                NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(t), 0);
                NUnit.Framework.Assert.AreEqual(cs.GetTypeQuota(t), -1);
            }
        }
        public virtual void TestGetContentSummary()
        {
            // GetContentSummary of a dir
            fSys.Mkdirs(new Path("/newDir/dirFoo"));
            ContentSummary cs = fSys.GetContentSummary(new Path("/newDir/dirFoo"));

            Assert.Equal(-1L, cs.GetQuota());
            Assert.Equal(-1L, cs.GetSpaceQuota());
        }
        public static int saveSummaryCallout(string contentId, string summary, string callout)
        {
            ContentSummary cs = new ContentSummary();

            cs.ID      = new Guid(contentId);
            cs.Summary = summary.Replace('\"', '\'');
            cs.Callout = callout;

            return((new Report()).sp_ContentSummary_update(cs));
        }
 public static void Setup()
 {
     conf = new Configuration();
     conf.SetClass("fs.mockfs.impl", typeof(TestCount.MockFileSystem), typeof(FileSystem
                                                                              ));
     mockFs   = Org.Mockito.Mockito.Mock <FileSystem>();
     fileStat = Org.Mockito.Mockito.Mock <FileStatus>();
     mockCs   = Org.Mockito.Mockito.Mock <ContentSummary>();
     Org.Mockito.Mockito.When(fileStat.IsFile()).ThenReturn(true);
 }
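A test built on this fixture would stub the mocks with the same When(...).ThenReturn(...) style before exercising the count command. An illustrative stub (the path, the stubbed values, and the helper name are made up, not part of the original fixture):

 public static void StubSummaryForPath()
 {
     // Illustrative only: wire the mocked FileSystem to hand back the mocked summary.
     Path path = new Path("mockfs://mock/dir");
     Org.Mockito.Mockito.When(mockCs.GetDirectoryCount()).ThenReturn(1L);
     Org.Mockito.Mockito.When(mockCs.GetFileCount()).ThenReturn(2L);
     Org.Mockito.Mockito.When(mockCs.GetLength()).ThenReturn(4096L);
     Org.Mockito.Mockito.When(mockFs.GetContentSummary(path)).ThenReturn(mockCs);
 }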
 public static void Compare(Content expected, ContentSummary actual)
 {
     Assert.AreEqual(expected.Id, actual.Id);
     Assert.AreEqual(expected.Slug, actual.Slug);
     Assert.AreEqual(expected.Author, actual.Author);
     Assert.AreEqual(string.Join(';', expected.CategoryPaths), string.Join(';', actual.CategoryPaths));
     Assert.AreEqual(string.Join(';', expected.Tags), string.Join(';', actual.Tags));
     Assert.AreEqual(expected.Culture, actual.Culture);
     Assert.AreEqual(expected.Description, actual.Description);
     Assert.AreEqual(expected.Title, actual.Title);
 }
 public void GetContentSummaryTest()
 {
     string data = "Test data.";
     var client = (IHadoopApplianceStorageClient)CreateClient();
     WriteStringData(data, string.Format("{0}/{1}", defaultFilePath, "subdir1/file1.txt"), client);
     WriteStringData(data, string.Format("{0}/{1}", defaultFilePath, "subdir1/file2.txt"), client);
     WriteStringData(data, string.Format("{0}/{1}", defaultFilePath, "subdir1/subdir2/file1.txt"), client);
     ContentSummary summary = client.GetContentSummaryAsync(string.Format("{0}/{1}", defaultFilePath, "subdir1")).Result;
     Assert.AreEqual(2, summary.DirectoryCount, "Directory count is not expected.");
     Assert.AreEqual(2, summary.FileCount, "File count is not expected");
 }
 /// <exception cref="Org.Xml.Sax.SAXException"/>
 public override void StartElement(string ns, string localname, string qname, Attributes attrs)
 {
     if (!typeof(ContentSummary).FullName.Equals(qname))
     {
         if (typeof(RemoteException).Name.Equals(qname))
         {
             throw new SAXException(RemoteException.ValueOf(attrs));
         }
         throw new SAXException("Unrecognized entry: " + qname);
     }
     this.contentsummary = HftpFileSystem.ToContentSummary(attrs);
 }
        public int sp_ContentSummary_update(ContentSummary cs)
        {
            List <SqlParameter> paramList = new List <SqlParameter>();

            paramList.Add(new SqlParameter("@ID", cs.ID));
            paramList.Add(new SqlParameter("@title", cs.Title));
            paramList.Add(new SqlParameter("@sequence", cs.Sequence));
            paramList.Add(new SqlParameter("@layout", cs.Layout));
            paramList.Add(new SqlParameter("@summary", cs.Summary));
            paramList.Add(new SqlParameter("@callout", cs.Callout));

            return(util.ExecuteNonQuery("sp_ContentSummary_update", paramList));
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateRename()
        {
            Path foo = new Path(dir, "foo");

            dfs.Mkdirs(foo);
            Path createdFile1foo = new Path(foo, "created_file1.data");
            Path bar             = new Path(dir, "bar");

            dfs.Mkdirs(bar);
            Path createdFile1bar = new Path(bar, "created_file1.data");

            // set storage policy on directory "foo" and "bar" to ONESSD
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            dfs.SetStoragePolicy(bar, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 4);
            dfs.SetQuotaByStorageType(bar, StorageType.Ssd, Blocksize * 2);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 3 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 3;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1foo, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify space consumed and remaining quota
            long ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // move file from foo to bar
            try
            {
                dfs.Rename(createdFile1foo, createdFile1bar);
                NUnit.Framework.Assert.Fail("Should have failed with QuotaByStorageTypeExceededException "
                                            );
            }
            catch (Exception t)
            {
                Log.Info("Got expected exception ", t);
            }
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), file1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), file1Len);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), file1Len * 2);
        }
        private static async Task GetContentSummaryAsync(AdlsClient client)
        {
            await client.DeleteRecursiveAsync("/a");

            Console.WriteLine("Build a sample hierarchical directory tree");
            await CreateDirRecursiveAsync(client, "/a", 3, 3, 1);

            Console.WriteLine("Retrieve the content summary");
            ContentSummary summary = client.GetContentSummary("/a");

            Console.WriteLine($"Directory Count: {summary.DirectoryCount}");
            Console.WriteLine($"File Count: {summary.FileCount}");
            Console.WriteLine($"Total Size: {summary.SpaceConsumed}");
        }
        /// <summary>Test if the quota can be correctly updated for append</summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestUpdateQuotaForAppend()
        {
            Path foo            = new Path(dir, "foo");
            Path bar            = new Path(foo, "bar");
            long currentFileLen = Blocksize;

            DFSTestUtil.CreateFile(dfs, bar, currentFileLen, Replication, seed);
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            // append half of the block data; the previous file length is at a block
            // boundary
            DFSTestUtil.AppendFile(dfs, bar, Blocksize / 2);
            currentFileLen += (Blocksize / 2);
            INodeDirectory fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();

            NUnit.Framework.Assert.IsTrue(fooNode.IsQuotaSet());
            QuotaCounts quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            long        ns    = quota.GetNameSpace();
            long        ds    = quota.GetStorageSpace();

            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
            ContentSummary c = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
            // append another block; the previous file length is not at a block boundary
            DFSTestUtil.AppendFile(dfs, bar, Blocksize);
            currentFileLen += Blocksize;
            quota           = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns              = quota.GetNameSpace();
            ds              = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
            c = dfs.GetContentSummary(foo);
            NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
            // append several blocks
            DFSTestUtil.AppendFile(dfs, bar, Blocksize * 3 + Blocksize / 8);
            currentFileLen += (Blocksize * 3 + Blocksize / 8);
            quota           = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
            ns              = quota.GetNameSpace();
            ds              = quota.GetStorageSpace();
            NUnit.Framework.Assert.AreEqual(2, ns);
            // foo and bar
            NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
            c = dfs.GetContentSummary(foo);
            NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
        }
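Each append grows the directory's consumed storage space by the appended bytes times the replication factor, regardless of block alignment; that is exactly the running total the test keeps in currentFileLen. The same arithmetic, with illustrative Blocksize and Replication values (neither is defined in this excerpt):

// Running total matching the three GetStorageSpace() checks above (constants assumed).
const long Blocksize = 1024;
const short Replication = 3;

long currentFileLen = Blocksize;                  // initial file
currentFileLen += Blocksize / 2;                  // append half a block
currentFileLen += Blocksize;                      // append a full block
currentFileLen += Blocksize * 3 + Blocksize / 8;  // append several blocks
long expectedStorageSpace = currentFileLen * Replication;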
        /// <summary>Convert a ContentSummary to a Json string.</summary>
        public static string ToJsonString(ContentSummary contentsummary)
        {
            if (contentsummary == null)
            {
                return(null);
            }
            IDictionary <string, object> m = new SortedDictionary <string, object>();

            m["length"]         = contentsummary.GetLength();
            m["fileCount"]      = contentsummary.GetFileCount();
            m["directoryCount"] = contentsummary.GetDirectoryCount();
            m["quota"]          = contentsummary.GetQuota();
            m["spaceConsumed"]  = contentsummary.GetSpaceConsumed();
            m["spaceQuota"]     = contentsummary.GetSpaceQuota();
            return(ToJsonString(typeof(ContentSummary), m));
        }
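The helper flattens the summary into a sorted map and hands it to a generic ToJsonString overload keyed by the class. A hedged sketch of the equivalent construction with System.Text.Json, wrapping the map under a "ContentSummary" key in the WebHDFS style this helper appears to target (the wrapper key and the sample values are assumptions):

using System.Collections.Generic;
using System.Text.Json;

public static class ContentSummaryJsonSketch
{
    public static string ToJson(long length, long fileCount, long directoryCount,
                                long quota, long spaceConsumed, long spaceQuota)
    {
        // Same flat map as above; SortedDictionary keeps the keys in alphabetical order.
        var m = new SortedDictionary<string, object>
        {
            ["length"]         = length,
            ["fileCount"]      = fileCount,
            ["directoryCount"] = directoryCount,
            ["quota"]          = quota,
            ["spaceConsumed"]  = spaceConsumed,
            ["spaceQuota"]     = spaceQuota
        };
        return JsonSerializer.Serialize(new Dictionary<string, object> { ["ContentSummary"] = m });
    }
}

// Example output (made-up numbers):
// {"ContentSummary":{"directoryCount":1,"fileCount":2,"length":4096,"quota":-1,"spaceConsumed":12288,"spaceQuota":-1}}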
        private async Task DeleteReferenceContentsAsync(Site site, ContentSummary summary)
        {
            var channel = await _channelRepository.GetAsync(summary.ChannelId);

            var repository = await GetRepositoryAsync(site, channel);

            await repository.DeleteAsync(
                GetQuery(site.Id, channel.Id)
                .Where(nameof(Content.ReferenceId), ">", 0)
                .Where(nameof(Content.Id), summary.Id)
                .CachingRemove(
                    GetListKey(repository.TableName, site.Id, channel.Id),
                    GetEntityKey(repository.TableName, summary.Id)
                    )
                );
        }
        /// <summary>
        /// Test that quotas are properly tracked by the standby through
        /// create, append, delete.
        /// </summary>
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotasTrackedOnStandby()
        {
            fs.Mkdirs(TestDir);
            DistributedFileSystem dfs = (DistributedFileSystem)fs;

            dfs.SetQuota(TestDir, NsQuota, DsQuota);
            long expectedSize = 3 * BlockSize + BlockSize / 2;

            DFSTestUtil.CreateFile(fs, TestFile, expectedSize, (short)1, 1L);
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            ContentSummary cs = nn1.GetRpcServer().GetContentSummary(TestDirStr);

            NUnit.Framework.Assert.AreEqual(NsQuota, cs.GetQuota());
            NUnit.Framework.Assert.AreEqual(DsQuota, cs.GetSpaceQuota());
            NUnit.Framework.Assert.AreEqual(expectedSize, cs.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(1, cs.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(1, cs.GetFileCount());
            // Append to the file and make sure quota is updated correctly.
            FSDataOutputStream stm = fs.Append(TestFile);

            try
            {
                byte[] data = new byte[(int)(BlockSize * 3 / 2)];
                stm.Write(data);
                expectedSize += data.Length;
            }
            finally
            {
                IOUtils.CloseStream(stm);
            }
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            cs = nn1.GetRpcServer().GetContentSummary(TestDirStr);
            NUnit.Framework.Assert.AreEqual(NsQuota, cs.GetQuota());
            NUnit.Framework.Assert.AreEqual(DsQuota, cs.GetSpaceQuota());
            NUnit.Framework.Assert.AreEqual(expectedSize, cs.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(1, cs.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(1, cs.GetFileCount());
            fs.Delete(TestFile, true);
            expectedSize = 0;
            HATestUtil.WaitForStandbyToCatchUp(nn0, nn1);
            cs = nn1.GetRpcServer().GetContentSummary(TestDirStr);
            NUnit.Framework.Assert.AreEqual(NsQuota, cs.GetQuota());
            NUnit.Framework.Assert.AreEqual(DsQuota, cs.GetSpaceQuota());
            NUnit.Framework.Assert.AreEqual(expectedSize, cs.GetSpaceConsumed());
            NUnit.Framework.Assert.AreEqual(1, cs.GetDirectoryCount());
            NUnit.Framework.Assert.AreEqual(0, cs.GetFileCount());
        }
 public HDFSCloudComputingDriveInfo(string hostname, string userName) : base("HDFS")
 {
     try {
         string host = hostname;
         hdfsClient     = new WebHDFSClient(new Uri("http://" + host + ":50070"), userName);
         contentSummary = hdfsClient.GetContentSummary("/");
         byte[]       myByteArray = System.Text.Encoding.UTF8.GetBytes("test");
         MemoryStream ms          = new MemoryStream(myByteArray);
         hdfsClient.CreateFile(ms, "/test.hdfs", true);
     }
     catch (SafeModeException e) {
         this.error = e.Message;
     }
     catch (Exception e) {
         this.error = e.Message;
     }
 }
        public static void MatchStatesAndSummariesToContent(
            List <Content> expectedContents,
            List <string> expectedStates,
            List <ContentSummary> actualSummaries,
            List <ContentState> actualStates)
        {
            Assert.AreEqual(expectedStates.Count, actualStates.Count);

            expectedStates.ForEachAtIndex((expectedState, i) =>
            {
                Assert.AreEqual(expectedState, actualStates[i].StateName);

                Content expectedContent      = expectedContents[i];
                ContentSummary actualContent = actualSummaries.First(x => x.Id == actualStates[i].ContentId && x.Slug == actualStates[i].Slug);

                Compare(expectedContent, actualContent);
            });
        }
        public string ToString(bool includeMethod = true, bool includeSummary = true)
        {
            var builder = new StringBuilder();

            if (includeMethod)
            {
                builder.AppendFormat("Req: [{0}] ", Method);
            }

            builder.Append(Url);

            if (includeSummary && ContentSummary.IsNotNullOrWhiteSpace())
            {
                builder.Append(": ");
                builder.Append(ContentSummary);
            }

            return(builder.ToString());
        }
        /// <summary>
        /// Converts a <code>ContentSummary</code> object into a JSON array
        /// object.
        /// </summary>
        /// <param name="contentSummary">the content summary</param>
        /// <returns>The JSON representation of the content summary.</returns>
        private static IDictionary ContentSummaryToJSON(ContentSummary contentSummary)
        {
            IDictionary json = new LinkedHashMap();

            json[HttpFSFileSystem.ContentSummaryDirectoryCountJson] = contentSummary.GetDirectoryCount();
            json[HttpFSFileSystem.ContentSummaryFileCountJson]      = contentSummary.GetFileCount();
            json[HttpFSFileSystem.ContentSummaryLengthJson]         = contentSummary.GetLength();
            json[HttpFSFileSystem.ContentSummaryQuotaJson]          = contentSummary.GetQuota();
            json[HttpFSFileSystem.ContentSummarySpaceConsumedJson]  = contentSummary.GetSpaceConsumed();
            json[HttpFSFileSystem.ContentSummarySpaceQuotaJson]     = contentSummary.GetSpaceQuota();
            IDictionary response = new LinkedHashMap();

            response[HttpFSFileSystem.ContentSummaryJson] = json;
            return(response);
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateDelete()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 10);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 2.5 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2 + Blocksize / 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify space consumed and remaining quota
            long storageTypeConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, storageTypeConsumed);
            // Delete file and verify the consumed space of the storage type is updated
            dfs.Delete(createdFile1, false);
            storageTypeConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(0, storageTypeConsumed);
            QuotaCounts counts = new QuotaCounts.Builder().Build();

            fnode.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts, true);
            NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 0, counts.GetTypeSpaces().Get(StorageType.Ssd));
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), 0);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), 0);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), 0);
        }
        public virtual void TestConcatWithQuotaIncrease()
        {
            short repl   = 3;
            int   srcNum = 10;
            Path  foo    = new Path("/foo");
            Path  bar    = new Path(foo, "bar");

            Path[] srcs   = new Path[srcNum];
            Path   target = new Path(bar, "target");

            DFSTestUtil.CreateFile(dfs, target, blockSize, repl, 0L);
            long dsQuota = blockSize * repl + blockSize * srcNum * ReplFactor;

            dfs.SetQuota(foo, long.MaxValue - 1, dsQuota);
            for (int i = 0; i < srcNum; i++)
            {
                srcs[i] = new Path(bar, "src" + i);
                DFSTestUtil.CreateFile(dfs, srcs[i], blockSize, ReplFactor, 0L);
            }
            ContentSummary summary = dfs.GetContentSummary(bar);

            NUnit.Framework.Assert.AreEqual(11, summary.GetFileCount());
            NUnit.Framework.Assert.AreEqual(dsQuota, summary.GetSpaceConsumed());
            try
            {
                dfs.Concat(target, srcs);
                NUnit.Framework.Assert.Fail("QuotaExceededException expected");
            }
            catch (RemoteException e)
            {
                NUnit.Framework.Assert.IsTrue(e.UnwrapRemoteException() is QuotaExceededException);
            }
            dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
            dfs.Concat(target, srcs);
            summary = dfs.GetContentSummary(bar);
            NUnit.Framework.Assert.AreEqual(1, summary.GetFileCount());
            NUnit.Framework.Assert.AreEqual(blockSize * repl * (srcNum + 1), summary.GetSpaceConsumed());
        }
        /// <exception cref="System.Exception"/>
        public virtual void TestQuotaByStorageTypeWithFileCreateTruncate()
        {
            Path foo          = new Path(dir, "foo");
            Path createdFile1 = new Path(foo, "created_file1.data");

            dfs.Mkdirs(foo);
            // set storage policy on directory "foo" to ONESSD
            dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
            // set quota by storage type on directory "foo"
            dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 4);
            INode fnode = fsdir.GetINode4Write(foo.ToString());

            NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
            NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
            // Create file of size 2 * BLOCKSIZE under directory "foo"
            long file1Len = Blocksize * 2;
            int  bufLen   = Blocksize / 16;

            DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
            // Verify SSD consumed before truncate
            long ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);

            NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
            // Truncate file to 1 * BLOCKSIZE
            int newFile1Len = Blocksize * 1;

            dfs.Truncate(createdFile1, newFile1Len);
            // Verify SSD consumed after truncate
            ssdConsumed = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
            NUnit.Framework.Assert.AreEqual(newFile1Len, ssdConsumed);
            ContentSummary cs = dfs.GetContentSummary(foo);

            NUnit.Framework.Assert.AreEqual(cs.GetSpaceConsumed(), newFile1Len * Replication);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Ssd), newFile1Len);
            NUnit.Framework.Assert.AreEqual(cs.GetTypeConsumed(StorageType.Disk), newFile1Len * 2);
        }
        public virtual void TestTruncate()
        {
            short repl        = 3;
            int   blockSize   = 1024;
            int   numOfBlocks = 2;
            Path  dir         = GetTestRootPath(fSys, "test/hadoop");
            Path  file        = GetTestRootPath(fSys, "test/hadoop/file");

            byte[] data = GetFileData(numOfBlocks, blockSize);
            CreateFile(fSys, file, data, blockSize, repl);
            int  newLength = blockSize;
            bool isReady   = fSys.Truncate(file, newLength);

            NUnit.Framework.Assert.IsTrue("Recovery is not expected.", isReady);
            FileStatus fileStatus = fSys.GetFileStatus(file);

            NUnit.Framework.Assert.AreEqual(fileStatus.GetLen(), newLength);
            AppendTestUtil.CheckFullFile(fSys, file, newLength, data, file.ToString());
            ContentSummary cs = fSys.GetContentSummary(dir);

            NUnit.Framework.Assert.AreEqual("Bad disk space usage", cs.GetSpaceConsumed(), newLength
                                            * repl);
            NUnit.Framework.Assert.IsTrue("Deleted", fSys.Delete(dir, true));
        }