/// <summary>
/// Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
/// dstInodes[dstInodes.length-1]
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
private static void VerifyQuotaForRename(FSDirectory fsd, INodesInPath src, INodesInPath dst)
{
    if (!fsd.GetFSNamesystem().IsImageLoaded() || fsd.ShouldSkipQuotaChecks())
    {
        // Do not check quota if edits log is still being processed
        return;
    }
    // Walk down both paths while they share the same inodes (reference
    // equality) to find where they diverge.
    int i = 0;
    while (src.GetINode(i) == dst.GetINode(i))
    {
        i++;
    }
    // src[i - 1] is the last common ancestor.
    BlockStoragePolicySuite bsps = fsd.GetBlockStoragePolicySuite();
    // Quota that the moved subtree will consume at the destination.
    QuotaCounts delta = src.GetLastINode().ComputeQuotaUsage(bsps);
    // Reduce the required quota by dst that is being removed
    INode dstINode = dst.GetLastINode();
    if (dstINode != null)
    {
        delta.Subtract(dstINode.ComputeQuotaUsage(bsps));
    }
    // Verify quota on the dst ancestors up to (but excluding) the common
    // ancestor; ancestors above it see no net change from the rename.
    FSDirectory.VerifyQuota(dst, dst.Length() - 1, delta, src.GetINode(i - 1));
}
/// <summary>Call cleanSubtree(..) recursively down the subtree.</summary>
/// <returns>the aggregate quota usage released by all cleaned children</returns>
public virtual QuotaCounts CleanSubtreeRecursively(BlockStoragePolicySuite bsps, int snapshot, int prior, INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes, IDictionary<INode, INode> excludedNodes)
{
    QuotaCounts counts = new QuotaCounts.Builder().Build();
    // in case of deletion snapshot, since this call happens after we modify
    // the diff list, the snapshot to be deleted has been combined or renamed
    // to its latest previous snapshot. (besides, we also need to consider nodes
    // created after prior but before snapshot. this will be done in
    // DirectoryWithSnapshotFeature)
    int s = snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId && prior != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId ? prior : snapshot;
    foreach (INode child in GetChildrenList(s))
    {
        // Skip excluded children only when deleting a snapshot (not the
        // current state); presumably excludedNodes holds nodes handled by
        // the caller — TODO confirm against callers.
        if (snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId && excludedNodes != null && excludedNodes.Contains(child))
        {
            continue;
        }
        else
        {
            QuotaCounts childCounts = child.CleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
            counts.Add(childCounts);
        }
    }
    return (counts);
}
/// <summary>
/// Clean this directory's subtree for the given snapshot range, collecting
/// the blocks and inodes to remove and returning the quota usage released.
/// </summary>
public override QuotaCounts CleanSubtree(BlockStoragePolicySuite bsps, int snapshotId, int priorSnapshotId, INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes)
{
    DirectoryWithSnapshotFeature sf = GetDirectoryWithSnapshotFeature();
    // there is snapshot data
    if (sf != null)
    {
        return (sf.CleanDirectory(bsps, this, snapshotId, priorSnapshotId, collectedBlocks, removedINodes));
    }
    // there is no snapshot data
    if (priorSnapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId && snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId)
    {
        // destroy the whole subtree and collect blocks that should be deleted
        QuotaCounts counts = new QuotaCounts.Builder().Build();
        // Compute the usage being released before destroying the subtree.
        this.ComputeQuotaUsage(bsps, counts, true);
        DestroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
        return (counts);
    }
    else
    {
        // process recursively down the subtree
        QuotaCounts counts = CleanSubtreeRecursively(bsps, snapshotId, priorSnapshotId, collectedBlocks, removedINodes, null);
        if (IsQuotaSet())
        {
            // Deduct the released usage from this directory's cached count.
            GetDirectoryWithQuotaFeature().AddSpaceConsumed2Cache(counts.Negation());
        }
        return (counts);
    }
}
/// <summary>
/// Initializes the in-memory root directory from the fsimage's root inode
/// record: quotas, modification time, permissions, ACLs and xattrs.
/// </summary>
private void LoadRootINode(FsImageProto.INodeSection.INode p)
{
    INodeDirectory loadedRoot = LoadINodeDirectory(p, parent.GetLoaderContext());
    QuotaCounts loadedQuota = loadedRoot.GetQuotaCounts();
    long nameQuota = loadedQuota.GetNameSpace();
    long spaceQuota = loadedQuota.GetStorageSpace();
    // Only install namespace/storagespace quotas when at least one of them
    // carries a value other than -1.
    if (nameQuota != -1 || spaceQuota != -1)
    {
        dir.rootDir.GetDirectoryWithQuotaFeature().SetQuota(nameQuota, spaceQuota);
    }
    EnumCounters<StorageType> perTypeQuota = loadedQuota.GetTypeSpaces();
    if (perTypeQuota.AnyGreaterOrEqual(0))
    {
        dir.rootDir.GetDirectoryWithQuotaFeature().SetQuota(perTypeQuota);
    }
    dir.rootDir.CloneModificationTime(loadedRoot);
    dir.rootDir.ClonePermissionStatus(loadedRoot);
    AclFeature loadedAcl = loadedRoot.GetFeature(typeof(AclFeature));
    if (loadedAcl != null)
    {
        dir.rootDir.AddAclFeature(loadedAcl);
    }
    // root dir supports having extended attributes according to POSIX
    XAttrFeature loadedXAttrs = loadedRoot.GetXAttrFeature();
    if (loadedXAttrs != null)
    {
        dir.rootDir.AddXAttrFeature(loadedXAttrs);
    }
    // Intentionally invoked even when loadedXAttrs is null.
    dir.AddRootDirToEncryptionZone(loadedXAttrs);
}
/// <summary>Set the namespace, storagespace and typespace quota for a directory.</summary>
/// <remarks>
/// Set the namespace, storagespace and typespace quota for a directory.
/// Note: This does not support ".inodes" relative path.
/// </remarks>
/// <exception cref="System.IO.IOException"/>
internal static void SetQuota(FSDirectory fsd, string src, long nsQuota, long ssQuota, StorageType type)
{
    // Only a superuser may change quotas.
    if (fsd.IsPermissionEnabled())
    {
        fsd.GetPermissionChecker().CheckSuperuserPrivilege();
    }
    fsd.WriteLock();
    try
    {
        INodeDirectory updated = UnprotectedSetQuota(fsd, src, nsQuota, ssQuota, type);
        if (updated == null)
        {
            // Nothing changed; no edit-log entry needed.
            return;
        }
        QuotaCounts newQuota = updated.GetQuotaCounts();
        if (type == null)
        {
            fsd.GetEditLog().LogSetQuota(src, newQuota.GetNameSpace(), newQuota.GetStorageSpace());
        }
        else
        {
            fsd.GetEditLog().LogSetQuotaByStorageType(src, newQuota.GetTypeSpaces().Get(type), type);
        }
    }
    finally
    {
        fsd.WriteUnlock();
    }
}
/// <summary>
/// Computes the quota usage of this directory subtree, either scoped to a
/// given snapshot or for the current state (optionally using the cached
/// consumption when a quota is set).
/// </summary>
public override QuotaCounts ComputeQuotaUsage(BlockStoragePolicySuite bsps, byte blockStoragePolicyId, QuotaCounts counts, bool useCache, int lastSnapshotId)
{
    DirectoryWithSnapshotFeature snapshotFeature = GetDirectoryWithSnapshotFeature();
    // we are computing the quota usage for a specific snapshot here, i.e., the
    // computation only includes files/directories that exist at the time of the
    // given snapshot
    bool snapshotScoped = snapshotFeature != null
        && lastSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
        && !(useCache && IsQuotaSet());
    if (snapshotScoped)
    {
        foreach (INode child in GetChildrenList(lastSnapshotId))
        {
            byte childPolicy = child.GetStoragePolicyIDForQuota(blockStoragePolicyId);
            child.ComputeQuotaUsage(bsps, childPolicy, counts, useCache, lastSnapshotId);
        }
        // Count this directory itself in the namespace usage.
        counts.AddNameSpace(1);
        return counts;
    }
    // compute the quota usage in the scope of the current directory tree
    DirectoryWithQuotaFeature quotaFeature = GetDirectoryWithQuotaFeature();
    if (useCache && quotaFeature != null && quotaFeature.IsQuotaSet())
    {
        // use the cached quota
        return quotaFeature.AddCurrentSpaceUsage(counts);
    }
    // A quota feature without a quota set has a stale cache; disable caching
    // for the recursive computation below.
    if (quotaFeature != null && !quotaFeature.IsQuotaSet())
    {
        useCache = false;
    }
    return ComputeDirectoryQuotaUsage(bsps, blockStoragePolicyId, counts, useCache, lastSnapshotId);
}
/// <summary>
/// Serializes a directory inode's attributes (mtime, quotas, permission,
/// ACLs, xattrs) into an fsimage protobuf builder.
/// </summary>
public static FsImageProto.INodeSection.INodeDirectory.Builder BuildINodeDirectory(INodeDirectoryAttributes dir, FSImageFormatProtobuf.SaverContext state)
{
    QuotaCounts quota = dir.GetQuotaCounts();
    FsImageProto.INodeSection.INodeDirectory.Builder b = FsImageProto.INodeSection.INodeDirectory.NewBuilder();
    b = b.SetModificationTime(dir.GetModificationTime());
    b = b.SetNsQuota(quota.GetNameSpace());
    b = b.SetDsQuota(quota.GetStorageSpace());
    b = b.SetPermission(BuildPermissionStatus(dir, state.GetStringMap()));
    // Per-storage-type quotas are only persisted when at least one is set.
    if (quota.GetTypeSpaces().AnyGreaterOrEqual(0))
    {
        b.SetTypeQuotas(BuildQuotaByStorageTypeEntries(quota));
    }
    AclFeature acl = dir.GetAclFeature();
    if (acl != null)
    {
        b.SetAcl(BuildAclEntries(acl, state.GetStringMap()));
    }
    XAttrFeature xattrs = dir.GetXAttrFeature();
    if (xattrs != null)
    {
        b.SetXAttrs(BuildXAttrs(xattrs, state.GetStringMap()));
    }
    return b;
}
/// <summary>Check and add namespace/storagespace/storagetype consumed to itself and the ancestors.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException">if quote is violated.
/// </exception>
internal virtual void AddSpaceConsumed2Parent(QuotaCounts counts, bool verify)
{
    // The root inode has no parent; propagation simply stops there.
    parent?.AddSpaceConsumed(counts, verify);
}
/// <summary>
/// Copy constructor: snapshots the directory's current quota limits.
/// The directory must already have a quota set.
/// </summary>
public CopyWithQuota(INodeDirectory dir)
    : base(dir)
{
    Preconditions.CheckArgument(dir.IsQuotaSet());
    this.quota = new QuotaCounts.Builder().QuotaCount(dir.GetQuotaCounts()).Build();
}
/// <summary>
/// Constructs a quota-carrying attribute copy from explicit field values.
/// </summary>
public CopyWithQuota(byte[] name, PermissionStatus permissions, AclFeature aclFeature, long modificationTime, long nsQuota, long dsQuota, EnumCounters<StorageType> typeQuotas, XAttrFeature xAttrsFeature)
    : base(name, permissions, aclFeature, modificationTime, xAttrsFeature)
{
    QuotaCounts.Builder builder = new QuotaCounts.Builder();
    builder = builder.NameSpace(nsQuota);
    builder = builder.StorageSpace(dsQuota);
    builder = builder.TypeSpaces(typeQuotas);
    this.quota = builder.Build();
}
/// <summary>
/// Convenience overload: computes quota usage for the current state,
/// resolving the storage policy to charge against (symlinks have none).
/// </summary>
public QuotaCounts ComputeQuotaUsage(BlockStoragePolicySuite bsps, QuotaCounts counts, bool useCache)
{
    byte policyId;
    if (IsSymlink())
    {
        policyId = BlockStoragePolicySuite.IdUnspecified;
    }
    else
    {
        policyId = GetStoragePolicyID();
    }
    return ComputeQuotaUsage(bsps, policyId, counts, useCache, Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
}
/// <summary>
/// Verifies that applying the given deltas along the target path would not
/// violate any quota. Skipped while the image is still loading.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
private static void VerifyQuota(FSDirectory fsd, INodesInPath targetIIP, QuotaCounts deltas)
{
    bool skipCheck = !fsd.GetFSNamesystem().IsImageLoaded() || fsd.ShouldSkipQuotaChecks();
    if (skipCheck)
    {
        // Do not check quota if editlog is still being processed
        return;
    }
    int lastPos = targetIIP.Length() - 1;
    FSDirectory.VerifyQuota(targetIIP, lastPos, deltas, null);
}
/// <summary>
/// After a rename whose source was in a snapshot, recomputes the source
/// subtree's quota usage and charges the difference to the source parent.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
internal virtual void UpdateQuotasInSourceTree(BlockStoragePolicySuite bsps)
{
    if (!isSrcInSnapshot)
    {
        // No snapshot involvement: nothing to reconcile.
        return;
    }
    // get the counts after rename
    QuotaCounts usageDelta = srcChild.ComputeQuotaUsage(bsps, new QuotaCounts.Builder().Build(), false);
    usageDelta.Subtract(oldSrcCounts);
    srcParent.AddSpaceConsumed(usageDelta, false);
}
/// <summary>
/// Captures the state needed to carry out (and undo) a rename: resolves the
/// source/destination parents, converts the source child into a WithName
/// reference when it lives in a snapshot, and records the source subtree's
/// quota usage before the rename.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
internal RenameOperation(FSDirectory fsd, string src, string dst, INodesInPath srcIIP, INodesInPath dstIIP)
{
    this.fsd = fsd;
    this.src = src;
    this.dst = dst;
    this.srcIIP = srcIIP;
    this.dstIIP = dstIIP;
    this.srcParentIIP = srcIIP.GetParentINodesInPath();
    this.dstParentIIP = dstIIP.GetParentINodesInPath();
    BlockStoragePolicySuite bsps = fsd.GetBlockStoragePolicySuite();
    srcChild = this.srcIIP.GetLastINode();
    srcChildName = srcChild.GetLocalNameBytes();
    int srcLatestSnapshotId = srcIIP.GetLatestSnapshotId();
    isSrcInSnapshot = srcChild.IsInLatestSnapshot(srcLatestSnapshotId);
    srcChildIsReference = srcChild.IsReference();
    // GetINode(-2) resolves the second-to-last path component, i.e. the
    // source's parent directory.
    srcParent = this.srcIIP.GetINode(-2).AsDirectory();
    // Record the snapshot on srcChild. After the rename, before any new
    // snapshot is taken on the dst tree, changes will be recorded in the
    // latest snapshot of the src tree.
    if (isSrcInSnapshot)
    {
        srcChild.RecordModification(srcLatestSnapshotId);
    }
    // check srcChild for reference
    srcRefDstSnapshot = srcChildIsReference ? srcChild.AsReference().GetDstSnapshotId() : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
    oldSrcCounts = new QuotaCounts.Builder().Build();
    if (isSrcInSnapshot)
    {
        // Replace the source child with a WithName reference so the
        // snapshot continues to see it under its original name.
        INodeReference.WithName withName = srcParent.ReplaceChild4ReferenceWithName(srcChild, srcLatestSnapshotId);
        withCount = (INodeReference.WithCount)withName.GetReferredINode();
        srcChild = withName;
        this.srcIIP = INodesInPath.Replace(srcIIP, srcIIP.Length() - 1, srcChild);
        // get the counts before rename
        withCount.GetReferredINode().ComputeQuotaUsage(bsps, oldSrcCounts, true);
    }
    else
    {
        if (srcChildIsReference)
        {
            // srcChild is reference but srcChild is not in latest snapshot
            withCount = (INodeReference.WithCount)srcChild.AsReference().GetReferredINode();
        }
        else
        {
            withCount = null;
        }
    }
}
/// <summary>
/// Verifies that quota-by-storage-type settings and the SSD space consumed
/// survive a namenode restart, i.e. that they are correctly persisted to
/// and reloaded from the fsimage.
/// </summary>
public virtual void TestQuotaByStorageTypePersistenceInFsImage()
{
    string MethodName = GenericTestUtils.GetMethodName();
    Path testDir = new Path(dir, MethodName);
    Path createdFile1 = new Path(testDir, "created_file1.data");
    dfs.Mkdirs(testDir);
    // set storage policy on testDir to ONESSD
    dfs.SetStoragePolicy(testDir, HdfsConstants.OnessdStoragePolicyName);
    // set quota by storage type on testDir
    long SsdQuota = Blocksize * 4;
    dfs.SetQuotaByStorageType(testDir, StorageType.Ssd, SsdQuota);
    INode testDirNode = fsdir.GetINode4Write(testDir.ToString());
    NUnit.Framework.Assert.IsTrue(testDirNode.IsDirectory());
    NUnit.Framework.Assert.IsTrue(testDirNode.IsQuotaSet());
    // Create file of size 2 * BLOCKSIZE under testDir
    long file1Len = Blocksize * 2;
    int bufLen = Blocksize / 16;
    DFSTestUtil.CreateFile(dfs, createdFile1, bufLen, file1Len, Blocksize, Replication, seed);
    // Verify SSD consumed before namenode restart
    long ssdConsumed = testDirNode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
    NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumed);
    // Restart the namenode with checkpoint to make sure fsImage is correct
    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
    dfs.SaveNamespace();
    dfs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
    cluster.RestartNameNode(true);
    RefreshClusterState();
    INode testDirNodeAfterNNRestart = fsdir.GetINode4Write(testDir.ToString());
    // FIX: assert on the node re-resolved after the restart; the original
    // code asserted on the stale pre-restart reference (testDirNode), so
    // the reloaded fsimage state was never actually checked here.
    NUnit.Framework.Assert.IsTrue(testDirNodeAfterNNRestart.IsDirectory());
    NUnit.Framework.Assert.IsTrue(testDirNodeAfterNNRestart.IsQuotaSet());
    QuotaCounts qc = testDirNodeAfterNNRestart.GetQuotaCounts();
    NUnit.Framework.Assert.AreEqual(SsdQuota, qc.GetTypeSpace(StorageType.Ssd));
    // All other quota-supporting storage types must remain unset.
    foreach (StorageType t in StorageType.GetTypesSupportingQuota())
    {
        if (t != StorageType.Ssd)
        {
            NUnit.Framework.Assert.AreEqual(HdfsConstants.QuotaReset, qc.GetTypeSpace(t));
        }
    }
    // SSD consumption must match the file length after the restart as well.
    long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetTypeSpaces().Get(StorageType.Ssd);
    NUnit.Framework.Assert.AreEqual(file1Len, ssdConsumedAfterNNRestart);
}
/// <summary>
/// Compute
/// <see cref="Org.Apache.Hadoop.FS.ContentSummary"/>
/// .
/// </summary>
public ContentSummary ComputeAndConvertContentSummary(ContentSummaryComputationContext summary)
{
    ContentCounts contentCounts = ComputeContentSummary(summary).GetCounts();
    QuotaCounts quotas = GetQuotaCounts();
    // Symlinks are reported together with regular files in the file count.
    ContentSummary.Builder builder = new ContentSummary.Builder();
    builder = builder.Length(contentCounts.GetLength());
    builder = builder.FileCount(contentCounts.GetFileCount() + contentCounts.GetSymlinkCount());
    builder = builder.DirectoryCount(contentCounts.GetDirectoryCount());
    builder = builder.Quota(quotas.GetNameSpace());
    builder = builder.SpaceConsumed(contentCounts.GetStoragespace());
    builder = builder.SpaceQuota(quotas.GetStorageSpace());
    builder = builder.TypeConsumed(contentCounts.GetTypeSpaces());
    builder = builder.TypeQuota(quotas.GetTypeSpaces().AsArray());
    return builder.Build();
}
/// <summary>
/// Recursively accumulates quota usage of all children, then adds this
/// directory's own contribution.
/// </summary>
private QuotaCounts ComputeDirectoryQuotaUsage(BlockStoragePolicySuite bsps, byte blockStoragePolicyId, QuotaCounts counts, bool useCache, int lastSnapshotId)
{
    if (children != null)
    {
        foreach (INode child in children)
        {
            // Each child may override the inherited storage policy.
            child.ComputeQuotaUsage(bsps, child.GetStoragePolicyIDForQuota(blockStoragePolicyId), counts, useCache, lastSnapshotId);
        }
    }
    return ComputeQuotaUsage4CurrentDirectory(bsps, blockStoragePolicyId, counts);
}
/// <summary>
/// Adds the given usage deltas to this directory's quota accounting when a
/// quota feature is present, otherwise just forwards them to the parent.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
public override void AddSpaceConsumed(QuotaCounts counts, bool verify)
{
    DirectoryWithQuotaFeature quotaFeature = GetDirectoryWithQuotaFeature();
    if (quotaFeature == null)
    {
        AddSpaceConsumed2Parent(counts, verify);
        return;
    }
    quotaFeature.AddSpaceConsumed(this, counts, verify);
}
/// <summary>
/// Delete a path from the name space
/// Update the count at each ancestor directory with quota
/// </summary>
/// <param name="iip">the inodes resolved from the path</param>
/// <param name="collectedBlocks">blocks collected from the deleted path</param>
/// <param name="removedINodes">inodes that should be removed from inodeMap</param>
/// <param name="mtime">the time the inode is removed</param>
/// <returns>
/// the number of inodes deleted; 0 if no inodes are deleted; -1 if the
/// target does not exist or could not be removed
/// </returns>
private static long UnprotectedDelete(FSDirectory fsd, INodesInPath iip, INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes, long mtime)
{
    System.Diagnostics.Debug.Assert(fsd.HasWriteLock());
    // check if target node exists
    INode targetNode = iip.GetLastINode();
    if (targetNode == null)
    {
        return (-1);
    }
    // record modification
    int latestSnapshot = iip.GetLatestSnapshotId();
    targetNode.RecordModification(latestSnapshot);
    // Remove the node from the namespace
    long removed = fsd.RemoveLastINode(iip);
    if (removed == -1)
    {
        return (-1);
    }
    // set the parent's modification time
    INodeDirectory parent = targetNode.GetParent();
    parent.UpdateModificationTime(mtime, latestSnapshot);
    fsd.UpdateCountForDelete(targetNode, iip);
    if (removed == 0)
    {
        return (0);
    }
    // collect block and update quota
    if (!targetNode.IsInLatestSnapshot(latestSnapshot))
    {
        // Not referenced by any snapshot: destroy the subtree outright.
        targetNode.DestroyAndCollectBlocks(fsd.GetBlockStoragePolicySuite(), collectedBlocks, removedINodes);
    }
    else
    {
        // Referenced by a snapshot: only clean the current state and update
        // the quota without re-checking limits (we are freeing space).
        QuotaCounts counts = targetNode.CleanSubtree(fsd.GetBlockStoragePolicySuite(), Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId, latestSnapshot, collectedBlocks, removedINodes);
        removed = counts.GetNameSpace();
        fsd.UpdateCountNoQuotaCheck(iip, iip.Length() - 1, counts.Negation());
    }
    if (NameNode.stateChangeLog.IsDebugEnabled())
    {
        NameNode.stateChangeLog.Debug("DIR* FSDirectory.unprotectedDelete: " + iip.GetPath() + " is removed");
    }
    return (removed);
}
/// <summary>Test if the quota can be correctly updated for append</summary>
/// <remarks>
/// Appends to a file three times (ending at a block boundary, mid-block,
/// and spanning several blocks) and checks after each append that the
/// cached quota consumption matches the file length and agrees with
/// getContentSummary.
/// </remarks>
/// <exception cref="System.Exception"/>
public virtual void TestUpdateQuotaForAppend()
{
    Path foo = new Path(dir, "foo");
    Path bar = new Path(foo, "bar");
    long currentFileLen = Blocksize;
    DFSTestUtil.CreateFile(dfs, bar, currentFileLen, Replication, seed);
    dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
    // append half of the block data, the previous file length is at block
    // boundary
    DFSTestUtil.AppendFile(dfs, bar, Blocksize / 2);
    currentFileLen += (Blocksize / 2);
    INodeDirectory fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();
    NUnit.Framework.Assert.IsTrue(fooNode.IsQuotaSet());
    QuotaCounts quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    long ns = quota.GetNameSpace();
    long ds = quota.GetStorageSpace();
    NUnit.Framework.Assert.AreEqual(2, ns);
    // foo and bar
    NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
    // The cached count must agree with a fresh content summary.
    ContentSummary c = dfs.GetContentSummary(foo);
    NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
    // append another block, the previous file length is not at block boundary
    DFSTestUtil.AppendFile(dfs, bar, Blocksize);
    currentFileLen += Blocksize;
    quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    ns = quota.GetNameSpace();
    ds = quota.GetStorageSpace();
    NUnit.Framework.Assert.AreEqual(2, ns);
    // foo and bar
    NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
    c = dfs.GetContentSummary(foo);
    NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
    // append several blocks
    DFSTestUtil.AppendFile(dfs, bar, Blocksize * 3 + Blocksize / 8);
    currentFileLen += (Blocksize * 3 + Blocksize / 8);
    quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    ns = quota.GetNameSpace();
    ds = quota.GetStorageSpace();
    NUnit.Framework.Assert.AreEqual(2, ns);
    // foo and bar
    NUnit.Framework.Assert.AreEqual(currentFileLen * Replication, ds);
    c = dfs.GetContentSummary(foo);
    NUnit.Framework.Assert.AreEqual(c.GetSpaceConsumed(), ds);
}
/// <summary>
/// Serializes all per-storage-type quotas that are set (value >= 0) into a
/// QuotaByStorageTypeFeatureProto builder.
/// </summary>
private static FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder BuildQuotaByStorageTypeEntries(QuotaCounts q)
{
    FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder featureBuilder = FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.NewBuilder();
    foreach (StorageType storageType in StorageType.GetTypesSupportingQuota())
    {
        long typeQuota = q.GetTypeSpace(storageType);
        if (typeQuota < 0)
        {
            // Negative means the quota is not set for this type; skip it.
            continue;
        }
        FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder entry = FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.NewBuilder();
        entry = entry.SetStorageType(PBHelper.ConvertStorageType(storageType));
        entry = entry.SetQuota(typeQuota);
        featureBuilder.AddQuotas(entry);
    }
    return featureBuilder;
}
/// <summary>
/// Destroys this reference. When this was the last reference the referred
/// subtree is destroyed outright; otherwise only the snapshot copy is
/// cleaned and the freed quota is credited back to the parent reference.
/// </summary>
public override void DestroyAndCollectBlocks(BlockStoragePolicySuite bsps, INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes)
{
    int snapshot = GetSelfSnapshot();
    if (RemoveReference(this) <= 0)
    {
        // Last reference gone: destroy the referred subtree for real.
        GetReferredINode().DestroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    }
    else
    {
        int prior = GetPriorSnapshot(this);
        INode referred = GetReferredINode().AsReference().GetReferredINode();
        if (snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId)
        {
            if (prior != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId && snapshot <= prior)
            {
                // the snapshot to be deleted has been deleted while traversing
                // the src tree of the previous rename operation. This usually
                // happens when rename's src and dst are under the same
                // snapshottable directory. E.g., the following operation sequence:
                // 1. create snapshot s1 on /test
                // 2. rename /test/foo/bar to /test/foo2/bar
                // 3. create snapshot s2 on /test
                // 4. rename foo2 again
                // 5. delete snapshot s2
                return;
            }
            try
            {
                QuotaCounts counts = referred.CleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
                INodeReference @ref = GetReferredINode().GetParentReference();
                if (@ref != null)
                {
                    // Credit the freed usage back up through the parent ref.
                    @ref.AddSpaceConsumed(counts.Negation(), true);
                }
            }
            catch (QuotaExceededException e)
            {
                // Deletion only frees space, so this should be impossible.
                Log.Error("should not exceed quota while snapshot deletion", e);
            }
        }
    }
}
/// <summary>
/// Cleans the referred subtree for the given snapshot range through this
/// WithName reference, crediting the freed quota back to the parent
/// reference. Returns zero counts when the deleted snapshot is outside the
/// range this reference is responsible for.
/// </summary>
public override QuotaCounts CleanSubtree(BlockStoragePolicySuite bsps, int snapshot, int prior, INode.BlocksMapUpdateInfo collectedBlocks, IList<INode> removedINodes)
{
    // since WithName node resides in deleted list acting as a snapshot copy,
    // the parameter snapshot must be non-null
    Preconditions.CheckArgument(snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
    // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to the
    // previous WithName instance
    if (prior == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId)
    {
        prior = GetPriorSnapshot(this);
    }
    // The snapshot to delete precedes (or equals) the prior snapshot: a
    // previous WithName instance covers it, so nothing to clean here.
    if (prior != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.NoSnapshotId && Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.IdIntegerComparator.Compare(snapshot, prior) <= 0)
    {
        return (new QuotaCounts.Builder().Build());
    }
    QuotaCounts counts = GetReferredINode().CleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
    INodeReference @ref = GetReferredINode().GetParentReference();
    if (@ref != null)
    {
        try
        {
            // Deletion only frees space; quota can never be exceeded here.
            @ref.AddSpaceConsumed(counts.Negation(), true);
        }
        catch (QuotaExceededException)
        {
            Org.Mortbay.Log.Log.Warn("Should not have QuotaExceededException");
        }
    }
    if (snapshot < lastSnapshotId)
    {
        // for a WithName node, when we compute its quota usage, we only count
        // in all the nodes existing at the time of the corresponding rename op.
        // Thus if we are deleting a snapshot before/at the snapshot associated
        // with lastSnapshotId, we do not need to update the quota upwards.
        counts = new QuotaCounts.Builder().Build();
    }
    return (counts);
}
/// <summary>
/// Value equality: two QuotaCounts are equal when both their
/// namespace/storagespace counters and their per-type counters are equal.
/// </summary>
public override bool Equals(object obj)
{
    if (ReferenceEquals(obj, this))
    {
        return true;
    }
    QuotaCounts other = obj as QuotaCounts;
    if (other == null)
    {
        // Covers both null and non-QuotaCounts arguments.
        return false;
    }
    return nsSsCounts.Equals(other.nsSsCounts) && tsCounts.Equals(other.tsCounts);
}
/// <summary>
/// Undo the rename operation for the dst tree, i.e., if the rename operation
/// (with OVERWRITE option) removes a file/dir from the dst tree, add it back
/// and delete possible record in the deleted list.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
public virtual void UndoRename4DstParent(BlockStoragePolicySuite bsps, INode deletedChild, int latestSnapshotId)
{
    DirectoryWithSnapshotFeature sf = GetDirectoryWithSnapshotFeature();
    Preconditions.CheckState(sf != null, "Directory does not have snapshot feature");
    bool wasInDeletedList = sf.GetDiffs().RemoveChild(Diff.ListType.Deleted, deletedChild);
    int sid;
    if (wasInDeletedList)
    {
        sid = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
    }
    else
    {
        sid = latestSnapshotId;
    }
    bool added = AddChild(deletedChild, true, sid);
    // update quota usage if adding is successfully and the old child has not
    // been stored in deleted list before
    if (added && !wasInDeletedList)
    {
        AddSpaceConsumed(deletedChild.ComputeQuotaUsage(bsps), false);
    }
}
/// <summary>Test if the quota can be correctly updated for create file</summary>
/// <exception cref="System.Exception"/>
public virtual void TestQuotaUpdateWithFileCreate()
{
    Path quotaDir = new Path(dir, "foo");
    Path dataFile = new Path(quotaDir, "created_file.data");
    dfs.Mkdirs(quotaDir);
    dfs.SetQuota(quotaDir, long.MaxValue - 1, long.MaxValue - 1);
    // Write a file spanning two and a half blocks.
    long dataLen = Blocksize * 2 + Blocksize / 2;
    DFSTestUtil.CreateFile(dfs, dataFile, Blocksize / 16, dataLen, Blocksize, Replication, seed);
    INode dirNode = fsdir.GetINode4Write(quotaDir.ToString());
    NUnit.Framework.Assert.IsTrue(dirNode.IsDirectory());
    NUnit.Framework.Assert.IsTrue(dirNode.IsQuotaSet());
    QuotaCounts consumed = dirNode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    // Namespace usage: the directory itself plus the created file.
    NUnit.Framework.Assert.AreEqual(2, consumed.GetNameSpace());
    NUnit.Framework.Assert.AreEqual(dataLen * Replication, consumed.GetStorageSpace());
}
/// <summary>
/// Test if the quota can be correctly updated when file length is updated
/// through fsync
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestUpdateQuotaForFSync()
{
    Path foo = new Path("/foo");
    Path bar = new Path(foo, "bar");
    DFSTestUtil.CreateFile(dfs, bar, Blocksize, Replication, 0L);
    dfs.SetQuota(foo, long.MaxValue - 1, long.MaxValue - 1);
    // Append a quarter block and hsync with length update so the namenode
    // sees the new length while the file is still open.
    FSDataOutputStream @out = dfs.Append(bar);
    @out.Write(new byte[Blocksize / 4]);
    ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag.UpdateLength));
    INodeDirectory fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();
    QuotaCounts quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    long ns = quota.GetNameSpace();
    long ds = quota.GetStorageSpace();
    NUnit.Framework.Assert.AreEqual(2, ns);
    // foo and bar
    // While under construction the last block is charged at full block size
    // (2 full blocks here), per the assertion below.
    NUnit.Framework.Assert.AreEqual(Blocksize * 2 * Replication, ds);
    // file is under construction
    @out.Write(new byte[Blocksize / 4]);
    @out.Close();
    // After close, usage drops to the actual file length (1.5 blocks).
    fooNode = fsdir.GetINode4Write(foo.ToString()).AsDirectory();
    quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    ns = quota.GetNameSpace();
    ds = quota.GetStorageSpace();
    NUnit.Framework.Assert.AreEqual(2, ns);
    NUnit.Framework.Assert.AreEqual((Blocksize + Blocksize / 2) * Replication, ds);
    // append another block
    DFSTestUtil.AppendFile(dfs, bar, Blocksize);
    quota = fooNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    ns = quota.GetNameSpace();
    ds = quota.GetStorageSpace();
    NUnit.Framework.Assert.AreEqual(2, ns);
    // foo and bar
    NUnit.Framework.Assert.AreEqual((Blocksize * 2 + Blocksize / 2) * Replication, ds);
}
/// <summary>
/// Both traditional space quota and the storage type quota for SSD are set and
/// not exceeded.
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestQuotaByStorageTypeWithTraditionalQuota()
{
    Path foo = new Path(dir, "foo");
    dfs.Mkdirs(foo);
    dfs.SetStoragePolicy(foo, HdfsConstants.OnessdStoragePolicyName);
    dfs.SetQuotaByStorageType(foo, StorageType.Ssd, Blocksize * 10);
    dfs.SetQuota(foo, long.MaxValue - 1, Replication * Blocksize * 10);
    INode fnode = fsdir.GetINode4Write(foo.ToString());
    NUnit.Framework.Assert.IsTrue(fnode.IsDirectory());
    NUnit.Framework.Assert.IsTrue(fnode.IsQuotaSet());
    // Create a file of 2.5 blocks, well within both quotas.
    Path createdFile = new Path(foo, "created_file.data");
    long fileLen = Blocksize * 2 + Blocksize / 2;
    DFSTestUtil.CreateFile(dfs, createdFile, Blocksize / 16, fileLen, Blocksize, Replication, seed);
    // Cached consumption: directory + file in namespace, full file in space.
    QuotaCounts cnt = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    NUnit.Framework.Assert.AreEqual(2, cnt.GetNameSpace());
    NUnit.Framework.Assert.AreEqual(fileLen * Replication, cnt.GetStorageSpace());
    dfs.Delete(createdFile, true);
    // After delete, only the directory itself remains in the counts.
    QuotaCounts cntAfterDelete = fnode.AsDirectory().GetDirectoryWithQuotaFeature().GetSpaceConsumed();
    NUnit.Framework.Assert.AreEqual(1, cntAfterDelete.GetNameSpace());
    NUnit.Framework.Assert.AreEqual(0, cntAfterDelete.GetStorageSpace());
    // Validate the computeQuotaUsage()
    QuotaCounts counts = new QuotaCounts.Builder().Build();
    fnode.ComputeQuotaUsage(fsn.GetBlockManager().GetStoragePolicySuite(), counts, true);
    NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 1, counts.GetNameSpace());
    NUnit.Framework.Assert.AreEqual(fnode.DumpTreeRecursively().ToString(), 0, counts.GetStorageSpace());
}
/// <summary>
/// Adds the given usage deltas to this directory's cached consumption and
/// propagates them to the ancestors, optionally verifying all quotas first
/// so that no count is changed when a violation would occur.
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
internal void AddSpaceConsumed(INodeDirectory dir, QuotaCounts counts, bool verify)
{
    if (dir.IsQuotaSet())
    {
        // The following steps are important:
        // check quotas in this inode and all ancestors before changing counts
        // so that no change is made if there is any quota violation.
        // (1) verify quota in this inode
        if (verify)
        {
            VerifyQuota(counts);
        }
        // (2) verify quota and then add count in ancestors
        dir.AddSpaceConsumed2Parent(counts, verify);
        // (3) add count in this inode
        AddSpaceConsumed2Cache(counts);
    }
    else
    {
        // No quota on this directory: just propagate to the ancestors.
        dir.AddSpaceConsumed2Parent(counts, verify);
    }
}
/// <summary>
/// Sets the namespace/storagespace quota — or, when <c>type</c> is non-null,
/// the quota for that storage type — on this directory, adding or removing
/// the quota feature as needed.
/// </summary>
internal virtual void SetQuota(BlockStoragePolicySuite bsps, long nsQuota, long ssQuota, StorageType type)
{
    DirectoryWithQuotaFeature quota = GetDirectoryWithQuotaFeature();
    if (quota != null)
    {
        // already has quota; so set the quota to the new values
        if (type != null)
        {
            quota.SetQuota(ssQuota, type);
        }
        else
        {
            quota.SetQuota(nsQuota, ssQuota);
        }
        // If clearing this quota left no quota set at all, drop the feature
        // entirely — except on the root, which always keeps it.
        if (!IsQuotaSet() && !IsRoot())
        {
            RemoveFeature(quota);
        }
    }
    else
    {
        // No quota feature yet: compute current usage first so the new
        // feature starts with accurate consumed counts.
        QuotaCounts c = ComputeQuotaUsage(bsps);
        DirectoryWithQuotaFeature.Builder builder = new DirectoryWithQuotaFeature.Builder().NameSpaceQuota(nsQuota);
        if (type != null)
        {
            builder.TypeQuota(type, ssQuota);
        }
        else
        {
            builder.StorageSpaceQuota(ssQuota);
        }
        AddDirectoryWithQuotaFeature(builder.Build()).SetSpaceConsumed(c);
    }
}