/// <summary>Get a listing of all the snapshots of a snapshottable directory.</summary>
/// <exception cref="System.IO.IOException"/>
private static DirectoryListing GetSnapshotsListing(FSDirectory fsd, string src,
    byte[] startAfter)
{
    Preconditions.CheckState(fsd.HasReadLock());
    Preconditions.CheckArgument(src.EndsWith(HdfsConstants.SeparatorDotSnapshotDir),
        "%s does not end with %s", src, HdfsConstants.SeparatorDotSnapshotDir);
    // Strip the trailing ".snapshot" component to get the snapshottable directory.
    string dirPath = FSDirectory.NormalizePath(Sharpen.Runtime.Substring(src, 0,
        src.Length - HdfsConstants.DotSnapshotDir.Length));
    INode node = fsd.GetINode(dirPath);
    INodeDirectory dirNode = INodeDirectory.ValueOf(node, dirPath);
    DirectorySnapshottableFeature sf = dirNode.GetDirectorySnapshottableFeature();
    if (sf == null)
    {
        throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
    }
    ReadOnlyList<Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> snapshots =
        sf.GetSnapshotList();
    // Binary search gives the match index, or a negative encoding of the
    // insertion point when startAfter is absent. Either way, normalize to the
    // index of the first snapshot strictly after startAfter.
    int skipSize = ReadOnlyList.Util.BinarySearch(snapshots, startAfter);
    skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
    int numOfListing = Math.Min(snapshots.Size() - skipSize, fsd.GetLsLimit());
    HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing; i++)
    {
        Snapshot.Root sRoot = snapshots.Get(i + skipSize).GetRoot();
        listing[i] = CreateFileStatus(fsd, src, sRoot.GetLocalNameBytes(), sRoot,
            BlockStoragePolicySuite.IdUnspecified,
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId,
            false, INodesInPath.FromINode(sRoot));
    }
    return new DirectoryListing(listing, snapshots.Size() - skipSize - numOfListing);
}
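// A minimal standalone sketch (not part of the class above) of the skip-index
// computation used in GetSnapshotsListing. System.Array.BinarySearch stands in
// for ReadOnlyList.Util.BinarySearch, and plain strings stand in for the
// byte[] snapshot names; both searches return the match index, or the bitwise
// complement of the insertion point when the key is absent.
internal static class SkipIndexSketch
{
    internal static int FirstIndexAfter(string[] sortedNames, string startAfter)
    {
        int i = System.Array.BinarySearch(sortedNames, startAfter,
            System.StringComparer.Ordinal);
        // Found: resume one past the match. Not found: BinarySearch returned
        // ~insertionPoint, so -i - 1 (== ~i) is already the resume index.
        return i < 0 ? -i - 1 : i + 1;
    }
}
// Example: for names { "s0", "s1", "s3" }, FirstIndexAfter(names, "s1") == 2
// and FirstIndexAfter(names, "s2") == 2, so listing resumes at "s3" either way.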
/// <param name="name">the name of the child</param> /// <param name="snapshotId"> /// if it is not /// <see cref="Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId /// "/> /// , get the result /// from the corresponding snapshot; otherwise, get the result from /// the current directory. /// </param> /// <returns>the child inode.</returns> public virtual INode GetChild(byte[] name, int snapshotId) { DirectoryWithSnapshotFeature sf; if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId || (sf = GetDirectoryWithSnapshotFeature()) == null) { ReadOnlyList <INode> c = GetCurrentChildrenList(); int i = ReadOnlyList.Util.BinarySearch(c, name); return(i < 0 ? null : c.Get(i)); } return(sf.GetChild(this, name, snapshotId)); }
protected internal virtual ContentSummaryComputationContext ComputeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId)
{
    ReadOnlyList<INode> childrenList = GetChildrenList(snapshotId);
    // Explicit traversing is done to enable repositioning after relinquishing
    // and reacquiring locks.
    for (int i = 0; i < childrenList.Size(); i++)
    {
        INode child = childrenList.Get(i);
        byte[] childName = child.GetLocalNameBytes();
        long lastYieldCount = summary.GetYieldCount();
        child.ComputeContentSummary(summary);
        // Check whether the computation was paused in the subtree.
        // The counts may be off, but traversing the rest of the children
        // should be made safe.
        if (lastYieldCount == summary.GetYieldCount())
        {
            continue;
        }
        // The locks were released and reacquired. Check the parent first.
        if (GetParent() == null)
        {
            // Stop further counting and return whatever we have so far.
            break;
        }
        // Obtain the children list again since it may have been modified.
        childrenList = GetChildrenList(snapshotId);
        // Reposition in case the children list has changed. Decrement by 1
        // since the loop will increment it on the next iteration.
        i = NextChild(childrenList, childName) - 1;
    }
    // Increment the directory count for this directory.
    summary.GetCounts().AddContent(Content.Directory, 1);
    // Relinquish and reacquire locks if necessary.
    summary.Yield();
    return summary;
}
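// A minimal standalone sketch of the reposition-after-yield pattern above:
// when the collection can be rebuilt while iterating (here simulated by
// refresh(), which must return a sorted list, like an INode children list),
// remember the last processed key and re-find the next index by name instead
// of trusting the stale counter. All names here are illustrative, not part of
// the HDFS API.
internal static class RepositionSketch
{
    internal static void Traverse(
        System.Func<System.Collections.Generic.List<string>> refresh,
        System.Action<string> visit)
    {
        var children = refresh();
        for (int i = 0; i < children.Count; i++)
        {
            string name = children[i];
            visit(name);
            // Simulate a lock release/reacquire: the list may have changed.
            children = refresh();
            // Resume at the entry after "name"; -1 because the for-loop
            // increments i before the next iteration.
            int next = children.BinarySearch(name, System.StringComparer.Ordinal);
            i = (next < 0 ? -next - 1 : next + 1) - 1;
        }
    }
}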
/// <summary>Check the correctness of the snapshot list within a snapshottable directory.</summary>
private void CheckSnapshotList(INodeDirectory srcRoot, string[] sortedNames,
    string[] names)
{
    NUnit.Framework.Assert.IsTrue(srcRoot.IsSnapshottable());
    // GetSnapshotList() returns the snapshots sorted by name.
    ReadOnlyList<Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> listByName =
        srcRoot.GetDirectorySnapshottableFeature().GetSnapshotList();
    NUnit.Framework.Assert.AreEqual(sortedNames.Length, listByName.Size());
    for (int i = 0; i < listByName.Size(); i++)
    {
        NUnit.Framework.Assert.AreEqual(sortedNames[i],
            listByName.Get(i).GetRoot().GetLocalName());
    }
    // The diff list is ordered by snapshot creation time.
    IList<DirectoryWithSnapshotFeature.DirectoryDiff> listByTime =
        srcRoot.GetDiffs().AsList();
    NUnit.Framework.Assert.AreEqual(names.Length, listByTime.Count);
    for (int i = 0; i < listByTime.Count; i++)
    {
        Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s =
            srcRoot.GetDirectorySnapshottableFeature().GetSnapshotById(
                listByTime[i].GetSnapshotId());
        NUnit.Framework.Assert.AreEqual(names[i], s.GetRoot().GetLocalName());
    }
}
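// A hedged usage sketch for the helper above (directory and snapshot names
// hypothetical): after taking snapshots in the order "s2", "s0", "s1" on
// sdirRoot, a test would pass the lexicographically sorted names and the
// creation-order names separately:
//
//   CheckSnapshotList(sdirRoot,
//       new string[] { "s0", "s1", "s2" },  // GetSnapshotList(): sorted by name
//       new string[] { "s2", "s0", "s1" }); // diff list: ordered by creation time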
/// <summary>
/// Get a partial listing of the indicated directory.
/// We will stop when any of the following conditions is met:
/// 1) this.lsLimit files have been added
/// 2) needLocation is true AND enough files have been added such
/// that at least this.lsLimit block locations are in the response
/// </summary>
/// <param name="fsd">FSDirectory</param>
/// <param name="iip">
/// the INodesInPath instance containing all the INodes along the path
/// </param>
/// <param name="src">the directory name</param>
/// <param name="startAfter">the name to start listing after</param>
/// <param name="needLocation">if block locations need to be returned</param>
/// <returns>a partial listing starting after startAfter</returns>
/// <exception cref="System.IO.IOException"/>
private static DirectoryListing GetListing(FSDirectory fsd, INodesInPath iip,
    string src, byte[] startAfter, bool needLocation, bool isSuperUser)
{
    string srcs = FSDirectory.NormalizePath(src);
    bool isRawPath = FSDirectory.IsReservedRawName(src);
    fsd.ReadLock();
    try
    {
        if (srcs.EndsWith(HdfsConstants.SeparatorDotSnapshotDir))
        {
            return GetSnapshotsListing(fsd, srcs, startAfter);
        }
        int snapshot = iip.GetPathSnapshotId();
        INode targetNode = iip.GetLastINode();
        if (targetNode == null)
        {
            return null;
        }
        byte parentStoragePolicy = isSuperUser
            ? targetNode.GetStoragePolicyID()
            : BlockStoragePolicySuite.IdUnspecified;
        if (!targetNode.IsDirectory())
        {
            // The path is a file: return a single-entry listing for it.
            return new DirectoryListing(new HdfsFileStatus[] {
                CreateFileStatus(fsd, src, HdfsFileStatus.EmptyName, targetNode,
                    needLocation, parentStoragePolicy, snapshot, isRawPath, iip) }, 0);
        }
        INodeDirectory dirInode = targetNode.AsDirectory();
        ReadOnlyList<INode> contents = dirInode.GetChildrenList(snapshot);
        int startChild = INodeDirectory.NextChild(contents, startAfter);
        int totalNumChildren = contents.Size();
        int numOfListing = Math.Min(totalNumChildren - startChild, fsd.GetLsLimit());
        int locationBudget = fsd.GetLsLimit();
        int listingCnt = 0;
        HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
        for (int i = 0; i < numOfListing && locationBudget > 0; i++)
        {
            INode cur = contents.Get(startChild + i);
            byte curPolicy = isSuperUser && !cur.IsSymlink()
                ? cur.GetLocalStoragePolicyID()
                : BlockStoragePolicySuite.IdUnspecified;
            listing[i] = CreateFileStatus(fsd, src, cur.GetLocalNameBytes(), cur,
                needLocation, GetStoragePolicyID(curPolicy, parentStoragePolicy),
                snapshot, isRawPath, iip);
            listingCnt++;
            if (needLocation)
            {
                // Once we hit lsLimit locations, stop.
                // This helps to prevent excessively large response payloads.
                // Approximate #locations with locatedBlockCount() * repl_factor
                LocatedBlocks blks = ((HdfsLocatedFileStatus)listing[i]).GetBlockLocations();
                locationBudget -= (blks == null)
                    ? 0
                    : blks.LocatedBlockCount() * listing[i].GetReplication();
            }
        }
        // Truncate the returned array if the location budget cut the loop short.
        if (listingCnt < numOfListing)
        {
            listing = Arrays.CopyOf(listing, listingCnt);
        }
        return new DirectoryListing(listing, totalNumChildren - startChild - listingCnt);
    }
    finally
    {
        fsd.ReadUnlock();
    }
}
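// A standalone sketch of the location-budget arithmetic above (illustrative
// numbers, not guaranteed HDFS defaults): each returned entry spends roughly
// locatedBlockCount * replication out of a budget of lsLimit locations, and
// the listing stops once the budget is exhausted even if fewer than lsLimit
// entries were produced. The real loop is additionally capped by numOfListing.
internal static class LocationBudgetSketch
{
    internal static int EntriesWithinBudget(int lsLimit, int blocksPerFile, int replication)
    {
        int costPerEntry = blocksPerFile * replication; // approximate #locations per entry
        if (costPerEntry <= 0)
        {
            return lsLimit; // entries with no locations only stop at the lsLimit entry cap
        }
        int budget = lsLimit;
        int entries = 0;
        while (budget > 0)
        {
            entries++;              // one more HdfsFileStatus in the reply
            budget -= costPerEntry; // charge its approximate location count
        }
        return entries;
    }
}
// Example: EntriesWithinBudget(1000, 4, 3) == 84, so listing a large directory
// of 4-block, 3x-replicated files returns ~84 entries per RPC, not lsLimit.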