/// <summary>
/// Serialize an <see cref="INodeDirectory"/>.
/// </summary>
/// <param name="node">The directory node to write</param>
/// <param name="out">
/// The <see cref="System.IO.DataOutput"/> receiving the serialized fields
/// </param>
/// <exception cref="System.IO.IOException"/>
public static void WriteINodeDirectory(INodeDirectory node, DataOutput @out)
{
    WriteLocalName(node, @out);
    @out.WriteLong(node.GetId());
    // Directories have no replication, access time, preferred block size or
    // block list; placeholder values keep the on-disk record layout fixed.
    @out.WriteShort(0);
    @out.WriteLong(node.GetModificationTime());
    @out.WriteLong(0);
    @out.WriteLong(0);
    @out.WriteInt(-1);
    WriteQuota(node.GetQuotaCounts(), @out);
    bool snapshottable = node.IsSnapshottable();
    @out.WriteBoolean(snapshottable);
    if (!snapshottable)
    {
        // Only non-snapshottable directories record the with-snapshot flag.
        @out.WriteBoolean(node.IsWithSnapshot());
    }
    WritePermissionStatus(node, @out);
}
/// <summary>
/// <inheritDoc/>
/// <br/>
/// Destroying a DstReference first drops its link to the referred node.
/// When the referred node's reference count falls to zero or below, the
/// referred node's whole subtree is destroyed. Otherwise the referred
/// node's subtree is cleaned: everything created after the last rename
/// operation — i.e., everything outside the scope of the prior WithName
/// nodes — is deleted.
/// </summary>
public override void DestroyAndCollectBlocks(BlockStoragePolicySuite bsps, INode.BlocksMapUpdateInfo
     collectedBlocks, IList<INode> removedINodes)
{
    if (RemoveReference(this) <= 0)
    {
        // No remaining references: the referred subtree can go away entirely.
        GetReferredINode().DestroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
        return;
    }
    // Still referenced elsewhere: clean everything — files, directories and
    // snapshots — that was created after this prior snapshot.
    int priorId = GetPriorSnapshot(this);
    // A prior snapshot must exist here; otherwise there would be no previous
    // WithName nodes and the reference count would have been 0 above.
    Preconditions.CheckState(priorId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot
        .NoSnapshotId);
    // Identify the snapshot created after the prior one.
    int selfSnapshotId = GetSelfSnapshot(priorId);
    INode referredNode = GetReferredINode().AsReference().GetReferredINode();
    if (referredNode.IsFile())
    {
        // A referred file must carry the snapshot feature, since
        // recordModification ran before the rename.
        INodeFile snapshotFile = referredNode.AsFile();
        Preconditions.CheckState(snapshotFile.IsWithSnapshot());
        // Make sure the current file is marked as deleted.
        snapshotFile.GetFileWithSnapshotFeature().DeleteCurrentFile();
        // Quota usage updates were computed before this destroy call, so
        // cleanSubtree is invoked with diff-change counting in effect.
        referredNode.CleanSubtree(bsps, selfSnapshotId, priorId, collectedBlocks, removedINodes);
    }
    else if (referredNode.IsDirectory())
    {
        // Likewise, a referred directory must be an INodeDirectory with the
        // snapshot feature.
        INodeDirectory snapshotDir = referredNode.AsDirectory();
        Preconditions.CheckState(snapshotDir.IsWithSnapshot());
        try
        {
            DirectoryWithSnapshotFeature.DestroyDstSubtree(bsps, snapshotDir, selfSnapshotId,
                priorId, collectedBlocks, removedINodes);
        }
        catch (QuotaExceededException qee)
        {
            Log.Error("should not exceed quota while snapshot deletion", qee);
        }
    }
}
/// <summary>
/// Undo the rename at the destination: reinsert <c>oldDstChild</c> under its
/// original destination parent and, when the child is a reference, restore
/// the reference on its WithCount node.
/// </summary>
/// <param name="bsps">block storage policy suite used during the undo</param>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/>
internal virtual void RestoreDst(BlockStoragePolicySuite bsps)
{
    Preconditions.CheckState(oldDstChild != null);
    INodeDirectory dstParent = dstParentIIP.GetLastINode().AsDirectory();
    if (dstParent.IsWithSnapshot())
    {
        // Snapshotted parent: undo through the snapshot-aware path so the
        // snapshot diffs stay consistent.
        dstParent.UndoRename4DstParent(bsps, oldDstChild, dstIIP.GetLatestSnapshotId());
    }
    else
    {
        fsd.AddLastINodeNoQuotaCheck(dstParentIIP, oldDstChild);
    }
    // oldDstChild is guaranteed non-null by the precondition above, so the
    // previously redundant null re-check has been removed.
    if (oldDstChild.IsReference())
    {
        INodeReference removedDstRef = oldDstChild.AsReference();
        INodeReference.WithCount wc = (INodeReference.WithCount)removedDstRef.GetReferredINode
            ().AsReference();
        wc.AddReference(removedDstRef);
    }
}
/// <summary>Retrieve existing INodes from a path.</summary>
/// <remarks>
/// Retrieve existing INodes from a path. For a non-snapshot path,
/// the number of INodes is equal to the number of path components. For a
/// snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
/// (number_of_path_components - 1).
/// An UnresolvedPathException is always thrown when an intermediate path
/// component refers to a symbolic link. If the final path component refers
/// to a symbolic link then an UnresolvedPathException is only thrown if
/// resolveLink is true.
/// <p>
/// Example: <br />
/// Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
/// following path components: ["","c1","c2","c3"]
/// <p>
/// <code>getExistingPathINodes(["","c1","c2"])</code> should fill
/// the array with [rootINode,c1,c2], <br />
/// <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
/// fill the array with [rootINode,c1,c2,null]
/// </remarks>
/// <param name="startingDir">the starting directory</param>
/// <param name="components">array of path component names</param>
/// <param name="resolveLink">
/// indicates whether an UnresolvedPathException should
/// be thrown when the final path component refers to a symbolic link.
/// </param>
/// <returns>the specified number of existing INodes in the path</returns>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath Resolve(INodeDirectory
     startingDir, byte[][] components, bool resolveLink)
{
    // The starting directory must correspond to the first path component.
    Preconditions.CheckArgument(startingDir.CompareTo(components[0]) == 0);
    INode curNode = startingDir;
    int count = 0;      // index of the component that curNode corresponds to
    int inodeNum = 0;   // next free slot in the inodes array
    INode[] inodes = new INode[components.Length];
    bool isSnapshot = false;    // set once a ".snapshot" component is crossed
    int snapshotId = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
    while (count < components.Length && curNode != null)
    {
        bool lastComp = (count == components.Length - 1);
        inodes[inodeNum++] = curNode;
        bool isRef = curNode.IsReference();
        bool isDir = curNode.IsDirectory();
        INodeDirectory dir = isDir ? curNode.AsDirectory() : null;
        if (!isRef && isDir && dir.IsWithSnapshot())
        {
            // if the path is a non-snapshot path, update the latest snapshot id.
            if (!isSnapshot && ShouldUpdateLatestId(dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId
                (), snapshotId))
            {
                snapshotId = dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId();
            }
        }
        else
        {
            if (isRef && isDir && !lastComp)
            {
                // If the curNode is a reference node, need to check its dstSnapshot:
                // 1. if the existing snapshot is no later than the dstSnapshot (which
                //    is the latest snapshot in dst before the rename), the changes
                //    should be recorded in previous snapshots (belonging to src).
                // 2. however, if the ref node is already the last component, we still
                //    need to know the latest snapshot among the ref node's ancestors,
                //    in case of processing a deletion operation. Thus we do not overwrite
                //    the latest snapshot if lastComp is true. In case the operation is
                //    a modification operation, a similar check is done in the
                //    corresponding recordModification method.
                if (!isSnapshot)
                {
                    int dstSnapshotId = curNode.AsReference().GetDstSnapshotId();
                    if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                         || (dstSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                         && dstSnapshotId >= snapshotId))
                    {
                        // no snapshot in the dst tree of the rename (scenario 1 above)
                        int lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                        DirectoryWithSnapshotFeature sf;
                        // fall back to the directory's own latest snapshot id, if any
                        if (curNode.IsDirectory() && (sf = curNode.AsDirectory().GetDirectoryWithSnapshotFeature
                            ()) != null)
                        {
                            lastSnapshot = sf.GetLastSnapshotId();
                        }
                        snapshotId = lastSnapshot;
                    }
                }
            }
        }
        // An intermediate symlink always aborts resolution; a final symlink
        // aborts only when resolveLink is requested.
        if (curNode.IsSymlink() && (!lastComp || resolveLink))
        {
            string path = ConstructPath(components, 0, components.Length);
            string preceding = ConstructPath(components, 0, count);
            string remainder = ConstructPath(components, count + 1, components.Length);
            string link = DFSUtil.Bytes2String(components[count]);
            string target = curNode.AsSymlink().GetSymlinkString();
            if (Log.IsDebugEnabled())
            {
                Log.Debug("UnresolvedPathException " + " path: " + path + " preceding: " + preceding
                     + " count: " + count + " link: " + link + " target: " + target + " remainder: "
                     + remainder);
            }
            throw new UnresolvedPathException(path, preceding, remainder, target);
        }
        if (lastComp || !isDir)
        {
            break;
        }
        byte[] childName = components[count + 1];
        // check if the next byte[] in components is for ".snapshot"
        if (IsDotSnapshotDir(childName) && dir.IsSnapshottable())
        {
            // skip the ".snapshot" in components
            count++;
            isSnapshot = true;
            // check if ".snapshot" is the last element of components
            if (count == components.Length - 1)
            {
                break;
            }
            // Resolve the snapshot root
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s = dir.GetSnapshot(components
                [count + 1]);
            if (s == null)
            {
                // snapshot not found
                curNode = null;
            }
            else
            {
                curNode = s.GetRoot();
                snapshotId = s.GetId();
            }
        }
        else
        {
            // normal case, and also for resolving a file/dir under a snapshot root
            curNode = dir.GetChild(childName, isSnapshot ? snapshotId : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot
                .CurrentStateId);
        }
        count++;
    }
    if (isSnapshot && !IsDotSnapshotDir(components[components.Length - 1]))
    {
        // for a snapshot path, shrink the inode array. however, for a path
        // ending with .snapshot, still keep the last null inode in the array
        INode[] newNodes = new INode[components.Length - 1];
        System.Array.Copy(inodes, 0, newNodes, 0, newNodes.Length);
        inodes = newNodes;
    }
    return (new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath(inodes, components
        , isSnapshot, snapshotId));
}