/// <summary>Replace the given child with a new child.</summary>
/// <remarks>
/// Replace the given child with a new child. Note that we no longer need to
/// replace a normal INodeDirectory or INodeFile with an
/// INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only case
/// left for child replacement is reference nodes.
/// </remarks>
public virtual void ReplaceChild(INode oldChild, INode newChild, INodeMap inodeMap)
{
    Preconditions.CheckNotNull(children);
    int i = SearchChildren(newChild.GetLocalNameBytes());
    Preconditions.CheckState(i >= 0);
    Preconditions.CheckState(oldChild == children[i]
        || oldChild == children[i].AsReference().GetReferredINode().AsReference().GetReferredINode());
    oldChild = children[i];
    if (oldChild.IsReference() && newChild.IsReference())
    {
        // both are reference nodes, e.g., DstReference -> WithName
        INodeReference.WithCount withCount =
            (INodeReference.WithCount)oldChild.AsReference().GetReferredINode();
        withCount.RemoveReference(oldChild.AsReference());
    }
    children.Set(i, newChild);
    // replace the instance in the created list of the diff list
    DirectoryWithSnapshotFeature sf = this.GetDirectoryWithSnapshotFeature();
    if (sf != null)
    {
        sf.GetDiffs().ReplaceChild(Diff.ListType.Created, oldChild, newChild);
    }
    // update the inodeMap
    if (inodeMap != null)
    {
        inodeMap.Put(newChild);
    }
}
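// Illustration (not part of the original source): why the precondition above
// accepts a two-level reference chain. When the child slot holds a reference
// node rather than the inode itself, the chain it walks looks roughly like this
// (names are illustrative):
//
//   children[i]                                    -> DstReference or WithName
//   children[i].AsReference().GetReferredINode()   -> WithCount
//             ...AsReference().GetReferredINode()  -> the original inode (== oldChild)
//
// So the CheckState passes either when oldChild sits directly in the slot or
// when it is the inode at the bottom of that reference chain.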
/// <summary>Save one inode's attributes to the image.</summary>
/// <exception cref="System.IO.IOException"/>
public static void SaveINode2Image(INode node, DataOutput @out, bool writeUnderConstruction,
    SnapshotFSImageFormat.ReferenceMap referenceMap)
{
    if (node.IsReference())
    {
        WriteINodeReference(node.AsReference(), @out, writeUnderConstruction, referenceMap);
    }
    else if (node.IsDirectory())
    {
        WriteINodeDirectory(node.AsDirectory(), @out);
    }
    else if (node.IsSymlink())
    {
        WriteINodeSymlink(node.AsSymlink(), @out);
    }
    else if (node.IsFile())
    {
        WriteINodeFile(node.AsFile(), @out, writeUnderConstruction);
    }
}
/// <summary>Try to remove the given reference and then return the reference count.</summary>
/// <remarks>
/// Try to remove the given reference and then return the reference count.
/// If the given inode is not a reference, return -1.
/// </remarks>
public static int TryRemoveReference(INode inode)
{
    if (!inode.IsReference())
    {
        return -1;
    }
    return RemoveReference(inode.AsReference());
}
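// Hedged usage sketch (not from the original source): how a cleanup path might
// interpret the return value. The variable names and surrounding context are
// assumptions; only the -1 / reference-count contract comes from the method above.
//
//   int count = INodeReference.TryRemoveReference(child);
//   if (count == -1)
//   {
//       // child is a plain inode, not a reference: nothing was removed
//   }
//   else if (count == 0)
//   {
//       // the last reference was just dropped; the referred inode is no longer
//       // reachable from any snapshot or live path and may be cleaned up
//   }
//   else
//   {
//       // count > 0: other WithName/DstReference nodes still point at the
//       // referred inode, so it (and its blocks) must be kept
//   }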
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/> internal RenameOperation(FSDirectory fsd, string src, string dst, INodesInPath srcIIP , INodesInPath dstIIP) { this.fsd = fsd; this.src = src; this.dst = dst; this.srcIIP = srcIIP; this.dstIIP = dstIIP; this.srcParentIIP = srcIIP.GetParentINodesInPath(); this.dstParentIIP = dstIIP.GetParentINodesInPath(); BlockStoragePolicySuite bsps = fsd.GetBlockStoragePolicySuite(); srcChild = this.srcIIP.GetLastINode(); srcChildName = srcChild.GetLocalNameBytes(); int srcLatestSnapshotId = srcIIP.GetLatestSnapshotId(); isSrcInSnapshot = srcChild.IsInLatestSnapshot(srcLatestSnapshotId); srcChildIsReference = srcChild.IsReference(); srcParent = this.srcIIP.GetINode(-2).AsDirectory(); // Record the snapshot on srcChild. After the rename, before any new // snapshot is taken on the dst tree, changes will be recorded in the // latest snapshot of the src tree. if (isSrcInSnapshot) { srcChild.RecordModification(srcLatestSnapshotId); } // check srcChild for reference srcRefDstSnapshot = srcChildIsReference ? srcChild.AsReference().GetDstSnapshotId () : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId; oldSrcCounts = new QuotaCounts.Builder().Build(); if (isSrcInSnapshot) { INodeReference.WithName withName = srcParent.ReplaceChild4ReferenceWithName(srcChild , srcLatestSnapshotId); withCount = (INodeReference.WithCount)withName.GetReferredINode(); srcChild = withName; this.srcIIP = INodesInPath.Replace(srcIIP, srcIIP.Length() - 1, srcChild); // get the counts before rename withCount.GetReferredINode().ComputeQuotaUsage(bsps, oldSrcCounts, true); } else { if (srcChildIsReference) { // srcChild is reference but srcChild is not in latest snapshot withCount = (INodeReference.WithCount)srcChild.AsReference().GetReferredINode(); } else { withCount = null; } } }
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/> internal virtual void RestoreSource() { // Rename failed - restore src INode oldSrcChild = srcChild; // put it back if (withCount == null) { srcChild.SetLocalName(srcChildName); } else { if (!srcChildIsReference) { // src must be in snapshot // the withCount node will no longer be used thus no need to update // its reference number here srcChild = withCount.GetReferredINode(); srcChild.SetLocalName(srcChildName); } else { withCount.RemoveReference(oldSrcChild.AsReference()); srcChild = new INodeReference.DstReference(srcParent, withCount, srcRefDstSnapshot ); withCount.GetReferredINode().SetLocalName(srcChildName); } } if (isSrcInSnapshot) { srcParent.UndoRename4ScrParent(oldSrcChild.AsReference(), srcChild); } else { // srcParent is not an INodeDirectoryWithSnapshot, we only need to add // the srcChild back fsd.AddLastINodeNoQuotaCheck(srcParentIIP, srcChild); } }
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.QuotaExceededException"/> internal virtual void RestoreDst(BlockStoragePolicySuite bsps) { Preconditions.CheckState(oldDstChild != null); INodeDirectory dstParent = dstParentIIP.GetLastINode().AsDirectory(); if (dstParent.IsWithSnapshot()) { dstParent.UndoRename4DstParent(bsps, oldDstChild, dstIIP.GetLatestSnapshotId()); } else { fsd.AddLastINodeNoQuotaCheck(dstParentIIP, oldDstChild); } if (oldDstChild != null && oldDstChild.IsReference()) { INodeReference removedDstRef = oldDstChild.AsReference(); INodeReference.WithCount wc = (INodeReference.WithCount)removedDstRef.GetReferredINode ().AsReference(); wc.AddReference(removedDstRef); } }
internal virtual INodeReference.WithName ReplaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId)
{
    Preconditions.CheckArgument(latestSnapshotId
        != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
    if (oldChild is INodeReference.WithName)
    {
        return (INodeReference.WithName)oldChild;
    }
    INodeReference.WithCount withCount;
    if (oldChild.IsReference())
    {
        Preconditions.CheckState(oldChild is INodeReference.DstReference);
        withCount = (INodeReference.WithCount)oldChild.AsReference().GetReferredINode();
    }
    else
    {
        withCount = new INodeReference.WithCount(null, oldChild);
    }
    INodeReference.WithName @ref = new INodeReference.WithName(this, withCount,
        oldChild.GetLocalNameBytes(), latestSnapshotId);
    ReplaceChild(oldChild, @ref, null);
    return @ref;
}
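// Illustration (not part of the original source): the structural change made by
// ReplaceChild4ReferenceWithName when a child that exists in a snapshot is about
// to be renamed. The child name "foo" and snapshot s1 are illustrative only.
//
//   Before:  parent.children[i] --> foo (ordinary INodeFile/INodeDirectory)
//
//   After ReplaceChild4ReferenceWithName(foo, s1):
//     parent.children[i] --> WithName("foo", lastSnapshotId = s1)
//                                 |
//                                 v
//                             WithCount (tracks the number of live references)
//                                 |
//                                 v
//                             foo (the original inode)
//
// The rename then adds a DstReference under the destination parent that points
// at the same WithCount node, so both the snapshot path and the new path still
// resolve to the single underlying inode.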
/// <summary>Retrieve existing INodes from a path.</summary>
/// <remarks>
/// Retrieve existing INodes from a path. For a non-snapshot path,
/// the number of INodes is equal to the number of path components. For a
/// snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
/// (number_of_path_components - 1).
/// An UnresolvedPathException is always thrown when an intermediate path
/// component refers to a symbolic link. If the final path component refers
/// to a symbolic link then an UnresolvedPathException is only thrown if
/// resolveLink is true.
/// <p>
/// Example: <br />
/// Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
/// following path components: ["","c1","c2","c3"],
/// <p>
/// <code>getExistingPathINodes(["","c1","c2"])</code> should fill
/// the array with [rootINode,c1,c2], <br />
/// <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
/// fill the array with [rootINode,c1,c2,null].
/// </remarks>
/// <param name="startingDir">the starting directory</param>
/// <param name="components">array of path component names</param>
/// <param name="resolveLink">
/// indicates whether an UnresolvedLinkException should
/// be thrown when the path refers to a symbolic link.
/// </param>
/// <returns>the specified number of existing INodes in the path</returns>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
internal static Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath Resolve(
    INodeDirectory startingDir, byte[][] components, bool resolveLink)
{
    Preconditions.CheckArgument(startingDir.CompareTo(components[0]) == 0);
    INode curNode = startingDir;
    int count = 0;
    int inodeNum = 0;
    INode[] inodes = new INode[components.Length];
    bool isSnapshot = false;
    int snapshotId = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
    while (count < components.Length && curNode != null)
    {
        bool lastComp = (count == components.Length - 1);
        inodes[inodeNum++] = curNode;
        bool isRef = curNode.IsReference();
        bool isDir = curNode.IsDirectory();
        INodeDirectory dir = isDir ? curNode.AsDirectory() : null;
        if (!isRef && isDir && dir.IsWithSnapshot())
        {
            // if the path is a non-snapshot path, update the latest snapshot.
            if (!isSnapshot && ShouldUpdateLatestId(
                dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId(), snapshotId))
            {
                snapshotId = dir.GetDirectoryWithSnapshotFeature().GetLastSnapshotId();
            }
        }
        else
        {
            if (isRef && isDir && !lastComp)
            {
                // If the curNode is a reference node, we need to check its dstSnapshot:
                // 1. if the existing snapshot is no later than the dstSnapshot (which
                //    is the latest snapshot in dst before the rename), the changes
                //    should be recorded in previous snapshots (belonging to src).
                // 2. however, if the ref node is already the last component, we still
                //    need to know the latest snapshot among the ref node's ancestors,
                //    in case of processing a deletion operation. Thus we do not overwrite
                //    the latest snapshot if lastComp is true. In case the operation is
                //    a modification operation, we do a similar check in the corresponding
                //    recordModification method.
                if (!isSnapshot)
                {
                    int dstSnapshotId = curNode.AsReference().GetDstSnapshotId();
                    if (snapshotId == Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                        // no snapshot in dst tree of rename
                        || (dstSnapshotId != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId
                            && dstSnapshotId >= snapshotId)) // the above scenario
                    {
                        int lastSnapshot = Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
                        DirectoryWithSnapshotFeature sf;
                        if (curNode.IsDirectory()
                            && (sf = curNode.AsDirectory().GetDirectoryWithSnapshotFeature()) != null)
                        {
                            lastSnapshot = sf.GetLastSnapshotId();
                        }
                        snapshotId = lastSnapshot;
                    }
                }
            }
        }
        if (curNode.IsSymlink() && (!lastComp || resolveLink))
        {
            string path = ConstructPath(components, 0, components.Length);
            string preceding = ConstructPath(components, 0, count);
            string remainder = ConstructPath(components, count + 1, components.Length);
            string link = DFSUtil.Bytes2String(components[count]);
            string target = curNode.AsSymlink().GetSymlinkString();
            if (Log.IsDebugEnabled())
            {
                Log.Debug("UnresolvedPathException " + " path: " + path + " preceding: " + preceding
                    + " count: " + count + " link: " + link + " target: " + target
                    + " remainder: " + remainder);
            }
            throw new UnresolvedPathException(path, preceding, remainder, target);
        }
        if (lastComp || !isDir)
        {
            break;
        }
        byte[] childName = components[count + 1];
        // check if the next byte[] in components is for ".snapshot"
        if (IsDotSnapshotDir(childName) && dir.IsSnapshottable())
        {
            // skip the ".snapshot" in components
            count++;
            isSnapshot = true;
            // check if ".snapshot" is the last element of components
            if (count == components.Length - 1)
            {
                break;
            }
            // Resolve snapshot root
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot s =
                dir.GetSnapshot(components[count + 1]);
            if (s == null)
            {
                // snapshot not found
                curNode = null;
            }
            else
            {
                curNode = s.GetRoot();
                snapshotId = s.GetId();
            }
        }
        else
        {
            // normal case, and also for resolving file/dir under snapshot root
            curNode = dir.GetChild(childName, isSnapshot ? snapshotId
                : Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId);
        }
        count++;
    }
    if (isSnapshot && !IsDotSnapshotDir(components[components.Length - 1]))
    {
        // for a snapshot path, shrink the inode array. However, for a path ending
        // with .snapshot, still keep the last null inode in the array.
        INode[] newNodes = new INode[components.Length - 1];
        System.Array.Copy(inodes, 0, newNodes, 0, newNodes.Length);
        inodes = newNodes;
    }
    return new Org.Apache.Hadoop.Hdfs.Server.Namenode.INodesInPath(inodes, components,
        isSnapshot, snapshotId);
}
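// Hedged usage sketch (not from the original source): resolving a snapshot path.
// The path, the snapshot name, and the rootDir/GetPathComponents helpers are
// assumptions used only for illustration.
//
//   byte[][] components = INode.GetPathComponents("/foo/.snapshot/s1/bar");
//   // components: ["", "foo", ".snapshot", "s1", "bar"]  (5 entries)
//   INodesInPath iip = INodesInPath.Resolve(rootDir, components, false);
//   // The ".snapshot" component is skipped, so the returned inode array has
//   // components.Length - 1 = 4 entries: [rootINode, foo, s1's root, bar-as-of-s1],
//   // and the returned INodesInPath records isSnapshot = true with s1's snapshot id.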
/// <exception cref="System.IO.IOException"/> private static INodeFile[] VerifySrcFiles(FSDirectory fsd, string[] srcs, INodesInPath targetIIP, FSPermissionChecker pc) { // to make sure no two files are the same ICollection <INodeFile> si = new LinkedHashSet <INodeFile>(); INodeFile targetINode = targetIIP.GetLastINode().AsFile(); INodeDirectory targetParent = targetINode.GetParent(); // now check the srcs foreach (string src in srcs) { INodesInPath iip = fsd.GetINodesInPath4Write(src); // permission check for srcs if (pc != null) { fsd.CheckPathAccess(pc, iip, FsAction.Read); // read the file fsd.CheckParentAccess(pc, iip, FsAction.Write); } // for delete INode srcINode = iip.GetLastINode(); INodeFile srcINodeFile = INodeFile.ValueOf(srcINode, src); // make sure the src file and the target file are in the same dir if (srcINodeFile.GetParent() != targetParent) { throw new HadoopIllegalArgumentException("Source file " + src + " is not in the same directory with the target " + targetIIP.GetPath()); } // make sure all the source files are not in snapshot if (srcINode.IsInLatestSnapshot(iip.GetLatestSnapshotId())) { throw new SnapshotException("Concat: the source file " + src + " is in snapshot"); } // check if the file has other references. if (srcINode.IsReference() && ((INodeReference.WithCount)srcINode.AsReference().GetReferredINode ()).GetReferenceCount() > 1) { throw new SnapshotException("Concat: the source file " + src + " is referred by some other reference in some snapshot." ); } // source file cannot be the same with the target file if (srcINode == targetINode) { throw new HadoopIllegalArgumentException("concat: the src file " + src + " is the same with the target file " + targetIIP.GetPath()); } // source file cannot be under construction or empty if (srcINodeFile.IsUnderConstruction() || srcINodeFile.NumBlocks() == 0) { throw new HadoopIllegalArgumentException("concat: source file " + src + " is invalid or empty or underConstruction" ); } // source file's preferred block size cannot be greater than the target // file if (srcINodeFile.GetPreferredBlockSize() > targetINode.GetPreferredBlockSize()) { throw new HadoopIllegalArgumentException("concat: source file " + src + " has preferred block size " + srcINodeFile.GetPreferredBlockSize() + " which is greater than the target file's preferred block size " + targetINode.GetPreferredBlockSize()); } si.AddItem(srcINodeFile); } // make sure no two files are the same if (si.Count < srcs.Length) { // it means at least two files are the same throw new HadoopIllegalArgumentException("concat: at least two of the source files are the same" ); } return(Sharpen.Collections.ToArray(si, new INodeFile[si.Count])); }