private INodeFile LoadINodeFile(FsImageProto.INodeSection.INode n)
{
    System.Diagnostics.Debug.Assert(n.GetType() == FsImageProto.INodeSection.INode.Type.File);
    FsImageProto.INodeSection.INodeFile f = n.GetFile();
    IList<HdfsProtos.BlockProto> bp = f.GetBlocksList();
    short replication = (short)f.GetReplication();
    FSImageFormatProtobuf.LoaderContext state = parent.GetLoaderContext();
    // materialize the protobuf block list into a fixed-size array,
    // pairing each block with the file-wide replication factor
    BlockInfoContiguous[] blocks = new BlockInfoContiguous[bp.Count];
    for (int i = 0; i < bp.Count; ++i)
    {
        blocks[i] = new BlockInfoContiguous(PBHelper.Convert(bp[i]), replication);
    }
    PermissionStatus permissions = LoadPermission(f.GetPermission(), state.GetStringTable());
    INodeFile file = new INodeFile(n.GetId(), n.GetName().ToByteArray(), permissions,
        f.GetModificationTime(), f.GetAccessTime(), blocks, replication,
        f.GetPreferredBlockSize(), unchecked((byte)f.GetStoragePolicyID()));
    if (f.HasAcl())
    {
        int[] entries = AclEntryStatusFormat.ToInt(LoadAclEntries(f.GetAcl(), state.GetStringTable()));
        file.AddAclFeature(new AclFeature(entries));
    }
    if (f.HasXAttrs())
    {
        file.AddXAttrFeature(new XAttrFeature(LoadXAttrs(f.GetXAttrs(), state.GetStringTable())));
    }
    // under-construction information
    if (f.HasFileUC())
    {
        FsImageProto.INodeSection.FileUnderConstructionFeature uc = f.GetFileUC();
        file.ToUnderConstruction(uc.GetClientName(), uc.GetClientMachine());
        if (blocks.Length > 0)
        {
            // replace the last block of the file with its under-construction variant
            BlockInfoContiguous lastBlk = file.GetLastBlock();
            file.SetBlock(file.NumBlocks() - 1,
                new BlockInfoContiguousUnderConstruction(lastBlk, replication));
        }
    }
    return file;
}
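// The loader above converts the protobuf block list into a fixed-size in-memory
// array, one entry per block, each carrying the file-wide replication factor.
// Below is a minimal, self-contained sketch of that conversion pattern.
// BlockProtoStub and BlockStub are hypothetical stand-ins for illustration,
// not the real HdfsProtos.BlockProto / BlockInfoContiguous types.
using System;
using System.Collections.Generic;

class BlockProtoStub
{
    public long BlockId;
    public long NumBytes;
}

class BlockStub
{
    public readonly long BlockId;
    public readonly long NumBytes;
    public readonly short Replication;

    public BlockStub(BlockProtoStub p, short replication)
    {
        BlockId = p.BlockId;
        NumBytes = p.NumBytes;
        Replication = replication;
    }
}

static class LoaderSketch
{
    // Mirrors the loader's loop: size the array from the proto list,
    // then convert element by element with the shared replication value.
    static BlockStub[] ConvertBlocks(IList<BlockProtoStub> bp, short replication)
    {
        BlockStub[] blocks = new BlockStub[bp.Count];
        for (int i = 0; i < bp.Count; ++i)
        {
            blocks[i] = new BlockStub(bp[i], replication);
        }
        return blocks;
    }

    static void Main()
    {
        var protos = new List<BlockProtoStub>
        {
            new BlockProtoStub { BlockId = 1, NumBytes = 128 },
            new BlockProtoStub { BlockId = 2, NumBytes = 64 }
        };
        foreach (BlockStub b in ConvertBlocks(protos, 3))
        {
            Console.WriteLine("block " + b.BlockId + ": " + b.NumBytes
                + " bytes, replication " + b.Replication);
        }
    }
}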
/// <exception cref="System.IO.IOException"/>
private static INodeFile[] VerifySrcFiles(FSDirectory fsd, string[] srcs,
    INodesInPath targetIIP, FSPermissionChecker pc)
{
    // use an insertion-ordered set so duplicate sources can be detected below
    ICollection<INodeFile> si = new LinkedHashSet<INodeFile>();
    INodeFile targetINode = targetIIP.GetLastINode().AsFile();
    INodeDirectory targetParent = targetINode.GetParent();
    // now check the srcs
    foreach (string src in srcs)
    {
        INodesInPath iip = fsd.GetINodesInPath4Write(src);
        // permission check for srcs
        if (pc != null)
        {
            fsd.CheckPathAccess(pc, iip, FsAction.Read); // read the file
            fsd.CheckParentAccess(pc, iip, FsAction.Write); // for delete
        }
        INode srcINode = iip.GetLastINode();
        INodeFile srcINodeFile = INodeFile.ValueOf(srcINode, src);
        // make sure the src file and the target file are in the same dir
        if (srcINodeFile.GetParent() != targetParent)
        {
            throw new HadoopIllegalArgumentException("Source file " + src +
                " is not in the same directory with the target " + targetIIP.GetPath());
        }
        // make sure none of the source files is in a snapshot
        if (srcINode.IsInLatestSnapshot(iip.GetLatestSnapshotId()))
        {
            throw new SnapshotException("Concat: the source file " + src + " is in snapshot");
        }
        // check if the file has other references
        if (srcINode.IsReference() &&
            ((INodeReference.WithCount)srcINode.AsReference().GetReferredINode()).GetReferenceCount() > 1)
        {
            throw new SnapshotException("Concat: the source file " + src +
                " is referred by some other reference in some snapshot.");
        }
        // a source file cannot be the same as the target file
        if (srcINode == targetINode)
        {
            throw new HadoopIllegalArgumentException("concat: the src file " + src +
                " is the same with the target file " + targetIIP.GetPath());
        }
        // a source file cannot be under construction or empty
        if (srcINodeFile.IsUnderConstruction() || srcINodeFile.NumBlocks() == 0)
        {
            throw new HadoopIllegalArgumentException("concat: source file " + src +
                " is invalid or empty or underConstruction");
        }
        // a source file's preferred block size cannot be greater than the target file's
        if (srcINodeFile.GetPreferredBlockSize() > targetINode.GetPreferredBlockSize())
        {
            throw new HadoopIllegalArgumentException("concat: source file " + src +
                " has preferred block size " + srcINodeFile.GetPreferredBlockSize() +
                " which is greater than the target file's preferred block size " +
                targetINode.GetPreferredBlockSize());
        }
        si.AddItem(srcINodeFile);
    }
    // make sure no two files are the same: the set drops duplicates, so a
    // smaller count means at least two of the sources were the same file
    if (si.Count < srcs.Length)
    {
        throw new HadoopIllegalArgumentException(
            "concat: at least two of the source files are the same");
    }
    return Sharpen.Collections.ToArray(si, new INodeFile[si.Count]);
}
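// The duplicate-source check above relies on set semantics rather than pairwise
// comparison: every source inode is added to an insertion-ordered set, and if
// the set ends up smaller than the input array, at least two paths resolved to
// the same file. A self-contained sketch of that idea follows; HashSet<string>
// stands in for Sharpen's LinkedHashSet and demonstrates only the duplicate
// detection (it does not guarantee insertion order), and the paths are made-up.
using System;
using System.Collections.Generic;

static class DuplicateCheckSketch
{
    // Returns the distinct inputs, throwing if any input was repeated,
    // analogous to how VerifySrcFiles rejects repeated concat sources.
    static string[] VerifyDistinct(string[] srcs)
    {
        var seen = new HashSet<string>();
        foreach (string src in srcs)
        {
            seen.Add(src); // a set silently drops repeated elements
        }
        if (seen.Count < srcs.Length)
        {
            // fewer distinct entries than inputs => at least one duplicate
            throw new ArgumentException("at least two of the source files are the same");
        }
        var result = new string[seen.Count];
        seen.CopyTo(result);
        return result;
    }

    static void Main()
    {
        Console.WriteLine(string.Join(", ", VerifyDistinct(new[] { "/a", "/b" })));
        try
        {
            VerifyDistinct(new[] { "/a", "/b", "/a" }); // duplicate source
        }
        catch (ArgumentException e)
        {
            Console.WriteLine("rejected: " + e.Message);
        }
    }
}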