// do not store locations of last block
/// <summary>
/// Serialize an <see cref="INodeFile"/> node into the legacy fsimage stream.
/// </summary>
/// <param name="file">The file inode to write</param>
/// <param name="out">
/// The <see cref="System.IO.DataOutputStream"/> where the fields are written
/// </param>
/// <param name="writeUnderConstruction">
/// Whether to write under-construction (lease holder) information when the
/// file has the under-construction feature
/// </param>
/// <exception cref="System.IO.IOException"/>
public static void WriteINodeFile(INodeFile file, DataOutput @out, bool writeUnderConstruction
    )
{
    // Field order is the on-disk format contract with the corresponding
    // reader; do not reorder any of these writes.
    WriteLocalName(file, @out);
    @out.WriteLong(file.GetId());
    @out.WriteShort(file.GetFileReplication());
    @out.WriteLong(file.GetModificationTime());
    @out.WriteLong(file.GetAccessTime());
    @out.WriteLong(file.GetPreferredBlockSize());
    WriteBlocks(file.GetBlocks(), @out);
    // Persist the file's snapshot diff list after the block list.
    SnapshotFSImageFormat.SaveFileDiffList(file, @out);
    if (writeUnderConstruction)
    {
        if (file.IsUnderConstruction())
        {
            // Marker byte 'true' followed by the lease holder identity.
            @out.WriteBoolean(true);
            FileUnderConstructionFeature uc = file.GetFileUnderConstructionFeature();
            WriteString(uc.GetClientName(), @out);
            WriteString(uc.GetClientMachine(), @out);
        }
        else
        {
            @out.WriteBoolean(false);
        }
    }
    WritePermissionStatus(file, @out);
}
/// <summary>Get the list of inodes corresponding to valid leases.</summary>
/// <returns>
/// map from path to its under-construction file inode, sorted by path
/// </returns>
internal virtual IDictionary<string, INodeFile> GetINodesUnderConstruction()
{
    IDictionary<string, INodeFile> ucFiles = new SortedDictionary<string, INodeFile>();
    foreach (string path in sortedLeasesByPath.Keys)
    {
        // verify that path exists in namespace
        try
        {
            INodeFile node = INodeFile.ValueOf(fsnamesystem.dir.GetINode(path), path);
            if (!node.IsUnderConstruction())
            {
                // A lease exists but the file is already closed; skip it.
                Log.Warn("Ignore the lease of file " + path + " for checkpoint since the file is not under construction"
                    );
                continue;
            }
            ucFiles[path] = node;
        }
        catch (IOException ioe)
        {
            Log.Error(ioe);
        }
    }
    return (ucFiles);
}
/// <summary>Create FileStatus with location info by file INode</summary>
/// <param name="fsd">directory tree used to resolve attributes, blocks and EZ info</param>
/// <param name="fullPath">full path of the inode, used for attribute lookup</param>
/// <param name="path">local name of the inode as bytes</param>
/// <param name="node">the inode to report on (file, directory, or symlink)</param>
/// <param name="storagePolicy">storage policy id to embed in the status</param>
/// <param name="snapshot">snapshot id, or current-state id for the live view</param>
/// <param name="isRawPath">true when accessed via the raw (/.reserved/raw) path</param>
/// <param name="iip">resolved inodes-in-path for encryption info lookup</param>
/// <exception cref="System.IO.IOException"/>
private static HdfsLocatedFileStatus CreateLocatedFileStatus(FSDirectory fsd, string
    fullPath, byte[] path, INode node, byte storagePolicy, int snapshot, bool isRawPath
    , INodesInPath iip)
{
    // Caller must hold the FSDirectory read lock while we walk the tree.
    System.Diagnostics.Debug.Assert(fsd.HasReadLock());
    long size = 0;
    // length is zero for directories
    short replication = 0;
    long blocksize = 0;
    LocatedBlocks loc = null;
    bool isEncrypted;
    // Raw-path access deliberately hides the encryption info from the status.
    FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node, snapshot
        , iip);
    if (node.IsFile())
    {
        INodeFile fileNode = node.AsFile();
        size = fileNode.ComputeFileSize(snapshot);
        replication = fileNode.GetFileReplication(snapshot);
        blocksize = fileNode.GetPreferredBlockSize();
        bool inSnapshot = snapshot != Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot
            .CurrentStateId;
        // Under-construction state only matters for the current (non-snapshot) view.
        bool isUc = !inSnapshot && fileNode.IsUnderConstruction();
        // For an open file, report the size excluding the last (still-growing)
        // under-construction block; snapshots use the frozen size.
        long fileSize = !inSnapshot && isUc ? fileNode.ComputeFileSizeNotIncludingLastUcBlock
            () : size;
        loc = fsd.GetFSNamesystem().GetBlockManager().CreateLocatedBlocks(fileNode.GetBlocks
            (snapshot), fileSize, isUc, 0L, size, false, inSnapshot, feInfo);
        if (loc == null)
        {
            loc = new LocatedBlocks();
        }
        isEncrypted = (feInfo != null) || (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode
            (node)));
    }
    else
    {
        // Directories and symlinks: encrypted iff inside an encryption zone.
        isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
    }
    int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot
        ) : 0;
    INodeAttributes nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);
    HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.IsDirectory()
        , replication, blocksize, node.GetModificationTime(snapshot), node.GetAccessTime
        (snapshot), GetPermissionForFileStatus(nodeAttrs, isEncrypted), nodeAttrs.GetUserName
        (), nodeAttrs.GetGroupName(), node.IsSymlink() ?
        node.AsSymlink().GetSymlink() : null, path, node.GetId(), loc, childrenNum, feInfo
        , storagePolicy);
    // Set caching information for the located blocks.
    if (loc != null)
    {
        CacheManager cacheManager = fsd.GetFSNamesystem().GetCacheManager();
        foreach (LocatedBlock lb in loc.GetLocatedBlocks())
        {
            cacheManager.SetCachedLocations(lb);
        }
    }
    return (status);
}
/// <summary>
/// Verify that an under-construction file (with hsync'ed but unclosed data)
/// survives a saveNamespace checkpoint and NameNode restart: file length,
/// UC state, block UC state, and its lease must all be restored.
/// </summary>
/// <param name="conf">cluster configuration to build the MiniDFSCluster with</param>
/// <exception cref="System.IO.IOException"/>
private void TestPersistHelper(Configuration conf)
{
    MiniDFSCluster cluster = null;
    try
    {
        cluster = new MiniDFSCluster.Builder(conf).Build();
        cluster.WaitActive();
        FSNamesystem fsn = cluster.GetNamesystem();
        DistributedFileSystem fs = cluster.GetFileSystem();
        Path dir = new Path("/abc/def");
        Path file1 = new Path(dir, "f1");
        Path file2 = new Path(dir, "f2");
        // create an empty file f1
        fs.Create(file1).Close();
        // create an under-construction file f2
        FSDataOutputStream @out = fs.Create(file2);
        @out.WriteBytes("hello");
        // hsync with UpdateLength so the NameNode records the 5 bytes written,
        // while the stream stays open (file remains under construction).
        ((DFSOutputStream)@out.GetWrappedStream()).Hsync(EnumSet.Of(HdfsDataOutputStream.SyncFlag
            .UpdateLength));
        // checkpoint: saveNamespace requires safe mode
        fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeEnter);
        fs.SaveNamespace();
        fs.SetSafeMode(HdfsConstants.SafeModeAction.SafemodeLeave);
        // restart so the namesystem is rebuilt purely from the saved image
        cluster.RestartNameNode();
        cluster.WaitActive();
        fs = cluster.GetFileSystem();
        NUnit.Framework.Assert.IsTrue(fs.IsDirectory(dir));
        NUnit.Framework.Assert.IsTrue(fs.Exists(file1));
        NUnit.Framework.Assert.IsTrue(fs.Exists(file2));
        // check internals of file2
        INodeFile file2Node = fsn.dir.GetINode4Write(file2.ToString()).AsFile();
        NUnit.Framework.Assert.AreEqual("hello".Length, file2Node.ComputeFileSize());
        NUnit.Framework.Assert.IsTrue(file2Node.IsUnderConstruction());
        BlockInfoContiguous[] blks = file2Node.GetBlocks();
        NUnit.Framework.Assert.AreEqual(1, blks.Length);
        NUnit.Framework.Assert.AreEqual(HdfsServerConstants.BlockUCState.UnderConstruction
            , blks[0].GetBlockUCState());
        // check lease manager: the open file's lease must survive the restart
        LeaseManager.Lease lease = fsn.leaseManager.GetLeaseByPath(file2.ToString());
        NUnit.Framework.Assert.IsNotNull(lease);
    }
    finally
    {
        if (cluster != null)
        {
            cluster.Shutdown();
        }
    }
}
/// <summary>
/// Verify the block states of a file against its open/closed status:
/// all but the last blocks must be complete and registered in the
/// BlocksMap; the last block may be incomplete only while the file is open.
/// </summary>
/// <param name="file">path of the file to inspect</param>
/// <param name="isFileOpen">expected under-construction state of the file</param>
/// <exception cref="System.IO.IOException"/>
private void VerifyFileBlocks(string file, bool isFileOpen)
{
    FSNamesystem ns = cluster.GetNamesystem();
    INodeFile inode = INodeFile.ValueOf(ns.dir.GetINode(file), file);
    NUnit.Framework.Assert.IsTrue("File " + inode.ToString() + " isUnderConstruction = "
        + inode.IsUnderConstruction() + " expected to be " + isFileOpen, inode.IsUnderConstruction
        () == isFileOpen);
    BlockInfoContiguous[] blocks = inode.GetBlocks();
    NUnit.Framework.Assert.IsTrue("File does not have blocks: " + inode.ToString(),
        blocks != null && blocks.Length > 0);
    int idx = 0;
    BlockInfoContiguous curBlock;
    // all blocks but the last two should be regular blocks
    for (; idx < blocks.Length - 2; idx++)
    {
        curBlock = blocks[idx];
        NUnit.Framework.Assert.IsTrue("Block is not complete: " + curBlock, curBlock.IsComplete
            ());
        // GetStoredBlock must return the very same object we hold
        NUnit.Framework.Assert.IsTrue("Block is not in BlocksMap: " + curBlock, ns.GetBlockManager
            ().GetStoredBlock(curBlock) == curBlock);
    }
    // the penultimate block is either complete or
    // committed if the file is not closed
    if (idx > 0)
    {
        // NOTE(review): at this point idx == blocks.Length - 2, so
        // blocks[idx - 1] is the third-to-last block, not the penultimate one
        // the comment claims — confirm against upstream intent before changing.
        curBlock = blocks[idx - 1];
        // penultimate block
        NUnit.Framework.Assert.IsTrue("Block " + curBlock + " isUnderConstruction = " + inode
            .IsUnderConstruction() + " expected to be " + isFileOpen, (isFileOpen && curBlock
            .IsComplete()) || (!isFileOpen && !curBlock.IsComplete() == (curBlock.GetBlockUCState
            () == HdfsServerConstants.BlockUCState.Committed)));
        NUnit.Framework.Assert.IsTrue("Block is not in BlocksMap: " + curBlock, ns.GetBlockManager
            ().GetStoredBlock(curBlock) == curBlock);
    }
    // The last block is complete if the file is closed.
    // If the file is open, the last block may be complete or not.
    curBlock = blocks[idx];
    // last block
    if (!isFileOpen)
    {
        NUnit.Framework.Assert.IsTrue("Block " + curBlock + ", isFileOpen = " + isFileOpen
            , curBlock.IsComplete());
    }
    NUnit.Framework.Assert.IsTrue("Block is not in BlocksMap: " + curBlock, ns.GetBlockManager
        ().GetStoredBlock(curBlock) == curBlock);
}
/// <summary>
/// Validate the concat target: it must not live in an encryption zone and
/// must be a closed (not under-construction) file.
/// </summary>
/// <param name="fsd">directory tree used to resolve the encryption zone</param>
/// <param name="target">full path of the target file (for error reporting)</param>
/// <param name="targetIIP">resolved inodes-in-path of the target</param>
/// <exception cref="System.IO.IOException"/>
private static void VerifyTargetFile(FSDirectory fsd, string target, INodesInPath
    targetIIP)
{
    // check the target: reject encryption-zone targets before resolving the inode
    bool inEncryptionZone = fsd.GetEZForPath(targetIIP) != null;
    if (inEncryptionZone)
    {
        throw new HadoopIllegalArgumentException("concat can not be called for files in an encryption zone."
            );
    }
    INodeFile trg = INodeFile.ValueOf(targetIIP.GetLastINode(), target);
    // an open target would race with the writer holding its lease
    if (trg.IsUnderConstruction())
    {
        throw new HadoopIllegalArgumentException("concat: target file " + target + " is under construction"
            );
    }
}
/// <summary>
/// Test append over a specific type of storage quota does not mark file as
/// UC or create a lease
/// </summary>
/// <exception cref="System.Exception"/>
public virtual void TestAppendOverTypeQuota()
{
    Path dir = new Path("/TestAppendOverTypeQuota");
    Path file = new Path(dir, "file");
    // create partial block file
    dfs.Mkdirs(dir);
    // set the storage policy on dir
    dfs.SetStoragePolicy(dir, HdfsConstants.OnessdStoragePolicyName);
    dfs.CreateFile(dfs, file, Blocksize / 2, Replication, seed);
    // set quota of SSD to 1L so the coming append must exceed it
    dfs.SetQuotaByStorageType(dir, StorageType.Ssd, 1L);
    INodeDirectory dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    // remember the usage before the failed append for later comparison
    long spaceUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetStorageSpace
        ();
    try
    {
        DFSTestUtil.AppendFile(dfs, file, Blocksize);
        NUnit.Framework.Assert.Fail("append didn't fail");
    }
    catch (RemoteException e)
    {
        NUnit.Framework.Assert.IsTrue(e.GetClassName().Contains("QuotaByStorageTypeExceededException"
            ));
    }
    // check that the file exists, isn't UC, and has no dangling lease
    INodeFile inode = fsdir.GetINode(file.ToString()).AsFile();
    NUnit.Framework.Assert.IsNotNull(inode);
    NUnit.Framework.Assert.IsFalse("should not be UC", inode.IsUnderConstruction());
    NUnit.Framework.Assert.IsNull("should not have a lease", cluster.GetNamesystem().
        GetLeaseManager().GetLeaseByPath(file.ToString()));
    // make sure the quota usage is unchanged
    long newSpaceUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetStorageSpace
        ();
    NUnit.Framework.Assert.AreEqual(spaceUsed, newSpaceUsed);
    // make sure edits aren't corrupted: a restart replays the edit log
    dfs.RecoverLease(file);
    cluster.RestartNameNodes();
}
/// <summary>Test truncate over quota does not mark file as UC or create a lease</summary>
/// <exception cref="System.Exception"/>
public virtual void TestTruncateOverQuota()
{
    Path dir = new Path("/TestTruncateOverquota");
    Path file = new Path(dir, "file");
    // create partial block file
    dfs.Mkdirs(dir);
    DFSTestUtil.CreateFile(dfs, file, Blocksize / 2, Replication, seed);
    // lower quota to cause exception when appending to partial block
    dfs.SetQuota(dir, long.MaxValue - 1, 1);
    INodeDirectory dirNode = fsdir.GetINode4Write(dir.ToString()).AsDirectory();
    // remember the usage before the failed truncate for later comparison
    long spaceUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetStorageSpace
        ();
    try
    {
        // mid-block truncate requires copy-on-truncate, which needs quota headroom
        dfs.Truncate(file, Blocksize / 2 - 1);
        NUnit.Framework.Assert.Fail("truncate didn't fail");
    }
    catch (RemoteException e)
    {
        NUnit.Framework.Assert.IsTrue(e.GetClassName().Contains("DSQuotaExceededException"
            ));
    }
    // check that the file exists, isn't UC, and has no dangling lease
    INodeFile inode = fsdir.GetINode(file.ToString()).AsFile();
    NUnit.Framework.Assert.IsNotNull(inode);
    NUnit.Framework.Assert.IsFalse("should not be UC", inode.IsUnderConstruction());
    NUnit.Framework.Assert.IsNull("should not have a lease", cluster.GetNamesystem().
        GetLeaseManager().GetLeaseByPath(file.ToString()));
    // make sure the quota usage is unchanged
    long newSpaceUsed = dirNode.GetDirectoryWithQuotaFeature().GetSpaceConsumed().GetStorageSpace
        ();
    NUnit.Framework.Assert.AreEqual(spaceUsed, newSpaceUsed);
    // make sure edits aren't corrupted: a restart replays the edit log
    dfs.RecoverLease(file);
    cluster.RestartNameNodes();
}
/// <summary>
/// Validate the concat source files and return them as distinct inodes.
/// Checks (in order): permissions, same parent directory as the target,
/// not in a snapshot, not multiply referenced, not the target itself,
/// not under construction or empty, preferred block size not greater than
/// the target's, and no duplicate sources.
/// </summary>
/// <param name="fsd">directory tree used to resolve the source paths</param>
/// <param name="srcs">paths of the source files to concatenate</param>
/// <param name="targetIIP">resolved inodes-in-path of the concat target</param>
/// <param name="pc">permission checker; null skips permission checks</param>
/// <returns>the validated source file inodes, in input order</returns>
/// <exception cref="System.IO.IOException"/>
private static INodeFile[] VerifySrcFiles(FSDirectory fsd, string[] srcs, INodesInPath
    targetIIP, FSPermissionChecker pc)
{
    // to make sure no two files are the same
    ICollection<INodeFile> si = new LinkedHashSet<INodeFile>();
    INodeFile targetINode = targetIIP.GetLastINode().AsFile();
    INodeDirectory targetParent = targetINode.GetParent();
    // now check the srcs
    foreach (string src in srcs)
    {
        INodesInPath iip = fsd.GetINodesInPath4Write(src);
        // permission check for srcs
        if (pc != null)
        {
            fsd.CheckPathAccess(pc, iip, FsAction.Read);
            // read the file
            fsd.CheckParentAccess(pc, iip, FsAction.Write);
        }
        // for delete
        INode srcINode = iip.GetLastINode();
        INodeFile srcINodeFile = INodeFile.ValueOf(srcINode, src);
        // make sure the src file and the target file are in the same dir
        if (srcINodeFile.GetParent() != targetParent)
        {
            throw new HadoopIllegalArgumentException("Source file " + src + " is not in the same directory with the target "
                + targetIIP.GetPath());
        }
        // make sure all the source files are not in snapshot
        if (srcINode.IsInLatestSnapshot(iip.GetLatestSnapshotId()))
        {
            throw new SnapshotException("Concat: the source file " + src + " is in snapshot");
        }
        // check if the file has other references.
        if (srcINode.IsReference() && ((INodeReference.WithCount)srcINode.AsReference().GetReferredINode
            ()).GetReferenceCount() > 1)
        {
            throw new SnapshotException("Concat: the source file " + src + " is referred by some other reference in some snapshot."
                );
        }
        // source file cannot be the same with the target file
        if (srcINode == targetINode)
        {
            throw new HadoopIllegalArgumentException("concat: the src file " + src + " is the same with the target file "
                + targetIIP.GetPath());
        }
        // source file cannot be under construction or empty
        if (srcINodeFile.IsUnderConstruction() || srcINodeFile.NumBlocks() == 0)
        {
            throw new HadoopIllegalArgumentException("concat: source file " + src + " is invalid or empty or underConstruction"
                );
        }
        // source file's preferred block size cannot be greater than the target
        // file
        if (srcINodeFile.GetPreferredBlockSize() > targetINode.GetPreferredBlockSize())
        {
            throw new HadoopIllegalArgumentException("concat: source file " + src + " has preferred block size "
                + srcINodeFile.GetPreferredBlockSize() + " which is greater than the target file's preferred block size "
                + targetINode.GetPreferredBlockSize());
        }
        si.AddItem(srcINodeFile);
    }
    // make sure no two files are the same: the set deduplicates, so a
    // shorter set than the input array means at least one duplicate
    if (si.Count < srcs.Length)
    {
        // it means at least two files are the same
        throw new HadoopIllegalArgumentException("concat: at least two of the source files are the same"
            );
    }
    return (Sharpen.Collections.ToArray(si, new INodeFile[si.Count]));
}