/// <summary>Get a listing of all the snapshots of a snapshottable directory</summary>
/// <exception cref="System.IO.IOException"/>
private static DirectoryListing GetSnapshotsListing(FSDirectory fsd, string src,
    byte[] startAfter)
{
    Preconditions.CheckState(fsd.HasReadLock());
    Preconditions.CheckArgument(src.EndsWith(HdfsConstants.SeparatorDotSnapshotDir),
        "%s does not end with %s", src, HdfsConstants.SeparatorDotSnapshotDir);
    // Strip the trailing ".snapshot" component to get the snapshottable directory path.
    string dirPath = FSDirectory.NormalizePath(Sharpen.Runtime.Substring(src, 0,
        src.Length - HdfsConstants.DotSnapshotDir.Length));
    INode node = fsd.GetINode(dirPath);
    INodeDirectory dirNode = INodeDirectory.ValueOf(node, dirPath);
    DirectorySnapshottableFeature sf = dirNode.GetDirectorySnapshottableFeature();
    if (sf == null)
    {
        throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
    }
    ReadOnlyList<Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot> snapshots =
        sf.GetSnapshotList();
    // A non-negative result means startAfter names an existing snapshot (skip past it);
    // a negative result encodes the insertion point as -(insertionPoint) - 1.
    int skipSize = ReadOnlyList.Util.BinarySearch(snapshots, startAfter);
    skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
    int numOfListing = Math.Min(snapshots.Size() - skipSize, fsd.GetLsLimit());
    HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing; i++)
    {
        Snapshot.Root sRoot = snapshots.Get(i + skipSize).GetRoot();
        listing[i] = CreateFileStatus(fsd, src, sRoot.GetLocalNameBytes(), sRoot,
            BlockStoragePolicySuite.IdUnspecified,
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId,
            false, INodesInPath.FromINode(sRoot));
    }
    return new DirectoryListing(listing, snapshots.Size() - skipSize - numOfListing);
}
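// The pagination above leans on the binary-search return convention noted in the
// comment: because a miss yields the insertion point, listing resumes correctly
// even if the startAfter snapshot was deleted between calls. A minimal standalone
// sketch of the same skip computation, using List<T>.BinarySearch over plain
// strings (all names below are hypothetical, not part of the HDFS code):
using System;
using System.Collections.Generic;

internal static class SnapshotPagingSketch
{
    // Returns one page of entries strictly after 'startAfter', mirroring
    // the skipSize arithmetic in GetSnapshotsListing.
    internal static List<string> PageAfter(List<string> sorted, string startAfter, int limit)
    {
        int skip = sorted.BinarySearch(startAfter, StringComparer.Ordinal);
        skip = skip < 0 ? -skip - 1 : skip + 1;
        int count = Math.Min(sorted.Count - skip, limit);
        return sorted.GetRange(skip, count);
    }

    internal static void Demo()
    {
        var snaps = new List<string> { "s0", "s1", "s3", "s4" };
        // Resumes after "s2" even though no snapshot named "s2" exists.
        Console.WriteLine(string.Join(", ", PageAfter(snaps, "s2", 2)));  // s3, s4
    }
}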
/// <summary>Create FileStatus with location info by file INode</summary>
/// <exception cref="System.IO.IOException"/>
private static HdfsLocatedFileStatus CreateLocatedFileStatus(FSDirectory fsd,
    string fullPath, byte[] path, INode node, byte storagePolicy, int snapshot,
    bool isRawPath, INodesInPath iip)
{
    System.Diagnostics.Debug.Assert(fsd.HasReadLock());
    long size = 0;  // length is zero for directories
    short replication = 0;
    long blocksize = 0;
    LocatedBlocks loc = null;
    bool isEncrypted;
    FileEncryptionInfo feInfo = isRawPath ? null : fsd.GetFileEncryptionInfo(node,
        snapshot, iip);
    if (node.IsFile())
    {
        INodeFile fileNode = node.AsFile();
        size = fileNode.ComputeFileSize(snapshot);
        replication = fileNode.GetFileReplication(snapshot);
        blocksize = fileNode.GetPreferredBlockSize();
        bool inSnapshot = snapshot !=
            Org.Apache.Hadoop.Hdfs.Server.Namenode.Snapshot.Snapshot.CurrentStateId;
        bool isUc = !inSnapshot && fileNode.IsUnderConstruction();
        // For a file still being written, the length of the last block is not
        // yet final, so the stable size stops at the last finalized block.
        long fileSize = !inSnapshot && isUc
            ? fileNode.ComputeFileSizeNotIncludingLastUcBlock()
            : size;
        loc = fsd.GetFSNamesystem().GetBlockManager().CreateLocatedBlocks(
            fileNode.GetBlocks(snapshot), fileSize, isUc, 0L, size, false,
            inSnapshot, feInfo);
        if (loc == null)
        {
            loc = new LocatedBlocks();
        }
        isEncrypted = (feInfo != null) ||
            (isRawPath && fsd.IsInAnEZ(INodesInPath.FromINode(node)));
    }
    else
    {
        isEncrypted = fsd.IsInAnEZ(INodesInPath.FromINode(node));
    }
    int childrenNum = node.IsDirectory() ? node.AsDirectory().GetChildrenNum(snapshot) : 0;
    INodeAttributes nodeAttrs = fsd.GetAttributes(fullPath, path, node, snapshot);
    HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.IsDirectory(),
        replication, blocksize, node.GetModificationTime(snapshot),
        node.GetAccessTime(snapshot), GetPermissionForFileStatus(nodeAttrs, isEncrypted),
        nodeAttrs.GetUserName(), nodeAttrs.GetGroupName(),
        node.IsSymlink() ? node.AsSymlink().GetSymlink() : null, path, node.GetId(),
        loc, childrenNum, feInfo, storagePolicy);
    // Set caching information for the located blocks.
    if (loc != null)
    {
        CacheManager cacheManager = fsd.GetFSNamesystem().GetCacheManager();
        foreach (LocatedBlock lb in loc.GetLocatedBlocks())
        {
            cacheManager.SetCachedLocations(lb);
        }
    }
    return status;
}
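// When the file is under construction, the method above computes two lengths:
// the full size, including whatever has been written into the last block so
// far, and a stable size that stops at the last finalized block, which is what
// CreateLocatedBlocks receives as the file size. A standalone sketch of that
// split (block lengths and all names here are hypothetical):
using System;
using System.Linq;

internal static class VisibleLengthSketch
{
    // Full size: sum over every block, including the one being written.
    internal static long FileSize(long[] blockLengths) => blockLengths.Sum();

    // Stand-in for ComputeFileSizeNotIncludingLastUcBlock: drop the last,
    // under-construction block, whose length is not yet final.
    internal static long SizeWithoutLastUcBlock(long[] blockLengths) =>
        blockLengths.Length == 0 ? 0 : blockLengths.Take(blockLengths.Length - 1).Sum();

    internal static void Demo()
    {
        long[] blocks = { 128, 128, 42 };  // 42 bytes written to the open last block
        bool isUnderConstruction = true;
        long full = FileSize(blocks);      // 298
        long stable = isUnderConstruction
            ? SizeWithoutLastUcBlock(blocks)  // 256
            : full;
        Console.WriteLine($"full={full}, stable={stable}");
    }
}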
/// <summary>
/// Relinquish locks held during computation for a short while
/// and reacquire them.
/// </summary>
/// <remarks>
/// Relinquish locks held during computation for a short while
/// and reacquire them. This will give other threads a chance
/// to acquire the contended locks and run.
/// </remarks>
/// <returns>true if locks were released and reacquired.</returns>
public virtual bool Yield()
{
    // Are we set up to do this?
    if (limitPerRun <= 0 || dir == null || fsn == null)
    {
        return false;
    }
    // Have we reached the limit?
    long currentCount = counts.GetFileCount() + counts.GetSymlinkCount() +
        counts.GetDirectoryCount() + counts.GetSnapshotableDirectoryCount();
    if (currentCount <= nextCountLimit)
    {
        return false;
    }
    // Update the next limit
    nextCountLimit = currentCount + limitPerRun;
    bool hadDirReadLock = dir.HasReadLock();
    bool hadDirWriteLock = dir.HasWriteLock();
    bool hadFsnReadLock = fsn.HasReadLock();
    bool hadFsnWriteLock = fsn.HasWriteLock();
    // Sanity check: yield only when exactly one read lock is held on each lock
    // and no write locks are held; otherwise releasing is unsafe.
    if (!hadDirReadLock || !hadFsnReadLock || hadDirWriteLock || hadFsnWriteLock ||
        dir.GetReadHoldCount() != 1 || fsn.GetReadHoldCount() != 1)
    {
        // cannot relinquish
        return false;
    }
    // unlock
    dir.ReadUnlock();
    fsn.ReadUnlock();
    try
    {
        Sharpen.Thread.Sleep(sleepMilliSec, sleepNanoSec);
    }
    catch (Exception)
    {
        // interruption is ignored; the locks are still reacquired below
    }
    finally
    {
        // reacquire in the opposite order of release
        fsn.ReadLock();
        dir.ReadLock();
    }
    yieldCount++;
    return true;
}
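// The same cooperative pattern can be reproduced with the standard
// ReaderWriterLockSlim: count work units, and once a threshold passes, drop
// the read lock around a short sleep so contending writers can run. A minimal
// sketch under that assumption (class and field names are hypothetical, not
// part of the HDFS code):
using System;
using System.Threading;

internal class LockYieldSketch
{
    private readonly ReaderWriterLockSlim rwLock = new ReaderWriterLockSlim();
    private long processed;
    private long nextYieldAt = 1000;
    private const long LimitPerRun = 1000;

    // Mirrors the shape of Yield(): check the work counter against the next
    // threshold, verify the lock state permits release, then drop and
    // reacquire the read lock around a short sleep.
    private bool MaybeYield()
    {
        if (processed <= nextYieldAt)
        {
            return false;
        }
        nextYieldAt = processed + LimitPerRun;
        // Only safe when we hold exactly one read lock and no write lock.
        if (rwLock.RecursiveReadCount != 1 || rwLock.IsWriteLockHeld)
        {
            return false;
        }
        rwLock.ExitReadLock();
        try
        {
            Thread.Sleep(1);  // window for contending writers
        }
        finally
        {
            rwLock.EnterReadLock();
        }
        return true;
    }

    internal void Run()
    {
        rwLock.EnterReadLock();
        try
        {
            for (int i = 0; i < 5000; i++)
            {
                processed++;   // stand-in for one unit of traversal work
                MaybeYield();  // gives writers a window every ~1000 items
            }
        }
        finally
        {
            rwLock.ExitReadLock();
        }
    }
}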
/// <summary>Returns true if an IIP is within an encryption zone.</summary>
/// <remarks>
/// Returns true if an IIP is within an encryption zone.
/// <p/>
/// Called while holding the FSDirectory lock.
/// </remarks>
/// <exception cref="Org.Apache.Hadoop.FS.UnresolvedLinkException"/>
/// <exception cref="Org.Apache.Hadoop.Hdfs.Protocol.SnapshotAccessControlException"/>
internal virtual bool IsInAnEZ(INodesInPath iip)
{
    System.Diagnostics.Debug.Assert(dir.HasReadLock());
    return GetEncryptionZoneForPath(iip) != null;
}
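// Callers are expected to already hold the FSDirectory read lock, matching the
// Debug.Assert above. A hypothetical call site might look like the following,
// where fsd and iip stand in for real FSDirectory and INodesInPath instances:
fsd.ReadLock();
try
{
    if (fsd.IsInAnEZ(iip))
    {
        // Path is inside an encryption zone; encryption handling applies.
    }
}
finally
{
    fsd.ReadUnlock();
}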