/// <summary>
/// Compute the <see cref="Org.Apache.Hadoop.FS.ContentSummary"/> for this inode,
/// converting the internally accumulated counts and quotas into the public
/// summary representation.
/// </summary>
/// <param name="summary">the computation context used to accumulate counts.</param>
/// <returns>the assembled <see cref="Org.Apache.Hadoop.FS.ContentSummary"/>.</returns>
public ContentSummary ComputeAndConvertContentSummary(ContentSummaryComputationContext summary)
{
	ContentCounts contentCounts = ComputeContentSummary(summary).GetCounts();
	QuotaCounts quota = GetQuotaCounts();
	// Symlinks are reported together with regular files in the file count.
	ContentSummary.Builder builder = new ContentSummary.Builder()
		.Length(contentCounts.GetLength())
		.FileCount(contentCounts.GetFileCount() + contentCounts.GetSymlinkCount())
		.DirectoryCount(contentCounts.GetDirectoryCount())
		.Quota(quota.GetNameSpace())
		.SpaceConsumed(contentCounts.GetStoragespace())
		.SpaceQuota(quota.GetStorageSpace())
		.TypeConsumed(contentCounts.GetTypeSpaces())
		.TypeQuota(quota.GetTypeSpaces().AsArray());
	return builder.Build();
}
/// <summary>
/// Relinquish locks held during computation for a short while
/// and reacquire them.
/// </summary>
/// <remarks>
/// Relinquish locks held during computation for a short while
/// and reacquire them. This will give other threads a chance
/// to acquire the contended locks and run.
/// </remarks>
/// <returns>true if locks were released and reacquired.</returns>
public virtual bool Yield()
{
	// Yielding requires a configured per-run limit and live references to
	// both the directory tree and the namesystem.
	if (limitPerRun <= 0 || dir == null || fsn == null)
	{
		return false;
	}
	// Tally everything processed so far; only yield once the budget for
	// this run has been exceeded.
	long processed = counts.GetFileCount() + counts.GetSymlinkCount()
		+ counts.GetDirectoryCount() + counts.GetSnapshotableDirectoryCount();
	if (processed <= nextCountLimit)
	{
		return false;
	}
	// Schedule the next yield point.
	nextCountLimit = processed + limitPerRun;
	// Sanity check: we must hold exactly one read lock — and no write
	// lock — on both dir and fsn, otherwise the locks cannot safely be
	// relinquished here.
	bool dirReadHeld = dir.HasReadLock();
	bool dirWriteHeld = dir.HasWriteLock();
	bool fsnReadHeld = fsn.HasReadLock();
	bool fsnWriteHeld = fsn.HasWriteLock();
	if (!dirReadHeld || !fsnReadHeld || dirWriteHeld || fsnWriteHeld
		|| dir.GetReadHoldCount() != 1 || fsn.GetReadHoldCount() != 1)
	{
		// Cannot relinquish.
		return false;
	}
	// Drop the read locks so contending threads get a chance to run.
	dir.ReadUnlock();
	fsn.ReadUnlock();
	try
	{
		Sharpen.Thread.Sleep(sleepMilliSec, sleepNanoSec);
	}
	catch (Exception)
	{
		// Deliberate best-effort nap: an interruption here is ignored;
		// the locks are reacquired regardless (in the finally block).
	}
	finally
	{
		// Reacquire in the opposite order of release.
		fsn.ReadLock();
		dir.ReadLock();
	}
	yieldCount++;
	return true;
}