/// <summary>
/// Opens the segment's compound file <paramref name="filename"/> for reading
/// legacy (4.0) doc values and initializes the RAM-usage accounting.
/// </summary>
internal Lucene40DocValuesReader(SegmentReadState state, string filename, string legacyKey)
{
    this.state = state;
    this.legacyKey = legacyKey;
    // Open read-only: the final 'false' is the openForWrite flag.
    this.dir = new CompoundFileDirectory(state.Directory, filename, state.Context, false);
    // NOTE(review): this seeds the counter with the shallow size of the Type
    // object (this.GetType()), not of the instance itself — looks intentional
    // as a rough baseline, but verify against RamUsageEstimator's contract.
    ramBytesUsed = new AtomicInt64(RamUsageEstimator.ShallowSizeOf(this.GetType()));
}
/// <summary>
/// Returns an approximate RAM usage of this suggester: its own shallow size
/// plus the <see cref="SegmentReader.RamBytesUsed"/> of every segment leaf
/// currently visible through the searcher manager.
/// </summary>
/// <returns>Approximate size in bytes.</returns>
public override long GetSizeInBytes()
{
    long mem = RamUsageEstimator.ShallowSizeOf(this);
    try
    {
        if (m_searcherMgr != null)
        {
            IndexSearcher searcher = m_searcherMgr.Acquire();
            try
            {
                foreach (AtomicReaderContext context in searcher.IndexReader.Leaves)
                {
                    AtomicReader reader = FilterAtomicReader.Unwrap(context.AtomicReader);
                    // BUGFIX: measure the unwrapped reader we just type-tested.
                    // The original cast context.Reader instead, which may still
                    // be a FilterAtomicReader wrapper — casting it after testing
                    // the unwrapped instance could throw InvalidCastException
                    // (and at best measured the wrong object).
                    if (reader is SegmentReader segmentReader)
                    {
                        mem += segmentReader.RamBytesUsed();
                    }
                }
            }
            finally
            {
                // Always release the acquired searcher back to the manager.
                m_searcherMgr.Release(searcher);
            }
        }
        return mem;
    }
    catch (IOException ioe)
    {
        // Preserve the original contract: surface I/O failures as Exception
        // with the cause attached (callers catch this type).
        throw new Exception(ioe.ToString(), ioe);
    }
}
/// <summary>
/// Returns an approximate RAM usage: this object's shallow size plus the
/// packed <c>Offsets</c>, plus <c>Ordinals</c> unless both fields refer to
/// the same instance (then it is counted only once).
/// </summary>
public long RamBytesUsed()
{
    long total = RamUsageEstimator.ShallowSizeOf(this);
    total += RamUsageEstimator.SizeOf(Offsets);
    if (Offsets != Ordinals)
    {
        // Distinct instances: add the ordinals' footprint as well.
        total += RamUsageEstimator.SizeOf(Ordinals);
    }
    return total;
}
/// <summary>
/// Returns the approximate byte size of the underlying ternary search tree,
/// including this object's own shallow footprint.
/// </summary>
public override long GetSizeInBytes()
{
    long size = RamUsageEstimator.ShallowSizeOf(this);
    // An empty trie (no root) contributes only the shallow size.
    return root == null ? size : size + root.GetSizeInBytes();
}
/// <summary>
/// Returns an approximate memory usage for this trie: its own shallow size
/// plus the recursive size of the root node, if any.
/// </summary>
public virtual long GetSizeInBytes()
{
    long size = RamUsageEstimator.ShallowSizeOf(this);
    // Read the property exactly once, as the original did.
    TSTNode node = Root;
    return node == null ? size : size + node.GetSizeInBytes();
}
/// <summary>
/// Returns an approximate memory usage for this node and its sub-nodes.
/// </summary>
public long SizeInBytes()
{
    long size = RamUsageEstimator.ShallowSizeOf(this)
        + RamUsageEstimator.ShallowSizeOf(relatives);
    // NOTE(review): this recurses into every entry of relatives; if that
    // array ever contains a back-pointer to the parent node, the recursion
    // would never terminate — verify relatives holds child links only.
    for (int i = 0; i < relatives.Length; i++)
    {
        TSTNode child = relatives[i];
        if (child != null)
        {
            size += child.SizeInBytes();
        }
    }
    return size;
}
/// <summary>
/// Returns an approximate RAM usage of this suggester: the shallow sizes of
/// this object and both completion instances, plus the size of each distinct
/// FST (the two completions normally share one FST, which is counted once).
/// </summary>
public override long GetSizeInBytes()
{
    long size = RamUsageEstimator.ShallowSizeOf(this);
    size += RamUsageEstimator.ShallowSizeOf(normalCompletion);
    size += RamUsageEstimator.ShallowSizeOf(higherWeightsCompletion);
    if (normalCompletion != null)
    {
        size += normalCompletion.FST.GetSizeInBytes();
    }
    // Only add the higher-weights FST when it is a distinct instance;
    // the fst should be shared between the 2 completion instances, don't count it twice.
    if (higherWeightsCompletion != null && (normalCompletion == null || normalCompletion.FST != higherWeightsCompletion.FST))
    {
        size += higherWeightsCompletion.FST.GetSizeInBytes();
    }
    return size;
}
/// <summary>
/// Returns an approximate memory usage for this node and its sub-nodes,
/// deliberately excluding the parent link.
/// </summary>
public long GetSizeInBytes()
{
    long size = RamUsageEstimator.ShallowSizeOf(this)
        + RamUsageEstimator.ShallowSizeOf(relatives);
    // LUCENENET NOTE: per this method's summary, the parent node is skipped.
    // Following the parent pointer as well causes infinite recursion here and
    // overflows the thread stack. Upstream Lucene 6.2 appears to estimate the
    // parent too:
    // https://github.com/apache/lucene-solr/blob/764d0f19151dbff6f5fcd9fc4b2682cf934590c5/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java#L104
    // The reason is unclear and it looks like a recipe for inaccuracy.
    foreach (TSTNode relative in relatives)
    {
        if (relative != null && relative != relatives[PARENT])
        {
            size += relative.GetSizeInBytes();
        }
    }
    return size;
}
/// <summary>
/// Returns an approximate memory usage for this node: its shallow size, the
/// recursive sizes of any lo/eq/hi children, the stored token's footprint,
/// and the shallow size of the attached value.
/// </summary>
internal virtual long GetSizeInBytes()
{
    long size = RamUsageEstimator.ShallowSizeOf(this);
    // Recurse into whichever children exist.
    if (loKid != null)
    {
        size += loKid.GetSizeInBytes();
    }
    if (eqKid != null)
    {
        size += eqKid.GetSizeInBytes();
    }
    if (hiKid != null)
    {
        size += hiKid.GetSizeInBytes();
    }
    if (token != null)
    {
        // Object header plus array header plus one char-sized slot per
        // element of the token — presumably token is char-backed; confirm.
        size += RamUsageEstimator.ShallowSizeOf(token)
            + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
            + RamUsageEstimator.NUM_BYTES_CHAR * token.Length;
    }
    size += RamUsageEstimator.ShallowSizeOf(val);
    return size;
}