public TermsHashPerThread(DocInverterPerThread docInverterPerThread, TermsHash termsHash, TermsHash nextTermsHash, TermsHashPerThread primaryPerThread)
{
    docState = docInverterPerThread.docState;

    this.termsHash = termsHash;
    this.consumer = termsHash.consumer.AddThread(this);

    if (nextTermsHash != null)
    {
        // We are primary: we own the char pool; the secondary per-thread
        // created below shares it rather than allocating its own.
        charPool = new CharBlockPool(termsHash.docWriter);
        primary = true;
    }
    else
    {
        // We are secondary: reuse the primary's char pool.
        charPool = primaryPerThread.charPool;
        primary = false;
    }

    intPool = new IntBlockPool(termsHash.docWriter, termsHash.trackAllocations);
    bytePool = new ByteBlockPool(termsHash.docWriter.byteBlockAllocator, termsHash.trackAllocations);

    if (nextTermsHash != null)
        nextPerThread = nextTermsHash.AddThread(docInverterPerThread, this);
    else
        nextPerThread = null;
}
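// For context: the primary/secondary split above mirrors how the 2.9/3.x-era
// indexing chain wires two TermsHash instances together. A hedged sketch of
// that wiring, modeled on DocumentsWriter's default indexing chain (the
// variable names are illustrative, not taken from this file):
//
//   The freq/prox writer is the primary consumer; the term-vectors writer is
//   chained behind it as the secondary, so both observe the same tokens while
//   the secondary per-thread reuses the primary's CharBlockPool.
TermsHash vectorsHash = new TermsHash(documentsWriter, false, new TermVectorsTermsWriter(documentsWriter), null);
TermsHash primaryHash = new TermsHash(documentsWriter, true, new FreqProxTermsWriter(), vectorsHash);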
/// <summary>
/// Expert: This constructor accepts an upper limit for the number of bytes that should be reused if this instance is <see cref="Reset()"/>.
/// </summary>
/// <param name="storeOffsets"> <c>true</c> if offsets should be stored </param>
/// <param name="maxReusedBytes"> the number of bytes that should remain in the internal memory pools after <see cref="Reset()"/> is called </param>
internal MemoryIndex(bool storeOffsets, long maxReusedBytes)
{
    this.storeOffsets = storeOffsets;
    this.bytesUsed = Counter.NewCounter();

    // Give half of the reuse budget to byte blocks, then spend whatever
    // remains on int blocks; both are rounded down to whole blocks, so the
    // combined total can never exceed maxReusedBytes.
    int maxBufferedByteBlocks = (int)((maxReusedBytes / 2) / ByteBlockPool.BYTE_BLOCK_SIZE);
    int maxBufferedIntBlocks = (int)((maxReusedBytes - (maxBufferedByteBlocks * ByteBlockPool.BYTE_BLOCK_SIZE)) / (IntBlockPool.INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT));
    Debug.Assert((maxBufferedByteBlocks * ByteBlockPool.BYTE_BLOCK_SIZE) + (maxBufferedIntBlocks * IntBlockPool.INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT) <= maxReusedBytes);

    byteBlockPool = new ByteBlockPool(new RecyclingByteBlockAllocator(ByteBlockPool.BYTE_BLOCK_SIZE, maxBufferedByteBlocks, bytesUsed));
    intBlockPool = new IntBlockPool(new RecyclingIntBlockAllocator(IntBlockPool.INT_BLOCK_SIZE, maxBufferedIntBlocks, bytesUsed));
    postingsWriter = new IntBlockPool.SliceWriter(intBlockPool);
}
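// To make the budget split concrete, a minimal worked sketch, assuming the
// stock block constants (ByteBlockPool.BYTE_BLOCK_SIZE = 32768, i.e. 1 << 15;
// IntBlockPool.INT_BLOCK_SIZE = 8192, i.e. 1 << 13;
// RamUsageEstimator.NUM_BYTES_INT = 4); the 1 MiB figure is illustrative:
long maxReusedBytes = 1024 * 1024;                               // 1,048,576 bytes

// Half the budget in whole byte blocks: (1,048,576 / 2) / 32,768 = 16 blocks.
int maxBufferedByteBlocks = (int)((maxReusedBytes / 2) / 32768);

// The remaining 524,288 bytes in whole int blocks of 8,192 ints * 4 bytes
// (= 32,768 bytes) each: 524,288 / 32,768 = 16 blocks.
int maxBufferedIntBlocks = (int)((maxReusedBytes - maxBufferedByteBlocks * 32768L) / (8192 * 4));

// Recycled total: 16 * 32,768 + 16 * 32,768 = 1,048,576 <= maxReusedBytes.
// For reference, upstream Lucene's public MemoryIndex(boolean) overload
// delegates here with maxReusedBytes = 0, so by default no blocks are
// retained across Reset() calls.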
public TermsHashPerField(DocInverterPerField docInverterPerField, TermsHashPerThread perThread, TermsHashPerThread nextPerThread, FieldInfo fieldInfo)
{
    InitBlock();
    this.perThread = perThread;
    intPool = perThread.intPool;
    charPool = perThread.charPool;
    bytePool = perThread.bytePool;
    docState = perThread.docState;
    fieldState = docInverterPerField.fieldState;

    // Ask the consumer for its per-field instance; the consumer decides how
    // many parallel byte streams each posting needs.
    this.consumer = perThread.consumer.AddField(this, fieldInfo);
    streamCount = consumer.GetStreamCount();
    numPostingInt = 2 * streamCount;
    this.fieldInfo = fieldInfo;

    if (nextPerThread != null)
        nextPerField = (TermsHashPerField)nextPerThread.AddField(docInverterPerField, fieldInfo);
    else
        nextPerField = null;
}