public TermsHashPerThread(DocInverterPerThread docInverterPerThread, TermsHash termsHash, TermsHash nextTermsHash, TermsHashPerThread primaryPerThread)
        {
            docState = docInverterPerThread.docState;

            this.termsHash = termsHash;
            this.consumer  = termsHash.consumer.AddThread(this);

            if (nextTermsHash != null)
            {
                // We are primary
                charPool = new CharBlockPool(termsHash.docWriter);
                primary  = true;
            }
            else
            {
                charPool = primaryPerThread.charPool;
                primary  = false;
            }

            intPool  = new IntBlockPool(termsHash.docWriter, termsHash.trackAllocations);
            bytePool = new ByteBlockPool(termsHash.docWriter.byteBlockAllocator, termsHash.trackAllocations);

            if (nextTermsHash != null)
            {
                nextPerThread = nextTermsHash.AddThread(docInverterPerThread, this);
            }
            else
            {
                nextPerThread = null;
            }
        }
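
A note on the branch above: nextTermsHash != null is what marks this per-thread object as primary (in Lucene the primary chain feeds FreqProxTermsWriter and forwards to a secondary chain for term vectors). Only the primary allocates the CharBlockPool; the secondary created on the last line receives "this" as primaryPerThread and reuses that pool. A minimal sketch of the ownership pattern, using hypothetical simplified types rather than Lucene's:

        // Hypothetical simplified types (not Lucene's API), sketching the
        // primary-allocates / secondary-shares pattern used above.
        sealed class SharedCharPool { /* backing char buffers would live here */ }

        sealed class PerThreadSketch
        {
            internal readonly SharedCharPool charPool;
            internal readonly PerThreadSketch nextPerThread;

            // primary == null means we are the chain head: allocate the pool
            // and spawn the secondary, which borrows our pool instead.
            internal PerThreadSketch(PerThreadSketch primary, bool hasNext)
            {
                charPool = primary == null ? new SharedCharPool() : primary.charPool;
                nextPerThread = hasNext ? new PerThreadSketch(this, false) : null;
            }
        }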
Example #2
        internal ByteSliceReader() // LUCENENET specific - made constructor internal since this class was meant to be internal
        {
        }

        public void Init(ByteBlockPool pool, int startIndex, int endIndex)
        {
            Debug.Assert(endIndex - startIndex >= 0);
            Debug.Assert(startIndex >= 0);
            Debug.Assert(endIndex >= 0);

            this.pool     = pool;
            this.EndIndex = endIndex;

            level        = 0;
            bufferUpto   = startIndex / ByteBlockPool.BYTE_BLOCK_SIZE;
            BufferOffset = bufferUpto * ByteBlockPool.BYTE_BLOCK_SIZE;
            buffer       = pool.Buffers[bufferUpto];
            upto         = startIndex & ByteBlockPool.BYTE_BLOCK_MASK;

            int firstSize = ByteBlockPool.LEVEL_SIZE_ARRAY[0];

            if (startIndex + firstSize >= endIndex)
            {
                // There is only this one slice to read
                limit = endIndex & ByteBlockPool.BYTE_BLOCK_MASK;
            }
            else
            {
                limit = upto + firstSize - 4;
            }
        }
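
The index arithmetic is easier to follow with concrete numbers. Lucene's constants here are BYTE_BLOCK_SIZE = 32768 (1 << 15) and LEVEL_SIZE_ARRAY[0] = 5, and the last 4 bytes of every non-final slice hold the forwarding address of the next slice. A worked sketch (the startIndex value is made up; the constants are Lucene's):

        // Worked example of the Init arithmetic above.
        const int BYTE_BLOCK_SIZE = 32768;               // 1 << BYTE_BLOCK_SHIFT (15)
        const int BYTE_BLOCK_MASK = BYTE_BLOCK_SIZE - 1; // 32767
        const int firstSize = 5;                         // LEVEL_SIZE_ARRAY[0]

        int startIndex = 40000;                          // global address into the pool
        int bufferUpto = startIndex / BYTE_BLOCK_SIZE;   // 1  -> second buffer
        int bufferOffset = bufferUpto * BYTE_BLOCK_SIZE; // 32768
        int upto = startIndex & BYTE_BLOCK_MASK;         // 7232 -> offset inside that buffer
        // If more slices follow, the final 4 of the 5 bytes are the forwarding
        // address, so only firstSize - 4 = 1 data byte is readable here:
        int limit = upto + firstSize - 4;                // 7233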
Example #3
        public TermsHashPerField(DocInverterPerField docInverterPerField, TermsHash termsHash, TermsHash nextTermsHash, FieldInfo fieldInfo)
        {
            intPool        = termsHash.intPool;
            bytePool       = termsHash.bytePool;
            termBytePool   = termsHash.termBytePool;
            docState       = termsHash.docState;
            this.termsHash = termsHash;
            bytesUsed      = termsHash.bytesUsed;
            fieldState     = docInverterPerField.fieldState;
            this.consumer  = termsHash.consumer.AddField(this, fieldInfo);
            PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);

            bytesHash      = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
            streamCount    = consumer.StreamCount;
            numPostingInt  = 2 * streamCount;
            this.fieldInfo = fieldInfo;
            if (nextTermsHash != null)
            {
                nextPerField = (TermsHashPerField)nextTermsHash.AddField(docInverterPerField, fieldInfo);
            }
            else
            {
                nextPerField = null;
            }
        }
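
For orientation: streamCount is the number of parallel byte-slice streams the consumer writes per term. FreqProxTermsWriterPerField uses one or two (postings, plus a second stream when positions are indexed), and the term-vectors writer uses two (positions and offsets, read back as streams 0 and 1 in FinishDocument below). numPostingInt = 2 * streamCount is the headroom, in int-pool slots, checked before a new term is added to decide whether the current int block still has room.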
Example #5
        /// <summary>
        /// Expert: This constructor accepts an upper limit for the number of bytes that should be reused if this instance is <see cref="Reset()"/>.
        /// </summary>
        /// <param name="storeOffsets"> <c>true</c> if offsets should be stored </param>
        /// <param name="maxReusedBytes"> the number of bytes that should remain in the internal memory pools after <see cref="Reset()"/> is called </param>
        internal MemoryIndex(bool storeOffsets, long maxReusedBytes)
        {
            this.storeOffsets = storeOffsets;
            this.bytesUsed    = Counter.NewCounter();
            int maxBufferedByteBlocks = (int)((maxReusedBytes / 2) / ByteBlockPool.BYTE_BLOCK_SIZE);
            int maxBufferedIntBlocks  = (int)((maxReusedBytes - (maxBufferedByteBlocks * ByteBlockPool.BYTE_BLOCK_SIZE)) / (Int32BlockPool.INT32_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT32));

            Debug.Assert((maxBufferedByteBlocks * ByteBlockPool.BYTE_BLOCK_SIZE) + (maxBufferedIntBlocks * Int32BlockPool.INT32_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT32) <= maxReusedBytes);
            byteBlockPool  = new ByteBlockPool(new RecyclingByteBlockAllocator(ByteBlockPool.BYTE_BLOCK_SIZE, maxBufferedByteBlocks, bytesUsed));
            intBlockPool   = new Int32BlockPool(new RecyclingInt32BlockAllocator(Int32BlockPool.INT32_BLOCK_SIZE, maxBufferedIntBlocks, bytesUsed));
            postingsWriter = new Int32BlockPool.SliceWriter(intBlockPool);
        }
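
The split is easiest to verify with concrete numbers (mine, not from the source). Half the budget goes to byte blocks and the remainder to int blocks, using Lucene's BYTE_BLOCK_SIZE = 32768, INT32_BLOCK_SIZE = 8192, and NUM_BYTES_INT32 = 4:

        // Worked example: a 1 MiB budget (the value is made up).
        long maxReusedBytes = 1024 * 1024;
        int byteBlocks = (int)((maxReusedBytes / 2) / 32768);          // 16 -> 512 KiB of byte blocks
        int intBlocks = (int)((maxReusedBytes - byteBlocks * 32768L)
                              / (8192 * 4));                           // 16 -> 512 KiB of int blocks
        // 16 * 32768 + 16 * 8192 * 4 = 1,048,576 bytes: the Debug.Assert holds with equality.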
Example #6
        public TermsHash(DocumentsWriterPerThread docWriter, TermsHashConsumer consumer, bool trackAllocations, TermsHash nextTermsHash)
        {
            this.docState         = docWriter.docState;
            this.consumer         = consumer;
            this.trackAllocations = trackAllocations;
            this.nextTermsHash    = nextTermsHash;
            this.bytesUsed        = trackAllocations ? docWriter.bytesUsed : Counter.NewCounter();
            intPool  = new Int32BlockPool(docWriter.intBlockAllocator);
            bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);

            if (nextTermsHash != null)
            {
                // We are primary
                primary      = true;
                termBytePool = bytePool;
                nextTermsHash.termBytePool = bytePool;
            }
            else
            {
                primary = false;
            }
        }
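
The subtle line is nextTermsHash.termBytePool = bytePool: the secondary (term-vectors) chain hashes its terms into the primary's byte pool, so each term's bytes are stored once and both consumer chains resolve the same byte-start offsets for a given term ID.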
Example #7
        internal void FinishDocument()
        {
            Debug.Assert(docState.TestPoint("TermVectorsTermsWriterPerField.finish start"));

            int numPostings = termsHashPerField.bytesHash.Count;

            BytesRef flushTerm = termsWriter.flushTerm;

            Debug.Assert(numPostings >= 0);

            if (numPostings > maxNumPostings)
            {
                maxNumPostings = numPostings;
            }

            // this is called once, after inverting all occurrences
            // of a given field in the doc.  At this point we flush
            // our hash into the DocWriter.

            Debug.Assert(termsWriter.VectorFieldsInOrder(fieldInfo));

            TermVectorsPostingsArray postings = (TermVectorsPostingsArray)termsHashPerField.postingsArray;
            TermVectorsWriter        tv       = termsWriter.writer;

            int[] termIDs = termsHashPerField.SortPostings(tv.Comparer);

            tv.StartField(fieldInfo, numPostings, doVectorPositions, doVectorOffsets, hasPayloads);

            ByteSliceReader posReader = doVectorPositions ? termsWriter.vectorSliceReaderPos : null;
            ByteSliceReader offReader = doVectorOffsets ? termsWriter.vectorSliceReaderOff : null;

            ByteBlockPool termBytePool = termsHashPerField.termBytePool;

            for (int j = 0; j < numPostings; j++)
            {
                int termID = termIDs[j];
                int freq   = postings.freqs[termID];

                // Get BytesRef
                termBytePool.SetBytesRef(flushTerm, postings.textStarts[termID]);
                tv.StartTerm(flushTerm, freq);

                if (doVectorPositions || doVectorOffsets)
                {
                    if (posReader != null)
                    {
                        termsHashPerField.InitReader(posReader, termID, 0);
                    }
                    if (offReader != null)
                    {
                        termsHashPerField.InitReader(offReader, termID, 1);
                    }
                    tv.AddProx(freq, posReader, offReader);
                }
                tv.FinishTerm();
            }
            tv.FinishField();

            termsHashPerField.Reset();

            fieldInfo.SetStoreTermVectors();
        }
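
In short: sort the field's term IDs by the writer's comparer, emit the field header, then for each term look up its frequency, re-read its positions (stream 0) and offsets (stream 1) from the byte slices through the two ByteSliceReaders, and hand them to the TermVectorsWriter. Resetting the per-field hash at the end ensures the next document starts from an empty term table.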
Example #8
 public ByteSliceWriter(ByteBlockPool pool)
 {
     this.pool = pool;
 }
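
The constructor does no more than capture the pool; the work happens in Init and the write methods. Below is a sketch of the intended write/read cycle, modeled on Lucene's own TestByteSlices test. Member casing follows the Java original, and exact names and visibility differ between Lucene.NET versions (in recent ones these classes are internal), so treat it as an illustration rather than compile-ready code:

        // Sketch only: DirectAllocator hands out plain byte[] blocks.
        ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectAllocator());
        ByteSliceWriter writer = new ByteSliceWriter(pool);

        // Carve out the first 5-byte slice and record its global address.
        int start = pool.NewSlice(ByteBlockPool.FIRST_LEVEL_SIZE) + pool.ByteOffset;
        writer.Init(start);
        writer.WriteVInt(42);            // slices grow level by level as bytes arrive
        int end = writer.Address;        // global address just past the last byte written

        ByteSliceReader reader = new ByteSliceReader();
        reader.Init(pool, start, end);
        int value = reader.ReadVInt();   // -> 42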