public NumericDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed, bool trackDocsWithField)
{
    Pending = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
    DocsWithField = trackDocsWithField ? new FixedBitSet(64) : null;
    BytesUsed = Pending.RamBytesUsed() + DocsWithFieldBytesUsed();
    this.FieldInfo = fieldInfo;
    this.IwBytesUsed = iwBytesUsed;
    iwBytesUsed.AddAndGet(BytesUsed);
}
private AppendingDeltaPackedLongBuffer PendingCounts; // termIDs per doc

#endregion Fields

#region Constructors

public SortedSetDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed)
{
    this.FieldInfo = fieldInfo;
    this.IwBytesUsed = iwBytesUsed;
    Hash = new BytesRefHash(
        new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(iwBytesUsed)),
        BytesRefHash.DEFAULT_CAPACITY,
        new DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, iwBytesUsed));
    Pending = new AppendingPackedLongBuffer(PackedInts.COMPACT);
    PendingCounts = new AppendingDeltaPackedLongBuffer(PackedInts.COMPACT);
    BytesUsed = Pending.RamBytesUsed() + PendingCounts.RamBytesUsed();
    iwBytesUsed.AddAndGet(BytesUsed);
}
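// The two doc-values writer constructors above share one RAM-accounting idiom:
// snapshot the component's current footprint into BytesUsed, then charge the
// indexer-wide Counter via AddAndGet. A minimal sketch of that idiom follows;
// RamTrackedBuffer and UpdateBytesUsed are illustrative names invented here,
// while Counter, Counter.NewCounter, and AddAndGet come from Lucene.Net.Util.
using Lucene.Net.Util;

internal class RamTrackedBuffer
{
    private readonly Counter iwBytesUsed; // shared counter owned by the index writer
    private long bytesUsed;               // footprint this component last reported

    internal RamTrackedBuffer(Counter iwBytesUsed)
    {
        this.iwBytesUsed = iwBytesUsed;
        bytesUsed = 0;
        iwBytesUsed.AddAndGet(bytesUsed); // charge the initial footprint, as above
    }

    // After any mutation, report only the delta; many components can then
    // share one counter without double-counting.
    private void UpdateBytesUsed(long newBytesUsed)
    {
        iwBytesUsed.AddAndGet(newBytesUsed - bytesUsed);
        bytesUsed = newBytesUsed;
    }
}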
public TermsHash(DocumentsWriterPerThread docWriter, TermsHashConsumer consumer, bool trackAllocations, TermsHash nextTermsHash)
{
    this.DocState = docWriter.docState;
    this.Consumer = consumer;
    this.TrackAllocations = trackAllocations;
    this.NextTermsHash = nextTermsHash;
    this.BytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.NewCounter();
    IntPool = new IntBlockPool(docWriter.intBlockAllocator);
    BytePool = new ByteBlockPool(docWriter.ByteBlockAllocator);

    if (nextTermsHash != null)
    {
        // We are primary
        Primary = true;
        TermBytePool = BytePool;
        nextTermsHash.TermBytePool = BytePool;
    }
    else
    {
        // We are secondary; the primary assigns our TermBytePool (see the branch above)
        Primary = false;
    }
}
public TermsHashPerField(DocInverterPerField docInverterPerField, TermsHash termsHash, TermsHash nextTermsHash, FieldInfo fieldInfo)
{
    IntPool = termsHash.IntPool;
    BytePool = termsHash.BytePool;
    TermBytePool = termsHash.TermBytePool;
    DocState = termsHash.DocState;
    this.TermsHash = termsHash;
    BytesUsed = termsHash.BytesUsed;
    FieldState = docInverterPerField.FieldState;
    this.Consumer = termsHash.Consumer.AddField(this, fieldInfo);
    PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, BytesUsed);
    BytesHash = new BytesRefHash(TermBytePool, HASH_INIT_SIZE, byteStarts);
    StreamCount = Consumer.StreamCount;
    NumPostingInt = 2 * StreamCount;
    this.FieldInfo = fieldInfo;

    if (nextTermsHash != null)
    {
        NextPerField = (TermsHashPerField)nextTermsHash.AddField(docInverterPerField, fieldInfo);
    }
    else
    {
        NextPerField = null;
    }
}
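// Hedged sketch of the two-level chain the TermsHash/TermsHashPerField
// constructors above imply, following the stock Lucene 4.x indexing chain in
// which FreqProxTermsWriter is the primary consumer and TermVectorsConsumer
// the secondary; dwpt stands in for a real DocumentsWriterPerThread.
TermsHash termVectorsHash = new TermsHash(dwpt, new TermVectorsConsumer(dwpt), false, null);
TermsHash freqProxHash = new TermsHash(dwpt, new FreqProxTermsWriter(), true, termVectorsHash);
// freqProxHash is primary: its BytePool becomes the shared TermBytePool, so
// both levels intern term bytes into the same blocks.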
/// <summary>
/// Create a TimeLimitingCollector wrapper over another <seealso cref="Collector"/>
/// with a specified timeout. </summary>
/// <param name="collector"> the wrapped <seealso cref="Collector"/> </param>
/// <param name="clock"> the timer clock </param>
/// <param name="ticksAllowed"> max time allowed for collecting
/// hits after which <seealso cref="TimeExceededException"/> is thrown </param>
public TimeLimitingCollector(Collector collector, Counter clock, long ticksAllowed)
{
    this.collector = collector;
    this.Clock = clock;
    this.TicksAllowed = ticksAllowed;
}
public TimerThread(Counter counter)
    : this(DEFAULT_RESOLUTION, counter)
{
}
public TimerThread(long resolution, Counter counter)
    : base(THREAD_NAME)
{
    this.resolution = resolution;
    this.Counter = counter;
    this.SetDaemon(true);
}
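// Hedged usage sketch for the TimerThread constructors above, assuming the
// Lucene.NET 4.x layout where TimerThread nests inside TimeLimitingCollector
// and Counter.NewCounter(true) returns a thread-safe counter; innerCollector
// is a hypothetical, already-built Collector.
Counter clock = Counter.NewCounter(true);
var timer = new TimeLimitingCollector.TimerThread(clock);
timer.Start(); // daemon thread: adds its resolution to the clock every DEFAULT_RESOLUTION ms

// Each tick advances the clock by the resolution in milliseconds, so a
// ticksAllowed budget of 2000 allows roughly two seconds of collecting
// before TimeExceededException is thrown.
Collector limited = new TimeLimitingCollector(innerCollector, clock, 2000);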
public ByteTrackingAllocator(int blockSize, Counter bytesUsed)
    : base(blockSize)
{
    this.BytesUsed = bytesUsed;
}
public ByteTrackingAllocator(Counter bytesUsed)
    : this(IntBlockPool.INT_BLOCK_SIZE, bytesUsed)
{
}
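// Hedged sketch wiring the ByteTrackingAllocator above into an IntBlockPool so
// block allocations are charged to a Counter. It assumes the allocator's
// block-allocation/recycling overrides (not shown in this listing) add and
// subtract the blocks' byte footprint, as a tracking allocator typically does.
Counter bytesUsed = Counter.NewCounter();
var pool = new IntBlockPool(new ByteTrackingAllocator(bytesUsed));
pool.NextBuffer();          // pulls one INT_BLOCK_SIZE block of ints
long ram = bytesUsed.Get(); // tracked footprint in bytes, if Get() mirrors the Java Counter API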
internal PostingsBytesStartArray(TermsHashPerField perField, Counter bytesUsed)
{
    this.PerField = perField;
    this.BytesUsed_Renamed = bytesUsed;
}
public DocValuesProcessor(Counter bytesUsed)
{
    this.BytesUsed = bytesUsed;
}