public TermsHashPerThread(DocInverterPerThread docInverterPerThread, TermsHash termsHash, TermsHash nextTermsHash, TermsHashPerThread primaryPerThread)
{
    docState = docInverterPerThread.docState;

    this.termsHash = termsHash;
    this.consumer = termsHash.consumer.AddThread(this);

    if (nextTermsHash != null)
    {
        // We are primary
        charPool = new CharBlockPool(termsHash.docWriter);
        primary = true;
    }
    else
    {
        charPool = primaryPerThread.charPool;
        primary = false;
    }

    intPool = new IntBlockPool(termsHash.docWriter, termsHash.trackAllocations);
    bytePool = new ByteBlockPool(termsHash.docWriter.byteBlockAllocator, termsHash.trackAllocations);

    if (nextTermsHash != null)
    {
        nextPerThread = nextTermsHash.AddThread(docInverterPerThread, this);
    }
    else
    {
        nextPerThread = null;
    }
}
public TermsHashPerField(DocInverterPerField docInverterPerField, TermsHashPerThread perThread, TermsHashPerThread nextPerThread, FieldInfo fieldInfo)
{
    InitBlock();
    this.perThread = perThread;
    intPool = perThread.intPool;
    charPool = perThread.charPool;
    bytePool = perThread.bytePool;
    docState = perThread.docState;
    fieldState = docInverterPerField.fieldState;

    // Sorter requires the char pool.
    _sorter = new Sorter<RawPostingList, PostingComparer>(new PostingComparer(this));

    this.consumer = perThread.consumer.AddField(this, fieldInfo);
    streamCount = consumer.GetStreamCount();
    numPostingInt = 2 * streamCount;
    this.fieldInfo = fieldInfo;

    if (nextPerThread != null)
    {
        nextPerField = (TermsHashPerField)nextPerThread.AddField(docInverterPerField, fieldInfo);
    }
    else
    {
        nextPerField = null;
    }
}
public TermsHashPerField(DocInverterPerField docInverterPerField, TermsHash termsHash, TermsHash nextTermsHash, FieldInfo fieldInfo)
{
    IntPool = termsHash.IntPool;
    BytePool = termsHash.BytePool;
    TermBytePool = termsHash.TermBytePool;
    DocState = termsHash.DocState;
    this.TermsHash = termsHash;
    BytesUsed = termsHash.BytesUsed;
    FieldState = docInverterPerField.FieldState;
    this.Consumer = termsHash.Consumer.AddField(this, fieldInfo);
    PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, BytesUsed);
    BytesHash = new BytesRefHash(TermBytePool, HASH_INIT_SIZE, byteStarts);
    StreamCount = Consumer.StreamCount;
    NumPostingInt = 2 * StreamCount;
    this.FieldInfo = fieldInfo;

    if (nextTermsHash != null)
    {
        NextPerField = (TermsHashPerField)nextTermsHash.AddField(docInverterPerField, fieldInfo);
    }
    else
    {
        NextPerField = null;
    }
}
public virtual void TestSingleWriterReader()
{
    Counter bytesUsed = Util.Counter.NewCounter();
    IntBlockPool pool = new IntBlockPool(new ByteTrackingAllocator(bytesUsed));

    for (int j = 0; j < 2; j++)
    {
        IntBlockPool.SliceWriter writer = new IntBlockPool.SliceWriter(pool);
        int start = writer.StartNewSlice();
        int num = AtLeast(100);
        for (int i = 0; i < num; i++)
        {
            writer.WriteInt(i);
        }

        int upto = writer.CurrentOffset;
        IntBlockPool.SliceReader reader = new IntBlockPool.SliceReader(pool);
        reader.Reset(start, upto);
        for (int i = 0; i < num; i++)
        {
            Assert.AreEqual(i, reader.ReadInt());
        }
        Assert.IsTrue(reader.EndOfSlice());

        if (Random().NextBoolean())
        {
            pool.Reset(true, false);
            Assert.AreEqual(0, bytesUsed.Get());
        }
        else
        {
            pool.Reset(true, true);
            Assert.AreEqual(IntBlockPool.INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT, bytesUsed.Get());
        }
    }
}
public virtual void TestMultipleWriterReader()
{
    Counter bytesUsed = Util.Counter.NewCounter();
    IntBlockPool pool = new IntBlockPool(new ByteTrackingAllocator(bytesUsed));

    for (int j = 0; j < 2; j++)
    {
        IList<StartEndAndValues> holders = new List<StartEndAndValues>();
        int num = AtLeast(4);
        for (int i = 0; i < num; i++)
        {
            holders.Add(new StartEndAndValues(Random().Next(1000)));
        }

        IntBlockPool.SliceWriter writer = new IntBlockPool.SliceWriter(pool);
        IntBlockPool.SliceReader reader = new IntBlockPool.SliceReader(pool);

        int numValuesToWrite = AtLeast(10000);
        for (int i = 0; i < numValuesToWrite; i++)
        {
            StartEndAndValues values = holders[Random().Next(holders.Count)];
            if (values.ValueCount == 0)
            {
                values.Start = writer.StartNewSlice();
            }
            else
            {
                writer.Reset(values.End);
            }

            writer.WriteInt(values.NextValue());
            values.End = writer.CurrentOffset;

            if (Random().Next(5) == 0)
            {
                // pick one and read the ints
                AssertReader(reader, holders[Random().Next(holders.Count)]);
            }
        }

        while (holders.Count > 0)
        {
            int randIndex = Random().Next(holders.Count);
            StartEndAndValues values = holders[randIndex];
            holders.RemoveAt(randIndex);
            AssertReader(reader, values);
        }

        if (Random().NextBoolean())
        {
            pool.Reset(true, false);
            Assert.AreEqual(0, bytesUsed.Get());
        }
        else
        {
            pool.Reset(true, true);
            Assert.AreEqual(IntBlockPool.INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT, bytesUsed.Get());
        }
    }
}
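// Minimal sketch of the slice round trip that both tests above exercise,
// reduced to its core. It assumes only the IntBlockPool slice API as used in
// the tests (StartNewSlice/WriteInt/CurrentOffset on the writer,
// Reset/ReadInt/EndOfSlice on the reader); ByteTrackingAllocator is the
// bytes-tracking test allocator those tests construct the pool with.
public virtual void SliceRoundTripSketch()
{
    Counter bytesUsed = Util.Counter.NewCounter();
    IntBlockPool pool = new IntBlockPool(new ByteTrackingAllocator(bytesUsed));

    // Write a slice: StartNewSlice returns the slice's start offset,
    // and CurrentOffset marks its end once the writes are done.
    IntBlockPool.SliceWriter writer = new IntBlockPool.SliceWriter(pool);
    int start = writer.StartNewSlice();
    writer.WriteInt(42);
    int end = writer.CurrentOffset;

    // Read it back: Reset positions the reader on [start, end).
    IntBlockPool.SliceReader reader = new IntBlockPool.SliceReader(pool);
    reader.Reset(start, end);
    Assert.AreEqual(42, reader.ReadInt());
    Assert.IsTrue(reader.EndOfSlice());
}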
public TermsHashPerField(DocInverterPerField docInverterPerField, TermsHashPerThread perThread, TermsHashPerThread nextPerThread, FieldInfo fieldInfo) {
  this.perThread = perThread;
  intPool = perThread.intPool;
  charPool = perThread.charPool;
  bytePool = perThread.bytePool;
  docState = perThread.docState;
  fieldState = docInverterPerField.fieldState;
  this.consumer = perThread.consumer.addField(this, fieldInfo);
  streamCount = consumer.getStreamCount();
  numPostingInt = 2 * streamCount;
  this.fieldInfo = fieldInfo;
  if (nextPerThread != null)
    nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
  else
    nextPerField = null;
}
public TermsHash(DocumentsWriterPerThread docWriter, TermsHashConsumer consumer, bool trackAllocations, TermsHash nextTermsHash)
{
    this.DocState = docWriter.docState;
    this.Consumer = consumer;
    this.TrackAllocations = trackAllocations;
    this.NextTermsHash = nextTermsHash;
    this.BytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.NewCounter();
    IntPool = new IntBlockPool(docWriter.intBlockAllocator);
    BytePool = new ByteBlockPool(docWriter.ByteBlockAllocator);

    if (nextTermsHash != null)
    {
        // We are primary
        Primary = true;
        TermBytePool = BytePool;
        nextTermsHash.TermBytePool = BytePool;
    }
    else
    {
        Primary = false;
    }
}