public virtual void TestMultipleWriterReader()
{
    // Track allocator activity so the pool.Reset() accounting can be verified below.
    Counter bytesUsed = Util.Counter.NewCounter();
    IntBlockPool pool = new IntBlockPool(new ByteTrackingAllocator(bytesUsed));
    for (int round = 0; round < 2; round++)
    {
        // Each holder records one slice's start/end offsets and expected values.
        IList<StartEndAndValues> sliceHolders = new List<StartEndAndValues>();
        int sliceCount = AtLeast(4);
        for (int s = 0; s < sliceCount; s++)
        {
            sliceHolders.Add(new StartEndAndValues(Random().Next(1000)));
        }
        IntBlockPool.SliceWriter writer = new IntBlockPool.SliceWriter(pool);
        IntBlockPool.SliceReader reader = new IntBlockPool.SliceReader(pool);
        int writeCount = AtLeast(10000);
        for (int w = 0; w < writeCount; w++)
        {
            // Pick a random slice; start it on first touch, otherwise resume at its end.
            StartEndAndValues target = sliceHolders[Random().Next(sliceHolders.Count)];
            if (target.ValueCount != 0)
            {
                writer.Reset(target.End);
            }
            else
            {
                target.Start = writer.StartNewSlice();
            }
            writer.WriteInt(target.NextValue());
            target.End = writer.CurrentOffset;
            if (Random().Next(5) == 0)
            {
                // Occasionally read one slice back mid-write to check interleaving.
                AssertReader(reader, sliceHolders[Random().Next(sliceHolders.Count)]);
            }
        }
        // Drain the holders in random order, verifying each slice in full.
        while (sliceHolders.Count > 0)
        {
            int pick = Random().Next(sliceHolders.Count);
            StartEndAndValues picked = sliceHolders[pick];
            sliceHolders.RemoveAt(pick);
            AssertReader(reader, picked);
        }
        // Reset the pool; when the first buffer is kept, its bytes stay accounted.
        if (Random().NextBoolean())
        {
            pool.Reset(true, false);
            Assert.AreEqual(0, bytesUsed.Get());
        }
        else
        {
            pool.Reset(true, true);
            Assert.AreEqual(IntBlockPool.INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT, bytesUsed.Get());
        }
    }
}
public TermsHash(DocumentsWriterPerThread docWriter, TermsHashConsumer consumer, bool trackAllocations, TermsHash nextTermsHash)
{
    // Wire up the per-thread state shared with the consumer chain.
    this.DocState = docWriter.docState;
    this.Consumer = consumer;
    this.TrackAllocations = trackAllocations;
    this.NextTermsHash = nextTermsHash;
    // Charge allocations to the writer's shared counter when tracking,
    // otherwise keep a private, throwaway counter.
    this.BytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.NewCounter();
    IntPool = new IntBlockPool(docWriter.intBlockAllocator);
    BytePool = new ByteBlockPool(docWriter.ByteBlockAllocator);
    // Having a downstream hash makes us primary; the primary owns the byte
    // pool used for term bytes and shares it with the downstream hash.
    Primary = nextTermsHash != null;
    if (Primary)
    {
        TermBytePool = BytePool;
        nextTermsHash.TermBytePool = BytePool;
    }
}
public TermsHashPerField(DocInverterPerField docInverterPerField, TermsHash termsHash, TermsHash nextTermsHash, FieldInfo fieldInfo)
{
    // Share the pools and byte accounting owned by the parent TermsHash.
    this.TermsHash = termsHash;
    IntPool = termsHash.IntPool;
    BytePool = termsHash.BytePool;
    TermBytePool = termsHash.TermBytePool;
    DocState = termsHash.DocState;
    BytesUsed = termsHash.BytesUsed;
    FieldState = docInverterPerField.FieldState;
    // Let the consumer attach its per-field state first ...
    this.Consumer = termsHash.Consumer.AddField(this, fieldInfo);
    // ... then build the term hash backed by the shared term byte pool,
    // with postings storage charged to the shared counter.
    PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, BytesUsed);
    BytesHash = new BytesRefHash(TermBytePool, HASH_INIT_SIZE, byteStarts);
    StreamCount = Consumer.StreamCount;
    NumPostingInt = 2 * StreamCount;
    this.FieldInfo = fieldInfo;
    // Chain to the downstream TermsHash's per-field instance, if any.
    NextPerField = nextTermsHash == null
        ? null
        : (TermsHashPerField)nextTermsHash.AddField(docInverterPerField, fieldInfo);
}
public virtual void TestSingleWriterReader()
{
    // Track allocator activity so the pool.Reset() accounting can be verified below.
    Counter bytesUsed = Util.Counter.NewCounter();
    IntBlockPool pool = new IntBlockPool(new ByteTrackingAllocator(bytesUsed));
    for (int round = 0; round < 2; round++)
    {
        // Write the sequence 0..count-1 into a single slice.
        IntBlockPool.SliceWriter writer = new IntBlockPool.SliceWriter(pool);
        int sliceStart = writer.StartNewSlice();
        int count = AtLeast(100);
        for (int value = 0; value < count; value++)
        {
            writer.WriteInt(value);
        }
        int sliceEnd = writer.CurrentOffset;
        // Read it all back and confirm the slice is fully consumed.
        IntBlockPool.SliceReader reader = new IntBlockPool.SliceReader(pool);
        reader.Reset(sliceStart, sliceEnd);
        for (int expected = 0; expected < count; expected++)
        {
            Assert.AreEqual(expected, reader.ReadInt());
        }
        Assert.IsTrue(reader.EndOfSlice());
        // Reset the pool; when the first buffer is kept, its bytes stay accounted.
        if (Random().NextBoolean())
        {
            pool.Reset(true, false);
            Assert.AreEqual(0, bytesUsed.Get());
        }
        else
        {
            pool.Reset(true, true);
            Assert.AreEqual(IntBlockPool.INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT, bytesUsed.Get());
        }
    }
}
private void AssertReader(IntBlockPool.SliceReader reader, StartEndAndValues values)
{
    // Replay the slice [Start, End) and check it yields the consecutive run
    // ValueOffset, ValueOffset+1, ... followed immediately by end-of-slice.
    reader.Reset(values.Start, values.End);
    int count = values.ValueCount;
    for (int i = 0; i < count; i++)
    {
        Assert.AreEqual(values.ValueOffset + i, reader.ReadInt());
    }
    Assert.IsTrue(reader.EndOfSlice());
}