Code example #1
 public DocMapAnonymousInnerClassHelper(int maxDoc, MonotonicAppendingLongBuffer newToOld, MonotonicAppendingLongBuffer oldToNew)
 {
     this.maxDoc   = maxDoc;
     this.newToOld = newToOld;
     this.oldToNew = oldToNew;
 }
Code example #2
        private readonly MonotonicAppendingLongBuffer Positions, WordNums; // wordNums[i] starts at the sequence at positions[i]

        internal WAH8DocIdSet(byte[] data, int cardinality, int indexInterval, MonotonicAppendingLongBuffer positions, MonotonicAppendingLongBuffer wordNums)
        {
            this.Data = data;
            this.Cardinality_Renamed = cardinality;
            this.IndexInterval       = indexInterval;
            this.Positions           = positions;
            this.WordNums            = wordNums;
        }
Code example #3
 internal Iterator(sbyte[] data, int cardinality, int indexInterval, MonotonicAppendingLongBuffer docIDs, MonotonicAppendingLongBuffer offsets)
 {
     this.Data = data;
     this.Cardinality = cardinality;
     this.IndexInterval = indexInterval;
     this.DocIDs = docIDs;
     this.Offsets = offsets;
     Offset = 0;
     NextDocs = new int[BLOCK_SIZE];
     Arrays.Fill(NextDocs, -1);
     i = BLOCK_SIZE;
     NextExceptions = new int[BLOCK_SIZE];
     BlockIdx = -1;
     DocID_Renamed = -1;
 }
Code example #4
        /// <summary>
        /// Computes the old-to-new permutation over the given comparator. </summary>
//JAVA TO C# CONVERTER WARNING: 'final' parameters are not available in .NET:
//ORIGINAL LINE: private static Sorter.DocMap sort(final int maxDoc, DocComparator comparator)
        private static Sorter.DocMap sort(int maxDoc, DocComparator comparator)
        {
            // check if the index is sorted
            bool sorted = true;

            for (int i = 1; i < maxDoc; ++i)
            {
                if (comparator.compare(i - 1, i) > 0)
                {
                    sorted = false;
                    break;
                }
            }
            if (sorted)
            {
                return null;
            }

            // sort doc IDs
//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
//ORIGINAL LINE: final int[] docs = new int[maxDoc];
            int[] docs = new int[maxDoc];
            for (int i = 0; i < maxDoc; i++)
            {
                docs[i] = i;
            }

            DocValueSorter sorter = new DocValueSorter(docs, comparator);

            // It can be common to sort a reader, add docs, sort it again, ... and in
            // that case timSort can save a lot of time
            sorter.sort(0, docs.Length);     // docs is now the newToOld mapping

            // The reason why we use MonotonicAppendingLongBuffer here is that it
            // wastes very little memory if the index is in random order but can save
            // a lot of memory if the index is already "almost" sorted
//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
//ORIGINAL LINE: final org.apache.lucene.util.packed.MonotonicAppendingLongBuffer newToOld = new org.apache.lucene.util.packed.MonotonicAppendingLongBuffer();
            MonotonicAppendingLongBuffer newToOld = new MonotonicAppendingLongBuffer();

            for (int i = 0; i < maxDoc; ++i)
            {
                newToOld.add(docs[i]);
            }
            newToOld.freeze();

            for (int i = 0; i < maxDoc; ++i)
            {
                docs[(int)newToOld.get(i)] = i;
            }     // docs is now the oldToNew mapping

//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
//ORIGINAL LINE: final org.apache.lucene.util.packed.MonotonicAppendingLongBuffer oldToNew = new org.apache.lucene.util.packed.MonotonicAppendingLongBuffer();
            MonotonicAppendingLongBuffer oldToNew = new MonotonicAppendingLongBuffer();

            for (int i = 0; i < maxDoc; ++i)
            {
                oldToNew.add(docs[i]);
            }
            oldToNew.freeze();

            return new DocMapAnonymousInnerClassHelper(maxDoc, newToOld, oldToNew);
        }
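
The sort method above builds two mutually inverse mappings: newToOld records, for each position in the sorted order, the original doc ID, and oldToNew records the new position of each original doc ID. The buffers only store the values; the inversion itself is the loop docs[(int)newToOld.get(i)] = i. A minimal standalone sketch of that inversion step over plain arrays (illustrative names, not part of the Lucene.Net API):

    // Invert a permutation: if newToOld[n] == o (the doc at new position n was
    // originally doc o), then oldToNew[o] == n.
    static int[] InvertPermutation(int[] newToOld)
    {
        int[] oldToNew = new int[newToOld.Length];
        for (int newId = 0; newId < newToOld.Length; ++newId)
        {
            oldToNew[newToOld[newId]] = newId;
        }
        return oldToNew;
    }

    // Example: newToOld = {2, 0, 1} (old doc 2 sorts first) gives oldToNew = {1, 2, 0}.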
Code example #5
            /// <summary>
            /// Build the <seealso cref="PForDeltaDocIdSet"/> instance. </summary>
            public virtual PForDeltaDocIdSet Build()
            {
                Debug.Assert(BufferSize < BLOCK_SIZE);

                if (Cardinality == 0)
                {
                    Debug.Assert(PreviousDoc == -1);
                    return EMPTY;
                }

                EncodeBlock();
                sbyte[] dataArr = Arrays.CopyOf(Data.Bytes, Data.Length + MAX_BYTE_BLOCK_COUNT);

                int indexSize = (NumBlocks - 1) / IndexInterval_Renamed + 1;
                MonotonicAppendingLongBuffer docIDs, offsets;
                if (indexSize <= 1)
                {
                    docIDs = offsets = SINGLE_ZERO_BUFFER;
                }
                else
                {
                    const int pageSize = 128;
                    int initialPageCount = (indexSize + pageSize - 1) / pageSize;
                    docIDs = new MonotonicAppendingLongBuffer(initialPageCount, pageSize, PackedInts.COMPACT);
                    offsets = new MonotonicAppendingLongBuffer(initialPageCount, pageSize, PackedInts.COMPACT);
                    // Now build the index
                    Iterator it = new Iterator(dataArr, Cardinality, int.MaxValue, SINGLE_ZERO_BUFFER, SINGLE_ZERO_BUFFER);
                    for (int k = 0; k < indexSize; ++k)
                    {
                        docIDs.Add(it.DocID() + 1);
                        offsets.Add(it.Offset);
                        for (int i = 0; i < IndexInterval_Renamed; ++i)
                        {
                            it.SkipBlock();
                            if (it.DocID() == DocIdSetIterator.NO_MORE_DOCS)
                            {
                                goto indexBreak;
                            }
                        }
                    indexContinue: ;
                    }
                indexBreak:
                    docIDs.Freeze();
                    offsets.Freeze();
                }

                return new PForDeltaDocIdSet(dataArr, Cardinality, IndexInterval_Renamed, docIDs, offsets);
            }
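
Build records an entry in docIDs and offsets for every IndexInterval-th block: a doc ID bound and the byte position where that block starts. This sparse index lets a reader skip close to a target document before decoding blocks sequentially. As a hedged sketch of how such an index could be consulted, using plain arrays rather than MonotonicAppendingLongBuffer (illustrative names, not the Lucene.Net API):

    // Binary-search the sparse index for the last entry whose doc ID bound is <= target,
    // and return the byte offset at which sequential decoding should resume.
    static long StartOffsetFor(int target, long[] blockFirstDocs, long[] blockOffsets)
    {
        int lo = 0, hi = blockFirstDocs.Length - 1, best = 0;
        while (lo <= hi)
        {
            int mid = lo + ((hi - lo) >> 1);
            if (blockFirstDocs[mid] <= target)
            {
                best = mid;      // candidate entry; keep looking to the right
                lo = mid + 1;
            }
            else
            {
                hi = mid - 1;
            }
        }
        return blockOffsets[best];
    }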
Code example #6
 internal PForDeltaDocIdSet(sbyte[] data, int cardinality, int indexInterval, MonotonicAppendingLongBuffer docIDs, MonotonicAppendingLongBuffer offsets)
 {
     this.Data = data;
     this.Cardinality_Renamed = cardinality;
     this.IndexInterval = indexInterval;
     this.DocIDs = docIDs;
     this.Offsets = offsets;
 }
Code example #7
 internal Iterator(byte[] data, int cardinality, int indexInterval, MonotonicAppendingLongBuffer docIDs, MonotonicAppendingLongBuffer offsets)
 {
     this.Data          = data;
     this.Cardinality   = cardinality;
     this.IndexInterval = indexInterval;
     this.DocIDs        = docIDs;
     this.Offsets       = offsets;
     Offset             = 0;
     NextDocs           = new int[BLOCK_SIZE];
     Arrays.Fill(NextDocs, -1);
     i = BLOCK_SIZE;
     NextExceptions = new int[BLOCK_SIZE];
     BlockIdx       = -1;
     DocID_Renamed  = -1;
 }
Code example #8
File: SortingMergePolicy.cs Project: wwb/lucenenet
 public DocMapAnonymousInnerClassHelper(SortingOneMerge outerInstance, MergeState mergeState, MonotonicAppendingLongBuffer deletes)
 {
     this.outerInstance = outerInstance;
     this.mergeState    = mergeState;
     this.deletes       = deletes;
 }
Code example #9
 internal PForDeltaDocIdSet(byte[] data, int cardinality, int indexInterval, MonotonicAppendingLongBuffer docIDs, MonotonicAppendingLongBuffer offsets)
 {
     this.Data = data;
     this.Cardinality_Renamed = cardinality;
     this.IndexInterval       = indexInterval;
     this.DocIDs  = docIDs;
     this.Offsets = offsets;
 }
Code example #10
File: MultiDocValues.cs Project: Cefa68000/lucenenet
 /// <summary>
 /// Creates an ordinal map that allows mapping ords to/from a merged
 /// space from <code>subs</code>. </summary>
 /// <param name="owner"> a cache key </param>
 /// <param name="subs"> TermsEnums that support <seealso cref="TermsEnum#ord()"/>. They need
 ///             not be dense (e.g. can be FilteredTermsEnums). </param>
 /// <exception cref="IOException"> if an I/O error occurred. </exception>
 public OrdinalMap(object owner, TermsEnum[] subs)
 {
     // create the ordinal mappings by pulling a termsenum over each sub's
     // unique terms, and walking a multitermsenum over those
     this.Owner = owner;
     GlobalOrdDeltas = new MonotonicAppendingLongBuffer(PackedInts.COMPACT);
     FirstSegments = new AppendingPackedLongBuffer(PackedInts.COMPACT);
     OrdDeltas = new MonotonicAppendingLongBuffer[subs.Length];
     for (int i = 0; i < OrdDeltas.Length; i++)
     {
         OrdDeltas[i] = new MonotonicAppendingLongBuffer();
     }
     long[] segmentOrds = new long[subs.Length];
     ReaderSlice[] slices = new ReaderSlice[subs.Length];
     TermsEnumIndex[] indexes = new TermsEnumIndex[slices.Length];
     for (int i = 0; i < slices.Length; i++)
     {
         slices[i] = new ReaderSlice(0, 0, i);
         indexes[i] = new TermsEnumIndex(subs[i], i);
     }
     MultiTermsEnum mte = new MultiTermsEnum(slices);
     mte.Reset(indexes);
     long globalOrd = 0;
     while (mte.Next() != null)
     {
         TermsEnumWithSlice[] matches = mte.MatchArray;
         for (int i = 0; i < mte.MatchCount; i++)
         {
             int segmentIndex = matches[i].Index;
             long segmentOrd = matches[i].Terms.Ord();
             long delta = globalOrd - segmentOrd;
             // for each unique term, just mark the first segment index/delta where it occurs
             if (i == 0)
             {
                 FirstSegments.Add(segmentIndex);
                 GlobalOrdDeltas.Add(delta);
             }
             // for each per-segment ord, map it back to the global term.
             while (segmentOrds[segmentIndex] <= segmentOrd)
             {
                 OrdDeltas[segmentIndex].Add(delta);
                 segmentOrds[segmentIndex]++;
             }
         }
         globalOrd++;
     }
     FirstSegments.Freeze();
     GlobalOrdDeltas.Freeze();
     for (int i = 0; i < OrdDeltas.Length; ++i)
     {
         OrdDeltas[i].Freeze();
     }
 }
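
For each segment, the constructor stores only the delta globalOrd - segmentOrd at every segment ordinal. As ordinals grow this delta never decreases, which is what lets MonotonicAppendingLongBuffer store it compactly, and recovering a global ordinal is a single addition. A tiny sketch with plain arrays (illustrative only, not the Lucene.Net API):

    // ordDeltas[segment][segmentOrd] holds globalOrd - segmentOrd, as built above,
    // so mapping a per-segment ordinal into the merged space is just an addition.
    static long ToGlobalOrd(long[][] ordDeltas, int segment, long segmentOrd)
    {
        return segmentOrd + ordDeltas[segment][(int)segmentOrd];
    }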
Code example #11
File: MergeState.cs Project: joyanta/lucene.net
 public static DocMap Build(int maxDoc, Bits liveDocs)
 {
     Debug.Assert(liveDocs != null);
     MonotonicAppendingLongBuffer docMap = new MonotonicAppendingLongBuffer();
     int del = 0;
     for (int i = 0; i < maxDoc; ++i)
     {
         docMap.Add(i - del);
         if (!liveDocs.Get(i))
         {
             ++del;
         }
     }
     docMap.Freeze();
     int numDeletedDocs = del;
     Debug.Assert(docMap.Size() == maxDoc);
     return new DocMapAnonymousInnerClassHelper(maxDoc, liveDocs, docMap, numDeletedDocs);
 }
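
Build compacts the doc ID space around deletions: each doc ID i is mapped to i minus the number of deleted documents that precede it, and because that sequence never decreases it fits MonotonicAppendingLongBuffer well. The same compaction over a plain bool[] of live docs, as a standalone illustration (not the Lucene.Net types):

    // For maxDoc = 5 with docs 1 and 3 deleted (liveDocs = {T, F, T, F, T}),
    // the map is {0, 1, 1, 2, 2}: live docs 0, 2, 4 receive new IDs 0, 1, 2.
    static long[] BuildDocMap(bool[] liveDocs)
    {
        long[] docMap = new long[liveDocs.Length];
        int del = 0;
        for (int i = 0; i < liveDocs.Length; ++i)
        {
            docMap[i] = i - del;   // new doc ID if doc i survives
            if (!liveDocs[i])
            {
                ++del;             // every deletion shifts later docs down by one
            }
        }
        return docMap;
    }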
Code example #12
File: MergeState.cs Project: joyanta/lucene.net
 public DocMapAnonymousInnerClassHelper(int maxDoc, Bits liveDocs, MonotonicAppendingLongBuffer docMap, int numDeletedDocs)
 {
     this.maxDoc = maxDoc;
     this.LiveDocs = liveDocs;
     this.DocMap = docMap;
     this.numDeletedDocs = numDeletedDocs;
 }
Code example #13
File: WAH8DocIdSet.cs Project: leotohill/lucene.net
 internal Iterator(byte[] data, int cardinality, int indexInterval, MonotonicAppendingLongBuffer positions, MonotonicAppendingLongBuffer wordNums)
 {
     this.@in           = new ByteArrayDataInput(data);
     this.Cardinality   = cardinality;
     this.IndexInterval = indexInterval;
     this.Positions     = positions;
     this.WordNums      = wordNums;
     WordNum            = -1;
     Word                   = 0;
     BitList                = 0;
     SequenceNum            = -1;
     DocID_Renamed          = -1;
     IndexThreshold_Renamed = IndexThreshold(cardinality, indexInterval);
 }