        private void Flush()
        {
            IndexWriter.WriteIndex(NumBufferedDocs, FieldsStream.FilePointer);

            // transform end offsets into lengths
            int[] lengths = EndOffsets;
            for (int i = NumBufferedDocs - 1; i > 0; --i)
            {
                lengths[i] = EndOffsets[i] - EndOffsets[i - 1];
                Debug.Assert(lengths[i] >= 0);
            }
            WriteHeader(DocBase, NumBufferedDocs, NumStoredFields, lengths);

            // compress stored fields to fieldsStream
            if (BufferedDocs.Length >= 2 * ChunkSize)
            {
                // big chunk, slice it
                for (int compressed = 0; compressed < BufferedDocs.Length; compressed += ChunkSize)
                {
                    Compressor.Compress(BufferedDocs.Bytes, compressed, Math.Min(ChunkSize, BufferedDocs.Length - compressed), FieldsStream);
                }
            }
            else
            {
                Compressor.Compress(BufferedDocs.Bytes, 0, BufferedDocs.Length, FieldsStream);
            }

            // reset
            DocBase            += NumBufferedDocs;
            NumBufferedDocs     = 0;
            BufferedDocs.Length = 0;
        }
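
The backwards loop above turns cumulative end offsets into per-document lengths in place; it stops at index 1 because the first document starts at offset 0, so lengths[0] is already endOffsets[0]. A minimal standalone sketch of the same delta transform (the array contents are made up for illustration):

using System;

class EndOffsetDemo
{
    static void Main()
    {
        // Cumulative end offsets of three serialized documents:
        // doc0 ends at byte 10, doc1 at byte 25, doc2 at byte 25 (doc2 is empty).
        int[] endOffsets = { 10, 25, 25 };

        // Walk backwards so each subtraction still reads the original
        // previous offset; endOffsets[0] is already doc0's length.
        int[] lengths = endOffsets;
        for (int i = lengths.Length - 1; i > 0; --i)
        {
            lengths[i] = endOffsets[i] - endOffsets[i - 1];
        }

        Console.WriteLine(string.Join(", ", lengths)); // prints: 10, 15, 0
    }
}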
        private void Flush()
        {
            indexWriter.WriteIndex(numBufferedDocs, fieldsStream.Position); // LUCENENET specific: Renamed from getFilePointer() to match FileStream

            // transform end offsets into lengths
            int[] lengths = endOffsets;
            for (int i = numBufferedDocs - 1; i > 0; --i)
            {
                lengths[i] = endOffsets[i] - endOffsets[i - 1];
                if (Debugging.AssertsEnabled)
                {
                    Debugging.Assert(lengths[i] >= 0);
                }
            }
            WriteHeader(docBase, numBufferedDocs, numStoredFields, lengths);

            // compress stored fields to fieldsStream
            if (bufferedDocs.Length >= 2 * chunkSize)
            {
                // big chunk, slice it
                for (int compressed = 0; compressed < bufferedDocs.Length; compressed += chunkSize)
                {
                    compressor.Compress(bufferedDocs.Bytes, compressed, Math.Min(chunkSize, bufferedDocs.Length - compressed), fieldsStream);
                }
            }
            else
            {
                compressor.Compress(bufferedDocs.Bytes, 0, bufferedDocs.Length, fieldsStream);
            }

            // reset
            docBase            += numBufferedDocs;
            numBufferedDocs     = 0;
            bufferedDocs.Length = 0;
        }
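
Both versions of the method share the same slicing rule for compression: once the buffered documents have grown to at least 2 * chunkSize bytes, the buffer is handed to the compressor in chunkSize-sized slices, which lets the corresponding reader decompress a big chunk piece by piece rather than all at once. A self-contained sketch of just that loop, with a stand-in Compress method that records slice boundaries instead of Lucene.NET's Compressor (the names here are illustrative, not the library's API):

using System;
using System.Collections.Generic;

class ChunkSliceDemo
{
    private const int ChunkSize = 4;

    // Stand-in for compressor.Compress: records each (offset, length) slice.
    private static void Compress(byte[] bytes, int offset, int length,
        List<(int Offset, int Length)> sink)
        => sink.Add((offset, length));

    static void Main()
    {
        byte[] bufferedDocs = new byte[10]; // >= 2 * ChunkSize, so it gets sliced
        var slices = new List<(int Offset, int Length)>();

        if (bufferedDocs.Length >= 2 * ChunkSize)
        {
            // big chunk, slice it (same bounds arithmetic as the methods above)
            for (int compressed = 0; compressed < bufferedDocs.Length; compressed += ChunkSize)
            {
                Compress(bufferedDocs, compressed,
                    Math.Min(ChunkSize, bufferedDocs.Length - compressed), slices);
            }
        }
        else
        {
            Compress(bufferedDocs, 0, bufferedDocs.Length, slices);
        }

        foreach (var (offset, length) in slices)
        {
            Console.WriteLine($"slice at {offset}, {length} bytes"); // (0,4) (4,4) (8,2)
        }
    }
}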
Example #3
        private void Flush()
        {
            int chunkDocs = pendingDocs.Count;

            if (Debugging.AssertsEnabled)
            {
                Debugging.Assert(chunkDocs > 0, "{0}", chunkDocs);
            }

            // write the index file
            indexWriter.WriteIndex(chunkDocs, vectorsStream.GetFilePointer());

            int docBase = numDocs - chunkDocs;

            vectorsStream.WriteVInt32(docBase);
            vectorsStream.WriteVInt32(chunkDocs);

            // total number of fields of the chunk
            int totalFields = FlushNumFields(chunkDocs);

            if (totalFields > 0)
            {
                // unique field numbers (sorted)
                int[] fieldNums = FlushFieldNums();
                // offsets in the array of unique field numbers
                FlushFields(totalFields, fieldNums);
                // flags (does the field have positions, offsets, payloads?)
                FlushFlags(totalFields, fieldNums);
                // number of terms of each field
                FlushNumTerms(totalFields);
                // prefix and suffix lengths for each field
                FlushTermLengths();
                // term freqs - 1 (because termFreq is always >=1) for each term
                FlushTermFreqs();
                // positions for all terms, when enabled
                FlushPositions();
                // offsets for all terms, when enabled
                FlushOffsets(fieldNums);
                // payload lengths for all terms, when enabled
                FlushPayloadLengths();

                // compress terms and payloads and write them to the output
                compressor.Compress(termSuffixes.Bytes, 0, termSuffixes.Length, vectorsStream);
            }

            // reset
            pendingDocs.Clear();
            curDoc              = null;
            curField            = null;
            termSuffixes.Length = 0;
        }
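
The chunk header at the top of this method is written with WriteVInt32, Lucene's variable-length integer encoding: seven payload bits per byte, low-order bits first, with the high bit set on every byte except the last. A minimal sketch of that encoding for non-negative values (this mirrors the on-disk format, not Lucene.NET's actual DataOutput implementation):

using System;
using System.Collections.Generic;

class VInt32Demo
{
    // Lucene-style VInt: emit 7 bits at a time, high bit = "more bytes follow".
    static byte[] WriteVInt32(int value)
    {
        var bytes = new List<byte>();
        uint v = (uint)value; // unsigned shift so the loop terminates
        while (v >= 0x80)
        {
            bytes.Add((byte)((v & 0x7F) | 0x80));
            v >>= 7;
        }
        bytes.Add((byte)v);
        return bytes.ToArray();
    }

    static void Main()
    {
        // A docBase of 300 takes two bytes: AC 02.
        Console.WriteLine(BitConverter.ToString(WriteVInt32(300)));
        // A chunkDocs of 5 fits in a single byte: 05.
        Console.WriteLine(BitConverter.ToString(WriteVInt32(5)));
    }
}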