Example #1
0
        // Tokenizes the fields of a document into Postings.
        private void InvertDocument(Document doc)
        {
            foreach (Field field in doc.Fields())
            {
                System.String fieldName   = field.Name();
                int           fieldNumber = fieldInfos.FieldNumber(fieldName);

                int length   = fieldLengths[fieldNumber];               // length of field
                int position = fieldPositions[fieldNumber];             // position in field
                if (length > 0)
                {
                    position += analyzer.GetPositionIncrementGap(fieldName);
                }
                int offset = fieldOffsets[fieldNumber];                 // character offset in field

                if (field.IsIndexed())
                {
                    if (!field.IsTokenized())
                    {
                        // un-tokenized field
                        System.String stringValue = field.StringValue();
                        if (field.IsStoreOffsetWithTermVector())
                        {
                            AddPosition(fieldName, stringValue, position++, new TermVectorOffsetInfo(offset, offset + stringValue.Length));
                        }
                        else
                        {
                            AddPosition(fieldName, stringValue, position++, null);
                        }
                        offset += stringValue.Length;
                        length++;
                    }
                    else
                    {
                        System.IO.TextReader reader;                         // find or make Reader
                        if (field.ReaderValue() != null)
                        {
                            reader = field.ReaderValue();
                        }
                        else if (field.StringValue() != null)
                        {
                            reader = new System.IO.StringReader(field.StringValue());
                        }
                        else
                        {
                            throw new System.ArgumentException("field must have either String or Reader value");
                        }

                        // Tokenize field and add to postingTable
                        TokenStream stream = analyzer.TokenStream(fieldName, reader);
                        try
                        {
                            Token lastToken = null;
                            for (Token t = stream.Next(); t != null; t = stream.Next())
                            {
                                position += (t.GetPositionIncrement() - 1);

                                if (field.IsStoreOffsetWithTermVector())
                                {
                                    AddPosition(fieldName, t.TermText(), position++, new TermVectorOffsetInfo(offset + t.StartOffset(), offset + t.EndOffset()));
                                }
                                else
                                {
                                    AddPosition(fieldName, t.TermText(), position++, null);
                                }

                                lastToken = t;
                                if (++length > maxFieldLength)
                                {
                                    if (infoStream != null)
                                    {
                                        infoStream.WriteLine("maxFieldLength " + maxFieldLength + " reached, ignoring following tokens");
                                    }
                                    break;
                                }
                            }

                            if (lastToken != null)
                            {
                                offset += lastToken.EndOffset() + 1;
                            }
                        }
                        finally
                        {
                            stream.Close();
                        }
                    }

                    fieldLengths[fieldNumber]   = length;                   // save field length
                    fieldPositions[fieldNumber] = position;                 // save field position
                    fieldBoosts[fieldNumber]   *= field.GetBoost();
                    fieldOffsets[fieldNumber]   = offset;
                }
            }
        }
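        // The GetPositionIncrementGap call above is what keeps phrase queries from
        // matching across two values of the same field: when a field occurs more than
        // once in a document, the gap is added to `position` before the next value is
        // inverted. A minimal sketch, assuming the port declares
        // Analyzer.GetPositionIncrementGap virtual as in the Java original (the
        // subclass name and gap size are hypothetical):
        public class GappedAnalyzer : Lucene.Net.Analysis.Standard.StandardAnalyzer
        {
            public override int GetPositionIncrementGap(System.String fieldName)
            {
                return 100; // positions of the next field value start 100 past the previous one
            }
        }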
Example #2
0
		/// <summary> Low-level API to get the most relevant (formatted) sections of the document.
		/// This method has been made public to allow visibility of score information held in TextFragment objects.
		/// Thanks to Jason Calabrese for help in redefining the interface.
		/// </summary>
		/// <param name="tokenStream">the token stream produced by analyzing the text</param>
		/// <param name="text">the raw text to highlight; token offsets refer to this string</param>
		/// <param name="mergeContiguousFragments">true to merge adjacent fragments for readability</param>
		/// <param name="maxNumFragments">the maximum number of fragments to return</param>
		/// <throws>  IOException </throws>
		public TextFragment[] GetBestTextFragments(TokenStream tokenStream, System.String text, bool mergeContiguousFragments, int maxNumFragments)
		{
			System.Collections.ArrayList docFrags = new System.Collections.ArrayList();
			System.Text.StringBuilder newText = new System.Text.StringBuilder();
			
			TextFragment currentFrag = new TextFragment(newText, newText.Length, docFrags.Count);
			fragmentScorer.StartFragment(currentFrag);
			docFrags.Add(currentFrag);
			
			FragmentQueue fragQueue = new FragmentQueue(maxNumFragments);
			
			try
			{
				Lucene.Net.Analysis.Token token;
				System.String tokenText;
				int startOffset;
				int endOffset;
				int lastEndOffset = 0;
				textFragmenter.Start(text);
				
				TokenGroup tokenGroup = new TokenGroup();
				token = tokenStream.Next();
				while ((token != null) && (token.StartOffset() < maxDocBytesToAnalyze))
				{
					if ((tokenGroup.numTokens > 0) && (tokenGroup.IsDistinct(token)))
					{
						//the current token is distinct from previous tokens - 
						// markup the cached token group info
						startOffset = tokenGroup.matchStartOffset;
						endOffset = tokenGroup.matchEndOffset;
						tokenText = text.Substring(startOffset, (endOffset) - (startOffset));
						System.String markedUpText = formatter.HighlightTerm(encoder.EncodeText(tokenText), tokenGroup);
						//store any whitespace etc from between this and last group
						if (startOffset > lastEndOffset)
							newText.Append(encoder.EncodeText(text.Substring(lastEndOffset, (startOffset) - (lastEndOffset))));
						newText.Append(markedUpText);
						lastEndOffset = System.Math.Max(endOffset, lastEndOffset);
						tokenGroup.Clear();
						
						//check if current token marks the start of a new fragment						
						if (textFragmenter.IsNewFragment(token))
						{
							currentFrag.SetScore(fragmentScorer.GetFragmentScore());
							//record stats for a new fragment
							currentFrag.textEndPos = newText.Length;
							currentFrag = new TextFragment(newText, newText.Length, docFrags.Count);
							fragmentScorer.StartFragment(currentFrag);
							docFrags.Add(currentFrag);
						}
					}
					
					tokenGroup.AddToken(token, fragmentScorer.GetTokenScore(token));
					
					//				if(lastEndOffset>maxDocBytesToAnalyze)
					//				{
					//					break;
					//				}
					token = tokenStream.Next();
				}
				currentFrag.SetScore(fragmentScorer.GetFragmentScore());
				
				if (tokenGroup.numTokens > 0)
				{
					//flush the accumulated text (same code as in above loop)
					startOffset = tokenGroup.matchStartOffset;
					endOffset = tokenGroup.matchEndOffset;
					tokenText = text.Substring(startOffset, (endOffset) - (startOffset));
					System.String markedUpText = formatter.HighlightTerm(encoder.EncodeText(tokenText), tokenGroup);
					//store any whitespace etc from between this and last group
					if (startOffset > lastEndOffset)
						newText.Append(encoder.EncodeText(text.Substring(lastEndOffset, (startOffset) - (lastEndOffset))));
					newText.Append(markedUpText);
					lastEndOffset = System.Math.Max(lastEndOffset, endOffset);
				}
				
				//Test what remains of the original text beyond the point where we stopped analyzing 
				if ((lastEndOffset < text.Length) && (text.Length < maxDocBytesToAnalyze))
				{
					//append it to the last fragment
					newText.Append(encoder.EncodeText(text.Substring(lastEndOffset)));
				}
				
				currentFrag.textEndPos = newText.Length;
				
				//sort the most relevant sections of the text
				for (System.Collections.IEnumerator i = docFrags.GetEnumerator(); i.MoveNext(); )
				{
					currentFrag = (TextFragment) i.Current;
					
					//If you are running with a version of Lucene before 11th Sept 03
					// you do not have PriorityQueue.insert() - so uncomment the code below					
					/*
					if (currentFrag.getScore() >= minScore)
					{
					fragQueue.put(currentFrag);
					if (fragQueue.size() > maxNumFragments)
					{ // if hit queue overfull
					fragQueue.pop(); // remove lowest in hit queue
					minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
					}
					
					
					}
					*/
					//The above code caused a problem as a result of Christoph Goller's 11th Sept 03
					//fix to PriorityQueue. The correct method to use here is the new "insert" method
					// USE ABOVE CODE IF THIS DOES NOT COMPILE!
					fragQueue.Insert(currentFrag);
				}
				
				//return the most relevant fragments
				TextFragment[] frag = new TextFragment[fragQueue.Size()];
				for (int i = frag.Length - 1; i >= 0; i--)
				{
					frag[i] = (TextFragment) fragQueue.Pop();
				}
				
				//merge any contiguous fragments to improve readability
				if (mergeContiguousFragments)
				{
					MergeContiguousFragments(frag);
					System.Collections.ArrayList fragTexts = new System.Collections.ArrayList();
					for (int i = 0; i < frag.Length; i++)
					{
						if ((frag[i] != null) && (frag[i].GetScore() > 0))
						{
							fragTexts.Add(frag[i]);
						}
					}
					frag = (TextFragment[]) fragTexts.ToArray(typeof(TextFragment));
				}
				
				return frag;
			}
			finally
			{
				if (tokenStream != null)
				{
					try
					{
						tokenStream.Close();
					}
					catch (System.Exception)
					{
						// ignore exceptions thrown while closing the stream
					}
				}
			}
		}
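		// A minimal usage sketch for the method above (the "contents" field name and the
		// query/analyzer arguments are hypothetical; assumes the usual using directives
		// for Lucene.Net.Search and Lucene.Net.Analysis): get the three best fragments,
		// merging contiguous ones, and print any that scored. Note that
		// GetBestTextFragments closes the token stream itself in its finally block.
		public static void PrintBestFragments(Query query, Analyzer analyzer, System.String text)
		{
			Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(query));
			TokenStream stream = analyzer.TokenStream("contents", new System.IO.StringReader(text));
			TextFragment[] frags = highlighter.GetBestTextFragments(stream, text, true, 3);
			for (int i = 0; i < frags.Length; i++)
			{
				if (frags[i] != null && frags[i].GetScore() > 0)
				{
					System.Console.WriteLine(frags[i].ToString());
				}
			}
		}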
Example #3
0
        /// <summary> Low-level API to get the most relevant (formatted) sections of the document.
        /// This method has been made public to allow visibility of score information held in TextFragment objects.
        /// Thanks to Jason Calabrese for help in redefining the interface.
        /// </summary>
        /// <param name="tokenStream">the token stream produced by analyzing the text</param>
        /// <param name="text">the raw text to highlight; token offsets refer to this string</param>
        /// <param name="mergeContiguousFragments">true to merge adjacent fragments for readability</param>
        /// <param name="maxNumFragments">the maximum number of fragments to return</param>
        /// <throws>  IOException </throws>
        public TextFragment[] GetBestTextFragments(TokenStream tokenStream, System.String text, bool mergeContiguousFragments, int maxNumFragments)
        {
            System.Collections.ArrayList docFrags = new System.Collections.ArrayList();
            System.Text.StringBuilder    newText  = new System.Text.StringBuilder();

            TextFragment currentFrag = new TextFragment(newText, newText.Length, docFrags.Count);

            fragmentScorer.StartFragment(currentFrag);
            docFrags.Add(currentFrag);

            FragmentQueue fragQueue = new FragmentQueue(maxNumFragments);

            try
            {
                Lucene.Net.Analysis.Token token;
                System.String             tokenText;
                int startOffset;
                int endOffset;
                int lastEndOffset = 0;
                textFragmenter.Start(text);

                TokenGroup tokenGroup = new TokenGroup();
                token = tokenStream.Next();
                while ((token != null) && (token.StartOffset() < maxDocBytesToAnalyze))
                {
                    if ((tokenGroup.numTokens > 0) && (tokenGroup.IsDistinct(token)))
                    {
                        //the current token is distinct from previous tokens -
                        // markup the cached token group info
                        startOffset = tokenGroup.matchStartOffset;
                        endOffset   = tokenGroup.matchEndOffset;
                        tokenText   = text.Substring(startOffset, (endOffset) - (startOffset));
                        System.String markedUpText = formatter.HighlightTerm(encoder.EncodeText(tokenText), tokenGroup);
                        //store any whitespace etc from between this and last group
                        if (startOffset > lastEndOffset)
                        {
                            newText.Append(encoder.EncodeText(text.Substring(lastEndOffset, (startOffset) - (lastEndOffset))));
                        }
                        newText.Append(markedUpText);
                        lastEndOffset = System.Math.Max(endOffset, lastEndOffset);
                        tokenGroup.Clear();

                        //check if current token marks the start of a new fragment
                        if (textFragmenter.IsNewFragment(token))
                        {
                            currentFrag.SetScore(fragmentScorer.GetFragmentScore());
                            //record stats for a new fragment
                            currentFrag.textEndPos = newText.Length;
                            currentFrag            = new TextFragment(newText, newText.Length, docFrags.Count);
                            fragmentScorer.StartFragment(currentFrag);
                            docFrags.Add(currentFrag);
                        }
                    }

                    tokenGroup.AddToken(token, fragmentScorer.GetTokenScore(token));

                    //				if(lastEndOffset>maxDocBytesToAnalyze)
                    //				{
                    //					break;
                    //				}
                    token = tokenStream.Next();
                }
                currentFrag.SetScore(fragmentScorer.GetFragmentScore());

                if (tokenGroup.numTokens > 0)
                {
                    //flush the accumulated text (same code as in above loop)
                    startOffset = tokenGroup.matchStartOffset;
                    endOffset   = tokenGroup.matchEndOffset;
                    tokenText   = text.Substring(startOffset, (endOffset) - (startOffset));
                    System.String markedUpText = formatter.HighlightTerm(encoder.EncodeText(tokenText), tokenGroup);
                    //store any whitespace etc from between this and last group
                    if (startOffset > lastEndOffset)
                    {
                        newText.Append(encoder.EncodeText(text.Substring(lastEndOffset, (startOffset) - (lastEndOffset))));
                    }
                    newText.Append(markedUpText);
                    lastEndOffset = System.Math.Max(lastEndOffset, endOffset);
                }

                //Test what remains of the original text beyond the point where we stopped analyzing
                if ((lastEndOffset < text.Length) && (text.Length < maxDocBytesToAnalyze))
                {
                    //append it to the last fragment
                    newText.Append(encoder.EncodeText(text.Substring(lastEndOffset)));
                }

                currentFrag.textEndPos = newText.Length;

                //sort the most relevant sections of the text
                for (System.Collections.IEnumerator i = docFrags.GetEnumerator(); i.MoveNext();)
                {
                    currentFrag = (TextFragment)i.Current;

                    //If you are running with a version of Lucene before 11th Sept 03
                    // you do not have PriorityQueue.insert() - so uncomment the code below

                    /*
                     * if (currentFrag.getScore() >= minScore)
                     * {
                     * fragQueue.put(currentFrag);
                     * if (fragQueue.size() > maxNumFragments)
                     * { // if hit queue overfull
                     * fragQueue.pop(); // remove lowest in hit queue
                     * minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
                     * }
                     *
                     *
                     * }
                     */
                    //The above code caused a problem as a result of Christoph Goller's 11th Sept 03
                    //fix to PriorityQueue. The correct method to use here is the new "insert" method
                    // USE ABOVE CODE IF THIS DOES NOT COMPILE!
                    fragQueue.Insert(currentFrag);
                }

                //return the most relevant fragments
                TextFragment[] frag = new TextFragment[fragQueue.Size()];
                for (int i = frag.Length - 1; i >= 0; i--)
                {
                    frag[i] = (TextFragment)fragQueue.Pop();
                }

                //merge any contiguous fragments to improve readability
                if (mergeContiguousFragments)
                {
                    MergeContiguousFragments(frag);
                    System.Collections.ArrayList fragTexts = new System.Collections.ArrayList();
                    for (int i = 0; i < frag.Length; i++)
                    {
                        if ((frag[i] != null) && (frag[i].GetScore() > 0))
                        {
                            fragTexts.Add(frag[i]);
                        }
                    }
                    frag = (TextFragment[])fragTexts.ToArray(typeof(TextFragment));
                }

                return frag;
            }
            finally
            {
                if (tokenStream != null)
                {
                    try
                    {
                        tokenStream.Close();
                    }
                    catch (System.Exception)
                    {
                        // ignore exceptions thrown while closing the stream
                    }
                }
            }
        }
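        // A minimal sketch (assuming the port keeps the Java version's
        // SetMaxDocBytesToAnalyze accessor; the "contents" field name is hypothetical):
        // the two maxDocBytesToAnalyze checks above silently stop analysis partway
        // through long documents, so raise the cutoff to the full text length before
        // asking for fragments.
        public static TextFragment[] HighlightWholeDocument(Highlighter highlighter, Analyzer analyzer, System.String text)
        {
            highlighter.SetMaxDocBytesToAnalyze(text.Length);   // analyze the whole text
            TokenStream stream = analyzer.TokenStream("contents", new System.IO.StringReader(text));
            return highlighter.GetBestTextFragments(stream, text, true, 5);
        }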