Example #1
        /// <summary>
        /// Low-level API to get the most relevant (formatted) sections of the document.
        /// This method has been made public to allow visibility of score information held in TextFragment objects.
        /// Thanks to Jason Calabrese for help in redefining the interface.
        /// </summary>
        public TextFragment[] GetBestTextFragments(
            TokenStream tokenStream,
            String text,
            bool mergeContiguousFragments,
            int maxNumFragments)
        {
            var docFrags = new List<TextFragment>();
            var newText  = new StringBuilder();

            var termAtt   = tokenStream.AddAttribute<ITermAttribute>();
            var offsetAtt = tokenStream.AddAttribute<IOffsetAttribute>();

            tokenStream.AddAttribute<IPositionIncrementAttribute>();
            tokenStream.Reset();

            var currentFrag = new TextFragment(newText, newText.Length, docFrags.Count);
            var newStream   = _fragmentScorer.Init(tokenStream);

            if (newStream != null)
            {
                tokenStream = newStream;
            }
            _fragmentScorer.StartFragment(currentFrag);
            docFrags.Add(currentFrag);

            var fragQueue = new FragmentQueue(maxNumFragments);

            try
            {
                String tokenText;
                int    startOffset;
                int    endOffset;
                int    lastEndOffset = 0;
                _textFragmenter.Start(text, tokenStream);

                var tokenGroup = new TokenGroup(tokenStream);

                for (bool next = tokenStream.IncrementToken();
                     next && (offsetAtt.StartOffset < _maxDocCharsToAnalyze);
                     next = tokenStream.IncrementToken())
                {
                    if ((offsetAtt.EndOffset > text.Length)
                        ||
                        (offsetAtt.StartOffset > text.Length)
                        )
                    {
                        throw new InvalidTokenOffsetsException("Token " + termAtt.Term
                                                               + " exceeds length of provided text sized " + text.Length);
                    }
                    if ((tokenGroup.NumTokens > 0) && (tokenGroup.IsDistinct()))
                    {
                        //the current token is distinct from previous tokens -
                        // markup the cached token group info
                        startOffset = tokenGroup.MatchStartOffset;
                        endOffset   = tokenGroup.MatchEndOffset;
                        tokenText   = text.Substring(startOffset, endOffset - startOffset);
                        String markedUpText = _formatter.HighlightTerm(_encoder.EncodeText(tokenText), tokenGroup);
                        //store any whitespace etc from between this and last group
                        if (startOffset > lastEndOffset)
                        {
                            newText.Append(_encoder.EncodeText(text.Substring(lastEndOffset, startOffset - lastEndOffset)));
                        }
                        newText.Append(markedUpText);
                        lastEndOffset = Math.Max(endOffset, lastEndOffset);
                        tokenGroup.Clear();

                        //check if current token marks the start of a new fragment
                        if (_textFragmenter.IsNewFragment())
                        {
                            currentFrag.Score = _fragmentScorer.FragmentScore;
                            //record stats for a new fragment
                            currentFrag.TextEndPos = newText.Length;
                            currentFrag            = new TextFragment(newText, newText.Length, docFrags.Count);
                            _fragmentScorer.StartFragment(currentFrag);
                            docFrags.Add(currentFrag);
                        }
                    }

                    tokenGroup.AddToken(_fragmentScorer.GetTokenScore());

                    //                if(lastEndOffset>maxDocBytesToAnalyze)
                    //                {
                    //                    break;
                    //                }
                }
                currentFrag.Score = _fragmentScorer.FragmentScore;

                if (tokenGroup.NumTokens > 0)
                {
                    //flush the accumulated text (same code as in above loop)
                    startOffset = tokenGroup.MatchStartOffset;
                    endOffset   = tokenGroup.MatchEndOffset;
                    tokenText   = text.Substring(startOffset, endOffset - startOffset);
                    var markedUpText = _formatter.HighlightTerm(_encoder.EncodeText(tokenText), tokenGroup);
                    //store any whitespace etc from between this and last group
                    if (startOffset > lastEndOffset)
                    {
                        newText.Append(_encoder.EncodeText(text.Substring(lastEndOffset, startOffset - lastEndOffset)));
                    }
                    newText.Append(markedUpText);
                    lastEndOffset = Math.Max(lastEndOffset, endOffset);
                }

                //Test what remains of the original text beyond the point where we stopped analyzing
                if (
                    //                    if there is text beyond the last token considered..
                    (lastEndOffset < text.Length)
                    &&
                    //                    and that text is not too large...
                    (text.Length <= _maxDocCharsToAnalyze)
                    )
                {
                    //append it to the last fragment
                    newText.Append(_encoder.EncodeText(text.Substring(lastEndOffset)));
                }

                currentFrag.TextEndPos = newText.Length;

                //sort the most relevant sections of the text
                foreach (var f in docFrags)
                {
                    currentFrag = f;

                    //If you are running with a version of Lucene before 11th Sept 03
                    // you do not have PriorityQueue.insert() - so uncomment the code below

                    /*
                     *                  if (currentFrag.getScore() >= minScore)
                     *                  {
                     *                      fragQueue.put(currentFrag);
                     *                      if (fragQueue.size() > maxNumFragments)
                     *                      { // if hit queue overfull
                     *                          fragQueue.pop(); // remove lowest in hit queue
                     *                          minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
                     *                      }
                     *
                     *
                     *                  }
                     */
                    //The above code caused a problem as a result of Christoph Goller's 11th Sept 03
                    //fix to PriorityQueue. The correct method to use here is the new "insert" method
                    // USE ABOVE CODE IF THIS DOES NOT COMPILE!
                    fragQueue.InsertWithOverflow(currentFrag);
                }

                //return the most relevant fragments
                var frag = new TextFragment[fragQueue.Size()];
                for (int i = frag.Length - 1; i >= 0; i--)
                {
                    frag[i] = fragQueue.Pop();
                }

                //merge any contiguous fragments to improve readability
                if (mergeContiguousFragments)
                {
                    MergeContiguousFragments(frag);
                    frag = frag.Where(t => (t != null) && (t.Score > 0)).ToArray();
                }

                return frag;
            }
            finally
            {
                if (tokenStream != null)
                {
                    try
                    {
                        tokenStream.Close();
                    }
                    catch (Exception)
                    {
                        // Ignore any exception raised while closing the stream.
                    }
                }
            }
        }
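
For orientation, the sketch below shows one way this method is typically reached through the Lucene.Net 3.x highlighter API (QueryScorer, SimpleHTMLFormatter, SimpleFragmenter). The field name "contents", the 100-character fragment size, and the helper class name are illustrative assumptions, not part of the example above.

using System.IO;
using System.Linq;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Search;
using Lucene.Net.Search.Highlight;
using LuceneVersion = Lucene.Net.Util.Version;

public static class HighlighterUsageSketch
{
    // Returns up to maxNumFragments highlighted snippets for the given query and text.
    public static string[] GetSnippets(Query query, string text, int maxNumFragments)
    {
        var scorer      = new QueryScorer(query);                 // scores fragments against the query terms
        var highlighter = new Highlighter(new SimpleHTMLFormatter(), scorer)
        {
            // Assumed property setter; older ports expose SetTextFragmenter(...) instead.
            TextFragmenter = new SimpleFragmenter(100)
        };

        var analyzer = new StandardAnalyzer(LuceneVersion.LUCENE_30);
        TokenStream tokenStream = analyzer.TokenStream("contents", new StringReader(text));

        // mergeContiguousFragments = true joins adjacent fragments before they are ranked.
        TextFragment[] frags = highlighter.GetBestTextFragments(tokenStream, text, true, maxNumFragments);

        return frags
            .Where(f => f != null && f.Score > 0)
            .Select(f => f.ToString())
            .ToArray();
    }
}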
Example #2
        /// <summary> Low-level API to get the most relevant (formatted) sections of the document.
        /// This method has been made public to allow visibility of score information held in TextFragment objects.
        /// Thanks to Jason Calabrese for help in redefining the interface.
        /// </summary>
        /// <param name="tokenStream"></param>
        /// <param name="text"></param>
        /// <param name="mergeContiguousFragments"></param>
        /// <param name="maxNumFragments"></param>
        /// <throws>  IOException </throws>
        public TextFragment[] GetBestTextFragments(TokenStream tokenStream, string text, bool mergeContiguousFragments, int maxNumFragments)
        {
            ArrayList docFrags = new ArrayList();
            StringBuilder newText = new StringBuilder();

            TextFragment currentFrag = new TextFragment(newText, newText.Length, docFrags.Count);
            fragmentScorer.StartFragment(currentFrag);
            docFrags.Add(currentFrag);

            FragmentQueue fragQueue = new FragmentQueue(maxNumFragments);

            try
            {
                Lucene.Net.Analysis.Token token;
                string tokenText;
                int startOffset;
                int endOffset;
                int lastEndOffset = 0;
                textFragmenter.Start(text);

                TokenGroup tokenGroup = new TokenGroup();

                while ((token = tokenStream.Next()) != null)
                {
                    if ((tokenGroup.numTokens > 0) && (tokenGroup.IsDistinct(token)))
                    {
                        //the current token is distinct from previous tokens -
                        // markup the cached token group info
                        startOffset = tokenGroup.startOffset;
                        endOffset = tokenGroup.endOffset;
                        tokenText = text.Substring(startOffset, endOffset - startOffset);
                        string markedUpText = formatter.HighlightTerm(encoder.EncodeText(tokenText), tokenGroup);
                        //store any whitespace etc from between this and last group
                        if (startOffset > lastEndOffset)
                            newText.Append(encoder.EncodeText(text.Substring(lastEndOffset, startOffset - lastEndOffset)));
                        newText.Append(markedUpText);
                        lastEndOffset = endOffset;
                        tokenGroup.Clear();

                        //check if current token marks the start of a new fragment
                        if (textFragmenter.IsNewFragment(token))
                        {
                            currentFrag.SetScore(fragmentScorer.FragmentScore);
                            //record stats for a new fragment
                            currentFrag.textEndPos = newText.Length;
                            currentFrag = new TextFragment(newText, newText.Length, docFrags.Count);
                            fragmentScorer.StartFragment(currentFrag);
                            docFrags.Add(currentFrag);
                        }
                    }

                    tokenGroup.AddToken(token, fragmentScorer.GetTokenScore(token));

                    if (lastEndOffset > maxDocBytesToAnalyze)
                    {
                        break;
                    }
                }
                currentFrag.SetScore(fragmentScorer.FragmentScore);

                if (tokenGroup.numTokens > 0)
                {
                    //flush the accumulated text (same code as in above loop)
                    startOffset = tokenGroup.startOffset;
                    endOffset = tokenGroup.endOffset;
                    tokenText = text.Substring(startOffset, endOffset - startOffset);
                    string markedUpText = formatter.HighlightTerm(encoder.EncodeText(tokenText), tokenGroup);
                    //store any whitespace etc from between this and last group
                    if (startOffset > lastEndOffset)
                        newText.Append(encoder.EncodeText(text.Substring(lastEndOffset, startOffset - lastEndOffset)));
                    newText.Append(markedUpText);
                    lastEndOffset = endOffset;
                }

                // append text after end of last token
                //			if (lastEndOffset < text.length())
                //				newText.append(encoder.encodeText(text.substring(lastEndOffset)));

                currentFrag.textEndPos = newText.Length;

                //sort the most relevant sections of the text
                for (IEnumerator i = docFrags.GetEnumerator(); i.MoveNext(); )
                {
                    currentFrag = (TextFragment) i.Current;

                    //If you are running with a version of Lucene before 11th Sept 03
                    // you do not have PriorityQueue.insert() - so uncomment the code below
                    /*
                    if (currentFrag.getScore() >= minScore)
                    {
                    fragQueue.put(currentFrag);
                    if (fragQueue.size() > maxNumFragments)
                    { // if hit queue overfull
                    fragQueue.pop(); // remove lowest in hit queue
                    minScore = ((TextFragment) fragQueue.top()).getScore(); // reset minScore
                    }

                    }
                    */
                    //The above code caused a problem as a result of Christoph Goller's 11th Sept 03
                    //fix to PriorityQueue. The correct method to use here is the new "insert" method
                    // USE ABOVE CODE IF THIS DOES NOT COMPILE!
                    fragQueue.Insert(currentFrag);
                }

                //return the most relevant fragments
                TextFragment[] frag = new TextFragment[fragQueue.Size()];
                for (int i = frag.Length - 1; i >= 0; i--)
                {
                    frag[i] = (TextFragment) fragQueue.Pop();
                }

                //merge any contiguous fragments to improve readability
                if (mergeContiguousFragments)
                {
                    MergeContiguousFragments(frag);
                    ArrayList fragTexts = new ArrayList();
                    for (int i = 0; i < frag.Length; i++)
                    {
                        if ((frag[i] != null) && (frag[i].GetScore() > 0))
                        {
                            fragTexts.Add(frag[i]);
                        }
                    }
                    frag = (TextFragment[]) fragTexts.ToArray(typeof(TextFragment));
                }

                return frag;
            }
            finally
            {
                if (tokenStream != null)
                {
                    try
                    {
                        tokenStream.Close();
                    }
                    catch (Exception)
                    {
                        // Rethrow without resetting the stack trace.
                        throw;
                    }
                }
            }
        }
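
Both examples return the ranked fragments rather than a finished snippet string. The helper below is a minimal sketch of one way to stitch that TextFragment[] result into display text; the separator and the zero-score filter are illustrative choices, not part of the library. It uses the Score property from the 3.x-style port shown in Example #1; the older port in Example #2 exposes GetScore() instead.

using System.Linq;
using System.Text;
using Lucene.Net.Search.Highlight;

public static class FragmentJoiner
{
    // Concatenates the non-null, positively scored fragments with a separator between them.
    public static string Join(TextFragment[] fragments, string separator = "...")
    {
        var sb = new StringBuilder();
        foreach (var frag in fragments.Where(f => f != null && f.Score > 0))
        {
            if (sb.Length > 0)
            {
                sb.Append(separator);
            }
            sb.Append(frag.ToString()); // TextFragment.ToString() yields the marked-up fragment text
        }
        return sb.ToString();
    }
}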