A TokenFilter that applies ArabicNormalizer to normalize the orthography of Arabic text.
Inherits from: TokenFilter
        /// <summary>
        /// Tokenizes <paramref name="input"/> with an <see cref="ArabicLetterTokenizer"/>,
        /// runs it through <see cref="ArabicNormalizationFilter"/>, and asserts that the
        /// stream yields exactly the single token <paramref name="expected"/>.
        /// </summary>
        /// <param name="input">raw text to tokenize and normalize</param>
        /// <param name="expected">the single normalized token the stream must produce</param>
        private void Check(string input, string expected)
        {
            var reader = new StringReader(input);
            var tokenizer = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, reader);
            var normalized = new ArabicNormalizationFilter(tokenizer);
            AssertTokenStreamContents(normalized, new string[] { expected });
        }
Example #2
0
        /// <summary>
        /// Creates
        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
        /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
        /// </summary>
        /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
        ///         built from an <seealso cref="StandardTokenizer"/> filtered with
        ///         <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>,
        ///         <seealso cref="ArabicNormalizationFilter"/>, <seealso cref="SetKeywordMarkerFilter"/>
        ///         if a stem exclusion set is provided and <seealso cref="ArabicStemFilter"/>. </returns>
        public override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
        {
            // ArabicLetterTokenizer is obsolete; suppress the deprecation warning for the
            // pre-3.1 back-compat path only (matches the suppression used elsewhere in this file).
#pragma warning disable 612, 618
            Tokenizer source = matchVersion.OnOrAfter(LuceneVersion.LUCENE_31)
                ? new StandardTokenizer(matchVersion, reader)
                : (Tokenizer)new ArabicLetterTokenizer(matchVersion, reader);
#pragma warning restore 612, 618
            TokenStream result = new LowerCaseFilter(matchVersion, source);
            // the order here is important: the stopword list is not normalized!
            result = new StopFilter(matchVersion, result, stopwords);
            // TODO maybe we should make ArabicNormalization filter also KeywordAttribute aware?!
            result = new ArabicNormalizationFilter(result);
            if (stemExclusionSet.Count > 0)
            {
                result = new SetKeywordMarkerFilter(result, stemExclusionSet);
            }
            return new TokenStreamComponents(source, new ArabicStemFilter(result));
        }
        /// <summary>
        /// Asserts that normalizing <paramref name="input"/> via
        /// <see cref="ArabicNormalizationFilter"/> over an <see cref="ArabicLetterTokenizer"/>
        /// produces the single token <paramref name="expected"/>.
        /// </summary>
        /// <param name="input">raw text to tokenize and normalize</param>
        /// <param name="expected">the single normalized token the stream must produce</param>
        private void Check(string input, string expected)
        {
            // ArabicLetterTokenizer is obsolete; suppress the deprecation warning here only.
#pragma warning disable 612, 618
            var tokenizer = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
#pragma warning restore 612, 618
            var normalized = new ArabicNormalizationFilter(tokenizer);
            AssertTokenStreamContents(normalized, new string[] { expected });
        }
        /// <summary>
        /// Creates
        /// <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
        /// used to tokenize all the text in the provided <seealso cref="Reader"/>.
        /// </summary>
        /// <returns> <seealso cref="org.apache.lucene.analysis.Analyzer.TokenStreamComponents"/>
        ///         built from an <seealso cref="StandardTokenizer"/> filtered with
        ///         <seealso cref="LowerCaseFilter"/>, <seealso cref="StopFilter"/>,
        ///         <seealso cref="ArabicNormalizationFilter"/>, <seealso cref="SetKeywordMarkerFilter"/>
        ///         if a stem exclusion set is provided and <seealso cref="ArabicStemFilter"/>. </returns>
        public override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
        {
            Tokenizer tokenizer;
            // ArabicLetterTokenizer is obsolete; suppress the deprecation warning for the
            // pre-3.1 back-compat path only.
#pragma warning disable 612, 618
            if (matchVersion.OnOrAfter(LuceneVersion.LUCENE_31))
            {
                tokenizer = new StandardTokenizer(matchVersion, reader);
            }
            else
            {
                tokenizer = new ArabicLetterTokenizer(matchVersion, reader);
            }
#pragma warning restore 612, 618
            TokenStream stream = new LowerCaseFilter(matchVersion, tokenizer);
            // the order here is important: the stopword list is not normalized!
            stream = new StopFilter(matchVersion, stream, stopwords);
            // TODO maybe we should make ArabicNormalization filter also KeywordAttribute aware?!
            stream = new ArabicNormalizationFilter(stream);
            if (stemExclusionSet.Count > 0)
            {
                stream = new SetKeywordMarkerFilter(stream, stemExclusionSet);
            }
            return new TokenStreamComponents(tokenizer, new ArabicStemFilter(stream));
        }