/// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
        /// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}.
        /// </summary>
        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
        {
            StandardTokenizer tokenStream = new StandardTokenizer(reader, replaceInvalidAcronym);

            tokenStream.SetMaxTokenLength(maxTokenLength);
            TokenStream result = new StandardFilter(tokenStream);

            result = new LowerCaseFilter(result);
            result = new StopFilter(result, stopSet);
            return result;
        }
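A minimal consumption sketch for the chain above (StandardTokenizer -> StandardFilter -> LowerCaseFilter -> StopFilter), assuming the Lucene.Net 2.9-era attribute API (AddAttribute(typeof(TermAttribute)), IncrementToken()); the analyzer instance and class name are hypothetical, and exact attribute names differ slightly in later Lucene.Net versions:

// Hedged sketch, assuming Lucene.Net 2.9.x APIs; "analyzer" is a hypothetical
// instance of an analyzer whose TokenStream method looks like the example above.
using System;
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Tokenattributes;

public static class TokenStreamDemo
{
    public static void PrintTokens(Analyzer analyzer, string text)
    {
        // Runs the full filter chain built by TokenStream(fieldName, reader).
        TokenStream stream = analyzer.TokenStream("content", new StringReader(text));

        // 2.9-era attribute API: register the term attribute, then pull tokens one by one.
        TermAttribute termAtt = (TermAttribute)stream.AddAttribute(typeof(TermAttribute));
        while (stream.IncrementToken())
        {
            Console.WriteLine(termAtt.Term());
        }
        stream.Close();
    }
}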
Example #2
        /// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
        /// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}.
        /// </summary>
        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
        {
            StandardTokenizer tokenStream = new StandardTokenizer(reader, replaceInvalidAcronym);

            tokenStream.SetMaxTokenLength(maxTokenLength);
            TokenStream result = new StandardFilter(tokenStream);

            result = new LowerCaseFilter(result);
            if (useDefaultStopPositionIncrements)
            {
                result = new StopFilter(result, stopSet);
            }
            else
            {
                result = new StopFilter(enableStopPositionIncrements, result, stopSet);
            }
            return result;
        }
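Because this variant can enable stop position increments on the StopFilter, the difference is easiest to see by reading PositionIncrementAttribute alongside the terms: with increments enabled, the token following a removed stop word reports an increment greater than 1, leaving a positional "hole". A hedged sketch, again assuming the Lucene.Net 2.9-era attribute API (GetPositionIncrement() as a method) and a hypothetical analyzer instance:

// Hedged sketch, assuming Lucene.Net 2.9.x APIs.
using System;
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Tokenattributes;

public static class PositionIncrementDemo
{
    public static void Dump(Analyzer analyzer, string text)
    {
        TokenStream stream = analyzer.TokenStream("content", new StringReader(text));
        TermAttribute termAtt = (TermAttribute)stream.AddAttribute(typeof(TermAttribute));
        PositionIncrementAttribute posAtt =
            (PositionIncrementAttribute)stream.AddAttribute(typeof(PositionIncrementAttribute));

        int position = 0;
        while (stream.IncrementToken())
        {
            // With stop position increments enabled, a token that follows a removed
            // stop word (e.g. "the") reports an increment > 1.
            position += posAtt.GetPositionIncrement();
            Console.WriteLine(position + ": " + termAtt.Term());
        }
        stream.Close();
    }
}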
Example #3
			public override TokenStream TokenStream(string fieldName, TextReader reader)
			{
				var tokenizer = new StandardTokenizer(Version.LUCENE_29, reader);
				tokenizer.SetMaxTokenLength(255);
				TokenStream filter = new StandardFilter(tokenizer);
				filter = new LowerCaseFilter(filter);
				filter = new StopFilter(false, filter, StandardAnalyzer.STOP_WORDS_SET);
				return new NGramTokenFilter(filter, 2, 6);
			}
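Since this chain ends in NGramTokenFilter(filter, 2, 6), every surviving term is expanded into its 2- to 6-character grams, a common setup for substring or autocomplete matching. A hedged sketch of how the output could be inspected, assuming the same Lucene.Net 2.9-era attribute API and a hypothetical analyzer instance:

// Hedged sketch, assuming Lucene.Net 2.9.x APIs. Feeding "Lucene" through the chain
// above would emit the lower-cased token's 2..6-grams, e.g. "lu", "uc", ..., "ucene", "lucene".
using System;
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Tokenattributes;

public static class NGramDemo
{
    public static void PrintGrams(Analyzer analyzer)
    {
        TokenStream stream = analyzer.TokenStream("title", new StringReader("Lucene"));
        TermAttribute termAtt = (TermAttribute)stream.AddAttribute(typeof(TermAttribute));
        while (stream.IncrementToken())
        {
            Console.WriteLine(termAtt.Term()); // n-grams of length 2 through 6
        }
        stream.Close();
    }
}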
Example #4
		/// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
		/// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. 
		/// </summary>
		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
		{
			StandardTokenizer tokenStream = new StandardTokenizer(reader, replaceInvalidAcronym);
			tokenStream.SetMaxTokenLength(maxTokenLength);
			TokenStream result = new StandardFilter(tokenStream);
			result = new LowerCaseFilter(result);
			if (useDefaultStopPositionIncrements)
			{
				result = new StopFilter(result, stopSet);
			}
			else
			{
				result = new StopFilter(enableStopPositionIncrements, result, stopSet);
			}
			return result;
		}
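For completeness, analyzers built this way are normally handed to an IndexWriter, which pulls a token stream from the analyzer for every analyzed field it indexes. A minimal indexing sketch, assuming Lucene.Net 2.9.x (RAMDirectory, the classic IndexWriter constructor) and a hypothetical analyzer instance:

// Hedged sketch, assuming Lucene.Net 2.9.x APIs; "analyzer" is hypothetical.
using Lucene.Net.Analysis;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;

public static class IndexingDemo
{
    public static void IndexOne(Analyzer analyzer)
    {
        Directory dir = new RAMDirectory();
        // The writer consumes the analyzer's token stream for each ANALYZED field.
        IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);

        Document doc = new Document();
        doc.Add(new Field("content", "The quick brown fox", Field.Store.YES, Field.Index.ANALYZED));
        writer.AddDocument(doc);

        writer.Close();
        dir.Close();
    }
}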