        /// <summary>Constructs a {@link
        /// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}.
        /// </summary>
        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
        {
            StandardTokenizer tokenStream = new StandardTokenizer(reader, replaceInvalidAcronym);

            tokenStream.SetMaxTokenLength(maxTokenLength);
            TokenStream result = new StandardFilter(tokenStream);

            result = new LowerCaseFilter(result);
            result = new StopFilter(result, stopSet);
            return result;
        }
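For context, a hedged usage sketch of how an analyzer built around this chain can be consumed. It assumes a Lucene.Net 2.9-era build where the deprecated Next()/TermText() token API is still available; the field name and sample text are made up for illustration.

using System;
using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;

class TokenStreamDemo
{
    static void Main()
    {
        // StandardAnalyzer wires up the same chain as the method above:
        // StandardTokenizer -> StandardFilter -> LowerCaseFilter -> StopFilter.
        Analyzer analyzer = new StandardAnalyzer();

        TokenStream stream = analyzer.TokenStream("body",
            new StringReader("The Quick Brown Fox"));

        // Deprecated token-by-token iteration (assumed still present in this
        // Lucene.Net version): stop words such as "the" are dropped and the
        // remaining terms come back lower-cased.
        Token token;
        while ((token = stream.Next()) != null)
        {
            Console.WriteLine(token.TermText());
        }
        stream.Close();
    }
}

With the default English stop words this should print quick, brown and fox, one per line.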
Example #2
        /// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
        /// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}.
        /// </summary>
        public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
        {
            StandardTokenizer tokenStream = new StandardTokenizer(reader, replaceInvalidAcronym);

            tokenStream.SetMaxTokenLength(maxTokenLength);
            TokenStream result = new StandardFilter(tokenStream);

            result = new LowerCaseFilter(result);
            if (useDefaultStopPositionIncrements)
            {
                result = new StopFilter(result, stopSet);
            }
            else
            {
                result = new StopFilter(enableStopPositionIncrements, result, stopSet);
            }
            return result;
        }
Example #3
File: NGramSearch.cs Project: neiz/ravendb
			public override TokenStream TokenStream(string fieldName, TextReader reader)
			{
				var tokenizer = new StandardTokenizer(Version.LUCENE_29, reader);
				tokenizer.SetMaxTokenLength(255);
				TokenStream filter = new StandardFilter(tokenizer);
				filter = new LowerCaseFilter(filter);
				filter = new StopFilter(false, filter, StandardAnalyzer.STOP_WORDS_SET);
				return new NGramTokenFilter(filter, 2, 6);
			}
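The snippet above is only the TokenStream override from RavenDB's NGramSearch.cs. A minimal self-contained analyzer wrapping the same chain might look like the sketch below; the class name is hypothetical, and the Lucene.Net.Analysis.NGram namespace is assumed to come from the contrib analyzers package (it may differ between Lucene.Net releases).

using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.NGram;      // contrib analyzers; namespace assumed
using Lucene.Net.Analysis.Standard;
using Version = Lucene.Net.Util.Version;

// Hypothetical wrapper around the chain shown above.
public class NGramAnalyzer : Analyzer
{
    public override TokenStream TokenStream(string fieldName, TextReader reader)
    {
        // Tokenize, normalize case, drop stop words, then emit 2- to
        // 6-character grams of each surviving term.
        var tokenizer = new StandardTokenizer(Version.LUCENE_29, reader);
        tokenizer.SetMaxTokenLength(255);
        TokenStream filter = new StandardFilter(tokenizer);
        filter = new LowerCaseFilter(filter);
        filter = new StopFilter(false, filter, StandardAnalyzer.STOP_WORDS_SET);
        return new NGramTokenFilter(filter, 2, 6);
    }
}

NGramTokenFilter(filter, 2, 6) emits every contiguous 2- to 6-character substring of each surviving term (for "search": "se", "ea", ..., "search"), which is what enables substring-style matching at query time.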
Example #4
		/// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
		/// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. 
		/// </summary>
		public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
		{
			StandardTokenizer tokenStream = new StandardTokenizer(reader, replaceInvalidAcronym);
			tokenStream.SetMaxTokenLength(maxTokenLength);
			TokenStream result = new StandardFilter(tokenStream);
			result = new LowerCaseFilter(result);
			if (useDefaultStopPositionIncrements)
			{
				result = new StopFilter(result, stopSet);
			}
			else
			{
				result = new StopFilter(enableStopPositionIncrements, result, stopSet);
			}
			return result;
		}
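This variant reads replaceInvalidAcronym, maxTokenLength, stopSet and the stop-position flags from fields of its enclosing analyzer. A self-contained sketch of the same chain with those values hard-coded might look as follows; the class name, the inline stop-word list and the flag values are illustrative assumptions, not taken from the source.

using System.IO;
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.Standard;

// Hypothetical analyzer; values the method above reads from fields of its
// enclosing class are hard-coded here for illustration.
public class PositionAwareStopAnalyzer : Analyzer
{
    public override TokenStream TokenStream(string fieldName, TextReader reader)
    {
        var tokenizer = new StandardTokenizer(reader, true);   // replaceInvalidAcronym
        tokenizer.SetMaxTokenLength(255);                      // maxTokenLength

        TokenStream result = new StandardFilter(tokenizer);
        result = new LowerCaseFilter(result);

        // Passing true keeps gaps in the token positions where stop words
        // were removed, so phrase queries still see the original spacing;
        // the two-argument StopFilter constructor used above relies on the
        // library default instead.
        result = new StopFilter(true, result,
            StopFilter.MakeStopSet(new[] { "a", "an", "the" }));  // sample stop words
        return result;
    }
}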