Joins two token streams and keeps the last token of the first stream available, so it can be used to update the token values in the second stream. The default implementation adds the last prefix token's end offset to each suffix token's start and end offsets.

NOTE: This filter might not behave correctly if used with custom Attributes, i.e. Attributes other than the ones located in Lucene.Net.Analysis.TokenAttributes.

Inheritance: Lucene.Net.Analysis.TokenStream
 public PrefixAndSuffixAwareTokenFilter(TokenStream prefix, TokenStream input, TokenStream suffix) : base(suffix)
 {
     // Chain two PrefixAwareTokenFilters: the inner one appends input to
     // prefix (shifting the input token offsets past the last prefix token),
     // and the outer one appends suffix (shifting its offsets past the last
     // input token).
     _suffix =
         new InjectablePrefixAwareTokenFilter(
             new InjectablePrefixAwareTokenFilter(prefix, input)
             {
                 UpdateAction = UpdateInputToken
             },
             suffix)
         {
             UpdateAction = UpdateSuffixToken
         };
 }
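
The UpdateAction delegates above point at the filter's two default update callbacks. A minimal sketch of what they do, based on the summary at the top; the exact signatures in Lucene.Net may differ, and the Token.StartOffset/EndOffset properties and SetOffset method are assumed here:

 public virtual Token UpdateInputToken(Token inputToken, Token lastPrefixToken)
 {
     // Shift the input token past the end of the prefix stream.
     inputToken.SetOffset(lastPrefixToken.EndOffset + inputToken.StartOffset,
                          lastPrefixToken.EndOffset + inputToken.EndOffset);
     return inputToken;
 }

 public virtual Token UpdateSuffixToken(Token suffixToken, Token lastInputToken)
 {
     // Shift the suffix token past the end of the joined prefix + input stream.
     suffixToken.SetOffset(lastInputToken.EndOffset + suffixToken.StartOffset,
                           lastInputToken.EndOffset + suffixToken.EndOffset);
     return suffixToken;
 }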
        public virtual void Test()
        {
            PrefixAwareTokenFilter ts;

            ts = new PrefixAwareTokenFilter(new SingleTokenTokenStream(CreateToken("a", 0, 1)), new SingleTokenTokenStream(CreateToken("b", 0, 1)));
            AssertTokenStreamContents(ts, new string[] { "a", "b" }, new int[] { 0, 1 }, new int[] { 1, 2 });

            // prefix and suffix, built by applying PrefixAwareTokenFilter twice (prefix first, then suffix)

            ts = new PrefixAwareTokenFilter(new SingleTokenTokenStream(CreateToken("^", 0, 0)), new MockTokenizer(new StringReader("hello world"), MockTokenizer.WHITESPACE, false));
            ts = new PrefixAwareTokenFilter(ts, new SingleTokenTokenStream(CreateToken("$", 0, 0)));

            AssertTokenStreamContents(ts, new string[] { "^", "hello", "world", "$" }, new int[] { 0, 0, 6, 11 }, new int[] { 0, 5, 11, 11 });
        }
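
The tests rely on a small CreateToken helper that is not shown here. A plausible sketch, assuming the Token(string, int, int) constructor in Lucene.Net that takes the term text and its character offsets:

        private static Token CreateToken(string term, int start, int end)
        {
            // Hypothetical helper assumed by the tests: a Token with the given
            // term text and start/end character offsets.
            return new Token(term, start, end);
        }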
        public void TestTokenStreamContents()
        {
            var ts = new PrefixAwareTokenFilter(
                new SingleTokenTokenStream(CreateToken("a", 0, 1)),
                new SingleTokenTokenStream(CreateToken("b", 0, 1)));

            AssertTokenStreamContents(ts,
                                      new[] {"a", "b"},
                                      new[] {0, 1},
                                      new[] {1, 2});

            // prefix and suffix, built by applying PrefixAwareTokenFilter twice (prefix first, then suffix)

            ts = new PrefixAwareTokenFilter(new SingleTokenTokenStream(CreateToken("^", 0, 0)),
                                            new WhitespaceTokenizer(new StringReader("hello world")));
            ts = new PrefixAwareTokenFilter(ts, new SingleTokenTokenStream(CreateToken("$", 0, 0)));

            AssertTokenStreamContents(ts,
                                      new[] {"^", "hello", "world", "$"},
                                      new[] {0, 0, 6, 11},
                                      new[] {0, 5, 11, 11});
        }
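
The two nested PrefixAwareTokenFilter wrappers in the tests are exactly what PrefixAndSuffixAwareTokenFilter bundles together; the same stream could be built in one step, as a sketch using the constructor shown above and the hypothetical CreateToken helper:

        var ts = new PrefixAndSuffixAwareTokenFilter(
            new SingleTokenTokenStream(CreateToken("^", 0, 0)),
            new MockTokenizer(new StringReader("hello world"), MockTokenizer.WHITESPACE, false),
            new SingleTokenTokenStream(CreateToken("$", 0, 0)));

        AssertTokenStreamContents(ts,
                                  new[] { "^", "hello", "world", "$" },
                                  new[] { 0, 0, 6, 11 },
                                  new[] { 0, 5, 11, 11 });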