        public virtual void TestFilterPositions()
        {
            TokenStream ts = new MockTokenizer(new StringReader("abcde vwxyz"), MockTokenizer.WHITESPACE, false);
#pragma warning disable 612, 618
            EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, ts, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
#pragma warning restore 612, 618
            AssertTokenStreamContents(tokenizer, new string[] { "a", "ab", "abc", "v", "vw", "vwx" }, new int[] { 0, 0, 0, 6, 6, 6 }, new int[] { 5, 5, 5, 11, 11, 11 }, null, new int[] { 1, 0, 0, 1, 0, 0 }, null, null, false);
        }
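For readers who want to see the grams without going through the AssertTokenStreamContents helper, here is a minimal sketch of consuming the filter by hand. It assumes the same Lucene.NET 4.x test fixture as the snippets on this page (plus System in scope for Console); the DemoManualConsumption method name is hypothetical and not part of the original tests.

        public virtual void DemoManualConsumption()
        {
            TokenStream ts = new MockTokenizer(new StringReader("abcde"), MockTokenizer.WHITESPACE, false);
#pragma warning disable 612, 618
            ts = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, ts, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
#pragma warning restore 612, 618
            ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();
            ts.Reset();
            while (ts.IncrementToken())
            {
                Console.WriteLine(termAtt.ToString()); // prints "a", then "ab", then "abc"
            }
            ts.End();
            ts.Dispose();
        }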
        public virtual void TestFrontUnigram()
        {
#pragma warning disable 612, 618
            EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 1, 1);
#pragma warning restore 612, 618
            AssertTokenStreamContents(tokenizer, new string[] { "a" }, new int[] { 0 }, new int[] { 5 });
        }
        public virtual void TestBackUnigram()
        {
#pragma warning disable 612, 618
            EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(LuceneVersion.LUCENE_43, input, EdgeNGramTokenFilter.Side.BACK, 1, 1);
#pragma warning restore 612, 618
            AssertTokenStreamContents(tokenizer, new string[] { "e" }, new int[] { 4 }, new int[] { 5 });
        }
        public virtual void TestOversizedNgrams()
        {
#pragma warning disable 612, 618
            EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 6, 6);
#pragma warning restore 612, 618
            AssertTokenStreamContents(tokenizer, new string[0], new int[0], new int[0]);
        }
        public virtual void TestBackRangeOfNgrams()
        {
#pragma warning disable 612, 618
            EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(LuceneVersion.LUCENE_43, input, EdgeNGramTokenFilter.Side.BACK, 1, 3);
#pragma warning restore 612, 618
            AssertTokenStreamContents(tokenizer, new string[] { "e", "de", "cde" }, new int[] { 4, 3, 2 }, new int[] { 5, 5, 5 }, null, null, null, null, false);
        }
        public virtual void TestSmallTokenInStream()
        {
            input = new MockTokenizer(new StringReader("abc de fgh"), MockTokenizer.WHITESPACE, false);
#pragma warning disable 612, 618
            EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 3, 3);
#pragma warning restore 612, 618
            AssertTokenStreamContents(tokenizer, new string[] { "abc", "fgh" }, new int[] { 0, 7 }, new int[] { 3, 10 });
        }
            public override TokenStreamComponents CreateComponents(string fieldName, TextReader reader)
            {
                Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
                TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
#pragma warning disable 612, 618
                filters = new EdgeNGramTokenFilter(LuceneVersion.LUCENE_43, filters, EdgeNGramTokenFilter.Side.FRONT, 2, 15);
#pragma warning restore 612, 618
                return new TokenStreamComponents(tokenizer, filters);
            }
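An Analyzer built around the CreateComponents factory above can be exercised end to end with BaseTokenStreamTestCase's AssertAnalyzesTo helper. A minimal sketch, where analyzer is an assumed (hypothetical) Analyzer instance wrapping that factory:

            // "analyzer" is an assumed Analyzer whose CreateComponents is the
            // factory shown above. FRONT side with minGram = 2 and maxGram = 15
            // yields every prefix of length >= 2 of each whitespace token.
            AssertAnalyzesTo(analyzer, "abcdef",
                             new string[] { "ab", "abc", "abcd", "abcde", "abcdef" });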
        public virtual void TestGraphs()
        {
            TokenStream tk = new LetterTokenizer(TEST_VERSION_CURRENT, new StringReader("abc d efgh ij klmno p q"));

            tk = new ShingleFilter(tk);
            tk = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tk, 7, 10);
            AssertTokenStreamContents(tk, new string[] { "efgh ij", "ij klmn", "ij klmno", "klmno p" }, new int[] { 6, 11, 11, 14 }, new int[] { 13, 19, 19, 21 }, new int[] { 3, 1, 0, 1 }, new int[] { 2, 2, 2, 2 }, 23);
        }
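To see why TestGraphs expects multi-word grams such as "efgh ij", it can help to inspect the shingle stream that feeds the edge n-gram stage. A minimal sketch under the same assumptions as the tests above (ShingleFilter's defaults emit unigrams plus two-word shingles; Console assumes System in scope):

        TokenStream shingles = new LetterTokenizer(TEST_VERSION_CURRENT, new StringReader("abc d efgh ij klmno p q"));
        shingles = new ShingleFilter(shingles); // default: unigrams and 2-word shingles
        ICharTermAttribute term = shingles.AddAttribute<ICharTermAttribute>();
        shingles.Reset();
        while (shingles.IncrementToken())
        {
            // prints "abc", "abc d", "d", "d efgh", "efgh", "efgh ij", ...
            Console.WriteLine(term.ToString());
        }
        shingles.End();
        shingles.Dispose();

Only shingles at least 7 characters long survive the 7..10 edge n-gram stage, which is why the test expects exactly "efgh ij", "ij klmn", "ij klmno", and "klmno p".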
        public virtual void TestReset()
        {
            WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
#pragma warning disable 612, 618
            EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
#pragma warning restore 612, 618
            AssertTokenStreamContents(filter, new string[] { "a", "ab", "abc" }, new int[] { 0, 0, 0 }, new int[] { 5, 5, 5 });
            tokenizer.Reader = new StringReader("abcde");
            AssertTokenStreamContents(filter, new string[] { "a", "ab", "abc" }, new int[] { 0, 0, 0 }, new int[] { 5, 5, 5 });
        }
        public virtual void TestFirstTokenPositionIncrement()
        {
            TokenStream ts = new MockTokenizer(new StringReader("a abc"), MockTokenizer.WHITESPACE, false);

            ts = new PositionFilter(ts); // All but the first token will get a 0 position increment
#pragma warning disable 612, 618
            EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, ts, EdgeNGramTokenFilter.Side.FRONT, 2, 3);
#pragma warning restore 612, 618
            // The first token "a" will not be output, since it's smaller than the minGram size of 2.
            // The second token on input to EdgeNGramTokenFilter will have a position increment of 0,
            // which should be increased to 1, since this is the first output token in the stream.
            AssertTokenStreamContents(filter, new string[] { "ab", "abc" }, new int[] { 2, 2 }, new int[] { 5, 5 }, new int[] { 1, 0 });
        }
        public virtual void TestSupplementaryCharacters()
        {
            string s = TestUtil.RandomUnicodeString(Random(), 10);
            int codePointCount = Character.CodePointCount(s, 0, s.Length);
            int minGram = TestUtil.NextInt(Random(), 1, 3);
            int maxGram = TestUtil.NextInt(Random(), minGram, 10);
            TokenStream tk = new KeywordTokenizer(new StringReader(s));

            tk = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tk, minGram, maxGram);
            ICharTermAttribute termAtt = tk.AddAttribute<ICharTermAttribute>();
            IOffsetAttribute offsetAtt = tk.AddAttribute<IOffsetAttribute>();

            tk.Reset();
            for (int i = minGram; i <= Math.Min(codePointCount, maxGram); ++i)
            {
                assertTrue(tk.IncrementToken());
                assertEquals(0, offsetAtt.StartOffset());
                assertEquals(s.Length, offsetAtt.EndOffset());
                int end = Character.OffsetByCodePoints(s, 0, i);
                assertEquals(s.Substring(0, end), termAtt.ToString());
            }
            assertFalse(tk.IncrementToken());
        }
        public virtual void TestFrontRangeOfNgrams()
        {
#pragma warning disable 612, 618
            EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
#pragma warning restore 612, 618
            AssertTokenStreamContents(tokenizer, new string[] { "a", "ab", "abc" }, new int[] { 0, 0, 0 }, new int[] { 5, 5, 5 });
        }