private ChineseCharacterBasedLexicon.Symbol UnknownCharClass(ChineseCharacterBasedLexicon.Symbol ch) { if (useUnknownCharacterModel) { return(new ChineseCharacterBasedLexicon.Symbol(char.ToString(RadicalMap.GetRadical(ch.GetCh()))).Intern()); } else { return(ChineseCharacterBasedLexicon.Symbol.Unknown); } }
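// Illustrative sketch (not part of the original lexicon): when useUnknownCharacterModel is set,
// UnknownCharClass above backs an unseen character off to its radical via RadicalMap.GetRadical,
// so unknown characters sharing a radical share statistics; otherwise everything collapses to one
// Unknown symbol. A minimal, library-free rendering of that back-off with a hypothetical radical
// lookup table:
private static string UnknownCharClassSketch(char ch, bool useRadicalBackoff, System.Collections.Generic.IDictionary<char, char> radicalOf)
{
    if (useRadicalBackoff && radicalOf.TryGetValue(ch, out char radical))
    {
        return radical.ToString();   // class = the character's radical
    }
    return "UNK";                    // single catch-all unknown class
}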
public static void PrintStats(ICollection <Tree> trees, PrintWriter pw) { ClassicCounter <int> wordLengthCounter = new ClassicCounter <int>(); ClassicCounter <TaggedWord> wordCounter = new ClassicCounter <TaggedWord>(); ClassicCounter <ChineseCharacterBasedLexicon.Symbol> charCounter = new ClassicCounter <ChineseCharacterBasedLexicon.Symbol>(); int counter = 0; foreach (Tree tree in trees) { counter++; IList <TaggedWord> taggedWords = tree.TaggedYield(); foreach (TaggedWord taggedWord in taggedWords) { string word = taggedWord.Word(); if (word.Equals(LexiconConstants.Boundary)) { continue; } wordCounter.IncrementCount(taggedWord); wordLengthCounter.IncrementCount(word.Length); for (int j = 0; j < word.Length; j++) { ChineseCharacterBasedLexicon.Symbol sym = ChineseCharacterBasedLexicon.Symbol.CannonicalSymbol(word[j]); charCounter.IncrementCount(sym); } charCounter.IncrementCount(ChineseCharacterBasedLexicon.Symbol.EndWord); } } ICollection <ChineseCharacterBasedLexicon.Symbol> singletonChars = Counters.KeysBelow(charCounter, 1.5); ICollection <TaggedWord> singletonWords = Counters.KeysBelow(wordCounter, 1.5); ClassicCounter <string> singletonWordPOSes = new ClassicCounter <string>(); foreach (TaggedWord taggedWord_1 in singletonWords) { singletonWordPOSes.IncrementCount(taggedWord_1.Tag()); } Distribution <string> singletonWordPOSDist = Distribution.GetDistribution(singletonWordPOSes); ClassicCounter <char> singletonCharRads = new ClassicCounter <char>(); foreach (ChineseCharacterBasedLexicon.Symbol s in singletonChars) { singletonCharRads.IncrementCount(RadicalMap.GetRadical(s.GetCh())); } Distribution <char> singletonCharRadDist = Distribution.GetDistribution(singletonCharRads); Distribution <int> wordLengthDist = Distribution.GetDistribution(wordLengthCounter); NumberFormat percent = new DecimalFormat("##.##%"); pw.Println("There are " + singletonChars.Count + " singleton chars out of " + (int)charCounter.TotalCount() + " tokens and " + charCounter.Size() + " types found in " + counter + " trees."); pw.Println("Thus singletonChars comprise " + percent.Format(singletonChars.Count / charCounter.TotalCount()) + " of tokens and " + percent.Format((double)singletonChars.Count / charCounter.Size()) + " of types."); pw.Println(); pw.Println("There are " + singletonWords.Count + " singleton words out of " + (int)wordCounter.TotalCount() + " tokens and " + wordCounter.Size() + " types."); pw.Println("Thus singletonWords comprise " + percent.Format(singletonWords.Count / wordCounter.TotalCount()) + " of tokens and " + percent.Format((double)singletonWords.Count / wordCounter.Size()) + " of types."); pw.Println(); pw.Println("Distribution over singleton word POS:"); pw.Println(singletonWordPOSDist.ToString()); pw.Println(); pw.Println("Distribution over singleton char radicals:"); pw.Println(singletonCharRadDist.ToString()); pw.Println(); pw.Println("Distribution over word length:"); pw.Println(wordLengthDist); }
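// Illustrative sketch (not part of the original class): the singleton statistics above rely on
// Counters.KeysBelow(counter, 1.5), which keeps keys whose count falls below the threshold, i.e.
// items observed exactly once. A minimal stand-alone equivalent over a plain Dictionary (the
// method name is hypothetical; assumes `using System.Collections.Generic;` and `using System.Linq;`):
private static List<TKey> SingletonKeysSketch<TKey>(Dictionary<TKey, double> counts)
{
    // Anything with a count strictly below 1.5 has been seen at most once.
    return counts.Where(kv => kv.Value < 1.5).Select(kv => kv.Key).ToList();
}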
//end of CnC /// <summary>Second order clique features</summary> /// <param name="cInfo">The list of characters</param> /// <param name="loc">Position of c in list</param> /// <returns>Collection of String features (sparse set of boolean features</returns> protected internal virtual ICollection <string> FeaturesCpCp2C <_T0>(PaddedList <_T0> cInfo, int loc) where _T0 : CoreLabel { ICollection <string> features = new List <string>(); CoreLabel c = cInfo[loc]; CoreLabel c2 = cInfo[loc + 1]; CoreLabel c3 = cInfo[loc + 2]; CoreLabel p = cInfo[loc - 1]; CoreLabel p2 = cInfo[loc - 2]; CoreLabel p3 = cInfo[loc - 3]; string charc = c.GetString <CoreAnnotations.CharAnnotation>(); string charc2 = c2.GetString <CoreAnnotations.CharAnnotation>(); string charc3 = c3.GetString <CoreAnnotations.CharAnnotation>(); string charp = p.GetString <CoreAnnotations.CharAnnotation>(); string charp2 = p2.GetString <CoreAnnotations.CharAnnotation>(); string charp3 = p3.GetString <CoreAnnotations.CharAnnotation>(); // N-gram features. N is up to 3 if (flags.useWord3) { features.Add(charc + "::c"); features.Add(charc2 + "::n"); features.Add(charp + "::p"); features.Add(charp2 + "::p2"); // trying to restore the features that Huihsin described in SIGHAN 2005 paper features.Add(charc + charc2 + "::cn"); features.Add(charc + charc2 + charc3 + "::cnn2"); features.Add(charp + charc + "::pc"); features.Add(charp + charc2 + "::pn"); features.Add(charp2 + charp + "::p2p"); features.Add(charp3 + charp2 + charp + "::p3p2p"); features.Add(charp2 + charc + "::p2c"); features.Add(charc + charc3 + "::cn2"); } if (flags.useShapeStrings) { if (flags.useShapeStrings1) { features.Add(p.GetString <CoreAnnotations.ShapeAnnotation>() + "ps"); features.Add(c.GetString <CoreAnnotations.ShapeAnnotation>() + "cs"); features.Add(c2.GetString <CoreAnnotations.ShapeAnnotation>() + "c2s"); } if (flags.useShapeStrings3) { features.Add(p.GetString <CoreAnnotations.ShapeAnnotation>() + c.GetString <CoreAnnotations.ShapeAnnotation>() + c2.GetString <CoreAnnotations.ShapeAnnotation>() + "pscsc2s"); } if (flags.useShapeStrings4) { features.Add(p2.GetString <CoreAnnotations.ShapeAnnotation>() + p.GetString <CoreAnnotations.ShapeAnnotation>() + c.GetString <CoreAnnotations.ShapeAnnotation>() + c2.GetString <CoreAnnotations.ShapeAnnotation>() + "p2spscsc2s"); } if (flags.useShapeStrings5) { features.Add(p2.GetString <CoreAnnotations.ShapeAnnotation>() + p.GetString <CoreAnnotations.ShapeAnnotation>() + c.GetString <CoreAnnotations.ShapeAnnotation>() + c2.GetString <CoreAnnotations.ShapeAnnotation>() + c3.GetString <CoreAnnotations.ShapeAnnotation >() + "p2spscsc2sc3s"); } if (flags.useWordShapeConjunctions2) { features.Add(p.GetString <CoreAnnotations.ShapeAnnotation>() + charc + "pscc"); features.Add(charp + c.GetString <CoreAnnotations.ShapeAnnotation>() + "pccs"); } if (flags.useWordShapeConjunctions3) { features.Add(p2.GetString <CoreAnnotations.ShapeAnnotation>() + p.GetString <CoreAnnotations.ShapeAnnotation>() + charc + "p2spscc"); features.Add(p.GetString <CoreAnnotations.ShapeAnnotation>() + charc + c2.GetString <CoreAnnotations.ShapeAnnotation>() + "psccc2s"); features.Add(charc + c2.GetString <CoreAnnotations.ShapeAnnotation>() + c3.GetString <CoreAnnotations.ShapeAnnotation>() + "ccc2sc3s"); } } /* * Radical N-gram features. N is upto 4. * Smoothing method of N-gram, because there are too many characters in Chinese. * (It works better than N-gram when they are used individually. 
less sparse) */ char rcharc; char rcharc2; char rcharp; char rcharp2; if (charc.Length == 0) { rcharc = 'n'; } else { rcharc = RadicalMap.GetRadical(charc[0]); } if (charc2.Length == 0) { rcharc2 = 'n'; } else { rcharc2 = RadicalMap.GetRadical(charc2[0]); } if (charp.Length == 0) { rcharp = 'n'; } else { rcharp = RadicalMap.GetRadical(charp[0]); } if (charp2.Length == 0) { rcharp2 = 'n'; } else { rcharp2 = RadicalMap.GetRadical(charp2[0]); } if (flags.useRad2) { features.Add(rcharc + "rc"); features.Add(rcharc2 + "rc2"); features.Add(rcharp + "rp"); features.Add(rcharp + rcharc + "rprc"); features.Add(rcharc + rcharc2 + "rcrc2"); features.Add(rcharp + rcharc + rcharc2 + "rprcrc2"); } if (flags.useRad2b) { features.Add(rcharc + "rc"); features.Add(rcharc2 + "rc2"); features.Add(rcharp + "rp"); features.Add(rcharp + rcharc + "rprc"); features.Add(rcharc + rcharc2 + "rcrc2"); features.Add(rcharp2 + rcharp + "rp2rp"); } features.Add("cliqueCpCp2C"); return(features); }
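// Illustrative sketch (not part of the original feature factory): the clique features built above
// are plain strings, formed by concatenating the characters in a local window with a positional
// suffix (e.g. "::c" for the current character, "::pc" for previous+current, "::cn" for
// current+next). A stand-alone rendering of that pattern (method name is hypothetical; assumes
// `using System.Collections.Generic;`):
private static ICollection<string> CharNgramFeatureSketch(string charp, string charc, string charc2)
{
    return new List<string>
    {
        charc + "::c",             // unigram: current character
        charp + charc + "::pc",    // bigram: previous + current
        charc + charc2 + "::cn"    // bigram: current + next
    };
}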
protected internal virtual ICollection <string> FeaturesCpC <_T0>(PaddedList <_T0> cInfo, int loc) where _T0 : CoreLabel { ICollection <string> features = new List <string>(); CoreLabel c = cInfo[loc]; CoreLabel c2 = cInfo[loc + 1]; CoreLabel c3 = cInfo[loc + 2]; CoreLabel p = cInfo[loc - 1]; CoreLabel p2 = cInfo[loc - 2]; CoreLabel p3 = cInfo[loc - 3]; string charc = c.GetString <CoreAnnotations.CharAnnotation>(); string charc2 = c2.GetString <CoreAnnotations.CharAnnotation>(); string charc3 = c3.GetString <CoreAnnotations.CharAnnotation>(); string charp = p.GetString <CoreAnnotations.CharAnnotation>(); string charp2 = p2.GetString <CoreAnnotations.CharAnnotation>(); string charp3 = p3.GetString <CoreAnnotations.CharAnnotation>(); object cI = c.Get(typeof(CoreAnnotations.UTypeAnnotation)); string uTypec = (cI != null ? cI.ToString() : string.Empty); object c2I = c2.Get(typeof(CoreAnnotations.UTypeAnnotation)); string uTypec2 = (c2I != null ? c2I.ToString() : string.Empty); object c3I = c3.Get(typeof(CoreAnnotations.UTypeAnnotation)); string uTypec3 = (c3I != null ? c3I.ToString() : string.Empty); object pI = p.Get(typeof(CoreAnnotations.UTypeAnnotation)); string uTypep = (pI != null ? pI.ToString() : string.Empty); object p2I = p2.Get(typeof(CoreAnnotations.UTypeAnnotation)); string uTypep2 = (p2I != null ? p2I.ToString() : string.Empty); if (flags.dictionary != null || flags.serializedDictionary != null) { DictionaryFeaturesCpC(typeof(CoreAnnotations.LBeginAnnotation), typeof(CoreAnnotations.LMiddleAnnotation), typeof(CoreAnnotations.LEndAnnotation), string.Empty, features, p2, p, c, c2); } if (flags.dictionary2 != null) { DictionaryFeaturesCpC(typeof(CoreAnnotations.D2_LBeginAnnotation), typeof(CoreAnnotations.D2_LMiddleAnnotation), typeof(CoreAnnotations.D2_LEndAnnotation), "-D2-", features, p2, p, c, c2); } /* * N-gram features. N is up to 2. */ if (flags.useWord2) { // features.add(charc +"c"); // features.add(charc2+"c2"); // features.add(charp +"p"); // features.add(charp + charc +"pc"); // features.add(charc + charc2 +"cc2"); // // cdm: need hyphen so you can see which of charp or charc2 is null.... // features.add(charp + "-" + charc2 + "pc2"); features.Add(charc + "::c"); features.Add(charc2 + "::c1"); features.Add(charp + "::p"); features.Add(charp2 + "::p2"); // trying to restore the features that Huihsin described in SIGHAN 2005 paper features.Add(charc + charc2 + "::cn"); // (*) features.Add(charp + charc + "::pc"); features.Add(charp + charc2 + "::pn"); features.Add(charp2 + charp + "::p2p"); features.Add(charp2 + charc + "::p2c"); features.Add(charc2 + charc + "::n2c"); } // todo: this is messed up: Same as one above at (*); should be cn2 = charc + charc3 + "::cn2" if (flags.useFeaturesCpC4gram || flags.useFeaturesCpC5gram || flags.useFeaturesCpC6gram) { // todo: Both these features duplicate ones already in useWord2 features.Add(charp2 + charp + "p2p"); features.Add(charp2 + "p2"); } if (flags.useFeaturesCpC5gram || flags.useFeaturesCpC6gram) { features.Add(charc3 + "c3"); features.Add(charc2 + charc3 + "c2c3"); } if (flags.useFeaturesCpC6gram) { features.Add(charp3 + "p3"); features.Add(charp3 + charp2 + "p3p2"); } if (flags.useGoodForNamesCpC) { // these 2 features should be distinctively good at biasing from // picking up a Chinese family name in the p2 or p3 positions: // familyName X X startWord AND familyName X startWord // But actually they seem to have negative value. 
features.Add(charp2 + "p2"); features.Add(charp3 + "p3"); } if (flags.useUnicodeType || flags.useUnicodeType4gram || flags.useUnicodeType5gram) { features.Add(uTypep + "-" + uTypec + "-" + uTypec2 + "-uType3"); } if (flags.useUnicodeType4gram || flags.useUnicodeType5gram) { features.Add(uTypep2 + "-" + uTypep + "-" + uTypec + "-" + uTypec2 + "-uType4"); } if (flags.useUnicodeType5gram) { features.Add(uTypep2 + "-" + uTypep + "-" + uTypec + "-" + uTypec2 + "-" + uTypec3 + "-uType5"); } if (flags.useWordUTypeConjunctions2) { features.Add(uTypep + charc + "putcc"); features.Add(charp + uTypec + "pccut"); } if (flags.useWordUTypeConjunctions3) { features.Add(uTypep2 + uTypep + charc + "p2utputcc"); features.Add(uTypep + charc + uTypec2 + "putccc2ut"); features.Add(charc + uTypec2 + uTypec3 + "ccc2utc3ut"); } if (flags.useUnicodeBlock) { features.Add(p.GetString <CoreAnnotations.UBlockAnnotation>() + "-" + c.GetString <CoreAnnotations.UBlockAnnotation>() + "-" + c2.GetString <CoreAnnotations.UBlockAnnotation>() + "-uBlock"); } if (flags.useShapeStrings) { if (flags.useShapeStrings1) { features.Add(p.GetString <CoreAnnotations.ShapeAnnotation>() + "ps"); features.Add(c.GetString <CoreAnnotations.ShapeAnnotation>() + "cs"); features.Add(c2.GetString <CoreAnnotations.ShapeAnnotation>() + "c2s"); } if (flags.useShapeStrings3) { features.Add(p.GetString <CoreAnnotations.ShapeAnnotation>() + c.GetString <CoreAnnotations.ShapeAnnotation>() + c2.GetString <CoreAnnotations.ShapeAnnotation>() + "pscsc2s"); } if (flags.useShapeStrings4) { features.Add(p2.GetString <CoreAnnotations.ShapeAnnotation>() + p.GetString <CoreAnnotations.ShapeAnnotation>() + c.GetString <CoreAnnotations.ShapeAnnotation>() + c2.GetString <CoreAnnotations.ShapeAnnotation>() + "p2spscsc2s"); } if (flags.useShapeStrings5) { features.Add(p2.GetString <CoreAnnotations.ShapeAnnotation>() + p.GetString <CoreAnnotations.ShapeAnnotation>() + c.GetString <CoreAnnotations.ShapeAnnotation>() + c2.GetString <CoreAnnotations.ShapeAnnotation>() + c3.GetString <CoreAnnotations.ShapeAnnotation >() + "p2spscsc2sc3s"); } if (flags.useWordShapeConjunctions2) { features.Add(p.GetString <CoreAnnotations.ShapeAnnotation>() + charc + "pscc"); features.Add(charp + c.GetString <CoreAnnotations.ShapeAnnotation>() + "pccs"); } if (flags.useWordShapeConjunctions3) { features.Add(p2.GetString <CoreAnnotations.ShapeAnnotation>() + p.GetString <CoreAnnotations.ShapeAnnotation>() + charc + "p2spscc"); features.Add(p.GetString <CoreAnnotations.ShapeAnnotation>() + charc + c2.GetString <CoreAnnotations.ShapeAnnotation>() + "psccc2s"); features.Add(charc + c2.GetString <CoreAnnotations.ShapeAnnotation>() + c3.GetString <CoreAnnotations.ShapeAnnotation>() + "ccc2sc3s"); } } /* * Radical N-gram features. N is upto 4. * Smoothing method of N-gram, because there are too many characters in Chinese. * (It works better than N-gram when they are used individually. 
less sparse) */ char rcharc; char rcharc2; char rcharp; char rcharp2; if (charc.Length == 0) { rcharc = 'n'; } else { rcharc = RadicalMap.GetRadical(charc[0]); } if (charc2.Length == 0) { rcharc2 = 'n'; } else { rcharc2 = RadicalMap.GetRadical(charc2[0]); } if (charp.Length == 0) { rcharp = 'n'; } else { rcharp = RadicalMap.GetRadical(charp[0]); } if (charp2.Length == 0) { rcharp2 = 'n'; } else { rcharp2 = RadicalMap.GetRadical(charp2[0]); } if (flags.useRad2) { features.Add(rcharc + "rc"); features.Add(rcharc2 + "rc2"); features.Add(rcharp + "rp"); features.Add(rcharp + rcharc + "rprc"); features.Add(rcharc + rcharc2 + "rcrc2"); features.Add(rcharp + rcharc + rcharc2 + "rprcrc2"); } if (flags.useRad2b) { features.Add(rcharc + "rc"); features.Add(rcharc2 + "rc2"); features.Add(rcharp + "rp"); features.Add(rcharp + rcharc + "rprc"); features.Add(rcharc + rcharc2 + "rcrc2"); features.Add(rcharp2 + rcharp + "rp2rp"); } /* Non-word dictionary: SEEN bi-gram marked as non-word. * This is frickin' useful. I hadn't realized. CDM Oct 2007. */ if (flags.useDict2) { NonDict2 nd = new NonDict2(flags); features.Add(nd.CheckDic(charp + charc, flags) + "nondict"); } if (flags.useOutDict2) { if (outDict == null) { CreateOutDict(); } features.Add(outDict.GetW(charp + charc) + "outdict"); // -1 0 features.Add(outDict.GetW(charc + charc2) + "outdict"); // 0 1 features.Add(outDict.GetW(charp2 + charp) + "outdict"); // -2 -1 features.Add(outDict.GetW(charp2 + charp + charc) + "outdict"); // -2 -1 0 features.Add(outDict.GetW(charp3 + charp2 + charp) + "outdict"); // -3 -2 -1 features.Add(outDict.GetW(charp + charc + charc2) + "outdict"); // -1 0 1 features.Add(outDict.GetW(charc + charc2 + charc3) + "outdict"); // 0 1 2 features.Add(outDict.GetW(charp + charc + charc2 + charc3) + "outdict"); } // -1 0 1 2 /* * (CTB/ASBC/HK/PK/MSR) POS information of each characters. * If a character falls into some function categories, * it is very likely there is a boundary. * A lot of Chinese function words belong to single characters. * This feature is also good for numbers and punctuations. * DE* are grouped into DE. */ if (flags.useCTBChar2 || flags.useASBCChar2 || flags.useHKChar2 || flags.usePKChar2 || flags.useMSRChar2) { string[] tagsets; // the "useChPos" now only works for CTB and PK if (flags.useChPos) { if (flags.useCTBChar2) { tagsets = new string[] { "AD", "AS", "BA", "CC", "CD", "CS", "DE", "DT", "ETC", "IJ", "JJ", "LB", "LC", "M", "NN", "NR", "NT", "OD", "P", "PN", "PU", "SB", "SP", "VA", "VC", "VE", "VV" }; } else { if (flags.usePKChar2) { //tagsets = new String[]{"r", "j", "t", "a", "nz", "l", "vn", "i", "m", "ns", "nr", "v", "n", "q", "Ng", "b", "d", "nt"}; tagsets = new string[] { "2", "3", "4" }; } else { throw new Exception("only support settings for CTB and PK now."); } } } else { //logger.info("Using Derived features"); tagsets = new string[] { "2", "3", "4" }; } if (taDetector == null) { CreateTADetector(); } foreach (string tag in tagsets) { features.Add(taDetector.CheckDic(tag + "p", charp) + taDetector.CheckDic(tag + "i", charp) + taDetector.CheckDic(tag + "s", charc) + taDetector.CheckInDic(charp) + taDetector.CheckInDic(charc) + tag + "prep-sufc"); } } //features.add("|ctbchar2"); /* * In error analysis, we found English words and numbers are often separated. * Rule 1: isNumber feature: check if the current and previous char is a number. * Rule 2: Disambiguation of time point and time duration. * Rule 3: isEnglish feature: check if the current and previous character is an english letter. 
* Rule 4: English name feature: check if the current char is a conjunct pu for English first and last name, since there is no space between two names. * Most of PUs are a good indicator for word boundary, but '-' and '.' are strong indicators that there is no boundary between them, the previous character, and the following character. */ if (flags.useRule2) { /* Reduplication features */ // previous character == current character if (charp.Equals(charc)) { features.Add("11-R2"); } // previous character == next character if (charp.Equals(charc2)) { features.Add("22-R2"); } // current character == next next character // fire only when usePk and useHk are both false. // Notice: this should be (almost) the same as the "22" feature, but we keep it for now. if (!flags.usePk && !flags.useHk) { if (charc.Equals(charc2)) { features.Add("33-R2"); } } char cur1 = ' '; char cur2 = ' '; char cur = ' '; char pre = ' '; // actually their length must be either 0 or 1 if (charc2.Length > 0) { cur1 = charc2[0]; } if (charc3.Length > 0) { cur2 = charc3[0]; } if (charc.Length > 0) { cur = charc[0]; } if (charp.Length > 0) { pre = charp[0]; } string prer = rcharp.ToString(); // the radical of previous character Pattern E = Pattern.Compile("[a-zA-Z]"); Pattern N = Pattern.Compile("[0-9]"); Matcher m = E.Matcher(charp); Matcher ce = E.Matcher(charc); Matcher pe = E.Matcher(charp2); Matcher cn = N.Matcher(charc); Matcher pn = N.Matcher(charp2); // if current and previous characters are numbers... if (cur >= '0' && cur <= '9' && pre >= '0' && pre <= '9') { if (cur == '9' && pre == '1' && cur1 == '9' && cur2 >= '0' && cur2 <= '9') { //199x features.Add("YR-R2"); } else { features.Add("2N-R2"); } } else { // if current and previous characters are not both numbers // but previous char is a number // i.e. patterns like "1N" , "2A", etc if (pre >= '0' && pre <= '9') { features.Add("1N-R2"); } else { // if previous character is an English character if (m.Matches()) { features.Add("E-R2"); } else { // if the previous character contains no radical (and it exists) if (prer.Equals(".") && charp.Length == 1) { if (ce.Matches()) { features.Add("PU+E-R2"); } if (pe.Matches()) { features.Add("E+PU-R2"); } if (cn.Matches()) { features.Add("PU+N-R2"); } if (pn.Matches()) { features.Add("N+PU-R2"); } features.Add("PU-R2"); } } } } string engType = IsEnglish(charp, charc); string engPU = IsEngPU(charp); if (!engType.Equals(string.Empty)) { features.Add(engType); } if (!engPU.Equals(string.Empty) && !engType.Equals(string.Empty)) { StringBuilder sb = new StringBuilder(); sb.Append(engPU).Append(engType).Append("R2"); features.Add(sb.ToString()); } } //end of use rule // features using "Character.getType" information! string origS = c.GetString <CoreAnnotations.OriginalCharAnnotation>(); char origC = ' '; if (origS.Length > 0) { origC = origS[0]; } System.Globalization.UnicodeCategory type = char.GetUnicodeCategory(origC); switch (type) { case System.Globalization.UnicodeCategory.UppercaseLetter: case System.Globalization.UnicodeCategory.LowercaseLetter: { // A-Z and full-width A-Z // a-z and full-width a-z features.Add("CHARTYPE-LETTER"); break; } case System.Globalization.UnicodeCategory.DecimalDigitNumber: { features.Add("CHARTYPE-DECIMAL_DIGIT_NUMBER"); break; } case System.Globalization.UnicodeCategory.OtherLetter: { // mostly chinese chars features.Add("CHARTYPE-OTHER_LETTER"); break; } default: { // other types features.Add("CHARTYPE-MISC"); break; } } features.Add("cliqueCpC"); return(features); }
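// Illustrative sketch (not part of the original factory): the number rules above distinguish a
// "199x" year pattern (previous='1', current='9', next='9', next-next a digit) from a generic run
// of adjacent digits, and from a digit followed by a non-digit. A stand-alone rendering of that
// decision (method name is hypothetical; the returned labels mirror the YR/2N/1N features):
private static string NumberRuleSketch(char pre, char cur, char next, char nextNext)
{
    bool preDigit = pre >= '0' && pre <= '9';
    bool curDigit = cur >= '0' && cur <= '9';
    if (curDigit && preDigit)
    {
        // "199x" year pattern vs. any other pair of adjacent digits
        return (pre == '1' && cur == '9' && next == '9' && nextNext >= '0' && nextNext <= '9') ? "YR" : "2N";
    }
    return preDigit ? "1N" : string.Empty;   // digit followed by a non-digit, or no number feature at all
}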
public virtual ICollection <string> FeaturesCpC(PaddedList <IN> cInfo, int loc) { ICollection <string> features = new List <string>(); CoreLabel c = cInfo[loc]; CoreLabel c1 = cInfo[loc + 1]; CoreLabel c2 = cInfo[loc + 2]; CoreLabel c3 = cInfo[loc + 3]; CoreLabel p = cInfo[loc - 1]; CoreLabel p2 = cInfo[loc - 2]; CoreLabel p3 = cInfo[loc - 3]; string charc = c.Get(typeof(CoreAnnotations.CharAnnotation)); if (charc == null) { charc = string.Empty; } string charc1 = c1.Get(typeof(CoreAnnotations.CharAnnotation)); if (charc1 == null) { charc1 = string.Empty; } string charc2 = c2.Get(typeof(CoreAnnotations.CharAnnotation)); if (charc2 == null) { charc2 = string.Empty; } string charc3 = c3.Get(typeof(CoreAnnotations.CharAnnotation)); if (charc3 == null) { charc3 = string.Empty; } string charp = p.Get(typeof(CoreAnnotations.CharAnnotation)); if (charp == null) { charp = string.Empty; } string charp2 = p2.Get(typeof(CoreAnnotations.CharAnnotation)); if (charp2 == null) { charp2 = string.Empty; } string charp3 = p3.Get(typeof(CoreAnnotations.CharAnnotation)); if (charp3 == null) { charp3 = string.Empty; } /* * N-gram features. N is upto 2. */ if (flags.useWord2) { // features.add(charc +"c"); // features.add(charc1+"c1"); // features.add(charp +"p"); // features.add(charp +charc +"pc"); // if( flags.useMsr ){ // features.add(charc +charc1 +"cc1"); // features.add(charp + charc1 +"pc1"); // } features.Add(charc + "::c"); features.Add(charc1 + "::c1"); features.Add(charp + "::p"); features.Add(charp2 + "::p2"); // trying to restore the features that Huishin described in SIGHAN 2005 paper features.Add(charc + charc1 + "::cn"); features.Add(charp + charc + "::pc"); features.Add(charp + charc1 + "::pn"); features.Add(charp2 + charp + "::p2p"); features.Add(charp2 + charc + "::p2c"); features.Add(charc2 + charc + "::n2c"); features.Add("|word2"); } /* * Radical N-gram features. N is upto 4. * Smoothing method of N-gram, because there are too many characters in Chinese. * (It works better than N-gram when they are used individually. 
less sparse) */ char rcharc; char rcharc1; char rcharc2; char rcharc3; char rcharp; char rcharp1; char rcharp2; char rcharp3; if (charc.Length == 0) { rcharc = 'n'; } else { rcharc = RadicalMap.GetRadical(charc[0]); } if (charc1.Length == 0) { rcharc1 = 'n'; } else { rcharc1 = RadicalMap.GetRadical(charc1[0]); } if (charc2.Length == 0) { rcharc2 = 'n'; } else { rcharc2 = RadicalMap.GetRadical(charc2[0]); } if (charc3.Length == 0) { rcharc3 = 'n'; } else { rcharc3 = RadicalMap.GetRadical(charc3[0]); } if (charp.Length == 0) { rcharp = 'n'; } else { rcharp = RadicalMap.GetRadical(charp[0]); } if (charp2.Length == 0) { rcharp2 = 'n'; } else { rcharp2 = RadicalMap.GetRadical(charp2[0]); } if (charp3.Length == 0) { rcharp3 = 'n'; } else { rcharp3 = RadicalMap.GetRadical(charp3[0]); } if (flags.useRad2) { features.Add(rcharc + "rc"); features.Add(rcharc1 + "rc1"); features.Add(rcharp + "rp"); features.Add(rcharp + rcharc + "rpc"); features.Add(rcharc + rcharc1 + "rcc1"); features.Add(rcharp + rcharc + rcharc1 + "rpcc1"); features.Add("|rad2"); } /* non-word dictionary:SEEM bi-gram marked as non-word */ if (flags.useDict2) { NonDict2 nd = new NonDict2(flags); features.Add(nd.CheckDic(charp + charc, flags) + "nondict"); features.Add("|useDict2"); } if (flags.useOutDict2) { if (outDict == null) { logger.Info("reading " + flags.outDict2 + " as a seen lexicon"); outDict = new CorpusDictionary(flags.outDict2, true); } features.Add(outDict.GetW(charp + charc) + "outdict"); // -1 0 features.Add(outDict.GetW(charc + charc1) + "outdict"); // 0 1 features.Add(outDict.GetW(charp2 + charp) + "outdict"); // -2 -1 features.Add(outDict.GetW(charp2 + charp + charc) + "outdict"); // -2 -1 0 features.Add(outDict.GetW(charp3 + charp2 + charp) + "outdict"); // -3 -2 -1 features.Add(outDict.GetW(charp + charc + charc1) + "outdict"); // -1 0 1 features.Add(outDict.GetW(charc + charc1 + charc2) + "outdict"); // 0 1 2 features.Add(outDict.GetW(charp + charc + charc1 + charc2) + "outdict"); } // -1 0 1 2 /* * (CTB/ASBC/HK/PK/MSR) POS information of each characters. * If a character falls into some function categories, * it is very likely there is a boundary. * A lot of Chinese function words belong to single characters. * This feature is also good for numbers and punctuations. * DE* are grouped into DE. */ if (flags.useCTBChar2 || flags.useASBCChar2 || flags.useHKChar2 || flags.usePKChar2 || flags.useMSRChar2) { string[] tagsets; // the "useChPos" now only works for CTB and PK if (flags.useChPos) { if (flags.useCTBChar2) { tagsets = new string[] { "AD", "AS", "BA", "CC", "CD", "CS", "DE", "DT", "ETC", "IJ", "JJ", "LB", "LC", "M", "NN", "NR", "NT", "OD", "P", "PN", "PU", "SB", "SP", "VA", "VC", "VE", "VV" }; } else { if (flags.usePKChar2) { //tagsets = new String[]{"r", "j", "t", "a", "nz", "l", "vn", "i", "m", "ns", "nr", "v", "n", "q", "Ng", "b", "d", "nt"}; tagsets = new string[] { "2", "3", "4" }; } else { throw new Exception("only support settings for CTB and PK now."); } } } else { //logger.info("Using Derived features"); tagsets = new string[] { "2", "3", "4" }; } if (taDetector == null) { taDetector = new TagAffixDetector(flags); } foreach (string tagset in tagsets) { features.Add(taDetector.CheckDic(tagset + "p", charp) + taDetector.CheckDic(tagset + "i", charp) + taDetector.CheckDic(tagset + "s", charc) + taDetector.CheckInDic(charp) + taDetector.CheckInDic(charc) + tagset + "prep-sufc"); } } // features.add("|ctbchar2"); // Added a constant feature several times!! 
/* * In error analysis, we found English words and numbers are often separated. * Rule 1: isNumber feature: check if the current and previous char is a number. * Rule 2: Disambiguation of time point and time duration. * Rule 3: isEnglish feature: check if the current and previous character is an english letter. * Rule 4: English name feature: check if the current char is a conjunct pu for English first and last name, since there is no space between two names. * Most of PUs are a good indicator for word boundary, but - and . is a strong indicator that there is no boundry within a previous , a follow char and it. */ if (flags.useRule2) { /* Reduplication features */ // previous character == current character if (charp.Equals(charc)) { features.Add("11"); } // previous character == next character if (charp.Equals(charc1)) { features.Add("22"); } // current character == next next character // fire only when usePk and useHk are both false. // Notice: this should be (almost) the same as the "22" feature, but we keep it for now. if (!flags.usePk && !flags.useHk) { if (charc.Equals(charc2)) { features.Add("33"); } } char cur1 = ' '; char cur2 = ' '; char cur = ' '; char pre = ' '; // actually their length must be either 0 or 1 if (charc1.Length > 0) { cur1 = charc1[0]; } if (charc2.Length > 0) { cur2 = charc2[0]; } if (charc.Length > 0) { cur = charc[0]; } if (charp.Length > 0) { pre = charp[0]; } string prer = rcharp.ToString(); // the radical of previous character Pattern E = Pattern.Compile("[a-zA-Z]"); Pattern N = Pattern.Compile("[0-9]"); Matcher m = E.Matcher(charp); Matcher ce = E.Matcher(charc); Matcher pe = E.Matcher(charp2); Matcher cn = N.Matcher(charc); Matcher pn = N.Matcher(charp2); // if current and previous characters are numbers... if (cur >= '0' && cur <= '9' && pre >= '0' && pre <= '9') { if (cur == '9' && pre == '1' && cur1 == '9' && cur2 >= '0' && cur2 <= '9') { //199x features.Add("YR"); } else { features.Add("2N"); } } else { // if current and previous characters are not both numbers // but previous char is a number // i.e. patterns like "1N" , "2A", etc if (pre >= '0' && pre <= '9') { features.Add("1N"); } else { // if previous character is an English character if (m.Matches()) { features.Add("E"); } else { // if the previous character contains no radical (and it exist) if (prer.Equals(".") && charp.Length == 1) { // fire only when usePk and useHk are both false. Not sure why. -pichuan if (!flags.useHk && !flags.usePk) { if (ce.Matches()) { features.Add("PU+E"); } if (pe.Matches()) { features.Add("E+PU"); } if (cn.Matches()) { features.Add("PU+N"); } if (pn.Matches()) { features.Add("N+PU"); } } features.Add("PU"); } } } } string engType = IsEnglish(charp, charc); string engPU = IsEngPU(charp); if (!engType.Equals(string.Empty)) { features.Add(engType); } if (!engPU.Equals(string.Empty) && !engType.Equals(string.Empty)) { features.Add(engPU + engType); } } //end of use rule // features using "Character.getType" information! 
string origS = c.Get(typeof(CoreAnnotations.OriginalCharAnnotation)); char origC = ' '; if (origS.Length > 0) { origC = origS[0]; } System.Globalization.UnicodeCategory type = char.GetUnicodeCategory(origC); switch (type) { case System.Globalization.UnicodeCategory.UppercaseLetter: case System.Globalization.UnicodeCategory.LowercaseLetter: { // A-Z and full-width A-Z // a-z and full-width a-z features.Add("CHARTYPE-LETTER"); break; } case System.Globalization.UnicodeCategory.DecimalDigitNumber: { features.Add("CHARTYPE-DECIMAL_DIGIT_NUMBER"); break; } case System.Globalization.UnicodeCategory.OtherLetter: { // mostly chinese chars features.Add("CHARTYPE-OTHER_LETTER"); break; } default: { // other types features.Add("CHARTYPE-MISC"); break; } } return(features); }
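// Illustrative sketch (not part of the original factory): the useRule2 reduplication features
// above fire when neighbouring characters repeat, which is common in Chinese reduplicated words
// (e.g. "看看"). A stand-alone rendering of those checks (method name is hypothetical; assumes
// `using System.Collections.Generic;`):
private static ICollection<string> ReduplicationFeatureSketch(string charp, string charc, string charc1, string charc2)
{
    var features = new List<string>();
    if (charp == charc) { features.Add("11"); }    // previous == current
    if (charp == charc1) { features.Add("22"); }   // previous == next
    if (charc == charc2) { features.Add("33"); }   // current == next-next (gated on !usePk && !useHk in the original)
    return features;
}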
public virtual ICollection <string> MakeFeatures(string word) { IList <string> features = new List <string>(); if (morpho) { foreach (KeyValuePair <string, ICollection <char> > e in cmfs.GetSingletonFeatures()) { if (e.Value.Contains(word[0])) { features.Add(e.Key + "-1"); } } // Hooray for generics!!! :-) foreach (KeyValuePair <string, Pair <ICollection <char>, ICollection <char> > > e_1 in cmfs.GetAffixFeatures()) { bool both = false; if (e_1.Value.First().Contains(word[0])) { features.Add(e_1.Key + "-P"); both = true; } if (e_1.Value.Second().Contains(word[word.Length - 1])) { features.Add(e_1.Key + "-S"); } else { both = false; } if (both && mildConjunctions && !conjunctions) { features.Add(e_1.Key + "-PS"); } } if (conjunctions) { int max = features.Count; for (int i = 1; i < max; i++) { string s1 = features[i]; for (int j = 0; j < i; j++) { string s2 = features[j]; features.Add(s1 + "&&" + s2); } } } } if (!turnOffWordFeatures) { features.Add(word + "-W"); } if (rads) { features.Add(RadicalMap.GetRadical(word[0]) + "-FR"); features.Add(RadicalMap.GetRadical(word[word.Length - 1]) + "-LR"); for (int i = 0; i < word.Length; i++) { features.Add(RadicalMap.GetRadical(word[i]) + "-CR"); } } if (chars) { // first and last chars features.Add(word[0] + "-FC"); features.Add(word[word.Length - 1] + "-LC"); for (int i = 0; i < word.Length; i++) { features.Add(word[i] + "-CC"); } if (bigrams && word.Length > 1) { features.Add(Sharpen.Runtime.Substring(word, 0, 2) + "-FB"); features.Add(Sharpen.Runtime.Substring(word, word.Length - 2) + "-LB"); for (int i_1 = 2; i_1 <= word.Length; i_1++) { features.Add(Sharpen.Runtime.Substring(word, i_1 - 2, i_1) + "-CB"); } } } if (useLength) { int lengthBin = word.Length; if (lengthBin >= 5) { if (lengthBin >= 8) { lengthBin = 8; } else { lengthBin = 5; } } features.Add(word.Length + "-L"); } if (useFreq && !turnOffWordFeatures) { int freq = wordCounter.GetIntCount(word); int freqBin; if (freq <= 1) { freqBin = 0; } else { if (freq <= 3) { freqBin = 1; } else { if (freq <= 6) { freqBin = 2; } else { if (freq <= 15) { freqBin = 3; } else { if (freq <= 50) { freqBin = 4; } else { freqBin = 5; } } } } } features.Add(freqBin + "-FQ"); } features.Add("PR"); if (threshedFeatures != null) { for (IEnumerator <string> iter = features.GetEnumerator(); iter.MoveNext();) { string s = iter.Current; if (!threshedFeatures.Contains(s)) { iter.Remove(); } } } return(features); }
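// Illustrative sketch (not part of the original class): the useFreq branch above buckets raw word
// frequencies into six bins with thresholds 1, 3, 6, 15 and 50, so rare and common words share a
// "-FQ" feature instead of each frequency getting its own. A flat, stand-alone version of that
// binning (method name is hypothetical):
private static int FrequencyBinSketch(int freq)
{
    if (freq <= 1)  { return 0; }  // unseen or singleton
    if (freq <= 3)  { return 1; }
    if (freq <= 6)  { return 2; }
    if (freq <= 15) { return 3; }
    if (freq <= 50) { return 4; }
    return 5;                      // very frequent
}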