/// <summary>
/// Builds a single grammar whose "check" rule accepts every ordered two-phoneme
/// combination drawn from the <c>phonems</c> array, using UPS pronunciations.
/// </summary>
/// <returns>A grammar rooted at the "check" rule, with culture en-US.</returns>
private Grammar CreateGrammar2D()
{
    SrgsDocument document = new SrgsDocument
    {
        PhoneticAlphabet = SrgsPhoneticAlphabet.Ups
    };

    SrgsOneOf alternatives = new SrgsOneOf();
    int total = phonems.Length;

    // Enumerate the full cross product of phoneme pairs; the token text is the
    // concatenation, the pronunciation is the space-separated phoneme sequence.
    for (int first = 0; first < total; first++)
    {
        for (int second = 0; second < total; second++)
        {
            SrgsToken pairToken = new SrgsToken(phonems[first] + phonems[second])
            {
                Pronunciation = phonems[first] + " " + phonems[second]
            };
            alternatives.Add(new SrgsItem(pairToken));
        }
    }

    SrgsRule checkRule = new SrgsRule("check");
    checkRule.Add(alternatives);

    document.Rules.Add(checkRule);
    document.Root = checkRule;
    document.Culture = System.Globalization.CultureInfo.GetCultureInfo("en-US");

    return new Grammar(document);
}
/// <summary>
/// Builds one grammar per phoneme in <c>phonems_basic</c>: grammar i covers every
/// two-phoneme combination that begins with phonems_basic[i], using UPS pronunciations.
/// </summary>
/// <returns>An array of grammars, one per leading phoneme, each with culture en-US.</returns>
public Grammar[] CreateGrammar_Basic2DX()
{
    List<Grammar> grammars = new List<Grammar>();
    // NOTE(review): removed an unused List<List<string>> "words" local that was
    // declared here but never read or written.
    int count = phonems_basic.Length;
    for (int i = 0; i < count; i++)
    {
        SrgsDocument doc = new SrgsDocument();
        doc.PhoneticAlphabet = SrgsPhoneticAlphabet.Ups;
        SrgsRule rule = new SrgsRule("basic2D" + i);
        SrgsOneOf oneOf = new SrgsOneOf();
        // Pair the fixed leading phoneme with every possible second phoneme.
        for (int i2 = 0; i2 < count; i2++)
        {
            string text = phonems_basic[i] + phonems_basic[i2];
            string pronunciation = phonems_basic[i] + " " + phonems_basic[i2];
            SrgsToken token = new SrgsToken(text);
            token.Pronunciation = pronunciation;
            SrgsItem item = new SrgsItem(token);
            oneOf.Add(item);
        }
        rule.Add(oneOf);
        doc.Rules.Add(rule);
        doc.Root = rule;
        doc.Culture = System.Globalization.CultureInfo.GetCultureInfo("en-US");
        Grammar grammar = new Grammar(doc);
        grammars.Add(grammar);
    }
    return grammars.ToArray();
}
/// <summary>
/// Creates an SRGS grammar with a public "word" rule accepting three animal names
/// plus a token ("whatchamacallit") carrying an explicit pronunciation, then loads
/// the grammar into <c>RecEngine</c> synchronously.
/// </summary>
private static void LoadSRGSGrammar()
{
    // NOTE(review): removed several stretches of commented-out experimental code
    // (alternate alphabet, slang-rule variant, rule-ref example) that obscured the
    // active logic below.

    // UPS phonetic alphabet so Pronunciation strings below are interpreted as UPS phones.
    document = new SrgsDocument
    {
        PhoneticAlphabet = SrgsPhoneticAlphabet.Ups
    };

    SrgsRule wordRule = new SrgsRule("word")
    {
        Scope = SrgsRuleScope.Public
    };

    // Plain alternatives recognized by their default pronunciations.
    SrgsOneOf oneOfWord = new SrgsOneOf(new SrgsItem[]
    {
        new SrgsItem("aardvark"),
        new SrgsItem("beaver"),
        new SrgsItem("cheetah")
    });

    // Extra alternative whose pronunciation is given explicitly via an SrgsToken.
    SrgsItem wordItem = new SrgsItem();
    SrgsToken token = new SrgsToken("whatchamacallit")
    {
        Pronunciation = "W AE T CH AE M AE K AA L IH T"
    };
    wordItem.Add(token);
    oneOfWord.Add(wordItem);
    wordRule.Add(oneOfWord);

    // Register the rule and make it the root, then load the grammar.
    document.Rules.Add(new SrgsRule[] { wordRule });
    document.Root = wordRule;

    Grammar grammar = new Grammar(document);
    RecEngine.LoadGrammar(grammar);
}
/// <summary>
/// Converts an SRGS token element into the parser's token representation,
/// forwarding its text, pronunciation, and display form (position is fixed at -1).
/// </summary>
/// <param name="srgsToken">The SRGS token to convert.</param>
/// <param name="parent">The element that owns the resulting token.</param>
/// <returns>The token created by <c>_parser</c>.</returns>
private IToken ParseToken(SrgsToken srgsToken, IElement parent) =>
    _parser.CreateToken(parent, srgsToken.Text, srgsToken.Pronunciation, srgsToken.Display, -1);
/// <summary>
/// Throughout the algorithm's passes the grammar is updated with the received prefix
/// phonemes: each prefix (alone, and prefixed to every short wildcard "word") becomes
/// an alternative in a new public rule, and the document's rules are rebuilt around it.
/// </summary>
/// <param name="prefixes">Phonemes to be added to the front of every wildcard word.</param>
/// <param name="doc">The previous grammar; it is mutated in place and also returned.</param>
/// <param name="passNum">The current pass number; rule names and messages use passNum - 1.</param>
/// <returns>
/// The same document, updated so its root is "SuperWildcard{passNum-1}".
/// </returns>
public static SrgsDocument updateGrammar(List <String> prefixes, SrgsDocument doc, int passNum)
{
    Console.WriteLine("Prefixing grammar with phonemes from pass {0}...", passNum - 1);
    Debug.WriteLine(String.Format("Grammar language is {0}", doc.Culture));

    // read prefix wildcard from text file
    //(this is a smaller wildcard with just 1 and 2 phonemes which, with the prefix, makes up the first word of superwildcard grammar)
    //string prefixWildcardFile = lex4all.Properties.Resources.en_US_prefixwildcard;
    //string[] words = prefixWildcardFile.Split(new string[] { "\r\n" }, StringSplitOptions.RemoveEmptyEntries);
    // new char[0] splits on any whitespace, so each "word" is a single whitespace-delimited entry.
    string[] words = prefixWildcardFile.Split(new char[0], StringSplitOptions.RemoveEmptyEntries);
    Debug.WriteLine(String.Format("After split, words has length {0}", words.Length));

    // set up basic wildcard oneof
    SrgsOneOf prefixOneOf = new SrgsOneOf();

    // make grammar item/token for prefix + each wildcard "word"
    foreach (string prefix in prefixes)
    {
        // The prefix by itself is also a valid alternative; token text uses '.' in
        // place of spaces because SrgsToken text cannot contain whitespace.
        SrgsToken prefixToken = new SrgsToken(prefix.Replace(" ", "."));
        prefixToken.Pronunciation = prefix;
        SrgsItem prefixItem = new SrgsItem();
        prefixItem.Add(prefixToken);
        prefixOneOf.Add(prefixItem);

        foreach (string word in words)
        {
            if (word.Contains("\n"))
            {
                // NOTE(review): this break abandons the REMAINING words for the current
                // prefix, not just this word — presumably a data-corruption sentinel;
                // confirm whether `continue` was intended.
                Console.WriteLine("Found a newline in line {0}", word);
                break;
            }
            string pron = prefix + " " + word;
            string text = pron.Replace(" ", ".");
            SrgsToken thisToken = new SrgsToken(text);
            thisToken.Pronunciation = pron;
            SrgsItem thisItem = new SrgsItem();
            thisItem.Add(thisToken);
            prefixOneOf.Add(thisItem);
        }
    }

    // create grammar rules
    SrgsRule prefixRule = new SrgsRule("Prefixes" + (passNum - 1).ToString());
    prefixRule.Scope = SrgsRuleScope.Public;
    SrgsItem prefItem = new SrgsItem(prefixOneOf);
    prefixRule.Elements.Add(prefItem);
    SrgsRuleRef prefRef = new SrgsRuleRef(prefixRule);

    // Keep the existing Wildcard rule from the previous pass and reference it.
    SrgsRule wildRule = doc.Rules["Wildcard"];
    SrgsRuleRef wildRef = new SrgsRuleRef(wildRule);

    // New root: one mandatory prefix item followed by 0..9 wildcard repetitions.
    SrgsRule newSuperRule = new SrgsRule("SuperWildcard" + (passNum - 1).ToString());
    newSuperRule.Scope = SrgsRuleScope.Public;
    newSuperRule.Elements.Add(new SrgsItem(prefRef));
    SrgsItem newSuperItem = new SrgsItem(0, 9);
    newSuperItem.Add(wildRef);
    newSuperRule.Elements.Add(newSuperItem);

    // update document by adding prefixed rules (old rules are discarded first)
    doc.Rules.Clear();
    doc.Rules.Add(new SrgsRule[] { newSuperRule, prefixRule, wildRule });
    doc.Root = newSuperRule;

    // report
    Console.WriteLine("Done.");
    return(doc);
}
/// <summary>
/// Is used by the algorithm to build the super wildcard grammar: every phoneme
/// combination from the embedded wildcard resource becomes an alternative, and the
/// root rule allows 0-10 repetitions of that wildcard.
/// </summary>
/// <returns>
/// A document object representing the grammar, rooted at "SuperWildcard", with the
/// phonetic alphabet chosen from <c>EngineControl.Language</c>.
/// </returns>
public static SrgsDocument getInitialGrammar()
{
    Console.WriteLine("Building initial grammar...");

    // set up basic wildcard rule
    SrgsOneOf wildOneOf = new SrgsOneOf();

    // read phoneme wildcard from text file. all combinations are then added to the basic rule
    // string[] sfdasf = System.IO.File.ReadAllLines(readPath);
    // StreamReader rd = new StreamReader(readPath);
    // string allWords = rd.ReadToEnd();
    //string wildcardFile = lex4all.Properties.Resources.en_US_wildcard;
    //string[] words = wildcardFile.Split(new string[] {"\r\n"}, StringSplitOptions.RemoveEmptyEntries);
    // new char[0] splits on any whitespace, so each "word" is a single whitespace-delimited entry.
    string[] words = wildcardFile.Split(new char[0], StringSplitOptions.RemoveEmptyEntries);
    Debug.WriteLine(String.Format("After split, words has length {0}", words.Length));

    foreach (string word in words)
    {
        if (word.Contains("\n"))
        {
            // NOTE(review): this break abandons ALL remaining words, not just this
            // one — presumably a data-corruption sentinel; confirm `continue` was not intended.
            Debug.WriteLine(String.Format("Found a newline in line {0}", word));
            break;
        }
        else
        {
            // make grammar item/token for each wildcard "word"; token text uses '.'
            // in place of spaces because SrgsToken text cannot contain whitespace.
            string pron = word;
            string text = word.Replace(" ", ".");
            SrgsToken thisToken = new SrgsToken(text);
            thisToken.Pronunciation = pron;
            SrgsItem thisItem = new SrgsItem();
            thisItem.Add(thisToken);
            wildOneOf.Add(thisItem);
            Debug.WriteLine(String.Format("Wrote {0} to wildOneOf", word));
        }
    }

    // create grammar rules
    SrgsRule wildRule = new SrgsRule("Wildcard");
    wildRule.Scope = SrgsRuleScope.Public;
    wildRule.Elements.Add(wildOneOf);

    // Root rule: 0..10 repetitions of the wildcard alternatives.
    SrgsRule superRule = new SrgsRule("SuperWildcard");
    superRule.Scope = SrgsRuleScope.Public;
    SrgsRuleRef wildRef = new SrgsRuleRef(wildRule);
    SrgsItem superItem = new SrgsItem(0, 10);
    superItem.Add(wildRef);
    superRule.Elements.Add(superItem);

    // create document and add rules
    SrgsDocument gramDoc = new SrgsDocument();

    //Dynamically allocate the correct phonetic alphabet depending on the language of choice
    if (EngineControl.Language.Equals("zh-CN") || EngineControl.Language.Equals("de-DE"))
    {
        gramDoc.PhoneticAlphabet = SrgsPhoneticAlphabet.Sapi;
        phoneticAlphabet = "sapi";
    }
    else if (EngineControl.Language.Equals("ja-JP"))
    {
        gramDoc.PhoneticAlphabet = SrgsPhoneticAlphabet.Ipa;
        phoneticAlphabet = "ipa";
    }
    else
    {
        gramDoc.PhoneticAlphabet = SrgsPhoneticAlphabet.Ups; // This is the default phonetic alphabet setting
        phoneticAlphabet = "ups";
    }

    gramDoc.Culture = new System.Globalization.CultureInfo(EngineControl.Language);
    gramDoc.Rules.Add(new SrgsRule[] { superRule, wildRule });
    gramDoc.Root = superRule;

    // report
    Console.WriteLine(String.Format("Done. Grammar language is {0}", gramDoc.Culture));

    // output initial grammar
    Console.WriteLine("");
    return(gramDoc);
}