/// <summary>
/// Constructor of Speech page: initializes the UI, wires the enable-speech
/// toggle, and binds the saved speech commands to the list view.
/// </summary>
public SpeechSettingsPage()
{
    _oSelectedSpeechCommand = null;
    InitializeComponent();
    App.ShowToast(AppResources.Speech_register);
    // Reflect the persisted setting in the toggle BEFORE attaching the handler,
    // so the initial assignment does not trigger validation.
    swEnableSpeech.IsToggled = App.AppSettings.SpeechEnabled;
    swEnableSpeech.Toggled += async(sender, args) =>
    {
        App.AppSettings.SpeechEnabled = swEnableSpeech.IsToggled;
        if (swEnableSpeech.IsToggled)
        {
            if (!await ValidateSpeechRecognition())
            {
                // Validation failed: revert the toggle. NOTE(review): this
                // re-fires Toggled, which then persists the "false" state.
                swEnableSpeech.IsToggled = false;
            }
        }
    };
    // Bind the stored speech commands, if any exist.
    _oListSource = App.AppSettings.SpeechCommands;
    if (_oListSource != null)
    {
        listView.ItemsSource = _oListSource;
    }
}
/// <summary>
/// GET-style convenience wrapper: wraps the raw text in a
/// <see cref="SpeechModel"/> and delegates to the POST handler.
/// </summary>
/// <param name="text">Text to synthesize.</param>
/// <returns>The result of <see cref="SpeectTextFromPost"/> (WAV audio or no content).</returns>
public IActionResult SpeectTextFromRequest(string text)
{
    // Consistency: use the same object-initializer style as the other wrappers.
    // NOTE(review): "Speect" looks like a typo for "Speech", but the name is part
    // of the public action surface, so it is kept for compatibility.
    var model = new SpeechModel { Text = text };
    return SpeectTextFromPost(model);
}
/// <summary>
/// GET-style convenience wrapper: wraps the raw text in a
/// <see cref="SpeechModel"/> and delegates to the POST handler.
/// </summary>
/// <param name="text">Text to convert to kana.</param>
/// <returns>The kana string, or null on failure/empty input.</returns>
public string ConvertTextFromRequest(string text)
{
    // Consistency: use the same object-initializer style as the other wrappers.
    var model = new SpeechModel { Text = text };
    return ConvertTextFromPost(model);
}
/// <summary>
/// Connect device to Speech Command: remembers which command row was tapped,
/// then shows the switch-selection popup.
/// </summary>
/// <param name="sender">The tapped image; its BindingContext is the row's SpeechModel.</param>
/// <param name="e">Unused event args.</param>
// async void is acceptable here because this is a top-level event handler.
private async void btnConnect_Clicked(object sender, EventArgs e)
{
    _oSelectedSpeechCommand = (SpeechModel)((TintedCachedImage)sender).BindingContext;
    var oSwitchPopup = new SwitchPopup();
    // DelegateMethod receives the device the user picks in the popup.
    oSwitchPopup.DeviceSelectedMethod += DelegateMethod;
    await PopupNavigation.Instance.PushAsync(oSwitchPopup);
}
/// <summary>
/// Convenience overload: packages the text into a model and forwards it to the
/// POST handler for kana conversion.
/// </summary>
/// <param name="text">Text to convert to kana.</param>
/// <returns>The converted kana string, or null when conversion fails.</returns>
public string ConvertTextFromRequest(string text)
{
    return ConvertTextFromPost(new SpeechModel { Text = text });
}
/// <summary>
/// Convenience overload: packages the text into a model and forwards it to the
/// POST handler for speech synthesis.
/// </summary>
/// <param name="text">Text to synthesize.</param>
/// <returns>The synthesis result produced by the POST handler.</returns>
public IActionResult SpeectTextFromRequest(string text)
{
    return SpeectTextFromPost(new SpeechModel { Text = text });
}
/// <summary>
/// Synthesizes speech from the posted model: optionally switches speaker,
/// applies voice parameters, converts text to kana if needed, and returns
/// the synthesized audio as a WAV file.
/// </summary>
/// <param name="speech_model">Request body with text/kana and optional speaker settings.</param>
/// <returns>"audio/wav" content on success; NoContent when there is nothing to say or on engine error.</returns>
public IActionResult SpeectTextFromPost([FromBody] SpeechModel speech_model)
{
    // Bug fix: a missing or malformed JSON body binds as null, which previously
    // crashed with a NullReferenceException on the first property access.
    if (speech_model == null)
    {
        return BadRequest("Request body is missing or invalid.");
    }
    Setting.Lock();
    try
    {
        // Switch speaker first when the request names a different voice DB/speaker.
        if (speech_model.SpeakerSetting != null &&
            speech_model.SpeakerSetting.VoiceDbName.Length > 0 &&
            speech_model.SpeakerSetting.SpeakerName.Length > 0 &&
            AitalkWrapper.Parameter.CurrentSpeakerName != speech_model.SpeakerSetting.SpeakerName)
        {
            var error_message = Setting.ApplySpeakerSetting(speech_model.SpeakerSetting);
            if (error_message != null)
            {
                return BadRequest($"Saved but {error_message}");
            }
        }

        // Apply per-request speaker parameters; a negative value means "use the default".
        var speaker = speech_model.Speaker ?? new SpeakerModel();
        AitalkWrapper.Parameter.VoiceVolume = (0 <= speaker.Volume) ? speaker.Volume : Setting.DefaultSpeakerParameter.Volume;
        AitalkWrapper.Parameter.VoiceSpeed = (0 <= speaker.Speed) ? speaker.Speed : Setting.DefaultSpeakerParameter.Speed;
        AitalkWrapper.Parameter.VoicePitch = (0 <= speaker.Pitch) ? speaker.Pitch : Setting.DefaultSpeakerParameter.Pitch;
        AitalkWrapper.Parameter.VoiceEmphasis = (0 <= speaker.Emphasis) ? speaker.Emphasis : Setting.DefaultSpeakerParameter.Emphasis;
        AitalkWrapper.Parameter.PauseMiddle = (0 <= speaker.PauseMiddle) ? speaker.PauseMiddle : Setting.DefaultSpeakerParameter.PauseMiddle;
        AitalkWrapper.Parameter.PauseLong = (0 <= speaker.PauseLong) ? speaker.PauseLong : Setting.DefaultSpeakerParameter.PauseLong;
        AitalkWrapper.Parameter.PauseSentence = (0 <= speaker.PauseSentence) ? speaker.PauseSentence : Setting.DefaultSpeakerParameter.PauseSentence;

        // Prefer caller-supplied kana; otherwise convert the plain text to kana.
        string kana = null;
        if (!string.IsNullOrEmpty(speech_model.Kana))
        {
            kana = speech_model.Kana;
        }
        else if (!string.IsNullOrEmpty(speech_model.Text))
        {
            kana = AitalkWrapper.TextToKana(speech_model.Text, Setting.System.KanaTimeout);
        }
        if (string.IsNullOrEmpty(kana))
        {
            return new NoContentResult();
        }

        // Synthesize the kana into WAV audio and return the bytes.
        using var wave_stream = new MemoryStream();
        AitalkWrapper.KanaToSpeech(kana, wave_stream, Setting.System.SpeechTimeout);
        return new FileContentResult(wave_stream.ToArray(), "audio/wav");
    }
    catch (Exception)
    {
        // Engine failures deliberately surface as "no content" rather than a 500.
        return new NoContentResult();
    }
    finally
    {
        Setting.Unlock();
    }
}
/// <summary>
/// Sends the recognized speech text to the LUIS endpoint and returns the
/// parsed intent result as JSON.
/// </summary>
/// <param name="speech">Model whose Text is appended to the LUIS query URL.</param>
/// <returns>A JsonResult wrapping the deserialized LUIS response.</returns>
public async Task <JsonResult> GetResponseGladysAsync(SpeechModel speech)
{
    var response = await _client.GetAsync(_urLLuis + speech.Text);
    var result = await response.Content.ReadAsStringAsync();
    var resultObject = JsonConvert.DeserializeObject <WebServices.Models.LuisResult>(result);
    // Bug fix: the deserialized result was discarded and an empty JsonResult
    // returned, so callers never saw the LUIS answer.
    return new JsonResult(resultObject);
}
/// <summary>
/// Parses the JSON body of a GetModels() response into an array of SpeechModel.
/// </summary>
/// <param name="data">Raw UTF-8 response bytes.</param>
/// <returns>The parsed models, or null when decoding/parsing fails.</returns>
private SpeechModel[] ParseGetModelsResponse(byte[] data)
{
    string jsonString = Encoding.UTF8.GetString(data);
    if (jsonString == null)
    {
        Log.Error("SpeechToText", "Failed to get JSON string from response.");
        return null;
    }
    IDictionary json = (IDictionary)Json.Deserialize(jsonString);
    if (json == null)
    {
        // Bug fix: log tag was misspelled "SpechToText", breaking log filtering.
        Log.Error("SpeechToText", "Failed to parse JSON: {0}", jsonString);
        return null;
    }
    try
    {
        List <SpeechModel> models = new List <SpeechModel>();
        IList imodels = json["models"] as IList;
        if (imodels == null)
        {
            throw new Exception("Expected IList");
        }
        foreach (var m in imodels)
        {
            IDictionary imodel = m as IDictionary;
            if (imodel == null)
            {
                throw new Exception("Expected IDictionary");
            }
            // Copy the known fields into a strongly-typed model.
            SpeechModel model = new SpeechModel();
            model.Name = (string)imodel["name"];
            model.Rate = (long)imodel["rate"];
            model.Language = (string)imodel["language"];
            model.Description = (string)imodel["description"];
            model.URL = (string)imodel["url"];
            models.Add(model);
        }
        return models.ToArray();
    }
    catch (Exception e)
    {
        // Malformed entries (missing keys, wrong types) are reported, not thrown.
        Log.Error("SpeechToText", "Caught exception {0} when parsing GetModels() response: {1}", e.ToString(), jsonString);
    }
    return null;
}
/// <summary>
/// Renders the index view, exposing the submitted subscription key, content,
/// language code, and a freshly acquired access token via ViewBag.
/// </summary>
/// <param name="speechModel">The posted form model.</param>
/// <returns>The index view bound to the same model.</returns>
public ActionResult Index(SpeechModel speechModel)
{
    var auth = new Authentication(speechModel.SubscriptionKey);
    ViewBag.Key = speechModel.SubscriptionKey;
    ViewBag.Content = speechModel.Content;
    ViewBag.LangCode = speechModel.LanguageCode;
    ViewBag.Token = auth.GetAccessToken();
    return View(speechModel);
}
/// <summary>
/// Builds a LanguageModel from the shared speech-model metadata plus the
/// language training data payload.
/// </summary>
/// <param name="model">Source of name/description/locale/properties.</param>
/// <param name="languageFile">Raw language training data.</param>
/// <returns>The populated LanguageModel.</returns>
public static LanguageModel CreateLanguageModel(SpeechModel model, byte[] languageFile)
{
    return new LanguageModel()
    {
        name = model.name,
        description = model.description,
        locale = model.locale,
        dataImportKind = "Language",
        properties = model.properties,
        languagedata = languageFile
    };
}
/// <summary>
/// Create new Speech object: builds an enabled command entry, appends it to
/// the bound list, and persists the change.
/// </summary>
/// <param name="speechID">Identifier for the new command.</param>
/// <param name="speechText">Display text of the command.</param>
private void AddNewRecord(string speechID, string speechText)
{
    App.ShowToast(AppResources.Speech_saved + " " + speechText);
    var speechObject = new SpeechModel();
    speechObject.Id = speechID;
    speechObject.Name = speechText;
    speechObject.Enabled = true;
    _oListSource.Add(speechObject);
    SaveAndRefresh();
    // Remind the user that a switch still needs to be assigned to this command.
    App.ShowToast(AppResources.noSwitchSelected_explanation_Speech);
}
/// <summary>
/// Builds an AcousticModel from the shared speech-model metadata plus the
/// audio archive and its transcription file.
/// </summary>
/// <param name="model">Source of name/description/locale/properties.</param>
/// <param name="zipFile">Zipped audio training data.</param>
/// <param name="transcriptionFile">Matching transcription text.</param>
/// <returns>The populated AcousticModel.</returns>
public static AcousticModel CreateAcousticModel(SpeechModel model, byte[] zipFile, byte[] transcriptionFile)
{
    return new AcousticModel()
    {
        name = model.name,
        description = model.description,
        locale = model.locale,
        dataImportKind = "Acoustic",
        properties = model.properties,
        audiodata = zipFile,
        transcriptions = transcriptionFile
    };
}
/// <summary>
/// Synthesizes the model's text via Google Text-to-Speech and returns the
/// response; failures are cached per language code and reported in the result.
/// </summary>
/// <param name="model">Carries the text, language code, and voice gender.</param>
/// <returns>Success with the synthesis response, or failure with the error message.</returns>
public ResultModel <SynthesizeSpeechResponse> DownloadWord(SpeechModel model)
{
    // Bug fix: File.Exists() is always false for a directory path, so the old
    // guard never fired. Directory.CreateDirectory is a no-op when the
    // directory already exists, so no check is needed at all.
    Directory.CreateDirectory("wwwroot/assets/speeches/");
    Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", "./speech-key.json");
    try
    {
        var client = TextToSpeechClient.Create();
        // The input to be synthesized, can be provided as text or SSML.
        var input = new SynthesisInput { Text = model.Text };
        // Build the voice request.
        var voiceSelection = new VoiceSelectionParams { LanguageCode = model.LanguageCode, SsmlGender = model.Gender };
        // Specify the type of audio file.
        var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
        // Perform the text-to-speech request.
        return new ResultModel <SynthesizeSpeechResponse> { Data = client.SynthesizeSpeech(input, voiceSelection, audioConfig), Success = true };
    }
    catch (Exception ex)
    {
        // Remember failing language codes so later requests can short-circuit.
        notFoundLanguageCodes.Add(new NotFoundLanguageCode { Code = model.LanguageCode, ErrorMessage = ex.Message });
        return new ResultModel <SynthesizeSpeechResponse> { Success = false, ErrorMessage = ex.Message };
    }
}
/// <summary>
/// Converts the model's text to an MP3 file on disk (via DownloadWord), records
/// the outcome on the model, and inserts an audit row into [dbo].[speech].
/// </summary>
/// <param name="model">Request model; Code/Status/ErrorMessage/AddedDate/Id are filled in here.</param>
/// <returns>The same model, updated with the synthesis status and DB identity.</returns>
public SpeechModel GetVoicFromText(SpeechModel model)
{
    // Fresh code doubles as the MP3 file name below.
    model.Code = Guid.NewGuid();
    try
    {
        var response = new ResultModel <SynthesizeSpeechResponse>();
        // Short-circuit: skip the TTS call for language codes that already failed.
        if (notFoundLanguageCodes.Any(x => x.Code == model.LanguageCode))
        {
            response = new ResultModel <SynthesizeSpeechResponse> { Success = false, ErrorMessage = notFoundLanguageCodes.Find(x => x.Code == model.LanguageCode).ErrorMessage };
        }
        else
        {
            response = DownloadWord(model);
        }
        if (response.Success)
        {
            // Write the response to the output file.
            using FileStream output = File.Create("wwwroot/assets/speeches/" + model.Code + ".mp3");
            response.Data.AudioContent.WriteTo(output);
            model.Status = SpeechStatus.Success;
        }
        else
        {
            model.ErrorMessage = response.ErrorMessage;
            model.Status = SpeechStatus.Error;
        }
    }
    catch (Exception ex)
    {
        // File-system failures are recorded on the model rather than thrown.
        model.Status = SpeechStatus.Error;
        model.ErrorMessage = ex.Message;
    }
    model.AddedDate = DateTime.Now;
    // Insert the audit row and capture the new identity as the model Id.
    model.Id = db.Query <int>(@" insert into [dbo].[speech] (Code, AddedDate, Status, ErrorMessage) VALUES (@Code, @AddedDate, @Status, @ErrorMessage); SELECT CAST(SCOPE_IDENTITY() as int) ", model).SingleOrDefault();
    return(model);
}
/// <summary>
/// Converts the posted text to kana under the engine lock.
/// </summary>
/// <param name="speech_model">Request body; only Text is used.</param>
/// <returns>The kana string, or null when the text is empty or conversion fails.</returns>
public string ConvertTextFromPost([FromBody] SpeechModel speech_model)
{
    // Serialize access to the AITalk engine while converting.
    Setting.Lock();
    try
    {
        if (string.IsNullOrEmpty(speech_model.Text))
        {
            return null;
        }
        return AitalkWrapper.TextToKana(speech_model.Text, Setting.System.KanaTimeout);
    }
    catch (Exception)
    {
        // Conversion failures surface as "no result" rather than an error payload.
        return null;
    }
    finally
    {
        Setting.Unlock();
    }
}
/// <summary>
/// Callback for the model listing: selects the highest-rate model whose
/// language matches m_Language and makes it the active recognition model.
/// </summary>
/// <param name="models">Available models; may be null when the request failed.</param>
private void OnGetModels(SpeechModel [] models)
{
    if (models == null)
    {
        return;
    }
    SpeechModel best = null;
    foreach (var candidate in models)
    {
        if (!candidate.Language.StartsWith(m_Language))
        {
            continue;
        }
        if (best == null || candidate.Rate > best.Rate)
        {
            best = candidate;
        }
    }
    if (best != null)
    {
        Log.Status("SpeechToTextWidget", "Selecting Recognize Model: {0} ", best.Name);
        m_SpeechToText.RecognizeModel = best.Name;
    }
}
/// <summary>
/// Extracts the article at the given URL, fetches a cognitive speech access
/// token, and renders the view with the content ready for read-aloud.
/// </summary>
/// <param name="speechModel">Carries the subscription key and locale code.</param>
/// <param name="ArticleUrl">URL of the article to extract.</param>
/// <returns>The index view on success; the "Error" view on any failure.</returns>
public ActionResult Index(SpeechModel speechModel, string ArticleUrl)
{
    try
    {
        // Pull the article body and append a spoken end-of-article marker.
        tc.TrackTrace("Extract contents from article");
        var extractor = new ArticleExtraction();
        var contentBuilder = new StringBuilder();
        contentBuilder.Append(extractor.ExtractArticle(ArticleUrl));
        contentBuilder.AppendLine("This article is over. Thanks for listening.");
        tc.TrackTrace("Article extract: " + contentBuilder);
        tc.TrackTrace("Get access token");
        // Obtain a token for the cognitive speech API.
        var auth = new Authentication();
        string accessToken = auth.GetAccessToken();
        ViewBag.Content = contentBuilder.ToString();
        ViewBag.Token = accessToken;
        ViewBag.Key = speechModel.SubscriptionKey;
        ViewBag.LocaleCode = speechModel.LocaleCode;
        return View(speechModel);
    }
    catch (Exception ex)
    {
        // Telemetry captures the failure; the user just sees the error page.
        tc.TrackException(ex);
        return View("Error");
    }
}
/// <summary>
/// Flattens a SpeechModel into a multipart-post parameter dictionary via
/// reflection: file-like properties become FileParameter entries, "properties"
/// is JSON-serialized, and everything else is passed through as-is.
/// </summary>
/// <param name="model">The model whose public instance properties are posted.</param>
/// <returns>Property-name to post-value dictionary.</returns>
private static Dictionary <string, object> ModelToDictionary(SpeechModel model)
{
    Dictionary <string, object> postParameters = new Dictionary <string, object>();
    foreach (var propertyInfo in model.GetType().GetProperties(BindingFlags.Public | BindingFlags.Instance))
    {
        var param = propertyInfo.GetValue(model);
        var fileName = string.Empty;
        // Dispatch on the property NAME, so these cases must stay in sync with
        // the SpeechModel property names.
        switch (propertyInfo.Name)
        {
        case "languagedata":
        case "transcriptions":
            // Plain-text file payloads.
            var textFile = (byte[])param;
            fileName = propertyInfo.Name;
            var textFileParameter = new FileParameter(textFile, fileName, "text/plain");
            postParameters.Add(propertyInfo.Name, textFileParameter);
            break;

        case "audiodata":
            // Zipped audio payload.
            var zipFile = (byte[])param;
            fileName = propertyInfo.Name;
            var zipFileParameter = new FileParameter(zipFile, fileName, "application/x-zip-compressed");
            postParameters.Add(propertyInfo.Name, zipFileParameter);
            break;

        case "properties":
            // Nested object goes over the wire as JSON.
            var jsonProperties = JsonConvert.SerializeObject(param);
            postParameters.Add(propertyInfo.Name, jsonProperties);
            break;

        default:
            // it's just a string/string key value pair
            postParameters.Add(propertyInfo.Name, param);
            break;
        }
    }
    return postParameters;
}
/// <summary>
/// Fetches a single speech model by name from the service.
/// </summary>
/// <param name="modelName">Name of the model to look up; must be non-empty.</param>
/// <returns>The deserialized model.</returns>
/// <exception cref="ArgumentNullException">When modelName is null or empty.</exception>
/// <exception cref="ServiceResponseException">When the service reports an error.</exception>
public SpeechModel GetModel(string modelName)
{
    if (string.IsNullOrEmpty(modelName))
    {
        // Bug fix: the message was being passed as the paramName argument;
        // use the (paramName, message) overload instead.
        throw new ArgumentNullException(nameof(modelName), "modelName can not be null or empty");
    }
    SpeechModel result = null;
    try
    {
        // NOTE(review): blocking on .Result is deadlock-prone on sync-context
        // frameworks, but the synchronous signature is part of the public API.
        result = this.Client.WithAuthentication(this.UserName, this.Password)
                 .GetAsync($"{this.Endpoint}{PATH_MODELS}/{modelName}")
                 .As <SpeechModel>()
                 .Result;
    }
    catch (AggregateException ae)
    {
        // Bug fix: "throw ae.InnerException as ServiceResponseException" threw
        // null (a NullReferenceException) whenever the inner exception was a
        // different type, hiding the real error. Unwrap only when it matches.
        if (ae.InnerException is ServiceResponseException sre)
        {
            throw sre;
        }
        throw;
    }
    return result;
}
//ASR: Automatic Speech Recognition
/// <summary>
/// Creates the recognizer wrapper around the supplied speech model.
/// </summary>
/// <param name="speechModel">The model this recognizer will use.</param>
public AutomaticSpeechRecognition(SpeechModel speechModel)
{
    this.speechModel = speechModel;
}
/// <summary>
/// Renders the index view backed by a fresh, empty speech model.
/// </summary>
/// <returns>The default view for this action.</returns>
public ActionResult Index()
{
    return View(new SpeechModel());
}
/// <summary>
/// Create new speech recognition options.
/// </summary>
/// <param name="speechModel">The language model to use for recognition.</param>
/// <param name="audioFormat">The audio format.</param>
/// <param name="language">The language for speech recognition.</param>
// Simple value carrier: each argument is stored on the matching property.
public SpeechRecognitionOptions(SpeechModel speechModel, RecognitionAudioFormat audioFormat, RecognitionLanguage language)
{
    SpeechModel = speechModel;
    AudioFormat = audioFormat;
    Language = language;
}
/// <summary>
/// Create new speech recognition session options.
/// </summary>
/// <param name="speechModel">The language model to use for recognition.</param>
/// <param name="audioFormat">The audio format.</param>
/// <param name="connectionMode">Network security settings; defaults to a secure connection.</param>
// Simple value carrier: each argument is stored on the matching property.
public SpeechRecognitionSessionOptions(SpeechModel speechModel, RecognitionAudioFormat audioFormat, ConnectionMode connectionMode = ConnectionMode.Secure)
{
    SpeechModel = speechModel;
    AudioFormat = audioFormat;
    ConnectionMode = connectionMode;
}