Example 1
        /// <summary>
        /// Listen and perform the repeat using default voice and language fields
        /// </summary>
        /// <returns></returns>
        private async Task Repeat()
        {
            try
            {
                _assetWrapper.ShowSystemImage(SystemImage.JoyGoofy2);
                _assetWrapper.PlaySystemSound(SystemSound.SystemWakeWord);
                await _misty.StartRecordingAudioAsync("DefaultAzureAudio.wav");

                // Capture roughly five seconds of audio before stopping the recording
                _misty.Wait(5000);

                await _misty.StopRecordingAudioAsync();

                _assetWrapper.PlaySystemSound(SystemSound.SystemSuccess);
                _misty.Wait(500);

                //Test for mic issue... this uses a file vs. recording
                //TranslationRecognitionResult description = await _azureCognitive.TranslateAudioFile("TestMe.wav", "en-US", new List<string> { "en" }, true);

                //Else use recorded audio
                IGetAudioResponse audioResponse = await _misty.GetAudioAsync("DefaultAzureAudio.wav", false);

                TranslationRecognitionResult description = await _azureCognitive.TranslateAudioStream(
                    (byte[])audioResponse.Data.Audio,
                    _fromDefaultLanguage,
                    new List<string> { _toDefaultLanguage });

                BroadcastDetails($"You said {(description?.Text == null ? "something, but I failed to process it." : $",{description.Text}")}", _defaultVoice);

                _assetWrapper.ShowSystemImage(SystemImage.ContentLeft);
            }
            catch (Exception ex)
            {
                _misty.SkillLogger.Log("Failed to repeat the audio input.", ex);
            }
        }
Example 2
        /// <summary>
        /// Listen and perform the translation using foreign voice and language fields
        /// </summary>
        private async void Translate()
        {
            try
            {
                _assetWrapper.PlaySystemSound(SystemSound.SystemWakeWord);
                _assetWrapper.ShowSystemImage(SystemImage.Joy);
                await _misty.StartRecordingAudioAsync("TranslateAzureAudio.wav");

                _misty.Wait(5000);

                await _misty.StopRecordingAudioAsync();

                _assetWrapper.PlaySystemSound(SystemSound.SystemSuccess);
                _misty.Wait(500);

                // Retrieve the recorded audio file from the robot so it can be streamed to Azure
                IGetAudioResponse audioResponse = await _misty.GetAudioAsync("TranslateAzureAudio.wav", false);

                TranslationRecognitionResult description = await _azureCognitive.TranslateAudioStream(
                    (byte[])audioResponse.Data.Audio,
                    _fromForeignLanguage,
                    new List<string> { _toForeignLanguage });

                if (description.Translations.Any())
                {
                    string translation = description.Translations.FirstOrDefault(x => x.Key == _toForeignLanguage).Value;

                    //Sometimes comes back without region
                    if (string.IsNullOrWhiteSpace(translation))
                    {
                        string toLanguage2 = _toForeignLanguage.Split('-')[0];
                        translation = description.Translations.FirstOrDefault(x => x.Key == toLanguage2).Value;
                    }

                    if (string.IsNullOrWhiteSpace(translation))
                    {
                        BroadcastDetails("I am unable to translate this.", _defaultVoice);
                    }
                    else
                    {
                        BroadcastDetails(translation, _foreignVoice);
                    }
                }

                _assetWrapper.ShowSystemImage(SystemImage.ContentRight);
            }
            catch (Exception ex)
            {
                _misty.SkillLogger.Log("Failed to translate the audio input.", ex);
            }
        }
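
The region fallback in Example 2 can also be expressed with `TryGetValue`, since `TranslationRecognitionResult.Translations` is a dictionary keyed by target language code. A minimal sketch of that alternative; the helper name `GetTranslation` is illustrative and not part of the sample above:

        // Illustrative alternative to the lookup in Example 2: TryGetValue avoids the
        // FirstOrDefault/KeyValuePair handling and the separate null check on Value.
        private static string GetTranslation(TranslationRecognitionResult result, string toLanguage)
        {
            if (result.Translations.TryGetValue(toLanguage, out string translation))
            {
                return translation;
            }

            // Sometimes the result comes back keyed without the region suffix (e.g. "es" instead of "es-ES")
            string languageOnly = toLanguage.Split('-')[0];
            return result.Translations.TryGetValue(languageOnly, out translation) ? translation : null;
        }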
Example 3
        static async Task Translate(string targetLanguage)
        {
            string translation = "";

            // Translate speech
            string      audioFile = "station.wav";
            SoundPlayer wavPlayer = new SoundPlayer(audioFile);

            // Play the audio synchronously, otherwise the translation will interrupt it
            wavPlayer.PlaySync();
            using AudioConfig audioConfig          = AudioConfig.FromWavFileInput(audioFile);
            using TranslationRecognizer translator = new TranslationRecognizer(translationConfig, audioConfig);

            // Add an inline event handler to handle the Synthesizing event and output the audio to the current output device
            translator.Synthesizing += (_, e) =>
            {
                var audio = e.Result.GetAudio();
                if (audio.Length > 0)
                {
                    // Output to a file using File.WriteAllBytes("YourAudioFile.wav", audio);
                    // Or place the data into a stream and play to the current output device
                    using (System.IO.MemoryStream ms = new System.IO.MemoryStream(audio))
                    {
                        // Construct the sound player; play synchronously so the stream
                        // is not disposed before playback finishes
                        SoundPlayer player = new SoundPlayer(ms);
                        player.PlaySync();
                    }
                }
            };
            Console.WriteLine("Getting speech from file...");
            TranslationRecognitionResult result = await translator.RecognizeOnceAsync();

            Console.WriteLine($"Translating '{result.Text}'");
            translation            = result.Translations[targetLanguage];
            Console.OutputEncoding = Encoding.UTF8;
            Console.WriteLine(translation);
        }
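
Example 3 assumes a class-level `translationConfig` (a `SpeechTranslationConfig`) that is created elsewhere, as in the Speech SDK translation quickstart. A minimal sketch of how that field might be initialized; the key, region, languages, and voice name below are placeholders, not values taken from the example:

        // Assumed usings: System.Media, System.Text, Microsoft.CognitiveServices.Speech,
        // Microsoft.CognitiveServices.Speech.Audio, Microsoft.CognitiveServices.Speech.Translation
        static SpeechTranslationConfig translationConfig = BuildTranslationConfig();

        static SpeechTranslationConfig BuildTranslationConfig()
        {
            // Placeholder key and region; read these from configuration in real code
            var config = SpeechTranslationConfig.FromSubscription("YOUR_SPEECH_KEY", "YOUR_SPEECH_REGION");
            config.SpeechRecognitionLanguage = "en-US"; // language spoken in station.wav
            config.AddTargetLanguage("it");             // key used to read result.Translations[targetLanguage]
            config.VoiceName = "it-IT-ElsaNeural";      // needed for the Synthesizing event to return audio
            return config;
        }

The `targetLanguage` argument passed to `Translate` then has to match one of the codes registered with `AddTargetLanguage`.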