        public static async Task<string> TranslationContinuousRecognitionAsync()
        {
            // Replace with your own subscription key and service region.
            var    config       = SpeechTranslationConfig.FromSubscription("a8c500c9ef9f421e977486a17f0adcca", "westeurope");
            string fromLanguage = "pl-PL";

            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage("pl");

            // Sets voice name of synthesis output. Note: this value is a locale;
            // speech synthesis normally expects a full voice name.
            const string PolishVoice = "pl-PL";

            config.VoiceName = PolishVoice;

            // Completes once the first translated utterance arrives.
            var recognitionResult = new TaskCompletionSource<string>();

            // Creates a translation recognizer using microphone as audio input.
            using (var recognizer = new TranslationRecognizer(config))
            {
                // Subscribes to events.
                recognizer.Recognized += async (s, e) =>
                {
                    if (e.Result.Reason == ResultReason.TranslatedSpeech)
                    {
                        await recognizer.StopContinuousRecognitionAsync();

                        recognitionResult.TrySetResult(e.Result.Text);
                    }
                };

                // The original returned immediately with an empty string; recognition
                // must be started and awaited before a result can exist.
                await recognizer.StartContinuousRecognitionAsync();
                return await recognitionResult.Task;
            }
        }
Example #2
        private SpeechTranslationConfig GetSpeechClient()
        {
            var subscriptionKey = configurationReader.GetKey();
            var region          = configurationReader.GetRegion();

            return SpeechTranslationConfig.FromSubscription(subscriptionKey, region);
        }
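GetSpeechClient above depends on a configurationReader field that is not shown. A minimal sketch of what such a reader could look like, assuming the key and region come from environment variables (the interface and variable names are illustrative, not from the original project):

        public interface IConfigurationReader
        {
            string GetKey();
            string GetRegion();
        }

        public class EnvironmentConfigurationReader : IConfigurationReader
        {
            // Reads the Speech resource key and region from environment variables
            // (the variable names are assumptions for this sketch).
            public string GetKey() => System.Environment.GetEnvironmentVariable("SPEECH_KEY");

            public string GetRegion() => System.Environment.GetEnvironmentVariable("SPEECH_REGION");
        }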
        public AzureCognitiveService(IDictionary<AzureServiceType, AzureServiceAuthorization> servicesAuthorization, ISDKLogger logger)
        {
            _logger = logger;
            _servicesAuthorization = servicesAuthorization;

            foreach (KeyValuePair<AzureServiceType, AzureServiceAuthorization> auth in _servicesAuthorization)
            {
                if (auth.Key == AzureServiceType.ComputerVision)
                {
                    _computerVisionClient = new ComputerVisionClient(
                        new ApiKeyServiceClientCredentials(auth.Value.SubscriptionKey),
                        new System.Net.Http.DelegatingHandler[] { });
                    _computerVisionClient.Endpoint = auth.Value.Endpoint;

                    _availableServices.Add(AzureServiceType.ComputerVision);
                }
                else if (auth.Key == AzureServiceType.Speech)
                {
                    _speechConfig            = SpeechConfig.FromSubscription(auth.Value.SubscriptionKey, auth.Value.Region);
                    _speechTranslationConfig = SpeechTranslationConfig.FromSubscription(auth.Value.SubscriptionKey, auth.Value.Region);
                    SetProfanityOption(AzureProfanitySetting);
                    _speechTranslationConfig.SpeechSynthesisVoiceName = CurrentSpeakingVoice;

                    _availableServices.Add(AzureServiceType.Speech);
                }
            }
        }
        private void SetupTranscriptionAndTranslationService()
        {
            try
            {
                var lCognitiveKey    = _settings.AzureCognitiveKey;
                var lCognitiveRegion = _settings.AzureCognitiveRegion;

                _eventPublisher.Publish("MySTT Setup", $"Got region: {lCognitiveRegion}, key starting from: {lCognitiveKey??lCognitiveKey.Substring(0, lCognitiveKey.Length /2)}");

                this.mTransSpeechConfig = SpeechTranslationConfig.FromSubscription(lCognitiveKey, lCognitiveRegion);
                var fromLanguage = "en-US";
                var toLanguages  = new List<string> { "el-GR" };
                //var toLanguages = new List<string> { "ru-RU" };
                this.mTransSpeechConfig.SpeechRecognitionLanguage = fromLanguage;
                toLanguages.ForEach(this.mTransSpeechConfig.AddTargetLanguage);
                this.mInputStream = AudioInputStream.CreatePushStream(AudioStreamFormat.GetWaveFormatPCM(SAMPLESPERSECOND, BITSPERSAMPLE, NUMBEROFCHANNELS));

                this.mAudioConfig           = AudioConfig.FromStreamInput(this.mInputStream);
                this.mTranslationRecognizer = new TranslationRecognizer(this.mTransSpeechConfig, this.mAudioConfig);

                this.mTranslationRecognizer.Recognizing       += this.MSpeechRecognizer_Recognizing;
                this.mTranslationRecognizer.Recognized        += this.MSpeechRecognizer_Recognized;
                this.mTranslationRecognizer.SpeechEndDetected += this.MSpeechRecognizer_SpeechEndDetected;

                this.StartRecognisionIfNeeded();
            }
            catch (Exception ex)
            {
                _eventPublisher.Publish("MySTT Setup - Failed", $"Failed to initialize: {ex.Message}");
            }
        }
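Note that the push stream created in SetupTranscriptionAndTranslationService only delivers audio once the host application writes PCM into it. A minimal sketch of the feeding side, assuming raw 16-bit PCM buffers arrive from the app's capture pipeline (the method names here are illustrative):

        // Writes captured PCM into the push stream so the TranslationRecognizer
        // above receives audio; the buffers must match the AudioStreamFormat
        // declared when the stream was created.
        private void OnAudioCaptured(byte[] pcmBuffer)
        {
            this.mInputStream.Write(pcmBuffer);
        }

        // Closing the stream signals end-of-audio so recognition can finish.
        private void OnCaptureStopped()
        {
            this.mInputStream.Close();
        }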
Example #5
        public static async Task TranslateSpeechToText()
        {
            // Creates an instance of a speech translation config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechTranslationConfig.FromSubscription("311b76d8841344b6a277dbd8401611dc", "westus");

            // Sets source and target languages.
            // Replace with the languages of your choice, from list found here: https://aka.ms/speech/sttt-languages
            string fromLanguage = "en-US";

            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage("de");
            config.AddTargetLanguage("fr");

            // Creates a translation recognizer using the default microphone audio input device.
            using (var recognizer = new TranslationRecognizer(config))
            {
                // Starts translation, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed. The task returns the recognized text as well as the translation.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                // shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
                Console.WriteLine("Say something...");
                var result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.TranslatedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED '{fromLanguage}': {result.Text}");
                    foreach (var element in result.Translations)
                    {
                        Console.WriteLine($"TRANSLATED into '{element.Key}': {element.Value}");
                    }
                }
                else if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED '{fromLanguage}': {result.Text} (text could not be translated)");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
        public void Initialize(string subscriptionKey, string region)
        {
            subscriptionKey.EnsureIsNotNull(nameof(subscriptionKey));
            region.EnsureIsNotNull(nameof(region));

            speechConfiguration = SpeechTranslationConfig.FromSubscription(subscriptionKey, region);
            speechConfiguration.OutputFormat = OutputFormat.Detailed;
            SendMessage($"Created the SpeechConfiguration with {subscriptionKey} | {region}");
        }
Example #7
 public TranslationEngine(IConfiguration config, IHubContext<TranslationHub> hub)
 {
     _hub               = hub;
     _config            = config;
     _translationConfig = SpeechTranslationConfig.FromSubscription(_config["SUBSCRIPTION_KEY"], _config["REGION"]);
     _speechConfig      = SpeechTranslationConfig.FromSubscription(_config["SUBSCRIPTION_KEY"], _config["REGION"]);
     _audioInput        = AudioConfig.FromStreamInput(_inputStream);
     _audioOutputStream = AudioOutputStream.CreatePullStream();
     _output            = AudioConfig.FromStreamOutput(_audioOutputStream);
 }
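The constructor wires up a pull stream for synthesized output, but the draining side is not part of this snippet. A hedged sketch of reading it, assuming the caller supplies a destination stream (buffer size is arbitrary):

     // Drains synthesized audio from the pull stream created in the constructor.
     // PullAudioOutputStream.Read fills the buffer and returns the number of
     // bytes read; zero indicates the end of the stream.
     private void DrainSynthesizedAudio(System.IO.Stream destination)
     {
         var buffer = new byte[4096];
         uint bytesRead;
         while ((bytesRead = _audioOutputStream.Read(buffer)) > 0)
         {
             destination.Write(buffer, 0, (int)bytesRead);
         }
     }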
Example #8
        static async Task Main(string[] args)
        {
            try
            {
                // Get config settings from AppSettings
                IConfigurationBuilder builder       = new ConfigurationBuilder().AddJsonFile("appsettings.json");
                IConfigurationRoot    configuration = builder.Build();
                string cogSvcKey    = configuration["CognitiveServiceKey"];
                string cogSvcRegion = configuration["CognitiveServiceRegion"];


                // Set a dictionary of supported voices
                var voices = new Dictionary<string, string>
                {
                    ["fr"] = "fr-FR-Julie",
                    ["es"] = "es-ES-Laura",
                    ["hi"] = "hi-IN-Kalpana"
                };

                // Configure translation
                translationConfig = SpeechTranslationConfig.FromSubscription(cogSvcKey, cogSvcRegion);
                translationConfig.SpeechRecognitionLanguage = "en-US";
                Console.WriteLine("Ready to translate from " + translationConfig.SpeechRecognitionLanguage);


                string targetLanguage = "";
                while (targetLanguage != "quit")
                {
                    Console.WriteLine("\nEnter a target language\n fr = French\n es = Spanish\n hi = Hindi\n Enter anything else to stop\n");
                    targetLanguage = Console.ReadLine().ToLower();
                    // Check if the user has requested a language that this app supports
                    if (voices.ContainsKey(targetLanguage))
                    {
                        // Because the synthesised speech event only supports 1:1 translation, we'll remove any languages already in the translation config
                        if (translationConfig.TargetLanguages.Count > 0)
                        {
                            foreach (string language in translationConfig.TargetLanguages)
                            {
                                translationConfig.RemoveTargetLanguage(language);
                            }
                        }

                        // and add the requested one in
                        translationConfig.AddTargetLanguage(targetLanguage);
                        translationConfig.VoiceName = voices[targetLanguage];
                        await Translate(targetLanguage);
                    }
                    else
                    {
                        targetLanguage = "quit";
                    }
                }
            }
            catch (Exception ex) { Console.WriteLine(ex.Message); }
        }
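Main delegates the actual recognition to a Translate method that is not included in this excerpt. A plausible sketch of it, assuming single-shot recognition against the class-level translationConfig and console output (the original project's version may differ):

        static async Task Translate(string targetLanguage)
        {
            // Translates one utterance from the default microphone using the
            // translationConfig prepared in Main.
            using (var audioConfig = AudioConfig.FromDefaultMicrophoneInput())
            using (var recognizer = new TranslationRecognizer(translationConfig, audioConfig))
            {
                Console.WriteLine("Speak now...");
                var result = await recognizer.RecognizeOnceAsync();
                if (result.Reason == ResultReason.TranslatedSpeech)
                {
                    Console.WriteLine($"Translated: {result.Translations[targetLanguage]}");
                }
            }
        }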
Example #9
        static void Main()
        {
            var config = SpeechTranslationConfig.FromSubscription(ConfigurationManager.AppSettings.Get("TTSKey"), ConfigurationManager.AppSettings.Get("Region"));

            //RecognizeOnceSpeechAsync(config).Wait();
            //Translate.RecognizeLng().Wait();
            Translate.TranslationContinuousRecognitionAsync(config).Wait();
            //TTS(config).Wait();
            Console.WriteLine("Please press a key to continue.");
            Console.ReadLine();
        }
Example #10
        public void Initialize(string subscriptionKey, string region, InputSourceType inputSource, string wavFilename)
        {
            subscriptionKey.EnsureIsNotNull(nameof(subscriptionKey));
            region.EnsureIsNotNull(nameof(region));

            speechConfiguration = SpeechTranslationConfig.FromSubscription(subscriptionKey, region);
            speechConfiguration.OutputFormat = OutputFormat.Detailed;
            SendMessage($"Created the SpeechConfiguration with {subscriptionKey} | {region}");

            audioConfig = GetAudioConfig(inputSource, wavFilename);
        }
Example #11
        public async Task<ActionResult> translateAsync()
        {
            string fromLanguage = "en-US";


            const string GermanVoice = "Microsoft Server Speech Text to Speech Voice (de-DE, Hedda)";

            var config = SpeechTranslationConfig.FromSubscription("ae9492aae8044a4c888a45a45e957d83", "westus");

            config.SpeechRecognitionLanguage = fromLanguage;
            config.VoiceName = GermanVoice;


            config.AddTargetLanguage("de");


            using (var recognizer = new TranslationRecognizer(config))
            {
                recognizer.Recognizing += (s, e) =>
                {
                    foreach (var element in e.Result.Translations)
                    {
                        ViewBag.message = element.Value;
                    }
                };

                recognizer.Recognized += (s, e) =>
                {
                    if (e.Result.Reason == ResultReason.TranslatedSpeech)
                    {
                        foreach (var element in e.Result.Translations)
                        {
                            ViewBag.message = element.Value;
                        }
                    }
                    else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                    {
                        foreach (var element in e.Result.Translations)
                        {
                            ViewBag.message = element.Value;
                        }
                    }
                    else if (e.Result.Reason == ResultReason.NoMatch)
                    {
                    }
                };


                await recognizer.RecognizeOnceAsync();

                return(View("Index"));
            }
        }
        public static async Task Translate(string subscriptionKey, string region, string inputFilename, string fromLanguage, IEnumerable<string> targetLanguages, Voice voice, string outputFilename)
        {
            if (!outputFilename.EndsWith(".wav") && !outputFilename.EndsWith(".mp3"))
            {
                throw new ArgumentOutOfRangeException(paramName: nameof(outputFilename), message: "Output filename must have '.wav' or '.mp3' extension");
            }

            var config   = SpeechTranslationConfig.FromSubscription(subscriptionKey, region);
            var wavBytes = await GetWAVFromFile(inputFilename);

            await config.TranslationWithFileAsync(wavBytes, fromLanguage, targetLanguages, voice, outputFilename);
        }
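GetWAVFromFile and TranslationWithFileAsync are project-specific helpers that are not shown here. For a plain WAV input the SDK can read the file directly; a hedged alternative sketch:

        // Alternative sketch using the SDK's built-in WAV file reader instead of
        // the project-specific helpers above.
        public static async Task<TranslationRecognitionResult> TranslateWavAsync(SpeechTranslationConfig config, string inputFilename)
        {
            using (var audioInput = AudioConfig.FromWavFileInput(inputFilename))
            using (var recognizer = new TranslationRecognizer(config, audioInput))
            {
                return await recognizer.RecognizeOnceAsync();
            }
        }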
        /// <summary>
        /// Initializes the config object with subscription key and region
        /// Initializes the recognizer object with a TranslationRecognizer
        /// Subscribes the recognizer to recognition Event Handlers
        /// If recognition is running, starts a thread which stops the recognition
        /// </summary>
        private void CreateRecognizer()
        {
            this.config = SpeechTranslationConfig.FromSubscription(SubscriptionKey, Region);
            this.config.SpeechRecognitionLanguage = FromLanguage;
            this.config.VoiceName = voice;
            ToLanguages.ForEach(l => this.config.AddTargetLanguage(l));

            this.recognizer = new TranslationRecognizer(this.config);

            this.recognizer.Recognizing  += this.OnRecognizingEventHandler;
            this.recognizer.Recognized   += this.OnRecognizedEventHandler;
            this.recognizer.Synthesizing += this.OnSynthesizingEventHandler;
            this.recognizer.Canceled     += this.OnCanceledEventHandler;
        }
Example #14
    void Start()
    {
        if (outputText == null)
        {
            UnityEngine.Debug.LogError("outputText property is null! Assign a UI Text element to it.");
        }
        else if (recoButton == null)
        {
            _message = "recoButton property is null! Assign a UI Button to it.";
            UnityEngine.Debug.LogError(_message);
        }
        else
        {
            // Continue with normal initialization, Text and Button objects are present.
#if PLATFORM_ANDROID
            // Request to use the microphone, cf.
            // https://docs.unity3d.com/Manual/android-RequestingPermissions.html
            message = "Waiting for mic permission";
            if (!Permission.HasUserAuthorizedPermission(Permission.Microphone))
            {
                Permission.RequestUserPermission(Permission.Microphone);
            }
#elif PLATFORM_IOS
            if (!Application.HasUserAuthorization(UserAuthorization.Microphone))
            {
                Application.RequestUserAuthorization(UserAuthorization.Microphone);
            }
#else
            _micPermissionGranted = true;
            _message = "Click button to recognize speech";
#endif
            _config = SpeechTranslationConfig.FromSubscription(SubscriptionKey, SubscriptionRegion);
            _config.SpeechRecognitionLanguage = "es-US";
            _config.AddTargetLanguage("en-US");
            _pushStream              = AudioInputStream.CreatePushStream();
            _audioInput              = AudioConfig.FromStreamInput(_pushStream);
            _recognizer              = new TranslationRecognizer(_config, _audioInput);
            _recognizer.Recognizing += RecognizingHandler;
            _recognizer.Recognized  += RecognizedHandler;
            _recognizer.Canceled    += CanceledHandler;

            foreach (var device in Microphone.devices)
            {
                Debug.Log("DeviceName: " + device);
            }
        _audioSource = GameObject.Find("AudioSource").GetComponent<AudioSource>();
        }
    }
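Start() creates _pushStream but nothing in this snippet feeds it. In Unity the usual pattern is to convert captured float samples to 16-bit little-endian PCM and write them in; a sketch under that assumption (the method name is illustrative):

    // Converts Unity float samples (-1..1) to 16-bit PCM and pushes them into
    // the stream the recognizer reads from.
    private void PushAudioChunk(float[] samples)
    {
        var pcm = new byte[samples.Length * 2];
        for (int i = 0; i < samples.Length; i++)
        {
            short value = (short)(Mathf.Clamp(samples[i], -1f, 1f) * short.MaxValue);
            pcm[i * 2]     = (byte)(value & 0xFF);
            pcm[i * 2 + 1] = (byte)((value >> 8) & 0xFF);
        }
        _pushStream.Write(pcm);
    }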
Example #15
    private void CreateRecognizer()
    {

        this.config = SpeechTranslationConfig.FromSubscription(SubscriptionKey, Region);
        this.config.SpeechRecognitionLanguage = FromLanguage;
        this.config.VoiceName = voice;
        ToLanguages.ForEach(l => this.config.AddTargetLanguage(l));

        this.recognizer = new TranslationRecognizer(this.config);

        this.recognizer.Recognizing  += this.OnRecognizingEventHandler;
        this.recognizer.Recognized   += this.OnRecognizedEventHandler;
        this.recognizer.Synthesizing += this.OnSynthesizingEventHandler;
        this.recognizer.Canceled     += this.OnCanceledEventHandler;
    }
Example #16
        public async Task<(ResultReason, string)> ListenAsync()
        {
            var sourceLanguageConfigs = new SourceLanguageConfig[]
            {
                SourceLanguageConfig.FromLanguage("en-US"),
                SourceLanguageConfig.FromLanguage("it-IT")
            };
            var config = SpeechTranslationConfig.FromSubscription(Config.Key, Config.Region);
            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromSourceLanguageConfigs(sourceLanguageConfigs);

            using var recognizer = new SpeechRecognizer(config, autoDetectSourceLanguageConfig);
            var result = await recognizer.RecognizeOnceAsync();

            return result.Reason switch
            {
                ResultReason.RecognizedSpeech => (ResultReason.RecognizedSpeech, result.Text),
                _ => (ResultReason.NoMatch, null)
            };
        }
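When auto-detection is used as above, the matched source language can also be read off the result before deciding what to do with the text; a small hedged sketch:

        // Reports which source language auto-detection matched for a result.
        private static void LogDetectedLanguage(SpeechRecognitionResult result)
        {
            var detected = AutoDetectSourceLanguageResult.FromResult(result);
            Console.WriteLine($"Detected source language: {detected.Language}");
        }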
Example #17
        private void Init(string from, string to)
        {
            this.toLanguage = to;

            Profile       = MediaEncodingProfile.CreateWav(AudioEncodingQuality.Low);
            Profile.Audio = AudioEncodingProperties.CreatePcm(16000, 1, 16);

            byte channels         = 1;
            byte bitsPerSample    = 16;
            uint samplesPerSecond = 16000; // or 8000
            var  audioFormat      = AudioStreamFormat.GetWaveFormatPCM(samplesPerSecond, bitsPerSample, channels);

            // Init Push Stream

            pushStream = AudioInputStream.CreatePushStream(audioFormat);

            if (from == to)
            {
                var config = SpeechConfig.FromSubscription(apiKey, region);
                config.SpeechRecognitionLanguage = from;

                speechRecognizer = new SpeechRecognizer(config, AudioConfig.FromStreamInput(pushStream));

                speechRecognizer.Recognizing += RecognisingSpeechHandler;
                speechRecognizer.Recognized  += RecognisingSpeechHandler;

                speechRecognizer.SessionStarted += (sender, args) => this.RecognisionStarted?.Invoke();
                speechRecognizer.SessionStopped += (sender, args) => this.RecognisionStopped?.Invoke();
            }
            else
            {
                var config = SpeechTranslationConfig.FromSubscription(apiKey, region);
                config.SpeechRecognitionLanguage = from;
                config.AddTargetLanguage(to);

                translationRecognizer = new TranslationRecognizer(config, AudioConfig.FromStreamInput(pushStream));

                translationRecognizer.SessionStarted += (sender, args) => this.RecognisionStarted?.Invoke();
                translationRecognizer.SessionStopped += (sender, args) => this.RecognisionStopped?.Invoke();

                translationRecognizer.Recognizing += RecognisingTranslationHandler;
                translationRecognizer.Recognized  += RecognisingTranslationHandler;
            }
        }
Example #18
    public async void ButtonClick()
    {
        Debug.Log("Onclick fired");
        var translationConfig = SpeechTranslationConfig.FromSubscription(SpeechServiceSubscriptionKey, SpeechServiceRegion);

        translationConfig.SpeechRecognitionLanguage = "en-US";
        translationConfig.AddTargetLanguage("fr");

        using (var recognizer = new TranslationRecognizer(translationConfig))
        {
            Debug.Log("Creating recognizer");
            lock (threadLocker)
            {
                waitingforReco = true;
            }

            var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

            if (result.Reason == ResultReason.TranslatedSpeech)
            {
                recognizedString = result.Text;
                Debug.Log("Text: " + recognizedString);
                foreach (var element in result.Translations)
                {
                    translatedString = element.Value;
                }
            }
            else if (result.Reason == ResultReason.NoMatch)
            {
                recognizedString = "NOMATCH: Speech could not be recognized.";
            }
            else if (result.Reason == ResultReason.Canceled)
            {
                var cancellation = CancellationDetails.FromResult(result);
                recognizedString = $"CANCELED: Reason={cancellation.Reason} ErrorDetails={cancellation.ErrorDetails}";
            }

            lock (threadLocker)
            {
                waitingforReco = false;
            }
        }
    }
Example #19
    private async void StartRecognition()
    {
        if (isRecognitionStarted)
        {
            return;
        }
        Debug.Log("start recognition");

        string fromLang;
        string toLang;

        if (modeList.value == 0)
        {
            fromLang = "ja-JP";
            toLang   = "en";
        }
        else
        {
            fromLang = "en-US";
            toLang   = "ja";
        }
        Debug.Log("mode : " + fromLang + " -> " + toLang);

        var config = SpeechTranslationConfig.FromSubscription(apiKeyInputField.text, apiRegionInputField.text);

        config.SpeechRecognitionLanguage = fromLang;
        config.AddTargetLanguage(toLang);

        recognizer                      = new TranslationRecognizer(config);
        recognizer.Canceled            += CanceledHandler;
        recognizer.SessionStarted      += SessionStartedHandler;
        recognizer.SessionStopped      += SessionStoppedHandler;
        recognizer.SpeechStartDetected += SpeechStartDetectedHandler;
        recognizer.SpeechEndDetected   += SpeechEndDetectedHandler;
        recognizer.Recognizing         += RecognizingHandler;
        recognizer.Recognized          += RecognizedHandler;

        await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

        isRecognitionStarted      = true;
        isRecognitionStateChanged = true;
    }
Example #20
    void CreateTranslationRecognizer()
    {
        if (translator == null)
        {
            SpeechTranslationConfig config = SpeechTranslationConfig.FromSubscription(lunarcomController.SpeechServiceAPIKey, lunarcomController.SpeechServiceRegion);
            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage(toLanguage);

            translator = new TranslationRecognizer(config);

            if (translator != null)
            {
                translator.Recognizing    += HandleTranslatorRecognizing;
                translator.Recognized     += HandleTranslatorRecognized;
                translator.Canceled       += HandleTranslatorCanceled;
                translator.SessionStarted += HandleTranslatorSessionStarted;
                translator.SessionStopped += HandleTranslatorSessionStopped;
            }
        }
    }
Example #21
        private SpeechTranslationConfig createSpeechTranslationConfig(String logId, Key key, string sourceLanguage, List<string> languages)
        {
            SpeechTranslationConfig speechConfig = SpeechTranslationConfig.FromSubscription(key.ApiKey, key.Region);

            speechConfig.RequestWordLevelTimestamps();
            if (!IsSupportedRecognition(sourceLanguage))
            {
                _logger.LogError($"{logId}: !!!! Unknown recognition language ({sourceLanguage})! Recognition may fail ...");
            }
            speechConfig.SpeechRecognitionLanguage = sourceLanguage;

            _logger.LogInformation($"{logId}: Requested output languages: { String.Join(",", languages) }, source = ({sourceLanguage})");
            String shortCodeSource = sourceLanguage.Split('-')[0].ToLower();

            foreach (var language in languages)
            {
                String shortCodeTarget = language.Split('-')[0].ToLower();
                if (shortCodeSource == shortCodeTarget)
                {
                    continue;
                }
                if (IsSupportedTranslation(language))
                {
                    _logger.LogInformation($"{logId}: Adding target {language}");
                    speechConfig.AddTargetLanguage(language);
                }
                else
                {
                    _logger.LogWarning($"{logId}: Skipping unsupported target {language}");
                }
            }



            speechConfig.OutputFormat = OutputFormat.Detailed;
            return speechConfig;
        }
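RequestWordLevelTimestamps and OutputFormat.Detailed only pay off if the caller pulls the detailed payload from each result. A hedged sketch of retrieving it:

        // Retrieves the detailed JSON payload (including word-level timestamps)
        // that the Detailed output format attaches to each recognition result.
        private static string GetDetailedJson(RecognitionResult result)
        {
            return result.Properties.GetProperty(PropertyId.SpeechServiceResponse_JsonResult);
        }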
    /// <summary>
    /// Creates a class-level Translation Recognizer for a specific language using Azure credentials
    /// and hooks up lifecycle and recognition events. Translation can be enabled with one or more target
    /// languages translated simultaneously.
    /// </summary>
    void CreateTranslationRecognizer()
    {
        Debug.Log("Creating Translation Recognizer.");
        recognizedString = "Initializing speech recognition with translation, please wait...";

        if (translator == null)
        {
            SpeechTranslationConfig config = SpeechTranslationConfig.FromSubscription(SpeechServiceAPIKey, SpeechServiceRegion);
            config.SpeechRecognitionLanguage = fromLanguage;
            if (Languages1.captionText.text.Length > 0)
            {
                config.AddTargetLanguage(ExtractLanguageCode(Languages1.captionText.text));
            }
            if (Languages2.captionText.text.Length > 0)
            {
                config.AddTargetLanguage(ExtractLanguageCode(Languages2.captionText.text));
            }
            if (Languages3.captionText.text.Length > 0)
            {
                config.AddTargetLanguage(ExtractLanguageCode(Languages3.captionText.text));
            }
            translator = new TranslationRecognizer(config);

            if (translator != null)
            {
                translator.Recognizing         += RecognizingTranslationHandler;
                translator.Recognized          += RecognizedTranslationHandler;
                translator.SpeechStartDetected += SpeechStartDetectedHandler;
                translator.SpeechEndDetected   += SpeechEndDetectedHandler;
                translator.Canceled            += CanceledTranslationHandler;
                translator.SessionStarted      += SessionStartedHandler;
                translator.SessionStopped      += SessionStoppedHandler;
            }
        }
        Debug.Log("CreateTranslationRecognizer exit");
    }
Example #23
        static async Task CreateConversationAsync()
        {
            // Replace with your own subscription key and service region (e.g., "westus").
            string subscriptionKey = "YourSubscriptionKey";
            string region          = "YourServiceRegion";

            // Change this to match the language you are speaking. You can find the full list of supported
            // speech language codes here: https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support
            string fromLanguage = "en-US";

            // Change this to the language you would like to translate the transcriptions to. You can find
            // the full list of supported translation languages here: https://aka.ms/speech/sttt-languages
            string toLanguage = "de";

            // Set this to the display name you want for the conversation host
            string displayName = "The host";

            // Creates an instance of a speech config with specified subscription key and service region.
            var config = SpeechTranslationConfig.FromSubscription(subscriptionKey, region);

            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage(toLanguage);

            // Create the conversation object you'll need to manage the conversation
            using (var conversation = await Conversation.CreateConversationAsync(config).ConfigureAwait(false))
            {
                // Start the conversation so you and others can join
                await conversation.StartConversationAsync().ConfigureAwait(false);

                // Get the conversation ID. It will be up to your scenario to determine how this is shared
                // with other participants.
                string conversationId = conversation.ConversationId;
                Console.WriteLine($"CONVERSATION: Created a new conversation with ID '{conversationId}'");

                // At this point, you can use the conversation object to manage the conversation. For example,
                // to mute everyone else in the room you can call this method:
                await conversation.MuteAllParticipantsAsync().ConfigureAwait(false);

                // Configure which audio source you want to use. In this case we will use your default microphone
                var audioConfig = AudioConfig.FromDefaultMicrophoneInput();

                // Create the conversation translator you'll need to send audio, send IMs, and receive conversation events
                using (var conversationTranslator = new ConversationTranslator(audioConfig))
                {
                    // You should connect all the event handlers you need at this point
                    conversationTranslator.SessionStarted += (s, e) =>
                    {
                        Console.WriteLine($"SESSION STARTED: {e.SessionId}");
                    };
                    conversationTranslator.SessionStopped += (s, e) =>
                    {
                        Console.WriteLine($"SESSION STOPPED: {e.SessionId}");
                    };
                    conversationTranslator.Canceled += (s, e) =>
                    {
                        Console.WriteLine($"CANCELED: Reason={e.Reason}");
                        switch (e.Reason)
                        {
                        case CancellationReason.EndOfStream:
                            Console.WriteLine($"CANCELED: End of audio reached");
                            break;

                        case CancellationReason.Error:
                            Console.WriteLine($"CANCELED: ErrorCode= {e.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails= {e.ErrorDetails}");
                            break;
                        }
                    };
                    conversationTranslator.ConversationExpiration += (s, e) =>
                    {
                        Console.WriteLine($"CONVERSATION: Will expire in {e.ExpirationTime.TotalMinutes} minutes");
                    };
                    conversationTranslator.ParticipantsChanged += (s, e) =>
                    {
                        Console.Write("PARTICIPANTS: The following participant(s) have ");
                        switch (e.Reason)
                        {
                        case ParticipantChangedReason.JoinedConversation:
                            Console.Write("joined");
                            break;

                        case ParticipantChangedReason.LeftConversation:
                            Console.Write("left");
                            break;

                        case ParticipantChangedReason.Updated:
                            Console.Write("been updated");
                            break;
                        }

                        Console.WriteLine(":");

                        foreach (var participant in e.Participants)
                        {
                            Console.WriteLine($"\tPARTICIPANT: {participant.DisplayName}");
                        }
                    };
                    conversationTranslator.TextMessageReceived += (s, e) =>
                    {
                        Console.WriteLine($"TEXT MESSAGE: From '{e.Result.ParticipantId}': '{e.Result.Text}'");
                        foreach (var entry in e.Result.Translations)
                        {
                            Console.WriteLine($"\tTRANSLATED: '{entry.Key}': '{entry.Value}'");
                        }
                    };
                    conversationTranslator.Transcribed += (s, e) =>
                    {
                        Console.WriteLine($"TRANSCRIBED: From '{e.Result.ParticipantId}': '{e.Result.Text}'");
                        foreach (var entry in e.Result.Translations)
                        {
                            Console.WriteLine($"\tTRANSLATED: '{entry.Key}': '{entry.Value}'");
                        }
                    };
                    conversationTranslator.Transcribing += (s, e) =>
                    {
                        Console.WriteLine($"TRANSCRIBING: From '{e.Result.ParticipantId}': '{e.Result.Text}'");
                        foreach (var entry in e.Result.Translations)
                        {
                            Console.WriteLine($"\tTRANSLATED: '{entry.Key}': '{entry.Value}'");
                        }
                    };

                    // Join the conversation so you can start receiving events
                    await conversationTranslator.JoinConversationAsync(conversation, displayName).ConfigureAwait(false);

                    // You can now send an instant message to all other participants in the room
                    await conversationTranslator.SendTextMessageAsync("The instant message to send").ConfigureAwait(false);

                    // Start sending audio
                    await conversationTranslator.StartTranscribingAsync().ConfigureAwait(false);

                    // At this point, you should start receiving transcriptions for what you are saying using
                    // the default microphone. Press enter to stop audio capture
                    Console.WriteLine("Started transcribing. Press enter to stop");
                    while (Console.ReadKey(true).Key != ConsoleKey.Enter)
                    {
                    }

                    // Stop audio capture
                    await conversationTranslator.StopTranscribingAsync().ConfigureAwait(false);

                    // Leave the conversation. After this you will no longer receive events
                    await conversationTranslator.LeaveConversationAsync().ConfigureAwait(false);
                }

                // End the conversation
                await conversation.EndConversationAsync().ConfigureAwait(false);

                // Delete the conversation. Any other participants that are still in the conversation will be removed
                await conversation.DeleteConversationAsync().ConfigureAwait(false);
            }
        }
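The host above shares the conversation ID out of band. A second participant would join with the ConversationTranslator overload that takes the ID, nickname, and speech language directly; a sketch (no subscription key is needed on the joining side):

        static async Task JoinAsParticipantAsync(string conversationId)
        {
            // Joins an existing conversation using the ID shared by the host;
            // the nickname and language here are illustrative.
            var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            using (var conversationTranslator = new ConversationTranslator(audioConfig))
            {
                await conversationTranslator.JoinConversationAsync(conversationId, "A participant", "fr-FR").ConfigureAwait(false);
                await conversationTranslator.StartTranscribingAsync().ConfigureAwait(false);
            }
        }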
Example #24
                    // Translation from microphone.
                    public static async Task TranslationWithMicrophoneAsync()
                    {
                        // <TranslationWithMicrophoneAsync>
                        // Translation source language.
                        // Replace with a language of your choice.
                        string fromLanguage = "en-US";

                        // Voice name of synthesis output.
                        const string GermanVoice = "Microsoft Server Speech Text to Speech Voice (de-DE, Hedda)";

                        // Creates an instance of a speech translation config with specified subscription key and service region.
                        // Replace with your own subscription key and service region (e.g., "westus").
                        var config = SpeechTranslationConfig.FromSubscription("", "westus");

                        config.SpeechRecognitionLanguage = fromLanguage;
                        config.VoiceName = GermanVoice;

                        // Translation target language(s).
                        // Replace with language(s) of your choice.
                        config.AddTargetLanguage("de");

                        // Creates a translation recognizer using microphone as audio input.
                        using (var recognizer = new TranslationRecognizer(config))
                        {
                            // Subscribes to events.
                            recognizer.Recognizing += (s, e) =>
                            {
                                //Console.WriteLine($"RECOGNIZING in '{fromLanguage}': Text={e.Result.Text}");
                                //foreach (var element in e.Result.Translations)
                                //{
                                //    Console.WriteLine($"    TRANSLATING into '{element.Key}': {element.Value}");
                                //}
                            };

                            recognizer.Recognized += (s, e) =>
                            {
                                if (e.Result.Reason == ResultReason.TranslatedSpeech)
                                {
                                    Console.WriteLine($"\nRECOGNIZED in '{fromLanguage}': Text={e.Result.Text}");
                                    foreach (var element in e.Result.Translations)
                                    {
                                        Console.WriteLine($"TRANSLATED into '{element.Key}': {element.Value}");
                                    }
                                }
                                else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                                {
                                    //Console.WriteLine($"\nText={e.Result.Text}");
                                    //Console.WriteLine($"    Speech not translated.");
                                }
                                else if (e.Result.Reason == ResultReason.NoMatch)
                                {
                                    //Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                                }
                            };

                            recognizer.Synthesizing += (s, e) =>
                            {
                                //var audio = e.Result.GetAudio();
                                //Console.WriteLine(audio.Length != 0
                                //    ? $"AudioSize: {audio.Length}"
                                //    : $"AudioSize: {audio.Length} (end of synthesis data)");

                                //if (audio.Length > 0)
                                //{
                                //    #if NET461
                                //                            using (var m = new MemoryStream(audio))
                                //                            {
                                //                                SoundPlayer simpleSound = new SoundPlayer(m);
                                //                                simpleSound.PlaySync();
                                //                            }
                                //    #endif
                                //}
                            };

                            recognizer.Canceled += (s, e) =>
                            {
                                Console.WriteLine($"CANCELED: Reason={e.Reason}");

                                if (e.Reason == CancellationReason.Error)
                                {
                                    Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                                    Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                                }
                            };

                            recognizer.SessionStarted += (s, e) =>
                            {
                                Console.WriteLine("\nSession started event.");
                            };

                            recognizer.SessionStopped += (s, e) =>
                            {
                                Console.WriteLine("\nSession stopped event.");
                            };

                            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                            Console.WriteLine("Say something...");
                            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                            do
                            {
                                Console.WriteLine("Press Enter to stop");
                            } while (Console.ReadKey().Key != ConsoleKey.Enter);

                            // Stops continuous recognition.
                            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                        }
                        // </TranslationWithMicrophoneAsync>
                    }
Example #25
        public async Task<TranscriptUtterance> SpeechToTranslatedTextAsync(string audioUrl, string sourceLanguage, string targetLanguage)
        {
            Transcripts.Clear();

            TranscriptUtterance utterance = null;

            var config = SpeechTranslationConfig.FromSubscription(_subscriptionKey, _region);

            config.SpeechRecognitionLanguage = sourceLanguage;

            config.AddTargetLanguage(targetLanguage);

            var stopTranslation = new TaskCompletionSource<int>();

            using (var audioInput = await AudioUtils.DownloadWavFileAsync(audioUrl))
            {
                using (var recognizer = new TranslationRecognizer(config, audioInput))
                {
                    // Subscribes to events.
                    recognizer.Recognized += (s, e) => {
                        if (e.Result.Reason == ResultReason.TranslatedSpeech)
                        {
                            utterance = new TranscriptUtterance
                            {
                                Recognition = e.Result.Text,
                                Translation = e.Result.Translations.FirstOrDefault().Value,
                            };
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            Trace.TraceError($"NOMATCH: Speech could not be translated.");
                        }
                    };

                    recognizer.Canceled += (s, e) =>
                    {
                        if (e.Reason == CancellationReason.Error)
                        {
                            Trace.TraceError($"Failed to decode incoming text message: {e.ErrorDetails}");
                        }

                        stopTranslation.TrySetResult(0);
                    };

                    recognizer.SessionStopped += (s, e) => {
                        Trace.TraceInformation("Session stopped event.");
                        stopTranslation.TrySetResult(0);
                    };

                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopTranslation.Task });

                    // Stops translation.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);

                    return utterance;
                }
            }
        }
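AudioUtils.DownloadWavFileAsync is a project helper whose implementation is not shown. One plausible shape for it, assuming the URL points at a WAV file (the HttpClient usage and temp-file approach are assumptions of this sketch):

        // Hedged sketch of the AudioUtils.DownloadWavFileAsync helper used above:
        // downloads the WAV to a temp file and hands it to the SDK's file reader.
        public static async Task<AudioConfig> DownloadWavFileAsync(string audioUrl)
        {
            using (var http = new System.Net.Http.HttpClient())
            {
                var bytes = await http.GetByteArrayAsync(audioUrl);
                var path = System.IO.Path.Combine(System.IO.Path.GetTempPath(), System.IO.Path.GetRandomFileName() + ".wav");
                System.IO.File.WriteAllBytes(path, bytes);
                return AudioConfig.FromWavFileInput(path);
            }
        }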
        public async Task<string> TranslationWithAudioStreamAsync(Stream audioStream, string fromLanguage = "en-US", string targetLanguage = "en-US")
        {
            // Creates an instance of a speech translation config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechTranslationConfig.FromSubscription(this.subscriptionKey, this.region);

            config.SpeechRecognitionLanguage = fromLanguage;

            // Translation target language(s).
            // Replace with language(s) of your choice.
            config.AddTargetLanguage(targetLanguage);

            var stopTranslation = new TaskCompletionSource<int>();

            string translateResult = null;

            // Create an audio stream from a wav file.
            // Replace with your own audio file name.
            using (var audioInput = OpenWavFile(audioStream))
            {
                // Creates a translation recognizer using audio stream as input.
                using (var recognizer = new TranslationRecognizer(config, audioInput))
                {
                    // Subscribes to events.
                    recognizer.Recognizing += (s, e) =>
                    {
                        log.LogInformation($"RECOGNIZING in '{fromLanguage}': Text = {e.Result.Text}");
                        foreach (var element in e.Result.Translations)
                        {
                            log.LogInformation($"    TRANSLATING into '{element.Key}': {element.Value}");
                        }
                    };

                    recognizer.Recognized += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.TranslatedSpeech)
                        {
                            log.LogInformation($"RECOGNIZED in '{fromLanguage}': Text={e.Result.Text}");
                            foreach (var element in e.Result.Translations)
                            {
                                log.LogInformation($"    TRANSLATED into '{element.Key}': {element.Value}");
                                translateResult = element.Value;
                            }
                        }
                        else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            log.LogInformation($"RECOGNIZED: Text={e.Result.Text}");
                            log.LogInformation($"    Speech not translated.");
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            log.LogInformation($"NOMATCH: Speech could not be recognized.");
                        }
                    };

                    recognizer.Canceled += (s, e) =>
                    {
                        log.LogInformation($"CANCELED: Reason={e.Reason}");

                        if (e.Reason == CancellationReason.Error)
                        {
                            log.LogInformation($"CANCELED: ErrorCode={e.ErrorCode}");
                            log.LogInformation($"CANCELED: ErrorDetails={e.ErrorDetails}");
                            log.LogInformation($"CANCELED: Did you update the subscription info?");
                        }

                        stopTranslation.TrySetResult(0);
                    };

                    recognizer.SpeechStartDetected += (s, e) =>
                    {
                        log.LogInformation("\nSpeech start detected event.");
                    };

                    recognizer.SpeechEndDetected += (s, e) =>
                    {
                        log.LogInformation("\nSpeech end detected event.");
                    };

                    recognizer.SessionStarted += (s, e) =>
                    {
                        log.LogInformation("\nSession started event.");
                    };

                    recognizer.SessionStopped += (s, e) =>
                    {
                        log.LogInformation($"\nSession stopped event.");
                        log.LogInformation($"\nStop translation.");
                        stopTranslation.TrySetResult(0);
                    };

                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    log.LogInformation("Start translation...");
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopTranslation.Task });

                    // Stops translation.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);

                    return translateResult;
                }
            }
        }
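OpenWavFile is the helper that adapts a .NET stream for the SDK. A minimal hedged sketch that skips a canonical 44-byte RIFF header and pushes the PCM payload (assumes the payload matches the push stream's default format, 16 kHz 16-bit mono):

        private static AudioConfig OpenWavFile(Stream audioStream)
        {
            // Skip the canonical 44-byte RIFF/WAVE header; a robust version
            // would parse the header instead of assuming its size.
            audioStream.Seek(44, SeekOrigin.Begin);

            var pushStream = AudioInputStream.CreatePushStream();
            var buffer = new byte[4096];
            int read;
            while ((read = audioStream.Read(buffer, 0, buffer.Length)) > 0)
            {
                // Write only the bytes actually read from this chunk.
                pushStream.Write(buffer, read);
            }
            pushStream.Close();
            return AudioConfig.FromStreamInput(pushStream);
        }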
Example #27
        private async Task StartSpeechTranslation()
        {
            try
            {
                if (isTranslationListening || string.IsNullOrEmpty(settings.SpeechKey))
                {
                    return;
                }

                isTranslationListening = true;
                // Creates an instance of a speech factory with specified subscription key and service region.
                // Replace with your own subscription key and service region (e.g., "westus").
                var config = SpeechTranslationConfig.FromSubscription(settings.SpeechKey, settings.SpeechRegion);
                config.SpeechRecognitionLanguage = "en-US";

                translationStopRecognition = new TaskCompletionSource<int>();

                Random rand     = new Random();
                string language = textLanguges.ElementAt(rand.Next(textLanguges.Keys.Count())).Key;

                config.AddTargetLanguage(language);
                using (var recognizer = new TranslationRecognizer(config))
                {
                    // Subscribes to events.
                    recognizer.Recognizing += (s, e) =>
                    {
                        try
                        {
                            Debug.WriteLine($"Message received {e.Result.Text}");
                            string languageLong = textLanguges[e.Result.Translations.First().Key];
                            UpdateTranslationUI($"English: {e.Result.Text}", $"{languageLong}: {e.Result.Translations.First().Value}");
                        }
                        catch (Exception)
                        {
                            // let it go
                        }
                    };

                    recognizer.Recognized += (s, e) =>
                    {
                        var result = e.Result;
                    };

                    recognizer.Canceled += (s, e) =>
                    {
                        //NotifyUser($"An error occurred. Please step in front of camera to reactivate.");
                        isTranslationListening = false;
                        translationStopRecognition.TrySetResult(0);
                    };

                    recognizer.SessionStopped += (s, e) =>
                    {
                        //NotifyUser($"\n    Session event. Event: {e.EventType.ToString()}.");
                        // Stops recognition when session stop is detected.

                        //NotifyUser($"\nStop recognition.");
                        isTranslationListening = false;
                        translationStopRecognition.TrySetResult(0);
                    };

                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    UpdateTranslationUI($"Warming Up Translation", "");
                    await Task.Delay(3500);

                    UpdateTranslationUI($"Say Hi!", "");

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { translationStopRecognition.Task });
                    //NotifyUser($"Stopped listenint");

                    isTranslationListening = false;

                    // Stops recognition.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
            catch (Exception ex)
            {
                // Exception caught let it go!
            }
        }
Example #28
        private async void SpeechTranslationFromMicrophone_ButtonClicked(object sender, RoutedEventArgs e)
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechTranslationConfig.FromSubscription("YourSubscriptionKey", "YourSubcriptionRegion");

            // Sets source and target languages.
            string fromLanguage = "en-US";

            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage("de");

            try
            {
                // Creates a speech recognizer using microphone as audio input.
                using (var recognizer = new TranslationRecognizer(config))
                {
                    // The TaskCompletionSource to stop recognition.
                    var stopRecognition = new TaskCompletionSource<int>();

                    // Subscribes to events.
                    recognizer.Recognizing += (s, ee) =>
                    {
                        NotifyUser($"RECOGNIZING in '{fromLanguage}': Text={ee.Result.Text}", NotifyType.StatusMessage);
                        foreach (var element in ee.Result.Translations)
                        {
                            NotifyUser($"    TRANSLATING into '{element.Key}': {element.Value}", NotifyType.StatusMessage);
                        }
                    };

                    recognizer.Recognized += (s, ee) =>
                    {
                        if (ee.Result.Reason == ResultReason.TranslatedSpeech)
                        {
                            NotifyUser($"\nFinal result: Reason: {ee.Result.Reason.ToString()}, recognized text in {fromLanguage}: {ee.Result.Text}.", NotifyType.StatusMessage);
                            foreach (var element in ee.Result.Translations)
                            {
                                NotifyUser($"    TRANSLATING into '{element.Key}': {element.Value}", NotifyType.StatusMessage);
                            }
                        }
                    };

                    recognizer.Canceled += (s, ee) =>
                    {
                        NotifyUser($"\nRecognition canceled. Reason: {ee.Reason}; ErrorDetails: {ee.ErrorDetails}", NotifyType.StatusMessage);
                    };

                    recognizer.SessionStarted += (s, ee) =>
                    {
                        NotifyUser("\nSession started event.", NotifyType.StatusMessage);
                    };

                    recognizer.SessionStopped += (s, ee) =>
                    {
                        NotifyUser("\nSession stopped event.", NotifyType.StatusMessage);
                        stopRecognition.TrySetResult(0);
                    };

                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopRecognition.Task });

                    // Stops continuous recognition.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
            catch (Exception ex)
            {
                NotifyUser($"{ex.ToString()}", NotifyType.ErrorMessage);
            }
        }
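Example #28 ships with placeholder credentials in source. As a hedged alternative (a sketch of ours; SPEECH_KEY and SPEECH_REGION are invented environment-variable names, not part of the sample), the config can be built from the environment so the key never lands in source control:

using System;
using Microsoft.CognitiveServices.Speech.Translation;

static class SpeechConfigFactory
{
    // Hypothetical helper: reads the subscription key and region from
    // environment variables instead of hard-coding them.
    public static SpeechTranslationConfig FromEnvironment()
    {
        var key = Environment.GetEnvironmentVariable("SPEECH_KEY")
            ?? throw new InvalidOperationException("SPEECH_KEY is not set.");
        var region = Environment.GetEnvironmentVariable("SPEECH_REGION")
            ?? throw new InvalidOperationException("SPEECH_REGION is not set.");
        return SpeechTranslationConfig.FromSubscription(key, region);
    }
}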
Example #29
        public async Task SpeechTranslationWithMicrophoneAsync()
        {
            // Translation source language.
            var    selectedRecordingLanguage = RecordingLanguagesPicker.SelectedItem as RecordingLanguage;
            string fromLanguage = selectedRecordingLanguage.Locale; // e.g., "ja-JP"

            // Creates an instance of a speech translation config with specified subscription key and service region.
            string speechSubscriptionKey    = AppSettingsManager.Settings["SpeechSubscriptionKey"];
            string speechSubscriptionRegion = AppSettingsManager.Settings["SpeechSubscriptionRegion"];
            var    config = SpeechTranslationConfig.FromSubscription(speechSubscriptionKey, speechSubscriptionRegion);

            config.SpeechRecognitionLanguage = fromLanguage;

            // Translation target language(s).
            config.AddTargetLanguage("en-US");

            // Creates a translation recognizer using microphone as audio input.
            using (var recognizer = new TranslationRecognizer(config))
            {
                // Subscribes to events.
                recognizer.Recognizing += (s, e) =>
                {
                    Console.WriteLine($"RECOGNIZING in '{fromLanguage}': Text={e.Result.Text}");
                    foreach (var element in e.Result.Translations)
                    {
                        Console.WriteLine($"    TRANSLATING into '{element.Key}': {element.Value}");
                        UpdateRecognizingText(element.Value);
                    }
                };

                recognizer.Recognized += (s, e) =>
                {
                    if (e.Result.Reason == ResultReason.TranslatedSpeech)
                    {
                        //Console.WriteLine($"RECOGNIZED in '{fromLanguage}': Text={e.Result.Text}");
                        foreach (var element in e.Result.Translations)
                        {
                            Console.WriteLine($"    TRANSLATED into '{element.Key}': {element.Value}");
                            UpdateRecognizedText(element.Value);
                        }
                    }

                    /*
                     * // Triggered when text is recognized but cannot be translated.
                     * else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                     * {
                     *     Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                     *     Console.WriteLine($"    Speech not translated.");
                     * }
                     * else if (e.Result.Reason == ResultReason.NoMatch)
                     * {
                     *     Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                     * }
                     */
                };

                recognizer.Canceled += (s, e) =>
                {
                    Console.WriteLine($"CANCELED: Reason={e.Reason}");

                    if (e.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                };

                recognizer.SessionStarted += (s, e) =>
                {
                    Console.WriteLine("\nSession started event.");
                };

                recognizer.SessionStopped += (s, e) =>
                {
                    Console.WriteLine("\nSession stopped event.");
                };

                // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                // Recognize until the stop button is pressed.
                Recording = true;
                while (Recording)
                {
                    // Yield instead of busy-waiting so the thread stays responsive.
                    await Task.Delay(100);
                }

                // Stops continuous recognition.
                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }
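The polling loop above works once it yields, but a push-based variant avoids polling altogether. Below is a sketch under that assumption (RecordingController and its members are invented names, not part of the sample): the stop button completes a TaskCompletionSource that the recognition method awaits in place of the while loop, just before calling StopContinuousRecognitionAsync().

using System.Threading.Tasks;

class RecordingController
{
    private readonly TaskCompletionSource<bool> _stopSignal =
        new TaskCompletionSource<bool>();

    // Awaited by the method that started continuous recognition.
    public Task WaitForStopAsync() => _stopSignal.Task;

    // Wired to the stop button's click handler.
    public void Stop() => _stopSignal.TrySetResult(true);
}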
        // Translation using file input.
        public static async Task TranslationWithFileAsync()
        {
            // <TranslationWithFileAsync>
            // Translation source language.
            // Replace with a language of your choice.
            string fromLanguage = "en-US";

            // Creates an instance of a speech translation config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechTranslationConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            config.SpeechRecognitionLanguage = fromLanguage;

            // Translation target language(s).
            // Replace with language(s) of your choice.
            config.AddTargetLanguage("de");
            config.AddTargetLanguage("fr");

            var stopTranslation = new TaskCompletionSource<int>();

            // Creates a translation recognizer using file as audio input.
            // Replace with your own audio file name.
            using (var audioInput = AudioConfig.FromWavFileInput(@"whatstheweatherlike.wav"))
            {
                using (var recognizer = new TranslationRecognizer(config, audioInput))
                {
                    // Subscribes to events.
                    recognizer.Recognizing += (s, e) =>
                    {
                        Console.WriteLine($"RECOGNIZING in '{fromLanguage}': Text={e.Result.Text}");
                        foreach (var element in e.Result.Translations)
                        {
                            Console.WriteLine($"    TRANSLATING into '{element.Key}': {element.Value}");
                        }
                    };

                    recognizer.Recognized += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.TranslatedSpeech)
                        {
                            Console.WriteLine($"RECOGNIZED in '{fromLanguage}': Text={e.Result.Text}");
                            foreach (var element in e.Result.Translations)
                            {
                                Console.WriteLine($"    TRANSLATED into '{element.Key}': {element.Value}");
                            }
                        }
                        else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                            Console.WriteLine($"    Speech not translated.");
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                        }
                    };

                    recognizer.Canceled += (s, e) =>
                    {
                        Console.WriteLine($"CANCELED: Reason={e.Reason}");

                        if (e.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }

                        stopTranslation.TrySetResult(0);
                    };

                    recognizer.SpeechStartDetected += (s, e) =>
                    {
                        Console.WriteLine("\nSpeech start detected event.");
                    };

                    recognizer.SpeechEndDetected += (s, e) =>
                    {
                        Console.WriteLine("\nSpeech end detected event.");
                    };

                    recognizer.SessionStarted += (s, e) =>
                    {
                        Console.WriteLine("\nSession started event.");
                    };

                    recognizer.SessionStopped += (s, e) =>
                    {
                        Console.WriteLine("\nSession stopped event.");
                        Console.WriteLine("\nStop translation.");
                        stopTranslation.TrySetResult(0);
                    };

                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    Console.WriteLine("Start translation...");
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopTranslation.Task });

                    // Stops translation.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
            // </TranslationWithFileAsync>
        }
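A minimal entry point for the file-based sample, assuming TranslationWithFileAsync is a member of the same class and whatstheweatherlike.wav sits in the working directory:

using System.Threading.Tasks;

class Program
{
    static async Task Main()
    {
        // Runs the file-based translation sample above; replace the
        // placeholder subscription key and region inside it first.
        await TranslationWithFileAsync();
    }
}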