public async Task StartSpeechRecognitionAsync()
        {
            SpeechTranslationConfig config = GetRecognizerConfig();

            if (config == null)
            {
                return;
            }

            ResetState();
            DisposeRecognizer();

            DeviceInformation microphoneInput = await Util.GetDeviceInformation(DeviceClass.AudioCapture, SettingsHelper.Instance.MicrophoneName);

            // The AudioConfig must stay alive for the lifetime of the recognizer:
            // continuous recognition keeps running after this method returns, so it
            // cannot be wrapped in a using block here.
            AudioConfig audioConfig = microphoneInput != null ? AudioConfig.FromMicrophoneInput(microphoneInput.Id) : null;

            translationRecognizer = audioConfig != null ? new TranslationRecognizer(config, audioConfig) : new TranslationRecognizer(config);
            translationRecognizer.Recognizing    += OnTranslateRecognizing;
            translationRecognizer.Recognized     += OnTranslateRecognized;
            translationRecognizer.Canceled       += OnTranslateCanceled;
            translationRecognizer.SessionStarted += (s, e) =>
            {
                recognizeCancellationTokenSource = new CancellationTokenSource();
            };

            await translationRecognizer.StartContinuousRecognitionAsync();
        }
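The example above relies on class-level helpers that are not shown. A minimal sketch of DisposeRecognizer, assuming the translationRecognizer and recognizeCancellationTokenSource fields used above (hypothetical, not the original implementation):

        // Hypothetical cleanup helper: unhook events and dispose the previous
        // recognizer before a new one is created.
        private void DisposeRecognizer()
        {
            if (translationRecognizer != null)
            {
                translationRecognizer.Recognizing    -= OnTranslateRecognizing;
                translationRecognizer.Recognized     -= OnTranslateRecognized;
                translationRecognizer.Canceled       -= OnTranslateCanceled;
                translationRecognizer.Dispose();
                translationRecognizer = null;
            }

            recognizeCancellationTokenSource?.Dispose();
            recognizeCancellationTokenSource = null;
        }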
        private void SetupTranscriptionAndTranslationService()
        {
            try
            {
                var lCognitiveKey    = _settings.AzureCognitiveKey;
                var lCognitiveRegion = _settings.AzureCognitiveRegion;

                _eventPublisher.Publish("MySTT Setup", $"Got region: {lCognitiveRegion}, key starting from: {lCognitiveKey??lCognitiveKey.Substring(0, lCognitiveKey.Length /2)}");

                this.mTransSpeechConfig = SpeechTranslationConfig.FromSubscription(lCognitiveKey, lCognitiveRegion);
                var fromLanguage = "en-US";
                var toLanguages  = new List<string> { "el-GR" };
                //var toLanguages = new List<string> { "ru-RU" };
                this.mTransSpeechConfig.SpeechRecognitionLanguage = fromLanguage;
                toLanguages.ForEach(this.mTransSpeechConfig.AddTargetLanguage);
                this.mInputStream = AudioInputStream.CreatePushStream(AudioStreamFormat.GetWaveFormatPCM(SAMPLESPERSECOND, BITSPERSAMPLE, NUMBEROFCHANNELS));

                this.mAudioConfig           = AudioConfig.FromStreamInput(this.mInputStream);
                this.mTranslationRecognizer = new TranslationRecognizer(this.mTransSpeechConfig, this.mAudioConfig);

                this.mTranslationRecognizer.Recognizing       += this.MSpeechRecognizer_Recognizing;
                this.mTranslationRecognizer.Recognized        += this.MSpeechRecognizer_Recognized;
                this.mTranslationRecognizer.SpeechEndDetected += this.MSpeechRecognizer_SpeechEndDetected;

                this.StartRecognisionIfNeeded();
            }
            catch (Exception ex)
            {
                _eventPublisher.Publish("MySTT Setup - Failed", $"Failed to initialize: {ex.Message}");
            }
        }
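This setup reads audio from a push stream rather than a microphone device, so the host application must feed PCM buffers into mInputStream itself. A minimal sketch, assuming mInputStream is typed as PushAudioInputStream and that 16-bit PCM buffers matching SAMPLESPERSECOND/BITSPERSAMPLE/NUMBEROFCHANNELS arrive from some capture callback (the method name is hypothetical):

        // Hypothetical feed method: PushAudioInputStream.Write delivers raw PCM
        // bytes to the recognizer created in SetupTranscriptionAndTranslationService.
        public void OnAudioCaptured(byte[] pcmBuffer, int bytesRecorded)
        {
            if (this.mInputStream != null && bytesRecorded > 0)
            {
                this.mInputStream.Write(pcmBuffer, bytesRecorded);
            }
        }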
Example #3
        public AzureCognitiveService(IDictionary<AzureServiceType, AzureServiceAuthorization> servicesAuthorization, ISDKLogger logger)
        {
            _logger = logger;
            _servicesAuthorization = servicesAuthorization;

            foreach (KeyValuePair<AzureServiceType, AzureServiceAuthorization> auth in _servicesAuthorization)
            {
                if (auth.Key == AzureServiceType.ComputerVision)
                {
                    _computerVisionClient = new ComputerVisionClient(
                        new ApiKeyServiceClientCredentials(auth.Value.SubscriptionKey),
                        new System.Net.Http.DelegatingHandler[] { });
                    _computerVisionClient.Endpoint = auth.Value.Endpoint;

                    _availableServices.Add(AzureServiceType.ComputerVision);
                }
                else if (auth.Key == AzureServiceType.Speech)
                {
                    _speechConfig            = SpeechConfig.FromSubscription(auth.Value.SubscriptionKey, auth.Value.Region);
                    _speechTranslationConfig = SpeechTranslationConfig.FromSubscription(auth.Value.SubscriptionKey, auth.Value.Region);
                    SetProfanityOption(AzureProfanitySetting);
                    _speechTranslationConfig.SpeechSynthesisVoiceName = CurrentSpeakingVoice;

                    _availableServices.Add(AzureServiceType.Speech);
                }
            }
        }
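A plausible shape for the AzureServiceAuthorization entries this constructor consumes (hypothetical; the real type lives elsewhere in the repository):

        // Hypothetical authorization record exposing the members used above.
        public class AzureServiceAuthorization
        {
            public AzureServiceType ServiceType { get; set; }
            public string SubscriptionKey { get; set; }
            public string Region { get; set; }
            public string Endpoint { get; set; }
        }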
        private SpeechTranslationConfig GetRecognizerConfig()
        {
            var translationLanguageCodes = new List<string>();

            if (this.firstTranslateLanguageCombobox.SelectedValue is SupportedLanguage firstLanguage)
            {
                translationLanguageCodes.Add(firstLanguage.Code);
            }
            if (this.secondTranslateLanguageCombobox.SelectedValue is SupportedLanguage secondLanguage)
            {
                translationLanguageCodes.Add(secondLanguage.Code);
            }
            if (!(this.inputLanguageCombobox.SelectedValue is SupportedLanguage language) || !translationLanguageCodes.Any())
            {
                return null;
            }

            var speechTranslationConfig = SpeechTranslationConfig.FromEndpoint(GetSpeechTranslationEndpoint(SettingsHelper.Instance.SpeechApiEndpoint), SettingsHelper.Instance.SpeechApiKey);

            speechTranslationConfig.SpeechRecognitionLanguage = language.Code;
            foreach (string code in translationLanguageCodes)
            {
                speechTranslationConfig.AddTargetLanguage(code);
            }
            return speechTranslationConfig;
        }
Example #5
        private SpeechTranslationConfig GetSpeechClient()
        {
            var subscriptionKey = configurationReader.GetKey();
            var region          = configurationReader.GetRegion();

            return SpeechTranslationConfig.FromSubscription(subscriptionKey, region);
        }
Example #6
        public static async Task <String> TranslationContinuousRecognitionAsync()
        {
            var    config       = SpeechTranslationConfig.FromSubscription("a8c500c9ef9f421e977486a17f0adcca", "westeurope");
            string fromLanguage = "pl-PL";

            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage("pl");
            string result = "";
            // Sets voice name of synthesis output.
            const string PolishVoice = "pl-PL";

            config.VoiceName = PolishVoice;
            // Creates a translation recognizer using microphone as audio input.
            using (var recognizer = new TranslationRecognizer(config))
            {
                // Subscribes to events.


                recognizer.Recognized += async(s, e) =>
                {
                    if (e.Result.Reason == ResultReason.TranslatedSpeech)
                    {
                        await recognizer.StopContinuousRecognitionAsync();

                        result = e.Result.Text;
                    }
                };
                return(result);
            }
        }
Example #7
 private void OnDestroy()
 {
     if (this.config != null)
     {
         this.recognizer?.Dispose();
         this.config = null;
     }
 }
        public void Initialize(string subscriptionKey, string region)
        {
            subscriptionKey.EnsureIsNotNull(nameof(subscriptionKey));
            region.EnsureIsNotNull(nameof(region));

            speechConfiguration = SpeechTranslationConfig.FromSubscription(subscriptionKey, region);
            speechConfiguration.OutputFormat = OutputFormat.Detailed;
            SendMessage($"Created the SpeechConfiguration with {subscriptionKey} | {region}");
        }
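EnsureIsNotNull is a custom guard extension, not part of the Speech SDK. A minimal sketch of what it might look like:

        // Hypothetical guard extension assumed by Initialize.
        public static class GuardExtensions
        {
            public static void EnsureIsNotNull(this object value, string paramName)
            {
                if (value == null)
                {
                    throw new ArgumentNullException(paramName);
                }
            }
        }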
Example #9
        public static async Task TranslateSpeechToText()
        {
            // Creates an instance of a speech translation config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechTranslationConfig.FromSubscription("311b76d8841344b6a277dbd8401611dc", "westus");

            // Sets source and target languages.
            // Replace with the languages of your choice, from list found here: https://aka.ms/speech/sttt-languages
            string fromLanguage = "en-US";

            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage("de");
            config.AddTargetLanguage("fr");

            // Creates a translation recognizer using the default microphone audio input device.
            using (var recognizer = new TranslationRecognizer(config))
            {
                // Starts translation, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed. The task returns the recognized text as well as the translation.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                // shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
                Console.WriteLine("Say something...");
                var result = await recognizer.RecognizeOnceAsync();

                // Checks result.
                if (result.Reason == ResultReason.TranslatedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED '{fromLanguage}': {result.Text}");
                    foreach (var element in result.Translations)
                    {
                        Console.WriteLine($"TRANSLATED into '{element.Key}': {element.Value}");
                    }
                }
                else if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED '{fromLanguage}': {result.Text} (text could not be translated)");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
        }
Example #10
 public TranslationEngine(IConfiguration config, IHubContext<TranslationHub> hub)
 {
     _hub               = hub;
     _config            = config;
     _translationConfig = SpeechTranslationConfig.FromSubscription(_config["SUBSCRIPTION_KEY"], _config["REGION"]);
     _speechConfig      = SpeechTranslationConfig.FromSubscription(_config["SUBSCRIPTION_KEY"], _config["REGION"]);
     _audioInput        = AudioConfig.FromStreamInput(_inputStream);
     _audioOutputStream = AudioOutputStream.CreatePullStream();
     _output            = AudioConfig.FromStreamOutput(_audioOutputStream);
 }
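The constructor relies on fields initialized elsewhere; in particular, _inputStream must exist before AudioConfig.FromStreamInput is called. A sketch of the declarations it presupposes (types inferred from usage, not the original source):

 // Hypothetical field declarations backing the constructor above.
 private readonly IHubContext<TranslationHub> _hub;
 private readonly IConfiguration _config;
 private readonly SpeechTranslationConfig _translationConfig;
 private readonly SpeechTranslationConfig _speechConfig;
 private readonly PushAudioInputStream _inputStream = AudioInputStream.CreatePushStream();
 private readonly AudioConfig _audioInput;
 private readonly PullAudioOutputStream _audioOutputStream;
 private readonly AudioConfig _output;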
Example #11
        static async Task Main(string[] args)
        {
            try
            {
                // Get config settings from AppSettings
                IConfigurationBuilder builder       = new ConfigurationBuilder().AddJsonFile("appsettings.json");
                IConfigurationRoot    configuration = builder.Build();
                string cogSvcKey    = configuration["CognitiveServiceKey"];
                string cogSvcRegion = configuration["CognitiveServiceRegion"];


                // Set a dictionary of supported voices
                var voices = new Dictionary<string, string>
                {
                    ["fr"] = "fr-FR-Julie",
                    ["es"] = "es-ES-Laura",
                    ["hi"] = "hi-IN-Kalpana"
                };

                // Configure translation
                translationConfig = SpeechTranslationConfig.FromSubscription(cogSvcKey, cogSvcRegion);
                translationConfig.SpeechRecognitionLanguage = "en-US";
                Console.WriteLine("Ready to translate from " + translationConfig.SpeechRecognitionLanguage);


                string targetLanguage = "";
                while (targetLanguage != "quit")
                {
                    Console.WriteLine("\nEnter a target language\n fr = French\n es = Spanish\n hi = Hindi\n Enter anything else to stop\n");
                    targetLanguage = Console.ReadLine()?.ToLower() ?? "quit";
                    // Check if the user has requested a language that this app supports
                    if (voices.ContainsKey(targetLanguage))
                    {
                        // Because the synthesised speech event only supports 1:1 translation,
                        // we'll remove any languages already in the translation config.
                        // Iterate over a copy so that removing a language does not modify
                        // the collection being enumerated.
                        foreach (string language in translationConfig.TargetLanguages.ToList())
                        {
                            translationConfig.RemoveTargetLanguage(language);
                        }

                        // and add the requested one in
                        translationConfig.AddTargetLanguage(targetLanguage);
                        translationConfig.VoiceName = voices[targetLanguage];
                        await Translate(targetLanguage);
                    }
                    else
                    {
                        targetLanguage = "quit";
                    }
                }
            }
            catch (Exception ex) { Console.WriteLine(ex.Message); }
        }
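The loop above delegates to a Translate helper that is not shown. A minimal sketch consistent with the class-level translationConfig and the voice selected above (an assumption, not the original implementation):

        // Hypothetical Translate helper: recognize one English utterance from the
        // default microphone and print its translation into the target language.
        static async Task Translate(string targetLanguage)
        {
            using (var audioConfig = AudioConfig.FromDefaultMicrophoneInput())
            using (var translator = new TranslationRecognizer(translationConfig, audioConfig))
            {
                Console.WriteLine("Speak now...");
                var result = await translator.RecognizeOnceAsync();
                if (result.Reason == ResultReason.TranslatedSpeech &&
                    result.Translations.ContainsKey(targetLanguage))
                {
                    Console.WriteLine($"'{result.Text}' -> {result.Translations[targetLanguage]}");
                }
            }
        }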
Example #12
        static void Main()
        {
            var config = SpeechTranslationConfig.FromSubscription(ConfigurationManager.AppSettings.Get("TTSKey"), ConfigurationManager.AppSettings.Get("Region"));

            //RecognizeOnceSpeechAsync(config).Wait();
            //Translate.RecognizeLng().Wait();
            Translate.TranslationContinuousRecognitionAsync(config).Wait();
            //TTS(config).Wait();
            Console.WriteLine("Please press a key to continue.");
            Console.ReadLine();
        }
Example #13
        public void Initialize(string subscriptionKey, string region, InputSourceType inputSource, string wavFilename)
        {
            subscriptionKey.EnsureIsNotNull(nameof(subscriptionKey));
            region.EnsureIsNotNull(nameof(region));

            speechConfiguration = SpeechTranslationConfig.FromSubscription(subscriptionKey, region);
            speechConfiguration.OutputFormat = OutputFormat.Detailed;
            SendMessage($"Created the SpeechConfiguration with {subscriptionKey} | {region}");

            audioConfig = GetAudioConfig(inputSource, wavFilename);
        }
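GetAudioConfig is not shown; a plausible sketch that switches between the default microphone and a WAV file (the InputSourceType member names are assumptions):

        // Hypothetical GetAudioConfig helper.
        private AudioConfig GetAudioConfig(InputSourceType inputSource, string wavFilename)
        {
            switch (inputSource)
            {
                case InputSourceType.File:
                    return AudioConfig.FromWavFileInput(wavFilename);
                default:
                    return AudioConfig.FromDefaultMicrophoneInput();
            }
        }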
Example #14
        public async Task <ActionResult> translateAsync()
        {
            string fromLanguage = "en-US";


            const string GermanVoice = "Microsoft Server Speech Text to Speech Voice (de-DE, Hedda)";

            var config = SpeechTranslationConfig.FromSubscription("ae9492aae8044a4c888a45a45e957d83", "westus");

            config.SpeechRecognitionLanguage = fromLanguage;
            config.VoiceName = GermanVoice;


            config.AddTargetLanguage("de");


            using (var recognizer = new TranslationRecognizer(config))
            {
                recognizer.Recognizing += (s, e) =>
                {
                    foreach (var element in e.Result.Translations)
                    {
                        ViewBag.message = element.Value;
                    }
                };

                recognizer.Recognized += (s, e) =>
                {
                    if (e.Result.Reason == ResultReason.TranslatedSpeech)
                    {
                        foreach (var element in e.Result.Translations)
                        {
                            ViewBag.message = element.Value;
                        }
                    }
                    else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                    {
                        foreach (var element in e.Result.Translations)
                        {
                            ViewBag.message = element.Value;
                        }
                    }
                    else if (e.Result.Reason == ResultReason.NoMatch)
                    {
                    }
                };


                await recognizer.RecognizeOnceAsync();

                return(View("Index"));
            }
        }
        public static async Task Translate(string subscriptionKey, string region, string inputFilename, string fromLanguage, IEnumerable <string> targetLanguages, Voice voice, string outputFilename)
        {
            if (!outputFilename.EndsWith(".wav") && !outputFilename.EndsWith(".mp3"))
            {
                throw new ArgumentOutOfRangeException(paramName: nameof(outputFilename), message: "Output filename must have '.wav' or '.mp3' extension");
            }

            var config   = SpeechTranslationConfig.FromSubscription(subscriptionKey, region);
            var wavBytes = await GetWAVFromFile(inputFilename);

            await config.TranslationWithFileAsync(wavBytes, fromLanguage, targetLanguages, voice, outputFilename);
        }
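GetWAVFromFile is not shown; a minimal sketch, assuming the input file is already a WAV the service can consume (the real project may transcode other formats first):

        // Hypothetical GetWAVFromFile helper: read the file's bytes as-is.
        private static async Task<byte[]> GetWAVFromFile(string inputFilename)
        {
            return await File.ReadAllBytesAsync(inputFilename);
        }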
        /// <summary>
        /// Initializes the config object with subscription key and region
        /// Initializes the recognizer object with a TranslationRecognizer
        /// Subscribes the recognizer to recognition Event Handlers
        /// If recognition is running, starts a thread which stops the recognition
        /// </summary>
        private void CreateRecognizer()
        {
            this.config = SpeechTranslationConfig.FromSubscription(SubscriptionKey, Region);
            this.config.SpeechRecognitionLanguage = FromLanguage;
            this.config.VoiceName = voice;
            ToLanguages.ForEach(l => this.config.AddTargetLanguage(l));

            this.recognizer = new TranslationRecognizer(this.config);

            this.recognizer.Recognizing  += this.OnRecognizingEventHandler;
            this.recognizer.Recognized   += this.OnRecognizedEventHandler;
            this.recognizer.Synthesizing += this.OnSynthesizingEventHandler;
            this.recognizer.Canceled     += this.OnCanceledEventHandler;
        }
Example #17
    void Start()
    {
        if (outputText == null)
        {
            UnityEngine.Debug.LogError("outputText property is null! Assign a UI Text element to it.");
        }
        else if (recoButton == null)
        {
            _message = "recoButton property is null! Assign a UI Button to it.";
            UnityEngine.Debug.LogError(_message);
        }
        else
        {
            // Continue with normal initialization, Text and Button objects are present.
#if PLATFORM_ANDROID
            // Request to use the microphone, cf.
            // https://docs.unity3d.com/Manual/android-RequestingPermissions.html
            message = "Waiting for mic permission";
            if (!Permission.HasUserAuthorizedPermission(Permission.Microphone))
            {
                Permission.RequestUserPermission(Permission.Microphone);
            }
#elif PLATFORM_IOS
            if (!Application.HasUserAuthorization(UserAuthorization.Microphone))
            {
                Application.RequestUserAuthorization(UserAuthorization.Microphone);
            }
#else
            _micPermissionGranted = true;
            _message = "Click button to recognize speech";
#endif
            _config = SpeechTranslationConfig.FromSubscription(SubscriptionKey, SubscriptionRegion);
            _config.SpeechRecognitionLanguage = "es-US";
            _config.AddTargetLanguage("en-US");
            _pushStream              = AudioInputStream.CreatePushStream();
            _audioInput              = AudioConfig.FromStreamInput(_pushStream);
            _recognizer              = new TranslationRecognizer(_config, _audioInput);
            _recognizer.Recognizing += RecognizingHandler;
            _recognizer.Recognized  += RecognizedHandler;
            _recognizer.Canceled    += CanceledHandler;

            foreach (var device in Microphone.devices)
            {
                Debug.Log("DeviceName: " + device);
            }
            _audioSource = GameObject.Find("AudioSource").GetComponent<AudioSource>();
        }
    }
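Start wires the recognizer to a push stream, but nothing shown here feeds microphone samples into _pushStream. A typical Unity approach (a sketch, assuming Microphone.Start assigned its recording clip to _audioSource.clip, a class-level int _lastSample, and a capture rate matching the push stream's expected PCM format) converts float samples to 16-bit little-endian PCM each frame:

    // Hypothetical per-frame pump: read new samples from the microphone clip,
    // convert [-1, 1] floats to 16-bit PCM, and push them to the SDK.
    void Update()
    {
        if (_audioSource == null || _audioSource.clip == null) return;

        int pos = Microphone.GetPosition(null);
        if (pos <= _lastSample) return;

        var samples = new float[(pos - _lastSample) * _audioSource.clip.channels];
        _audioSource.clip.GetData(samples, _lastSample);
        _lastSample = pos;

        var pcm = new byte[samples.Length * 2];
        for (int i = 0; i < samples.Length; i++)
        {
            short value = (short)(Mathf.Clamp(samples[i], -1f, 1f) * short.MaxValue);
            pcm[i * 2]     = (byte)(value & 0xFF);
            pcm[i * 2 + 1] = (byte)((value >> 8) & 0xFF);
        }
        _pushStream.Write(pcm);
    }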
Example #18
    private void CreateRecognizer()
    {
        this.config = SpeechTranslationConfig.FromSubscription(SubscriptionKey, Region);
        this.config.SpeechRecognitionLanguage = FromLanguage;
        this.config.VoiceName = voice;
        ToLanguages.ForEach(l => this.config.AddTargetLanguage(l));

        this.recognizer = new TranslationRecognizer(this.config);

        this.recognizer.Recognizing  += this.OnRecognizingEventHandler;
        this.recognizer.Recognized   += this.OnRecognizedEventHandler;
        this.recognizer.Synthesizing += this.OnSynthesizingEventHandler;
        this.recognizer.Canceled     += this.OnCanceledEventHandler;
    }
Example #19
        public async Task <(ResultReason, string)> ListenAsync()
        {
            var sourceLanguageConfigs = new SourceLanguageConfig[]
            {
                SourceLanguageConfig.FromLanguage("en-US"),
                SourceLanguageConfig.FromLanguage("it-IT")
            };
            var config = SpeechTranslationConfig.FromSubscription(Config.Key, Config.Region);
            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromSourceLanguageConfigs(sourceLanguageConfigs);

            using var recognizer = new SpeechRecognizer(config, autoDetectSourceLanguageConfig);
            var result = await recognizer.RecognizeOnceAsync();

            return result.Reason switch
            {
                ResultReason.RecognizedSpeech => (ResultReason.RecognizedSpeech, result.Text),
                _ => (ResultReason.NoMatch, null)
            };
        }
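A call site might consume the tuple like this (illustrative; the listener instance name is hypothetical):

        var (reason, text) = await listener.ListenAsync();
        if (reason == ResultReason.RecognizedSpeech)
        {
            Console.WriteLine($"Heard: {text}");
        }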
Example #20
        private void Init(string from, string to)
        {
            this.toLanguage = to;

            Profile       = MediaEncodingProfile.CreateWav(AudioEncodingQuality.Low);
            Profile.Audio = AudioEncodingProperties.CreatePcm(16000, 1, 16);

            byte channels         = 1;
            byte bitsPerSample    = 16;
            uint samplesPerSecond = 16000; // or 8000
            var  audioFormat      = AudioStreamFormat.GetWaveFormatPCM(samplesPerSecond, bitsPerSample, channels);

            // Init Push Stream

            pushStream = AudioInputStream.CreatePushStream(audioFormat);

            if (from == to)
            {
                var config = SpeechConfig.FromSubscription(apiKey, region);
                config.SpeechRecognitionLanguage = from;

                speechRecognizer = new SpeechRecognizer(config, AudioConfig.FromStreamInput(pushStream));

                speechRecognizer.Recognizing += RecognisingSpeechHandler;
                speechRecognizer.Recognized  += RecognisingSpeechHandler;

                speechRecognizer.SessionStarted += (sender, args) => this.RecognisionStarted?.Invoke();
                speechRecognizer.SessionStopped += (sender, args) => this.RecognisionStopped?.Invoke();
            }
            else
            {
                var config = SpeechTranslationConfig.FromSubscription(apiKey, region);
                config.SpeechRecognitionLanguage = from;
                config.AddTargetLanguage(to);

                translationRecognizer = new TranslationRecognizer(config, AudioConfig.FromStreamInput(pushStream));

                translationRecognizer.SessionStarted += (sender, args) => this.RecognisionStarted?.Invoke();
                translationRecognizer.SessionStopped += (sender, args) => this.RecognisionStopped?.Invoke();

                translationRecognizer.Recognizing += RecognisingTranslationHandler;
                translationRecognizer.Recognized  += RecognisingTranslationHandler;
            }
        }
Example #21
    public async void ButtonClick()
    {
        Debug.Log("Onclick fired");
        var translationConfig = SpeechTranslationConfig.FromSubscription(SpeechServiceSubscriptionKey, SpeechServiceRegion);

        translationConfig.SpeechRecognitionLanguage = "en-US";
        translationConfig.AddTargetLanguage("fr");

        using (var recognizer = new TranslationRecognizer(translationConfig))
        {
            Debug.Log("Creating recognizer");
            lock (threadLocker)
            {
                waitingforReco = true;
            }

            var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

            if (result.Reason == ResultReason.TranslatedSpeech)
            {
                recognizedString = result.Text;
                Debug.Log("Text: " + recognizedString);
                foreach (var element in result.Translations)
                {
                    translatedString = element.Value;
                }
            }
            else if (result.Reason == ResultReason.NoMatch)
            {
                recognizedString = "NOMATCH: Speech could not be recognized.";
            }
            else if (result.Reason == ResultReason.Canceled)
            {
                var cancellation = CancellationDetails.FromResult(result);
                recognizedString = $"CANCELED: Reason={cancellation.Reason} ErrorDetails={cancellation.ErrorDetails}";
            }

            lock (threadLocker)
            {
                waitingforReco = false;
            }
        }
    }
Example #22
    private async void StartRecognition()
    {
        if (isRecognitionStarted)
        {
            return;
        }
        Debug.Log("start recognition");

        string fromLang;
        string toLang;

        if (modeList.value == 0)
        {
            fromLang = "ja-JP";
            toLang   = "en";
        }
        else
        {
            fromLang = "en-US";
            toLang   = "ja";
        }
        Debug.Log("mode : " + fromLang + " -> " + toLang);

        var config = SpeechTranslationConfig.FromSubscription(apiKeyInputField.text, apiRegionInputField.text);

        config.SpeechRecognitionLanguage = fromLang;
        config.AddTargetLanguage(toLang);

        recognizer                      = new TranslationRecognizer(config);
        recognizer.Canceled            += CanceledHandler;
        recognizer.SessionStarted      += SessionStartedHandler;
        recognizer.SessionStopped      += SessionStoppedHandler;
        recognizer.SpeechStartDetected += SpeechStartDetectedHandler;
        recognizer.SpeechEndDetected   += SpeechEndDetectedHandler;
        recognizer.Recognizing         += RecognizingHandler;
        recognizer.Recognized          += RecognizedHandler;

        await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

        isRecognitionStarted      = true;
        isRecognitionStateChanged = true;
    }
        public async Task SpeechRecognitionFromFileAsync(StorageFile file)
        {
            SpeechTranslationConfig config = GetRecognizerConfig();

            if (config == null)
            {
                return;
            }

            ResetState();
            stopRecognitionTaskCompletionSource = new TaskCompletionSource<int>();
            using (var audioInput = AudioConfig.FromWavFileInput(file.Path))
            {
                using (var recognizer = new TranslationRecognizer(config, audioInput))
                {
                    recognizer.Recognizing    += OnTranslateRecognizing;
                    recognizer.Recognized     += OnTranslateRecognized;
                    recognizer.Canceled       += OnTranslateCanceled;
                    recognizer.SessionStarted += (s, e) =>
                    {
                        recognizeCancellationTokenSource = new CancellationTokenSource();
                    };
                    recognizer.SessionStopped += (s, e) =>
                    {
                        if (recognizeCancellationTokenSource != null && recognizeCancellationTokenSource.Token.CanBeCanceled)
                        {
                            recognizeCancellationTokenSource.Cancel();
                        }
                        stopRecognitionTaskCompletionSource.TrySetResult(0);
                    };

                    // Starts continuous recognition.
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    await stopRecognitionTaskCompletionSource.Task.ConfigureAwait(false);

                    // Stops recognition.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
        }
Example #24
    void CreateTranslationRecognizer()
    {
        if (translator == null)
        {
            SpeechTranslationConfig config = SpeechTranslationConfig.FromSubscription(lunarcomController.SpeechServiceAPIKey, lunarcomController.SpeechServiceRegion);
            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage(toLanguage);

            translator = new TranslationRecognizer(config);

            if (translator != null)
            {
                translator.Recognizing    += HandleTranslatorRecognizing;
                translator.Recognized     += HandleTranslatorRecognized;
                translator.Canceled       += HandleTranslatorCanceled;
                translator.SessionStarted += HandleTranslatorSessionStarted;
                translator.SessionStopped += HandleTranslatorSessionStopped;
            }
        }
    }
Example #25
        private SpeechTranslationConfig createSpeechTranslationConfig(String logId, Key key, string sourceLanguage, List<string> languages)
        {
            SpeechTranslationConfig speechConfig = SpeechTranslationConfig.FromSubscription(key.ApiKey, key.Region);

            speechConfig.RequestWordLevelTimestamps();
            if (!IsSupportedRecognition(sourceLanguage))
            {
                _logger.LogError($"{logId}: !!!! Unknown recognition language ({sourceLanguage})! Recognition may fail ...");
            }
            speechConfig.SpeechRecognitionLanguage = sourceLanguage;

            _logger.LogInformation($"{logId}: Requested output languages: { String.Join(",", languages) }, source = ({sourceLanguage})");
            String shortCodeSource = sourceLanguage.Split('-')[0].ToLower();

            foreach (var language in languages)
            {
                String shortCodeTarget = language.Split('-')[0].ToLower();
                if (shortCodeSource == shortCodeTarget)
                {
                    continue;
                }
                if (IsSupportedTranslation(language))
                {
                    _logger.LogInformation($"{logId}: Adding target {language}");
                    speechConfig.AddTargetLanguage(language);
                }
                else
                {
                    _logger.LogWarning($"{logId}: Skipping unsupported target {language}");
                }
            }

            speechConfig.OutputFormat = OutputFormat.Detailed;
            return speechConfig;
        }
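IsSupportedRecognition and IsSupportedTranslation are not shown; a plausible sketch using case-insensitive set lookups (the language lists here are placeholders, not the service's real tables):

        private static readonly HashSet<string> _recognitionLanguages =
            new HashSet<string>(StringComparer.OrdinalIgnoreCase) { "en-US", "es-ES", "fr-FR" };
        private static readonly HashSet<string> _translationLanguages =
            new HashSet<string>(StringComparer.OrdinalIgnoreCase) { "en", "es", "fr", "de" };

        // Hypothetical support checks backing the log messages above.
        private bool IsSupportedRecognition(string language) => _recognitionLanguages.Contains(language);
        private bool IsSupportedTranslation(string language) => _translationLanguages.Contains(language);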
Example #26
        public async Task <MSTResult> RecognitionWithVideoStreamAsync(string logId, string videoFilePath, Key key, Dictionary <string, List <Caption> > captions, string sourceLanguage, Dictionary <string, TimeSpan> startAfterMap)
        {
            List <string> outputLanguages = startAfterMap.Keys.ToList <string>();
            TimeSpan      restartOffset   = startAfterMap.Any() ? startAfterMap.Values.Min() : TimeSpan.Zero;

            _logger.LogInformation($"{logId}:RecognitionWithVideoStreamAsync restartOffset=({restartOffset.TotalSeconds}) seconds");

            var trimmedAudioFile = await createTrimmedAudioFileAsync(videoFilePath, (float)restartOffset.TotalSeconds);

            try
            {
                SpeechTranslationConfig speechConfig = createSpeechTranslationConfig(logId, key, sourceLanguage, outputLanguages);


                var result = await performRecognitionAsync(logId, trimmedAudioFile.FilePath, speechConfig, restartOffset,
                                                           sourceLanguage, captions, startAfterMap);

                return result;
            }
            finally
            {
                QuietDelete(trimmedAudioFile.FilePath);
            }
        }
    /// <summary>
    /// Creates a class-level Translation Recognizer for a specific language using Azure credentials
    /// and hooks-up lifecycle & recognition events. Translation can be enabled with one or more target
    /// languages translated simultaneously
    /// </summary>
    void CreateTranslationRecognizer()
    {
        Debug.Log("Creating Translation Recognizer.");
        recognizedString = "Initializing speech recognition with translation, please wait...";

        if (translator == null)
        {
            SpeechTranslationConfig config = SpeechTranslationConfig.FromSubscription(SpeechServiceAPIKey, SpeechServiceRegion);
            config.SpeechRecognitionLanguage = fromLanguage;
            if (Languages1.captionText.text.Length > 0)
            {
                config.AddTargetLanguage(ExtractLanguageCode(Languages1.captionText.text));
            }
            if (Languages2.captionText.text.Length > 0)
            {
                config.AddTargetLanguage(ExtractLanguageCode(Languages2.captionText.text));
            }
            if (Languages3.captionText.text.Length > 0)
            {
                config.AddTargetLanguage(ExtractLanguageCode(Languages3.captionText.text));
            }
            translator = new TranslationRecognizer(config);

            if (translator != null)
            {
                translator.Recognizing         += RecognizingTranslationHandler;
                translator.Recognized          += RecognizedTranslationHandler;
                translator.SpeechStartDetected += SpeechStartDetectedHandler;
                translator.SpeechEndDetected   += SpeechEndDetectedHandler;
                translator.Canceled            += CanceledTranslationHandler;
                translator.SessionStarted      += SessionStartedHandler;
                translator.SessionStopped      += SessionStoppedHandler;
            }
        }
        Debug.Log("CreateTranslationRecognizer exit");
    }
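ExtractLanguageCode is not shown; a plausible sketch that pulls a code such as "fr" out of a dropdown caption like "French (fr)" (the caption format is an assumption):

    // Hypothetical ExtractLanguageCode helper.
    private string ExtractLanguageCode(string captionText)
    {
        int open  = captionText.LastIndexOf('(');
        int close = captionText.LastIndexOf(')');
        return (open >= 0 && close > open)
            ? captionText.Substring(open + 1, close - open - 1)
            : captionText;
    }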
Example #28
        static async Task CreateConversationAsync()
        {
            // Replace with your own subscription key and service region (e.g., "westus").
            string subscriptionKey = "YourSubscriptionKey";
            string region          = "YourServiceRegion";

            // Change this to match the language you are speaking. You can find the full list of supported
            // speech language codes here: https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support
            string fromLanguage = "en-US";

            // Change this to the language you would like to translate the transcriptions to. You can find
            // the full list of supported translation languages here: https://aka.ms/speech/sttt-languages
            string toLanguage = "de";

            // Set this to the display name you want for the conversation host
            string displayName = "The host";

            // Creates an instance of a speech config with specified subscription key and service region.
            var config = SpeechTranslationConfig.FromSubscription(subscriptionKey, region);

            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage(toLanguage);

            // Create the conversation object you'll need to manage the conversation
            using (var conversation = await Conversation.CreateConversationAsync(config).ConfigureAwait(false))
            {
                // Start the conversation so you and others can join
                await conversation.StartConversationAsync().ConfigureAwait(false);

                // Get the conversation ID. It will be up to your scenario to determine how this is shared
                // with other participants.
                string conversationId = conversation.ConversationId;
                Console.WriteLine($"CONVERSATION: Created a new conversation with ID '{conversationId}'");

                // At this point, you can use the conversation object to manage the conversation. For example,
                // to mute everyone else in the room you can call this method:
                await conversation.MuteAllParticipantsAsync().ConfigureAwait(false);

                // Configure which audio source you want to use. In this case we will use your default microphone
                var audioConfig = AudioConfig.FromDefaultMicrophoneInput();

                // Create the conversation translator you'll need to send audio, send IMs, and receive conversation events
                using (var conversationTranslator = new ConversationTranslator(audioConfig))
                {
                    // You should connect all the event handlers you need at this point
                    conversationTranslator.SessionStarted += (s, e) =>
                    {
                        Console.WriteLine($"SESSION STARTED: {e.SessionId}");
                    };
                    conversationTranslator.SessionStopped += (s, e) =>
                    {
                        Console.WriteLine($"SESSION STOPPED: {e.SessionId}");
                    };
                    conversationTranslator.Canceled += (s, e) =>
                    {
                        Console.WriteLine($"CANCELED: Reason={e.Reason}");
                        switch (e.Reason)
                        {
                        case CancellationReason.EndOfStream:
                            Console.WriteLine($"CANCELED: End of audio reached");
                            break;

                        case CancellationReason.Error:
                            Console.WriteLine($"CANCELED: ErrorCode= {e.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails= {e.ErrorDetails}");
                            break;
                        }
                    };
                    conversationTranslator.ConversationExpiration += (s, e) =>
                    {
                        Console.WriteLine($"CONVERSATION: Will expire in {e.ExpirationTime.TotalMinutes} minutes");
                    };
                    conversationTranslator.ParticipantsChanged += (s, e) =>
                    {
                        Console.Write("PARTICIPANTS: The following participant(s) have ");
                        switch (e.Reason)
                        {
                        case ParticipantChangedReason.JoinedConversation:
                            Console.Write("joined");
                            break;

                        case ParticipantChangedReason.LeftConversation:
                            Console.Write("left");
                            break;

                        case ParticipantChangedReason.Updated:
                            Console.Write("been updated");
                            break;
                        }

                        Console.WriteLine(":");

                        foreach (var participant in e.Participants)
                        {
                            Console.WriteLine($"\tPARTICIPANT: {participant.DisplayName}");
                        }
                    };
                    conversationTranslator.TextMessageReceived += (s, e) =>
                    {
                        Console.WriteLine($"TEXT MESSAGE: From '{e.Result.ParticipantId}': '{e.Result.Text}'");
                        foreach (var entry in e.Result.Translations)
                        {
                            Console.WriteLine($"\tTRANSLATED: '{entry.Key}': '{entry.Value}'");
                        }
                    };
                    conversationTranslator.Transcribed += (s, e) =>
                    {
                        Console.WriteLine($"TRANSCRIBED: From '{e.Result.ParticipantId}': '{e.Result.Text}'");
                        foreach (var entry in e.Result.Translations)
                        {
                            Console.WriteLine($"\tTRANSLATED: '{entry.Key}': '{entry.Value}'");
                        }
                    };
                    conversationTranslator.Transcribing += (s, e) =>
                    {
                        Console.WriteLine($"TRANSCRIBING: From '{e.Result.ParticipantId}': '{e.Result.Text}'");
                        foreach (var entry in e.Result.Translations)
                        {
                            Console.WriteLine($"\tTRANSLATED: '{entry.Key}': '{entry.Value}'");
                        }
                    };

                    // Join the conversation so you can start receiving events
                    await conversationTranslator.JoinConversationAsync(conversation, displayName).ConfigureAwait(false);

                    // You can now send an instant message to all other participants in the room
                    await conversationTranslator.SendTextMessageAsync("The instant message to send").ConfigureAwait(false);

                    // Start sending audio
                    await conversationTranslator.StartTranscribingAsync().ConfigureAwait(false);

                    // At this point, you should start receiving transcriptions for what you are saying using
                    // the default microphone. Press enter to stop audio capture
                    Console.WriteLine("Started transcribing. Press enter to stop");
                    while (Console.ReadKey(true).Key != ConsoleKey.Enter)
                    {
                    }

                    // Stop audio capture
                    await conversationTranslator.StopTranscribingAsync().ConfigureAwait(false);

                    // Leave the conversation. After this you will no longer receive events
                    await conversationTranslator.LeaveConversationAsync().ConfigureAwait(false);
                }

                // End the conversation
                await conversation.EndConversationAsync().ConfigureAwait(false);

                // Delete the conversation. Any other participants that are still in the conversation will be removed
                await conversation.DeleteConversationAsync().ConfigureAwait(false);
            }
        }
Example #29
                    // Translation from microphone.
                    public static async Task TranslationWithMicrophoneAsync()
                    {
                        // <TranslationWithMicrophoneAsync>
                        // Translation source language.
                        // Replace with a language of your choice.
                        string fromLanguage = "en-US";

                        // Voice name of synthesis output.
                        const string GermanVoice = "Microsoft Server Speech Text to Speech Voice (de-DE, Hedda)";

                        // Creates an instance of a speech translation config with specified subscription key and service region.
                        // Replace with your own subscription key and service region (e.g., "westus").
                        var config = SpeechTranslationConfig.FromSubscription("", "westus");

                        config.SpeechRecognitionLanguage = fromLanguage;
                        config.VoiceName = GermanVoice;

                        // Translation target language(s).
                        // Replace with language(s) of your choice.
                        config.AddTargetLanguage("de");

                        // Creates a translation recognizer using microphone as audio input.
                        using (var recognizer = new TranslationRecognizer(config))
                        {
                            // Subscribes to events.
                            recognizer.Recognizing += (s, e) =>
                            {
                                //Console.WriteLine($"RECOGNIZING in '{fromLanguage}': Text={e.Result.Text}");
                                //foreach (var element in e.Result.Translations)
                                //{
                                //    Console.WriteLine($"    TRANSLATING into '{element.Key}': {element.Value}");
                                //}
                            };

                            recognizer.Recognized += (s, e) =>
                            {
                                if (e.Result.Reason == ResultReason.TranslatedSpeech)
                                {
                                    Console.WriteLine($"\nRECOGNIZED in '{fromLanguage}': Text={e.Result.Text}");
                                    foreach (var element in e.Result.Translations)
                                    {
                                        Console.WriteLine($"TRANSLATED into '{element.Key}': {element.Value}");
                                    }
                                }
                                else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                                {
                                    //Console.WriteLine($"\nText={e.Result.Text}");
                                    //Console.WriteLine($"    Speech not translated.");
                                }
                                else if (e.Result.Reason == ResultReason.NoMatch)
                                {
                                    //Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                                }
                            };

                            recognizer.Synthesizing += (s, e) =>
                            {
                                //var audio = e.Result.GetAudio();
                                //Console.WriteLine(audio.Length != 0
                                //    ? $"AudioSize: {audio.Length}"
                                //    : $"AudioSize: {audio.Length} (end of synthesis data)");

                                //if (audio.Length > 0)
                                //{
                                //    #if NET461
                                //                            using (var m = new MemoryStream(audio))
                                //                            {
                                //                                SoundPlayer simpleSound = new SoundPlayer(m);
                                //                                simpleSound.PlaySync();
                                //                            }
                                //    #endif
                                //}
                            };

                            recognizer.Canceled += (s, e) =>
                            {
                                Console.WriteLine($"CANCELED: Reason={e.Reason}");

                                if (e.Reason == CancellationReason.Error)
                                {
                                    Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                                    Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                                }
                            };

                            recognizer.SessionStarted += (s, e) =>
                            {
                                Console.WriteLine("\nSession started event.");
                            };

                            recognizer.SessionStopped += (s, e) =>
                            {
                                Console.WriteLine("\nSession stopped event.");
                            };

                            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                            Console.WriteLine("Say something...");
                            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                            do
                            {
                                Console.WriteLine("Press Enter to stop");
                            } while (Console.ReadKey().Key != ConsoleKey.Enter);

                            // Stops continuous recognition.
                            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                        }
                        // </TranslationWithMicrophoneAsync>
                    }
Example #30
        public static async Task TranslationContinuousRecognitionAsync(SpeechTranslationConfig config)
        {
            byte[] audio        = null;
            string fromLanguage = "en-US";

            #region LanguageDetection

            /*SpeechConfig speechConfig = SpeechConfig.FromEndpoint(new System.Uri(ConfigurationManager.AppSettings.Get("SpeechEndpoint")), ConfigurationManager.AppSettings.Get("TTSKey"));
             * AudioConfig audioConfig = AudioConfig.FromDefaultMicrophoneInput();
             * string fromLanguage = string.Empty;
             * AutoDetectSourceLanguageConfig autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig
             *                                          .FromLanguages(new string[] { "en-US", "ru-RU" });
             * using (var recognizer = new SpeechRecognizer(
             *  speechConfig,
             *  autoDetectSourceLanguageConfig,
             *  audioConfig))
             * {
             *  Console.WriteLine("Say something...");
             *  var speechRecognitionResult = await recognizer.RecognizeOnceAsync();
             *  var autoDetectSourceLanguageResult =
             *      AutoDetectSourceLanguageResult.FromResult(speechRecognitionResult);
             *  fromLanguage = autoDetectSourceLanguageResult.Language;
             *  Console.WriteLine("I recognized " + speechRecognitionResult.Text + " in " + fromLanguage);
             * }*/
            #endregion
            config.SpeechRecognitionLanguage = fromLanguage;
            config.AddTargetLanguage("de");

            const string GermanVoice = "de-DE-Hedda";
            config.VoiceName = GermanVoice;
            // Creates a translation recognizer using microphone as audio input.
            using (var recognizer = new TranslationRecognizer(config))
            {
                recognizer.Recognizing += (s, e) =>
                {
                    Console.WriteLine($"RECOGNIZING in '{fromLanguage}': Text={e.Result.Text}");
                    foreach (var element in e.Result.Translations)
                    {
                        Console.WriteLine($"    TRANSLATING into '{element.Key}': {element.Value}");
                    }
                };

                recognizer.Recognized += (s, e) =>
                {
                    if (e.Result.Reason == ResultReason.TranslatedSpeech)
                    {
                        Console.WriteLine($"\nFinal result: Reason: {e.Result.Reason.ToString()}, recognized text in {fromLanguage}: {e.Result.Text}.");
                        foreach (var element in e.Result.Translations)
                        {
                            Console.WriteLine($"    TRANSLATING into '{element.Key}': {element.Value}");
                        }
                    }
                };

                recognizer.Synthesizing += (s, e) =>
                {
                    audio = e.Result.GetAudio();
                    Console.WriteLine(audio.Length != 0
                        ? $"AudioSize: {audio.Length}"
                        : $"AudioSize: {audio.Length} (end of synthesis data)");
                    using (MemoryStream ms = new MemoryStream(audio))
                    {
                        SoundPlayer player = new SoundPlayer();
                        player.Stream          = ms;
                        player.Stream.Position = 0;
                        player.PlaySync();
                    }
                };

                recognizer.Canceled += (s, e) =>
                {
                    Console.WriteLine($"\nRecognition canceled. Reason: {e.Reason}; ErrorDetails: {e.ErrorDetails}");
                };

                recognizer.SessionStarted += (s, e) =>
                {
                    Console.WriteLine("\nSession started event.");
                };

                recognizer.SessionStopped += (s, e) =>
                {
                    Console.WriteLine("\nSession stopped event.");
                };

                // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                Console.WriteLine("Say something...");
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                do
                {
                    Console.WriteLine("Press Enter to stop");
                } while (Console.ReadKey().Key != ConsoleKey.Enter);


                // Stops continuous recognition.
                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }
        }