Exemple #1
0
 /// <summary>
 /// Tears down all process-wide resources: disconnects the Twitch bot and
 /// disposes the speech components.
 /// </summary>
 private static void DisposeAll()
 {
     // Null-conditional calls make teardown safe even if startup failed
     // partway through initialization: a null field is skipped instead of
     // throwing NullReferenceException and leaking the remaining resources.
     s_twitchBot?.Disconnect();
     s_speechSynthesizer?.Dispose();
     s_speechRecognizer?.Dispose();
     s_audioConfig?.Dispose();
 }
 /// <summary>
 /// Releases the owned configuration objects and clears all references so the
 /// instance can be garbage-collected.
 /// </summary>
 public void Dispose()
 {
     // Null-conditional dispose makes a second Dispose() call a harmless
     // no-op; the original threw NullReferenceException on the second call
     // because these fields are nulled below.
     NetConfiguration?.Dispose();
     AudioConfig?.Dispose();
     Config           = null;
     NetConfiguration = null;
     AudioConfig      = null;
     _config          = null;
 }
Exemple #3
0
        /// <summary>
        /// Standard dispose pattern worker: releases managed resources at most once.
        /// </summary>
        /// <param name="cleanup">True when called from an explicit Dispose();
        /// managed resources are only released on that path.</param>
        protected virtual void Dispose(bool cleanup)
        {
            if (!disposed)
            {
                if (cleanup)
                {
                    // Release the speech pipeline in the order it was built up.
                    speechRecognizer.Dispose();
                    audioConfig.Dispose();
                    PushStream.Dispose();
                }

                // Mark done so repeated calls return immediately.
                disposed = true;
            }
        }
Exemple #4
0
 /// <summary>
 /// Detaches all event handlers before disposing the recognizer — so no
 /// callback can fire into a disposed object — then releases the audio
 /// configuration. Safe to call more than once: fields are nulled after use.
 /// </summary>
 public void Dispose()
 {
     var rec = recognizer;
     if (rec != null)
     {
         rec.Recognizing    -= OnSpeechRecognizing;
         rec.SessionStarted -= OnSessionStarted;
         rec.SessionStopped -= OnSessionStopped;
         rec.Dispose();
         recognizer = null;
     }

     var cfg = audioConfig;
     if (cfg != null)
     {
         cfg.Dispose();
         audioConfig = null;
     }
 }
Exemple #5
0
 /// <summary>
 /// Disposes the audio input pipeline: stream, audio config, recognizer.
 /// </summary>
 public void Dispose()
 {
     // Null-conditional calls keep a second Dispose() or a partially
     // constructed instance from throwing NullReferenceException and
     // skipping the remaining components.
     _inputStream?.Dispose();
     _audioInput?.Dispose();
     _recognizer?.Dispose();
 }
 /// <summary>
 /// Releases this instance's resources: the tracked disposables, then the
 /// audio objects (which may be null if they were never created, hence the
 /// null-conditional calls).
 /// </summary>
 public void Dispose()
 {
     // NOTE(review): Dispose(Disposables) presumably disposes each entry in
     // the Disposables collection — the overload is not visible here; confirm.
     Dispose(Disposables);
     AudioConfig?.Dispose();
     StreamAudioSource?.Dispose();
 }
        /// <summary>
        /// Click handler: lets the user pick a .wav file and runs Azure continuous
        /// speech recognition on it, reporting progress through NotifyUser.
        /// async void is acceptable here only because this is a top-level UI event
        /// handler; exceptions are caught locally.
        /// </summary>
        private async void SpeechRecognitionFromStream_ButtonClicked(object sender, RoutedEventArgs e)
        {
            stopRecognitionTaskCompletionSource = new TaskCompletionSource <int>();
            AudioConfig  audioInput = null;
            BinaryReader reader     = null;
            Stream       stream     = null;

            if (!AreKeysValid())
            {
                NotifyUser("Subscription Key is missing!", NotifyType.ErrorMessage);
                return;
            }
            else
            {
                // Clear any previous status message.
                NotifyUser(" ", NotifyType.StatusMessage);
            }

            var picker = new Windows.Storage.Pickers.FileOpenPicker();

            picker.FileTypeFilter.Add(".wav");
            StorageFile file = await picker.PickSingleFileAsync();

            if (file == null)
            {
                // BUGFIX: the old message formatted file.Path, which is guaranteed
                // to throw NullReferenceException here because file is null
                // (the picker was dismissed without a selection).
                NotifyUser("No file was selected!", NotifyType.ErrorMessage);
                return;
            }
            try
            {
                stream = (await file.OpenReadAsync()).AsStreamForRead();
                reader = new BinaryReader(stream);

                // Create an audio stream from a wav file.
                audioInput = MicrosoftSpeechSDKSamples.Helper.OpenWavFile(reader);

                // Creates an instance of a speech config with specified and service region (e.g., "westus").
                var config = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);
                config.SpeechRecognitionLanguage = this.RecognitionLanguage;

                // Creates a speech recognizer using file as audio input.
                using (var recognizer = new SpeechRecognizer(config, audioInput))
                {
                    // Subscribes to events. NOTE(review): handlers run on SDK
                    // threads; NotifyUser is assumed to marshal to the UI thread.
                    recognizer.Recognizing += (s, ee) =>
                    {
                        NotifyUser(ee.Result.Text, NotifyType.StatusMessage);
                    };
                    recognizer.Recognized += (s, ee) =>
                    {
                        string str = "";
                        if (ee.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            str = $"RECOGNIZED: Text={ee.Result.Text}";
                        }
                        else if (ee.Result.Reason == ResultReason.NoMatch)
                        {
                            str = $"NOMATCH: Speech could not be recognized.";
                        }
                        NotifyUser(str, NotifyType.StatusMessage);
                    };
                    recognizer.Canceled += (s, ee) =>
                    {
                        StringBuilder sb = new StringBuilder();
                        sb.AppendLine($"CANCELED: Reason={ee.Reason}");

                        if (ee.Reason == CancellationReason.Error)
                        {
                            sb.AppendLine($"CANCELED: ErrorCode={ee.ErrorCode}");
                            sb.AppendLine($"CANCELED: ErrorDetails={ee.ErrorDetails}");
                            sb.AppendLine($"CANCELED: Did you update the subscription info?");
                        }

                        NotifyUser(sb.ToString(), NotifyType.StatusMessage);
                    };
                    recognizer.SessionStarted += (s, ee) =>
                    {
                        NotifyUser("Session started event.", NotifyType.StatusMessage);
                    };
                    recognizer.SessionStopped += (s, ee) =>
                    {
                        NotifyUser("Session stopped event.", NotifyType.StatusMessage);
                        NotifyUser("Stop recognition.", NotifyType.StatusMessage);
                        // Unblocks the await below once the session ends.
                        stopRecognitionTaskCompletionSource.TrySetResult(0);
                    };
                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    await stopRecognitionTaskCompletionSource.Task.ConfigureAwait(false);

                    // Stops recognition.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
            catch (System.FormatException ex)
            {
                // OpenWavFile throws FormatException for malformed wav headers.
                NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
            }
            finally
            {
                // BinaryReader.Dispose also closes the underlying stream, and a
                // second Dispose on these types is harmless; null-conditional
                // covers the paths where construction never happened.
                reader?.Dispose();
                audioInput?.Dispose();
                stream?.Dispose();
            }
        }
Exemple #8
0
        /// <summary>
        /// Transcribes "tempfile.wav" with the Azure Speech service and appends the
        /// recognized text (UTF-8) to "tempfile.txt". Blocks until recognition
        /// finishes or is canceled.
        /// https://docs.microsoft.com/ja-jp/azure/cognitive-services/speech-service/get-started-speech-to-text
        /// </summary>
        /// <param name="tempfile">Base path; ".wav" is read and ".txt" is written.</param>
        /// <param name="log">Logger used for progress, results, and errors.</param>
        private static void Audio2Text(string tempfile, ILogger log)
        {
            using (FileStream fs = File.OpenWrite(tempfile + ".txt"))
            {
                // Connect to the Speech service using environment configuration.
                string key        = System.Environment.GetEnvironmentVariable("CognitiveServiceApiKey");
                string endPoint   = System.Environment.GetEnvironmentVariable("CognitiveEndpoint");
                string endPointId = System.Environment.GetEnvironmentVariable("CognitiveEndpointId");
                Uri    uriEndpoint;
                try
                {
                    uriEndpoint = new Uri(endPoint);
                }
                catch (Exception ex)
                {
                    // Missing/invalid endpoint: log and bail out (best effort).
                    log.LogWarning(ex.Message);
                    return;
                }

                // Endpoint specification when a custom domain is used.
                SpeechConfig speechConfig = SpeechConfig.FromEndpoint(uriEndpoint, key);
                // EndpointId is only required when a Custom Speech model is used.
                speechConfig.EndpointId = endPointId;
                speechConfig.SpeechRecognitionLanguage = "ja-JP";

                // BUGFIX: using-statements guarantee disposal even when a handler
                // or the service throws; the old code leaked both objects on any
                // exception because Dispose was only called on the success path.
                using (AudioConfig audioConfig = AudioConfig.FromWavFileInput(tempfile + ".wav"))
                using (SpeechRecognizer recognizer = new SpeechRecognizer(speechConfig, audioConfig))
                {
                    // Completion signal set from the recognizer's event callbacks.
                    var stopRecognition = new TaskCompletionSource <int>();

                    // Fired for each recognized phrase (repeatedly, per segment).
                    recognizer.Recognized += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            String transcript = e.Result.Text;
                            log.LogInformation("RECOGNIZED: Text=" + transcript);
                            try
                            {
                                Byte[] info = new UTF8Encoding(true).GetBytes(transcript);
                                fs.Write(info, 0, info.Length);
                            }
                            catch (Exception ex)
                            {
                                // Best-effort write: log and keep recognizing.
                                log.LogWarning(ex.Message);
                            }
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            log.LogInformation($"NOMATCH: Speech could not be recognized.");
                        }
                    };

                    // Fired when recognition is canceled (error or end of stream).
                    recognizer.Canceled += (s, e) =>
                    {
                        // BUGFIX: route through ILogger like every other handler
                        // (was Console.WriteLine, bypassing the function's log).
                        log.LogInformation($"CANCELED: Reason={e.Reason}");

                        if (e.Reason == CancellationReason.Error)
                        {
                            log.LogInformation($"CANCELED: ErrorCode={e.ErrorCode}");
                            log.LogInformation($"CANCELED: ErrorDetails={e.ErrorDetails}");
                            log.LogInformation($"CANCELED: Did you update the subscription info?");
                        }

                        stopRecognition.TrySetResult(0);
                    };

                    // Fired when the session runs to the end of the input file.
                    recognizer.SessionStopped += (s, e) =>
                    {
                        log.LogInformation("\n    Session stopped event.");
                        stopRecognition.TrySetResult(0);
                    };

                    // Start transcription. BUGFIX: block until the start call
                    // completes so its exceptions are observed — the old code
                    // discarded the Task (fire-and-forget).
                    recognizer.StartContinuousRecognitionAsync().GetAwaiter().GetResult();

                    // Waits for completion. Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopRecognition.Task });

                    // Shut the recognizer down cleanly before it is disposed.
                    recognizer.StopContinuousRecognitionAsync().GetAwaiter().GetResult();
                }
            }
        }