Example #1
        private async void ResetRecognition()
        {
            Debug.WriteLine(nameof(ResetRecognition) + ": Start");
            _canWrite = false;

            // Tear down any previous call and open a new streaming session.
            _call?.Dispose();
            _call = _client.StreamingRecognize();

            // ComboBoxLanguage must be read on the UI thread, so send the initial
            // configuration request through the dispatcher.
            await ComboBoxLanguage.Dispatcher.BeginInvoke(new Action(async () =>
            {
                try
                {
                    await _call.RequestStream.WriteAsync(
                        ConfigRequestFactory(ComboBoxLanguage.Text,
                                             RecognitionConfig.Types.AudioEncoding.Linear16,
                                             SAMPLING_RATE));
                    _canWrite = true;
                }
                catch (Exception ex)
                {
                    Debug.WriteLine(nameof(ResetRecognition) + ": " + ex.Message);
                }
            }));

            Debug.WriteLine(nameof(ResetRecognition) + ": End");
        }
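
Example #1 refers to a ConfigRequestFactory helper and a SAMPLING_RATE constant that are not shown. A minimal sketch of what that factory might look like, inferred from its call site (language code, encoding, and sampling rate in, initial configuration-only request out); the InterimResults and SingleUtterance values are illustrative defaults, not taken from the original:

        // A sketch of the ConfigRequestFactory helper assumed above. The original
        // implementation is not shown; this version is inferred from the call site
        // and builds the configuration-only request that must open the stream.
        private static StreamingRecognizeRequest ConfigRequestFactory(
            string languageCode,
            RecognitionConfig.Types.AudioEncoding encoding,
            int samplingRate)
        {
            return new StreamingRecognizeRequest
            {
                StreamingConfig = new StreamingRecognitionConfig
                {
                    Config = new RecognitionConfig
                    {
                        LanguageCode = languageCode,
                        Encoding     = encoding,
                        SampleRate   = samplingRate,
                    },
                    InterimResults  = true,   // assumed default
                    SingleUtterance = false,  // assumed default
                },
            };
        }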
Example #2
        static void Main(string[] args)
        {
            // Create the credentials
            var credential = GoogleCredential.FromJson(File.ReadAllText("SpeechTest-4db378c087bb.json"));

            credential = credential.CreateScoped("https://www.googleapis.com/auth/cloud-platform");

            // Create a channel for connecting to the server
            var channel = new Channel("speech.googleapis.com:443", credential.ToChannelCredentials());

            // Create a client for the Google Speech API
            var client = new Speech.SpeechClient(channel);

            // Streaming configuration
            var streamingConfig = new StreamingRecognitionConfig
            {
                Config = new RecognitionConfig
                {
                    SampleRate   = 16000,
                    Encoding     = RecognitionConfig.Types.AudioEncoding.Linear16,
                    LanguageCode = "ja-JP",
                },
                InterimResults  = true,
                SingleUtterance = false,
            };

            // Start streaming
            using (var call = client.StreamingRecognize())
            {
                Console.WriteLine("-----------\nstart.\n");

                // Set up what happens when a response comes back from the Cloud Speech API
                var responseReaderTask = Task.Run(async () =>
                {
                    // Each call to MoveNext yields one response worth of data
                    while (await call.ResponseStream.MoveNext())
                    {
                        var note = call.ResponseStream.Current;

                        // If there is data, print the recognition result
                        if (note.Results != null && note.Results.Count > 0 &&
                            note.Results[0].Alternatives.Count > 0)
                        {
                            Console.WriteLine("result: " + note.Results[0].Alternatives[0].Transcript);
                        }
                    }
                });

                // Make the first call; the first request carries only the configuration data
                var initialRequest = new StreamingRecognizeRequest
                {
                    StreamingConfig = streamingConfig,
                };
                call.RequestStream.WriteAsync(initialRequest).Wait();

                // Create the recording model
                IAudioRecorder recorder = new RecordModel();

                // Whenever the recording model emits audio data, immediately send it to the server
                recorder.RecordDataAvailabled += (sender, e) =>
                {
                    if (e.Length > 0)
                    {
                        // Only one WriteAsync call may be pending at a time, so take
                        // particular care in asynchronous code. A lock is used here
                        // so that writes never overlap.
                        lock (recorder)
                        {
                            call.RequestStream.WriteAsync(new StreamingRecognizeRequest
                            {
                                AudioContent = RecognitionAudio.FromBytes(e.Buffer, 0, e.Length).Content,
                            }).Wait();
                        }
                    }
                };

                // Start recording
                recorder.Start();

                // A Cloud Speech API streaming session is limited to 60 seconds, so cut it off at the 50-second mark
                var timer = new Timer(1000 * 50);
                timer.Start();

                // The code that actually cuts the stream off once 50 seconds have elapsed
                timer.Elapsed += async (sender, e) =>
                {
                    recorder.Stop();
                    await call.RequestStream.CompleteAsync();
                };

                // Wait for the response reader to finish
                responseReaderTask.Wait();

                // Reaching this point means the API call has finished, so stop the timer
                timer.Dispose();
            }

            Console.WriteLine("\n-----------\nCompleted (Time out)");
            Console.ReadKey();
        }
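
Example #2 depends on an IAudioRecorder / RecordModel pair that is not shown. Below is one possible implementation on top of NAudio's WaveInEvent (requires the NAudio package and "using NAudio.Wave;"); the interface shape, including the RecordDataAvailabled event with its Buffer and Length members, is inferred from the call sites above, and everything else is an assumption:

        // Event args shape inferred from the handler in Example #2 (e.Buffer, e.Length).
        public class RecordDataEventArgs : EventArgs
        {
            public byte[] Buffer { get; set; }
            public int Length { get; set; }
        }

        public interface IAudioRecorder
        {
            event EventHandler<RecordDataEventArgs> RecordDataAvailabled;
            void Start();
            void Stop();
        }

        // A sketch of RecordModel using NAudio; the original implementation is not shown.
        public class RecordModel : IAudioRecorder
        {
            // 16 kHz, 16-bit, mono to match the Linear16 / 16000 Hz RecognitionConfig
            private readonly WaveInEvent _waveIn = new WaveInEvent
            {
                WaveFormat = new WaveFormat(16000, 16, 1),
            };

            public event EventHandler<RecordDataEventArgs> RecordDataAvailabled;

            public RecordModel()
            {
                // Forward each captured buffer to subscribers
                _waveIn.DataAvailable += (sender, e) =>
                    RecordDataAvailabled?.Invoke(this, new RecordDataEventArgs
                    {
                        Buffer = e.Buffer,
                        Length = e.BytesRecorded,
                    });
            }

            public void Start() => _waveIn.StartRecording();

            public void Stop() => _waveIn.StopRecording();
        }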
Example #3
        /// <summary>
        /// Asynchronously streams audio to the Google Cloud Speech server and receives results.
        /// </summary>
        static async Task StreamingRequest()
        {
            using (var call = m_Client.StreamingRecognize())
            {
                Task responseReaderTask = Task.Run(async () =>
                {
                    while (await call.ResponseStream.MoveNext())
                    {
                        StreamingRecognizeResponse response = call.ResponseStream.Current;
                        var responseJSON = new JSONObject();
                        if (response.Error != null)
                        {
                            var errorJSON = new JSONObject();
                            errorJSON.AddField("code", response.Error.Code);
                            errorJSON.AddField("message", response.Error.Message);
                            responseJSON.AddField("error", errorJSON);
                        }
                        if (response.Results != null && response.Results.Count > 0)
                        {
                            var resultsJSON = new JSONObject();
                            foreach (var result in response.Results)
                            {
                                var resultJSON = new JSONObject();
                                if (result.Alternatives != null && result.Alternatives.Count > 0)
                                {
                                    var alternativesJSON = new JSONObject();
                                    foreach (var alternative in result.Alternatives)
                                    {
                                        var alternativeJSON = new JSONObject();
                                        alternativeJSON.AddField("transcript", alternative.Transcript);
                                        alternativeJSON.AddField("confidence", alternative.Confidence);
                                        alternativesJSON.Add(alternativeJSON);
                                    }
                                    resultJSON.AddField("alternatives", alternativesJSON);
                                    resultJSON.AddField("is_final", result.IsFinal);
                                    resultJSON.AddField("stability", result.Stability);
                                }
                                resultsJSON.Add(resultJSON);
                            }
                            responseJSON.AddField("results", resultsJSON);
                        }
                        responseJSON.AddField("result_index", response.ResultIndex);
                        responseJSON.AddField("endpointer_type", response.EndpointerType.ToString());
                        Console.WriteLine("response: " + responseJSON);
                    }
                });

                // Send initial config request
                var configRequest = new StreamingRecognizeRequest();
                var streamingRecognitionConfig = new StreamingRecognitionConfig();
                var recognitionConfig          = new RecognitionConfig();
                recognitionConfig.Encoding   = RecognitionConfig.Types.AudioEncoding.Linear16;
                recognitionConfig.SampleRate = 16000;
                streamingRecognitionConfig.InterimResults  = true;
                streamingRecognitionConfig.SingleUtterance = false;
                streamingRecognitionConfig.Config          = recognitionConfig;
                configRequest.StreamingConfig = streamingRecognitionConfig;
                await call.RequestStream.WriteAsync(configRequest);

                // Send audio chunks
                Task sendChunksTask = SendChunks(call);

                await sendChunksTask;
                await call.RequestStream.CompleteAsync();

                await responseReaderTask;
            }
        }
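
The SendChunks helper called in Example #3 is not shown. A minimal sketch under the assumption that the audio is read from a raw 16 kHz LINEAR16 file whose path is held in a hypothetical m_AudioFilePath field (requires "using Grpc.Core;", "using System.IO;", and "using Google.Protobuf;"); in the original, the bytes may well come from a microphone buffer instead:

        // A sketch of the SendChunks helper referenced above; the original is not shown.
        static async Task SendChunks(
            AsyncDuplexStreamingCall<StreamingRecognizeRequest, StreamingRecognizeResponse> call)
        {
            var buffer = new byte[16 * 1024];
            using (var stream = File.OpenRead(m_AudioFilePath))  // hypothetical field
            {
                int bytesRead;
                while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
                {
                    // Every request after the config request carries only audio content
                    await call.RequestStream.WriteAsync(new StreamingRecognizeRequest
                    {
                        AudioContent = ByteString.CopyFrom(buffer, 0, bytesRead),
                    });
                }
            }
        }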