/// <summary>
        /// Called by the wave-in recording task when we think we have enough buffered audio to send to Google.
        /// </summary>
        /// <returns></returns>
        private async Task <object> StreamBufferToGooglesAsync()
        {
            //I don't like having to re-create these every time, but extracting this
            //setup into its own method is left for a future refactoring.
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();

            // Write the initial request with the config.
            //This mirrors Google's sample code; I tried unrolling it and the API
            //stopped working, so it stays like this for now.
            await streamingCall.WriteAsync(new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },

                    //Note: InterimResults must be true to receive real-time partial results.
                    // InterimResults = true,
                    SingleUtterance = true,
                }
            });



            //Copy whatever audio is sitting in our internal wave buffer into a
            //byte array for Google.
            byte[] buffer = new byte[waveBuffer.BufferLength];
            int    offset = 0;
            int    count  = waveBuffer.BufferLength;

            //Read the buffered audio into the byte array
            waveBuffer.Read(buffer, offset, count);

            try
            {
                //Finally, send the audio to Google.
                await streamingCall.WriteAsync(new StreamingRecognizeRequest()
                {
                    AudioContent = Google.Protobuf.ByteString.CopyFrom(buffer, 0, count)
                });
            }
            catch (Exception wtf)
            {
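                //TODO: log this instead of silently swallowing the exception.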
                string wtfMessage = wtf.Message;
            }

            //Again, this mirrors Google's sample code; I tried unrolling it and the
            //API stopped working, so it stays like this for now.

            //Print responses as they arrive. This should move into its own method for cleanliness.
            Task printResponses = Task.Run(async() =>
            {
                string saidWhat     = "";
                string lastSaidWhat = "";
                while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream.Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            saidWhat = alternative.Transcript;
                            if (lastSaidWhat != saidWhat)
                            {
                                Console.WriteLine(saidWhat);
                                lastSaidWhat = saidWhat;
                                //Need to call this on UI thread ....
                                textBox1.Invoke((MethodInvoker) delegate { textBox1.AppendText(saidWhat + " \r\n"); });
                            }
                        } // end for
                    }     // end for
                }
            });

            //Clear our internal wave buffer
            waveBuffer.ClearBuffer();

            //Tell Google we are done for now
            await streamingCall.WriteCompleteAsync();

            //Wait for the response-reading task to finish before returning.
            await printResponses;

            return(0);
        }
Example #2
        /// <summary>
        /// Places a call through Twilio, waits for Twilio to record it, then sends the recording to the Google Speech-to-Text API and saves the transcription to a local .txt file.
        /// </summary>
        public static void Call()
        {
            Console.WriteLine("Init Twilio API...");
            //Twilio SID and Token
            const string accountSid = "ACa62c3db32794048447xxxxxxxxxxxx";
            const string authToken  = "f420e52ee6774e0898fdxxxxxxxxxxxx";

            TwilioClient.Init(accountSid, authToken);
            Console.WriteLine("Let's make a call, please provide me your phone number (format: +AreaNumner)");
            var phoneNumber = Console.ReadLine();
            var call        = CallResource.Create(
                record: true,
                url: new Uri("https://corn-collie-1715.twil.io/assets/Voice.xml"), //it is best practice to host the call's assets in your Twilio account
                to: new Twilio.Types.PhoneNumber(phoneNumber),
                from: new Twilio.Types.PhoneNumber("+000000000")                   //Twilio number created in your account
                );

            RecordingResource recordings;

            RecordingResource.StatusEnum recordingStatus;

            //Poll until Twilio has created a recording resource for this call.
            do
            {
                System.Threading.Thread.Sleep(1000);
                recordings = RecordingResource.Read().Where(x => x.CallSid == call.Sid).FirstOrDefault();
            } while (recordings == null);

            //Poll until the recording has finished processing.
            do
            {
                Console.WriteLine("Processing Recording....");
                System.Threading.Thread.Sleep(1000);
                recordingStatus = RecordingResource.Read().Where(x => x.CallSid == call.Sid).Select(x => x.Status).FirstOrDefault();
            } while (recordingStatus == RecordingResource.StatusEnum.Processing);

            WebClient wc = new WebClient();

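            //Download the recorded call audio (.wav) from Twilio.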
            wc.DownloadFile(@"https://api.twilio.com/" + recordings.Uri.Replace("json", "wav"), recordings.Sid + ".wav");
            Console.WriteLine("Now we have the recording,Lets sync with Google Services, please wait... ");
            string audioDirectory = Path.Combine(Environment.CurrentDirectory, recordings.Sid + ".wav");
            var    memoryStream   = new MemoryStream();

            using (var file = new FileStream(audioDirectory, FileMode.Open, FileAccess.Read))
                file.CopyTo(memoryStream);

            //Rewind the stream so the upload starts from the beginning.
            memoryStream.Position = 0;

            var speechClient  = SpeechClient.Create();
            var storageClient = StorageClient.Create();

            //We have to upload the file to Google Cloud Storage before transcribing
            var uploadedWavFile = storageClient.UploadObject(GoogleBucketName, recordings.Sid + ".wav", "audio/wav", memoryStream);

            //Get the file
            var storageObject = storageClient.GetObject(GoogleBucketName, recordings.Sid + ".wav");
            var storageUri    = $@"gs://{GoogleBucketName}/{storageObject.Name}";

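            //Make the uploaded object publicly readable (one simple way to make sure the Speech API can fetch it).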
            storageObject.Acl = storageObject.Acl ?? new List <ObjectAccessControl>();
            storageClient.UpdateObject(storageObject, new UpdateObjectOptions
            {
                PredefinedAcl = PredefinedObjectAcl.PublicRead
            });

            Console.WriteLine("We will start to transcribe your recording, this operation will take few moments...");
            //Speech to Text operation
            var longOperation = speechClient.LongRunningRecognize(new RecognitionConfig()
            {
                //The properties below are not the ones required for MP3 files, which is why the operation returns null; we could make this
                //more generic by knowing which properties each file type needs, or standardize on a single input type.
                //Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
                //SampleRateHertz = 44100,
                EnableWordTimeOffsets = true,
                LanguageCode          = "en-US"
            }, RecognitionAudio.FromStorageUri(storageUri));

            longOperation = longOperation.PollUntilCompleted();
            //TODO: fix this implementation. Sometimes there is a null being returned from longOperation.Result
            var response = longOperation.Result;

            if (response != null && response.Results != null && response.Results.Count > 0)
            {
                Console.WriteLine("Is done!, now we will create a file with the complete transcription for you...");
                //var resultArray = (JsonConvert.DeserializeObject<RootObject>(response.Results.ToString()));
                foreach (var res in response.Results)
                {
                    string transcription = res.Alternatives.Select(x => x.Transcript).FirstOrDefault();
                    File.AppendAllText(Path.Combine(Environment.CurrentDirectory, recordings.Sid + ".txt"), transcription);
                }
            }
            Console.WriteLine("File Created!, Now we will clean our directories and give you the path of the mentioned file...");
            storageClient.DeleteObject(GoogleBucketName, storageObject.Name);

            if (File.Exists(Path.Combine(Environment.CurrentDirectory, recordings.Sid + ".wav")))
            {
                File.Delete(Path.Combine(Environment.CurrentDirectory, recordings.Sid + ".wav"));
            }

            Console.WriteLine("You can find your txt file here: " + Path.Combine(Environment.CurrentDirectory, recordings.Sid + ".txt"));
            Console.ReadLine();
        }
        private static async Task <object> StreamBufferToGooglesAsync()
        {
            //I don't like having to re-create these every time, but extracting this
            //setup into its own method is left for a future refactoring.
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();

            // Write the initial request with the config.
            //This mirrors Google's sample code; I tried unrolling it and the API
            //stopped working, so it stays like this for now.
            await streamingCall.WriteAsync(new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "bg-BG",
                    },

                    //Note: InterimResults must be true to receive real-time partial results.
                    // InterimResults = true,
                    SingleUtterance = true,
                }
            });



            //Copy whatever audio is sitting in our internal wave buffer into a
            //byte array for Google.
            byte[] buffer = new byte[waveBuffer.BufferLength];
            int    offset = 0;
            int    count  = waveBuffer.BufferLength;

            //Read the buffer
            waveBuffer.Read(buffer, offset, count);
            //Clear our internal wave buffer
            waveBuffer.ClearBuffer();

            try
            {
                //Sending to Google for STT
                await streamingCall.WriteAsync(new StreamingRecognizeRequest()
                {
                    AudioContent = Google.Protobuf.ByteString.CopyFrom(buffer, 0, count)
                });
            }
            catch (Exception wtf)
            {
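                //TODO: log this instead of silently swallowing the exception.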
                string wtfMessage = wtf.Message;
            }
            finally
            {
                //Tell Google we are done for now
                await streamingCall.WriteCompleteAsync();
            }

            //Again, this mirrors Google's sample code; I tried unrolling it and the
            //API stopped working, so it stays like this for now.

            //Print responses as they arrive. This should move into its own method for cleanliness.
            Task printResponses = Task.Run(async() =>
            {
                string saidWhat     = "";
                string lastSaidWhat = "";
                while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream.Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            saidWhat = alternative.Transcript;
                            if (lastSaidWhat != saidWhat)
                            {
                                lastSaidWhat = saidWhat;
                                Console.WriteLine(saidWhat.ToLower().Trim() + " \r\n");

                                // TODO: decide whether to trim the message text.

                                string myString = Encoding.UTF8.GetString(Encoding.UTF8.GetBytes(saidWhat));
                                // Send the recognized text to the MQTT topic
                                mqttClient.Publish("/sttbg_mqtt/stt_text", Encoding.UTF8.GetBytes("{\"data\": \"" + myString.ToLower() + "\"}"), MqttMsgBase.QOS_LEVEL_EXACTLY_ONCE, true);
                            }
                        } // end for
                    }     // end for
                }
            });

            try
            {
                await printResponses;
            }
            catch
            {
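                //TODO: log errors from the response-reading task instead of ignoring them.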
            }

            return(0);
        }
        private async Task <string> Recognition(string credentialsFilePath, string filePath)
        {
            GoogleCredential googleCredential;

            using (Stream m = new FileStream(credentialsFilePath, FileMode.Open))
            {
                googleCredential = GoogleCredential.FromStream(m);
            }
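            //Build a gRPC channel authenticated with the service-account credentials file.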
            var channel = new Grpc.Core.Channel(SpeechClient.DefaultEndpoint.Host, googleCredential.ToChannelCredentials());

            var speech        = SpeechClient.Create(channel);
            var streamingCall = speech.StreamingRecognize();
            await streamingCall.WriteAsync(new StreamingRecognizeRequest
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en-US"
                    },
                    InterimResults = true
                }
            });

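            //Stream the audio file to the API in 32 KB chunks.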
            using (FileStream fileStream = new FileStream(filePath, FileMode.Open))
            {
                var buffer = new byte[32 * 1024];
                int bytesRead;
                while ((bytesRead = await fileStream.ReadAsync(buffer, 0, buffer.Length)) > 0)
                {
                    await streamingCall.WriteAsync(
                        new StreamingRecognizeRequest
                    {
                        AudioContent = Google.Protobuf.ByteString.CopyFrom(buffer, 0, bytesRead)
                    });

                    //await Task.Delay(500);
                }
            }

            string finalText = string.Empty;

            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream.Current.Results)
                    {
                        if (!result.IsFinal)
                        {
                            foreach (var alternative in result.Alternatives)
                            {
                                Debug.WriteLine("alternative: " + alternative.Transcript);
                            }
                        }
                        else
                        {
                            foreach (var alternative in result.Alternatives)
                            {
                                Debug.WriteLine("final: " + alternative.Transcript);
                            }

                            var speechRecognitionAlternative = result.Alternatives.OrderByDescending(x => x.Confidence).FirstOrDefault();
                            if (speechRecognitionAlternative != null)
                            {
                                finalText = speechRecognitionAlternative.Transcript;
                            }
                        }
                    }
                }
            });

            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(finalText);
        }
Example #5
        async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No Mic");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();

            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },
                    InterimResults = true,
                }
            });

            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        var sentanceResult  = result.Alternatives.Last().Transcript;
                        var newWordList     = sentanceResult.Split(' ');
                        var currentWordList = currentSentance.Split(' ');

                        if (newWordList.Length > currentWordList.Length ||
                            newWordList.First() != currentWordList.First())
                        {
                            currentSentance = sentanceResult;
                            textBox1.Invoke((MethodInvoker)(() => textBox1.AppendText(newWordList.Last() + Environment.NewLine)));
                            if (newWordList.Last() != lastTrigger)
                            {
                                textBox1.Invoke((MethodInvoker)(() => textBox1.AppendText("TRIGGER: " + newWordList.Last() + Environment.NewLine)));
                                lastTrigger = newWordList.Last();
                            }
                            Console.WriteLine(newWordList.Last());
                        }
                    }
                }
            });

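            // Read from the microphone and stream audio chunks to the API; writeLock prevents writes after shutdown.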
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Reconnecting stream");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #6
        // [END speech_streaming_recognize]

        // [START speech_streaming_mic_recognize]
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #7
 public GoogleASR(Pipeline pipeline, string audioLanguage) : base(pipeline)
 {
     this.speech        = SpeechClient.Create();
     this.AudioLanguage = audioLanguage;
 }
        //Captures speech, converts it to text, and displays status messages.
        private async void CaptureVoice(object obj)
        {
            NotRunning   = false;
            VoiceCapture = "Status: Loading";

            List <string> words = new List <string>();

            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "ja-JP",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                            words.Add(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            VoiceCapture = "Status: Speak Now";
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(5));

            // Stop recording and shut down.
            waveIn.StopRecording();
            VoiceCapture = "Status: Processing";
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

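            //Check whether the recognized words contain the expected FrontSide or BackSide text.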
            if (words.Contains(BackSide) == true || words.Contains(FrontSide) == true)
            {
                VoiceCapture = "Status: Correct";
                Console.WriteLine("True");
            }
            else
            {
                VoiceCapture = "Status: Incorrect";
                Console.WriteLine("False");
            }

            NotRunning = true;
        }
Example #9
        private void backgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
        {
            Control.CheckForIllegalCrossThreadCalls = false;

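            //Stop capturing and write the buffered audio out to a fresh WAV file.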
            waveIn.StopRecording();

            if (File.Exists("audio.raw"))
            {
                File.Delete("audio.raw");
            }


            writer = new WaveFileWriter(output, waveIn.WaveFormat);



            byte[] buffer = new byte[bwp.BufferLength];
            int    offset = 0;
            int    count  = bwp.BufferLength;

            var read = bwp.Read(buffer, offset, count);

            if (count > 0)
            {
                writer.Write(buffer, offset, read);
            }

            waveIn.Dispose();
            waveIn = null;
            writer.Close();
            writer = null;

            reader = new WaveFileReader("audio.raw"); // (new MemoryStream(bytes));
            waveOut.Init(reader);
            waveOut.PlaybackStopped += new EventHandler <StoppedEventArgs>(waveOut_PlaybackStopped);
            //    waveOut.Play();

            reader.Close();

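            //Send the recorded file to Google Speech-to-Text (Turkish).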
            if (File.Exists("audio.raw"))
            {
                var speech = SpeechClient.Create();

                var response = speech.Recognize(new RecognitionConfig()
                {
                    Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                    SampleRateHertz = 16000,
                    LanguageCode    = "tr",
                }, RecognitionAudio.FromFile("audio.raw"));


                textBox1.Text = "";

                foreach (var result in response.Results)
                {
                    foreach (var alternative in result.Alternatives)
                    {
                        textBox1.Text = textBox1.Text + " " + alternative.Transcript;
                    }
                }
                MessageBox.Show("Tamamlandı. Algılanan: " + textBox1.Text);
                if (textBox1.Text.Length == 0)
                {
                    textBox1.Text = "Ses kaydı çok uzun ya da hiç ses algılanamadı.";
                }
            }
            else
            {
                textBox1.Text = "Ses Dosyası Bulunamadı";
            }
        }
Example #10
 /// <summary>
 /// Default constructor; sets the expected audio language to English.
 /// </summary>
 /// <param name="pipeline"></param>
 public GoogleASR(Pipeline pipeline) : base(pipeline)
 {
     speech             = SpeechClient.Create();
     this.AudioLanguage = "en";
 }
        private async Task <int> StreamingMicRecognizeAsync()
        {
            try
            {
                _writeMore = true;
                timer      = new Stopwatch();
                timer.Start();
                if (WaveIn.DeviceCount < 1)
                {
                    throw new ApplicationException("No microphone!");
                }

                _speechClient = SpeechClient.Create();
                var stream = _speechClient.StreamingRecognize();
                streams.Add(stream);
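                //Bias recognition toward programming keywords by supplying phrase hints.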
                var speechContext = new SpeechContext();
                speechContext.Phrases.AddRange(new[]
                                               { "int", "for", "true", "false", "public", "private", "bool", "static", "void", "переменная" }
                                               /*.Concat(_variableProvider.GetVariables().Select(v => v.Name))*/);
                // Write the initial request with the config.
                StreamingRecognizeRequest recognizeRequest = GetStreamingRecognizeRequest(speechContext);
                await stream.WriteAsync(recognizeRequest);

                // Print responses as they arrive.

                Task printResponses = Task.Run(async() =>
                {
                    while (await stream.ResponseStream.MoveNext(default(CancellationToken)))
                    {
                        foreach (StreamingRecognitionResult streamingRecognitionResult in stream
                                 .ResponseStream
                                 .Current.Results)
                        {
                            if (streamingRecognitionResult.IsFinal)
                            {
                                var transcript = streamingRecognitionResult.Alternatives[0].Transcript;
                                OnSpeechRecognized?.Invoke(this, new SpeechRecognizerEventArgs(transcript));
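                                //Restart the stream once the elapsed time exceeds the threshold (presumably to stay under the streaming session limit).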
                                if (timer.Elapsed.TotalSeconds >= threshold)
                                {
                                    Restart();
                                }
                            }
                        }
                    }
                });
                // Read from the microphone and stream to API.
                ActivateMicrophone();
                Console.WriteLine("Speak now.");
                //await Task.Delay(TimeSpan.FromSeconds(seconds));
                // Stop recording and shut down.
                //StopRecognition();
                await printResponses;
                //await printResponses;
                return(0);
            }
            catch (Exception e)
            {
                Debug.WriteLine(e);
            }

            return(-1);
        }
Example #12
        public static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            String R = "";

            try
            {
                if (NAudio.Wave.WaveIn.DeviceCount < 1)
                {
                    System.Windows.Forms.MessageBox.Show("No Mic Found");
                    return(-1);
                }
                else
                {
                }

                var speech        = SpeechClient.Create();
                var streamingCall = speech.StreamingRecognize();
                // Write the initial request with the config.
                await streamingCall.WriteAsync(
                    new StreamingRecognizeRequest()
                {
                    StreamingConfig = new StreamingRecognitionConfig()
                    {
                        Config = new RecognitionConfig()
                        {
                            Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                            SampleRateHertz = 16000,

                            MaxAlternatives = 1,
                            LanguageCode    = "vi",
                        },
                        InterimResults = true,
                    }
                });

                // Print responses as they arrive.
                Task printResponses = Task.Run(async() =>
                {
                    string a = "";
                    while (await streamingCall.ResponseStream.MoveNext(
                               default(CancellationToken)))
                    {
                        R = R + "@";

                        foreach (var result in streamingCall.ResponseStream.Current.Results)
                        {
                            //result.ToString();
                            R = R + result.Alternatives[0].Transcript.ToString(); // alternative.Transcript.ToString();
                                                                                  //    foreach (var alternative in result.Alternatives)
                                                                                  //    {
                                                                                  //        T.Text = T.Text + alternative.Transcript.ToString();
                                                                                  //        //Console.WriteLine(alternative.Transcript);
                                                                                  //    }
                        }

                        DBase.SR = R.Substring(R.LastIndexOf("@"));
                    }
                });
                // Read from the microphone and stream to API.

                object writeLock = new object();
                bool   writeMore = true;
                var    waveIn    = new NAudio.Wave.WaveInEvent();
                waveIn.DeviceNumber   = 0;
                waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
                waveIn.DataAvailable +=
                    (object sender, NAudio.Wave.WaveInEventArgs args) =>
                {
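                    //An external "STOP" request ends the capture and closes the stream early.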
                    if (DBase.SPEECH_STATUS == "STOP")
                    {
                        DBase.SPEECH_STATUS = "";
                        waveIn.StopRecording();
                        DBase.SPEECH_STATUS = "PAUSE";
                        lock (writeLock) writeMore = false;
                        streamingCall.WriteCompleteAsync();

                        return;
                    }
                    lock (writeLock)
                    {
                        if (!writeMore)
                        {
                            return;
                        }
                        streamingCall.WriteAsync(
                            new StreamingRecognizeRequest()
                        {
                            AudioContent = Google.Protobuf.ByteString
                                           .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                        }).Wait();
                    }
                };

                waveIn.StartRecording();

                DBase.SR            = DBase.SR + DateTime.Now.ToString("hh:mm") + " - Speak Now : ";
                DBase.SPEECH_STATUS = "ON";
                await Task.Delay(TimeSpan.FromSeconds(seconds));

                // Stop recording and shut down.
                waveIn.StopRecording();
                DBase.SPEECH_STATUS = "PAUSE";
                lock (writeLock) writeMore = false;
                await streamingCall.WriteCompleteAsync();

                await printResponses;
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.ToString());
            }
            return(0);
        }
Example #13
        private static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (WaveIn.DeviceCount < 1)
            {
                File.WriteAllText("error.txt", "No microphone!");
                return((object)-1);
            }
            string lower = INISetting.GetValueWithAdd <string>("CredentialsFilePath", "credentials.json").ToLower();

            Console.WriteLine(lower);
            GoogleCredential googleCredential;

            using (Stream stream = (Stream) new FileStream(lower, FileMode.Open))
                googleCredential = GoogleCredential.FromStream(stream);
            SpeechClient.StreamingRecognizeStream streamingCall = SpeechClient.Create(new Channel(SpeechClient.DefaultEndpoint.Host, googleCredential.ToChannelCredentials())).StreamingRecognize();
            await streamingCall.WriteAsync(new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "ru"
                    },
                    InterimResults = true
                }
            });

            Task printResponses = Task.Run((Func <Task>)(async() =>
            {
                string s = "";
                while (true)
                {
                    if (await streamingCall.ResponseStream.MoveNext(new CancellationToken()))
                    {
                        using (IEnumerator <StreamingRecognitionResult> enumerator1 = streamingCall.ResponseStream.Current.Results.GetEnumerator())
                        {
                            if (enumerator1.MoveNext())
                            {
                                using (IEnumerator <SpeechRecognitionAlternative> enumerator2 = enumerator1.Current.Alternatives.GetEnumerator())
                                {
                                    if (enumerator2.MoveNext())
                                    {
                                        SpeechRecognitionAlternative current = enumerator2.Current;
                                        Console.WriteLine(current.Transcript);
                                        s += current.Transcript;
                                    }
                                }
                            }
                        }
                        File.WriteAllText(Path.GetTempPath() + "\\speechtext\\speechtext.txt", s);
                        s = "";
                    }
                    else
                    {
                        break;
                    }
                }
            }));
            object      writeLock = new object();
            bool        writeMore = true;
            WaveInEvent waveIn    = new WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new WaveFormat(16000, 1);
            waveIn.DataAvailable += (EventHandler <WaveInEventArgs>)((sender, args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(new StreamingRecognizeRequest()
                    {
                        AudioContent = ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            });
            waveIn.StartRecording();
            Console.WriteLine("Speak now " + (object)seconds);
            await Task.Delay(TimeSpan.FromSeconds((double)seconds));

            waveIn.StopRecording();
            lock (writeLock)
                writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return((object)0);
        }
Example #14
        public static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "ko-KR",
                        Model           = "command_and_search",
                        UseEnhanced     = true,
                        SpeechContexts  = { new SpeechContext()
                                            {
                                                Phrases = { "티미야", "인터넷", "켜", "꺼" }
                                            } }
                    },
                    InterimResults = true
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            resultText = alternative.Transcript;

                            //Console.WriteLine(resultText);
                        }
                    }
                }
            });
            // Read from the microphone and stream to the API.
            object writeLock = new object();

            writeMore = true;
            var waveIn = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
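            //Two short beeps as an audible cue that recording has started.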
            Beep(512, 50);
            Beep(640, 50);
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
 private InfiniteStreaming(CaptureMode captureMode, MyctBridge myct)
 {
     _myct        = myct;
     _client      = SpeechClient.Create();
     _captureMode = captureMode;
 }
Example #16
        public static async Task <List <string> > reconocerVoz(int tiempo)
        {
            List <string> listaSoluciones = new List <string>();

            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Debug.Log("Sin microfono");
                return(listaSoluciones);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();


            //Initial request configuration
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "es-ES",
                    },
                    InterimResults  = true,
                    SingleUtterance = true     //stop recognizing once the speaker is detected to have stopped talking
                }
            }
                );

            //Print responses as they arrive
            Task pintaRespuestas = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream.Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Debug.Log(alternative.Transcript);
                            listaSoluciones.Add(alternative.Transcript);
                        }
                    }
                }
            });


            //Read from the microphone and send to the API
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable += (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }
                        ).Wait();
                }
            };
            waveIn.StartRecording();
            Debug.Log("Habla");
            grabando = true;
            await Task.Delay(TimeSpan.FromSeconds(tiempo));

            //Stop recording and finish
            waveIn.StopRecording();
            grabando = false;
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await pintaRespuestas;
            await SpeechClient.ShutdownDefaultChannelsAsync();

            return(listaSoluciones);
        }
Example #17
        async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            object writeLock = new object();
            bool   writeMore = true;

            if (tamam)
            {
                return(0);
            }


            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                metin.Content = "Mikrofon Yok!";
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();

            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "tr",
                    },
                    InterimResults = true,
                }
            });

            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(System.Threading.CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            if (!tamam)
                            {
                                yazi = alternative.Transcript;
                                timer.Start();
                            }
                        }
                    }
                }
            });



            var waveIn = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };


            waveIn.StartRecording();
            metin.Content        = "Şimdi Konuşabilirsiniz";
            kulak.Visibility     = Visibility.Visible;
            acikAgiz.IsEnabled   = false;
            kapaliAgiz.IsEnabled = false;
            try
            {
                await Task.Delay(TimeSpan.FromSeconds(seconds), cancellationTokenSource.Token);
            }
            catch (TaskCanceledException ex)
            {
                Console.WriteLine(ex.Message);
            }
            finally
            {
                cancellationTokenSource.Dispose();
            }

            acikAgiz.IsEnabled   = true;
            kapaliAgiz.IsEnabled = true;
            kulak.Visibility     = Visibility.Hidden;
            waveIn.StopRecording();

            lock (writeLock) writeMore = false;


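            //Depending on the selected mode, either answer (cevapla) or translate (cevir) the recognized text.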
            if (genelMod.IsChecked == true)
            {
                cevapla(yazi);
            }
            if (ceviriMod.IsChecked == true)
            {
                cevir(yazi);
            }

            await streamingCall.WriteCompleteAsync();

            await printResponses;

            metin.Content = yazi;

            return(0);
        }
Example #18
        private void transcribeBtn_Click(object sender, EventArgs e)
        {
            processingLabel.Visible = true;
            //need to add some sort of loading screen because this takes a bit of time

            if (filePath.Contains(".mp3"))
            {
                //convert mp3 to wav - not required currently
            }

            newFilePath = filePath + "m.wav"; //have to create new file, but it will be deleted after it's used

            if (URIString == "")
            {
                if (audioProcessor.getDuration(filePath) <= new TimeSpan(0, 1, 0))
                {
                    audioProcessor.ConvertStereoToMono(filePath, newFilePath); //usually files are recorded stereo, we need mono here

                    var speech   = SpeechClient.Create();
                    var response = speech.Recognize(new RecognitionConfig()
                    {
                        Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
                        Model    = voiceModelDropdown.SelectedItem.ToString(),
                        EnableAutomaticPunctuation = enableAutoPunctuationDropdown.SelectedItem.ToString() == "Yes", //true if "Yes" is selected
                        // SampleRateHertz = processor.getSampleRate(filePath),      //another customisable option
                        LanguageCode = languageCodeBox.Text.ToString(),
                        //maxAlternatives, profanityFilter, speechContext
                    }, RecognitionAudio.FromFile(newFilePath));

                    foreach (var result in response.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            richTextBox1.Text = alternative.Transcript;
                            transcribedText  += alternative.Transcript;
                        }
                    }
                    saveTranscriptBtn.Enabled = true;
                    processingLabel.Visible   = false;
                }

                else //if file is longer than 1 minute
                {
                    MessageBox.Show("File is longer than 1 minute, please upload the file to Google Cloud and use an URI to access it");
                    transcribeBtn.Enabled   = false;
                    processingLabel.Visible = false;
                }
            }

            else //if URI string has been provided
            {
                try
                {
                    // asyncTranscribe(URIString);
                    AsyncRecognizeGcs(URIString);
                    //"gs://s2t-test-bucket1/speech.mp3m.wav"
                }
                catch (UriFormatException UriException)
                {
                    //write to an error log maybe
                    MessageBox.Show("Invalid URI string");
                    cloudErrors.report(UriException);
                }
            }

            File.Delete(newFilePath);
        }
        static async Task <object> StreamingMicrophoneRecognizeAsync(int seconds = 60, string languageCode = "en-US")
        {
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
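            // Write the initial request with the config.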
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 44100,
                        LanguageCode    = languageCode
                    },
                    InterimResults = true,
                }
            });

            Task printResponses = Task.Run(async() =>
            {
                var responseStream = streamingCall.GetResponseStream();
                while (await responseStream.MoveNextAsync())
                {
                    StreamingRecognizeResponse response = responseStream.Current;
                    Console.WriteLine(response.Results[0].Alternatives[0].Transcript); // Print most probable result.
                }
            });

            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(44100, 1); // 44100Hz Mono.
            waveIn.DataAvailable += (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }

                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };

            waveIn.StartRecording();
            Console.WriteLine("Speek now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            waveIn.StopRecording();
            lock (writeLock)
            {
                writeMore = false;
            }

            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #20
        // [END speech_async_recognize_gcs]

        /// <summary>
        /// Stream the content of the file to the API in 32kb chunks.
        /// </summary>
        // [START speech_streaming_recognize]
        static async Task <object> StreamingRecognizeAsync(string filePath)
        {
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });

            // Stream the file content to the API.  Write 2 32kb chunks per
            // second.
            using (FileStream fileStream = new FileStream(filePath, FileMode.Open))
            {
                var buffer = new byte[32 * 1024];
                int bytesRead;
                while ((bytesRead = await fileStream.ReadAsync(
                            buffer, 0, buffer.Length)) > 0)
                {
                    await streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(buffer, 0, bytesRead),
                    });

                    await Task.Delay(500);
                }
            }
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
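        // A hedged usage sketch for StreamingRecognizeAsync above; the Main method
        // and the file name "audio.raw" are assumptions for illustration only.
        static void Main(string[] args)
        {
            //Expects a 16 kHz, mono, LINEAR16 file, matching the config above
            StreamingRecognizeAsync("audio.raw").Wait();
        }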
Example #21
        private void backgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
        {
            //Note: disabling cross-thread checks is a shortcut; marshalling UI updates with Invoke is safer
            Control.CheckForIllegalCrossThreadCalls = false;

            //waveIn.StopRecording();

            if (File.Exists("audio.raw"))
            {
                File.Delete("audio.raw");
            }

            writer = new WaveFileWriter(output, waveIn.WaveFormat);


            byte[] buffer = new byte[bwp.BufferLength];
            int    offset = 0;
            int    count  = bwp.BufferLength;

            var read = bwp.Read(buffer, offset, count);

            if (read > 0)
            {
                writer.Write(buffer, offset, read);
            }

            waveIn.Dispose();
            waveIn = null;
            writer.Close();
            writer = null;

            reader = new WaveFileReader("audio.raw"); // (new MemoryStream(bytes));
            waveOut.Init(reader);
            waveOut.PlaybackStopped += new EventHandler <StoppedEventArgs>(waveOut_PlaybackStopped);
            waveOut.Play();

            //Note: closing the reader immediately can cut playback short; disposing it in PlaybackStopped is safer
            reader.Close();

            if (File.Exists("audio.raw"))
            {
                var speech = SpeechClient.Create();

                var response = speech.Recognize(new RecognitionConfig()
                {
                    Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                    SampleRateHertz = 16000,
                    LanguageCode    = cmbLanguage.SelectedValue.ToString()
                }, RecognitionAudio.FromFile("audio.raw"));


                txtRecord.Text = "";

                foreach (var result in response.Results)
                {
                    foreach (var alternative in result.Alternatives)
                    {
                        txtRecord.Text = txtRecord.Text + " " + alternative.Transcript;
                    }
                }

                if (txtRecord.Text.Length == 0)
                {
                    txtRecord.Text = "The recording was too long or no sound was detected.";
                }
            }
            else
            {
                txtRecord.Text = "Microphone can't connected";
            }
        }
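        // The waveOut_PlaybackStopped handler wired up above is not shown in this
        // snippet; a minimal sketch under the assumption that it only reports errors
        // and releases the output device once playback has finished.
        private void waveOut_PlaybackStopped(object sender, StoppedEventArgs e)
        {
            if (e.Exception != null)
            {
                Console.WriteLine(e.Exception.Message); //playback ended because of an error
            }
            waveOut.Dispose(); //release the output device
        }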
Example #22
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            //await Connect();

            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
            var credential = GoogleCredential.FromFile(@"D:\EsferaColor\TranscribingAudio\SpeakToText-c65312fe0200.json").CreateScoped(SpeechClient.DefaultScopes);
            var channel    = new Grpc.Core.Channel(SpeechClient.DefaultEndpoint.ToString(), credential.ToChannelCredentials());

            var speech        = SpeechClient.Create(channel);
            var streamingCall = speech.StreamingRecognize();

            //var speech = SpeechClient.Create(); /*AuthExplicitComputeEngine("640f1acceb995a6bc4deb4e766e76dca6c5bb7d0");*/
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "es-Es",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
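        // The example above builds a gRPC channel from an explicit service-account
        // key file. As an alternative sketch, the client library can resolve default
        // credentials on its own when GOOGLE_APPLICATION_CREDENTIALS points at a key
        // file; the helper below is illustrative, not part of the original code.
        static SpeechClient CreateSpeechClientFromDefaultCredentials()
        {
            //Assumes the GOOGLE_APPLICATION_CREDENTIALS environment variable is set
            //to the path of a service-account JSON key before the process starts
            return SpeechClient.Create();
        }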
Example #23
        // [START speech_transcribe_streaming_mic]
        public async Task <object> Listen(int seconds, object s, EventArgs e)
        {
            List <string>           textTranslation = new List <string>();
            CancellationTokenSource token           = new CancellationTokenSource();

            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz            = 16000,
                        LanguageCode               = "en",
                        Model                      = "command_and_search",
                        EnableAutomaticPunctuation = true,
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                            if (alternative.Transcript.Contains("activate"))
                            {
                                token.Cancel();
                                return;
                            }
                        }
                    }
                }
            });


            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber = 0;
            waveIn.WaveFormat   = new NAudio.Wave.WaveFormat(16000, 1);
            try
            {
                waveIn.DataAvailable +=
                    (object sender, NAudio.Wave.WaveInEventArgs args) =>
                {
                    lock (writeLock)
                    {
                        if (!writeMore)
                        {
                            Console.WriteLine("no write more");
                            return;
                        }

                        streamingCall.WriteAsync(
                            new StreamingRecognizeRequest()
                        {
                            AudioContent = Google.Protobuf.ByteString
                                           .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                        }).Wait();
                        //Console.WriteLine("after writeAsync");
                    }
                    //Console.WriteLine("after writelock");
                };
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.ToString());
                Environment.Exit(0);
            }
            waveIn.StartRecording();
            Console.WriteLine("Listening..");

            try
            {
                await Task.Delay(TimeSpan.FromSeconds(seconds), token.Token);
            }
            catch (TaskCanceledException)
            {
                //Cancellation here means "activate" was heard; fall through and shut down cleanly
            }

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock)
            {
                writeMore = false;
            }

            await streamingCall.WriteCompleteAsync();

            await printResponses;


            Console.WriteLine("Restarting..");

            return(textTranslation);
        }
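        // The "Restarting.." message above suggests the caller re-invokes Listen once
        // it returns; a hypothetical caller loop (the method name and the 30-second
        // window below are assumptions for illustration).
        public async Task ListenForever(object s, EventArgs e)
        {
            while (true)
            {
                await Listen(30, s, e); //listen in fixed windows until the process is stopped
            }
        }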