Exemplo n.º 1
0
        /// <summary>
        /// Records microphone audio to a WAV file in the temp folder for the given duration.
        /// </summary>
        /// <param name="fileName">Name of the output WAV file, appended to Options.options.tempFolderPath.</param>
        /// <param name="timeout">How long to record.</param>
        public static async System.Threading.Tasks.Task MicrophoneToFileAsync(string fileName, TimeSpan timeout)
        {
            Console.Write("Listening for " + timeout.TotalSeconds + " seconds ...");
            // create wave input using microphone
            using (NAudio.Wave.WaveInEvent waveIn = new NAudio.Wave.WaveInEvent())
            {
                waveIn.DeviceNumber       = Options.options.audio.NAudio.inputDeviceNumber;
                waveIn.BufferMilliseconds = Options.options.audio.NAudio.waveInBufferMilliseconds;
                waveIn.WaveFormat         = new NAudio.Wave.WaveFormat(Options.options.audio.samplingRate, (int)Options.options.audio.bitDepth, Options.options.audio.channels); // usually only mono (one channel) is supported
                waveIn.DataAvailable     += WaveIn_DataAvailable;                                                                                                                // use event to fill buffer
                using (waveInFile = new NAudio.Wave.WaveFileWriter(Options.options.tempFolderPath + fileName, waveIn.WaveFormat))
                {
                    waveIn.StartRecording();

                    // BUG FIX: the original called Task.Delay(timeout).Wait(), which blocks
                    // the calling thread inside an async method (deadlock risk on a UI/sync
                    // context, thread starvation elsewhere). Await the delay instead.
                    await System.Threading.Tasks.Task.Delay(timeout);

                    waveIn.StopRecording();

                    waveInFile.Close();
                }
                Console.WriteLine("");
            }
        }
Exemplo n.º 2
0
 /// <summary>
 /// Stops the active recording (if any) and releases the wave-in device.
 /// </summary>
 private void BtnStop_Click(object sender, EventArgs e)
 {
     if (wvin != null)
     {
         wvin.StopRecording();
         // BUG FIX: WaveInEvent is IDisposable; dispose it so the audio device
         // handle is released instead of leaking until garbage collection.
         wvin.Dispose();
         wvin = null;
     }
 }
Exemplo n.º 3
0
 /// <summary>
 /// Opens the requested audio input device and begins ECG capture.
 /// </summary>
 /// <param name="deviceNumber">Index of the NAudio wave-in device to record from.</param>
 public ECG(int deviceNumber)
 {
     wvin = new NAudio.Wave.WaveInEvent
     {
         DeviceNumber       = deviceNumber,
         WaveFormat         = new NAudio.Wave.WaveFormat(SAMPLERATE, BITRATE, CHANNELS),
         BufferMilliseconds = BUFFERMILLISEC
     };
     wvin.DataAvailable += OnDataAvailable;
     Start();
 }
Exemplo n.º 4
0
                    /// <summary>
                    /// Plays a generated test signal through the output device while
                    /// simultaneously recording the response from every channel of the
                    /// input device. Blocks the calling thread for the full playback time.
                    /// </summary>
                    /// <param name="input_device">Wave-in device index to record from.</param>
                    /// <param name="ST">Type of excitation signal to generate.</param>
                    /// <param name="output_device">Wave-out device index to play through.</param>
                    public void Acquire(int input_device, Signal_Type ST, int output_device)
                    {
                        Running         = true;
                        Channels_in     = NAudio.Wave.WaveIn.GetCapabilities(input_device).Channels;
                        Response        = new List <short> [Channels_in];
                        block           = 2 * Channels_in; // bytes per frame: 2 bytes (16-bit) per channel
                        WI              = new NAudio.Wave.WaveInEvent();
                        WI.WaveFormat   = new NAudio.Wave.WaveFormat(SampleFreq, 16, Channels_in);
                        WI.DeviceNumber = input_device;

                        WI.BufferMilliseconds = 100;
                        WI.NumberOfBuffers    = 3;
                        WI.RecordingStopped  += WI_RecordingStopped;
                        WI.DataAvailable     += WI_DataAvailable;
                        WO.DeviceNumber       = output_device;
                        // One response buffer per input channel.
                        for (int c = 0; c < Channels_in; c++)
                        {
                            Response[c] = new List <short>();
                        }

                        SignalProvider Signal;

                        switch (ST)
                        {
                        case Signal_Type.Pink_Noise:
                            Signal = new Noise_Provider(1, (int)CT_Averages, SampleFreq);
                            break;

                        case Signal_Type.MLS:
                            throw new NotImplementedException();

                        case Signal_Type.Swept_Sine:
                            // Sweep from 63 Hz to 20 kHz, repeated CT_Averages times.
                            Signal = new Sweep_Provider((float)Signal_Length, CT_Averages, 63, 20000, SampleFreq);
                            break;

                        case Signal_Type.Logarithmic_Swept_Sine:
                            throw new NotImplementedException();

                        default:
                            System.Windows.Forms.MessageBox.Show("Select a Signal...");
                            return;
                        }

                        TD_Signal = Signal.Signal;

                        WO.NumberOfBuffers = 1;
                        WO.DesiredLatency  = 3000 * CT_Averages;
                        WO.Volume          = 1.0f;
                        WO.Init(Signal);
                        WI.StartRecording();
                        WO.Play();
                        // Block until playback should be finished (margin beyond the nominal
                        // signal time to cover device latency), then stop both devices.
                        System.Threading.Thread.Sleep((int)(Signal_Time_s * (3 + CT_Averages) * 1000));
                        WO.Stop();
                        WI.StopRecording();
                        // NOTE(review): short pause presumably lets the last DataAvailable
                        // callbacks drain before the stopped handler is forced — confirm.
                        System.Threading.Thread.Sleep(100);
                        WI_RecordingStopped(this, null);
                    }
Exemplo n.º 5
0
 /// <summary>
 /// Opens the requested audio input device, logs which device is used,
 /// and begins ECG capture.
 /// </summary>
 /// <param name="deviceNumber">Index of the NAudio wave-in device to record from.</param>
 public ECG(int deviceNumber)
 {
     Console.WriteLine($"Preparing audio device: {deviceNumber}");
     wvin = new NAudio.Wave.WaveInEvent
     {
         DeviceNumber       = deviceNumber,
         WaveFormat         = new NAudio.Wave.WaveFormat(SAMPLERATE, BITRATE, CHANNELS),
         BufferMilliseconds = BUFFERMILLISEC
     };
     wvin.DataAvailable += OnDataAvailable;
     Start();
 }
Exemplo n.º 6
0
 /// <summary>
 /// Switches capture to the newly selected input device: disposes the old
 /// device (if any), then creates and starts a fresh 16-bit mono recorder.
 /// </summary>
 private void cbDevices_SelectedIndexChanged(object sender, EventArgs e)
 {
     wvin?.Dispose();
     wvin = new NAudio.Wave.WaveInEvent
     {
         DeviceNumber       = cbDevices.SelectedIndex,
         WaveFormat         = new NAudio.Wave.WaveFormat(rate: SAMPLE_RATE, bits: 16, channels: 1),
         BufferMilliseconds = 20
     };
     wvin.DataAvailable += OnDataAvailable;
     wvin.StartRecording();
 }
Exemplo n.º 7
0
 /// <summary>
 /// Begins capturing 16-bit mono audio from the given device at the given
 /// sample rate, delivering data in 20 ms buffers to OnNewAudioData.
 /// </summary>
 /// <param name="deviceIndex">Wave-in device index to capture from.</param>
 /// <param name="sampleRate">Capture sample rate in Hz.</param>
 public Listener(int deviceIndex, int sampleRate)
 {
     SampleRate = sampleRate;

     wvin = new NAudio.Wave.WaveInEvent();
     wvin.DeviceNumber       = deviceIndex;
     wvin.WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, bits: 16, channels: 1);
     wvin.BufferMilliseconds = 20;
     wvin.DataAvailable     += OnNewAudioData;
     wvin.StartRecording();
 }
Exemplo n.º 8
0
        /// <summary>
        /// Wires up the live microphone waveform display, starts a background
        /// loop of 10-second speech-recognition sessions, and configures the
        /// volume-meter decay timer and chart appearance.
        /// </summary>
        private void MainForm_Load(object sender, EventArgs args)
        {
            var waveIn = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object waveSender, NAudio.Wave.WaveInEventArgs e) =>
            {
                // Oversized buffer (~10% padding: .55 of bytes vs .5 samples) so the
                // samples can be centered in the chart; the offset below positions them.
                short[] waveValues = new short[(int)(e.BytesRecorded * .55)];
                for (int i = 0; i < e.BytesRecorded; i += 2)
                {
                    // Decode each 16-bit sample and scale it down (/50) for display.
                    waveValues[(i / 2) + ((waveValues.Length - e.BytesRecorded / 2) / 2)] = (short)(BitConverter.ToInt16(e.Buffer, i) / 50);
                }
                try
                {
                    // Marshal onto the UI thread: controls must not be touched
                    // from NAudio's callback thread.
                    waveform.Invoke((MethodInvoker)(() => waveform.Series[0].Points.DataBindY(waveValues)));
                    UpdateVolumeMeter(Math.Abs(waveValues[waveValues.Length / 2]));
                }
                catch (Exception ex)
                {
                    // Swallow UI races (e.g. form closing mid-Invoke) but keep a trace.
                    Console.WriteLine(ex.Message);
                }
            };
            waveIn.StartRecording();



            // Continuously run 10-second recognition sessions on a dedicated thread.
            // NOTE(review): .Wait() blocks this background thread (tolerable here) but
            // will surface async exceptions as AggregateException — confirm handling.
            new Thread((ThreadStart) delegate
            {
                while (true)
                {
                    StreamingMicRecognizeAsync(10).Wait();
                }
            }).Start();

            // Volume meter decay: step the progress bar down every 50 ms.
            #region
            progressReset.Interval = 50;
            progressReset.Elapsed += (o, arg) =>
            {
                if (progressBar.Value > 0)
                {
                    progressBar.Invoke((MethodInvoker)(() => progressBar.Value--));
                }
            };
            progressReset.Start();
            #endregion

            wave.ChartType = SeriesChartType.Area;
            wave.Color     = Color.Blue;
        }
Exemplo n.º 9
0
 /// <summary>
 /// Toggles the microphone: opens the selected device, or stops and releases it.
 /// The button caption ("open"/"close") doubles as the state flag.
 /// </summary>
 private void BtnSetMicrophone_Click(object sender, EventArgs e)
 {
     if (btnSetMicrophone.Text == "open")
     {
         AudioMonitorInitialize(DeviceIndex: cbMicrophones.SelectedIndex);
         btnSetMicrophone.Text = "close";
     }
     else
     {
         btnSetMicrophone.Text = "open";
         wvin.StopRecording();
         // BUG FIX: dispose the WaveInEvent so the device handle is released;
         // the original just dropped the reference and leaked it until GC.
         wvin.Dispose();
         wvin = null;
     }
 }
Exemplo n.º 10
0
        /// <summary>
        /// Console entry point: records stereo audio from device 0 and reports
        /// peak levels via ShowPeakStereo until the process is terminated.
        /// </summary>
        static void Main(string[] args)
        {
            var waveIn = new NAudio.Wave.WaveInEvent
            {
                DeviceNumber       = 0, // customize this to select your microphone device
                WaveFormat         = new NAudio.Wave.WaveFormat(rate: 44100, bits: 16, channels: 2),
                BufferMilliseconds = 50
            };

            waveIn.DataAvailable += ShowPeakStereo;
            waveIn.StartRecording();
            // BUG FIX: the original empty while(true) loop busy-waited, pinning one
            // CPU core at 100%. Audio arrives on NAudio's own callback thread, so the
            // main thread only needs to stay alive — sleep instead of spinning.
            while (true)
            {
                System.Threading.Thread.Sleep(100);
            }
        }
Exemplo n.º 11
0
        /// <summary>
        /// Creates the spectrogram and begins streaming 16-bit mono microphone
        /// audio into it in 10 ms chunks.
        /// </summary>
        /// <param name="deviceIndex">Wave-in device to capture from.</param>
        /// <param name="sampleRate">Capture sample rate in Hz.</param>
        /// <param name="fftSize">FFT window size in samples.</param>
        public void StartListening(int deviceIndex = 0, int sampleRate = 8000, int fftSize = 1024)
        {
            spec = new Spectrogram.Spectrogram(sampleRate, fftSize, 250);

            wvin = new NAudio.Wave.WaveInEvent
            {
                DeviceNumber       = deviceIndex,
                WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, 16, 1),
                BufferMilliseconds = 10
            };
            wvin.DataAvailable += OnDataAvailable;
            wvin.StartRecording();
        }
Exemplo n.º 12
0
 /// <summary>
 /// Lazily creates and configures the wave-in device; does nothing if it
 /// already exists.
 /// </summary>
 /// <param name="DeviceIndex">Wave-in device index to capture from.</param>
 /// <param name="sampleRate">Capture sample rate in Hz.</param>
 /// <param name="bitRate">Bits per sample.</param>
 /// <param name="channels">Number of input channels.</param>
 /// <param name="bufferMilliseconds">Size of each capture buffer in ms.</param>
 /// <param name="start">Whether to begin recording immediately.</param>
 private void AudioMonitorInitialize(int DeviceIndex, int sampleRate = 8000, int bitRate = 16,
                                     int channels = 1, int bufferMilliseconds            = 20, bool start = true)
 {
     // Already initialized — leave the existing device untouched.
     if (wvin != null)
     {
         return;
     }

     wvin = new NAudio.Wave.WaveInEvent
     {
         DeviceNumber       = DeviceIndex,
         WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, bitRate, channels),
         BufferMilliseconds = bufferMilliseconds
     };
     wvin.DataAvailable += OnDataAvailable;
     if (start)
     {
         wvin.StartRecording();
     }
 }
Exemplo n.º 13
0
        /// <summary>
        /// Scans the installed wave-in devices for one whose product name matches
        /// this device's friendly name and returns it configured at 44.1 kHz.
        /// </summary>
        /// <returns>A configured (not started) WaveInEvent, or null when no device matches.</returns>
        private NAudio.Wave.WaveInEvent FindWaveIn()
        {
            int deviceCount = NAudio.Wave.WaveIn.DeviceCount;

            for (int index = 0; index < deviceCount; index++)
            {
                NAudio.Wave.WaveInCapabilities caps = NAudio.Wave.WaveIn.GetCapabilities(index);
                if (!DevicesMatch(caps.ProductName, dev.FriendlyName))
                {
                    continue;
                }

                return new NAudio.Wave.WaveInEvent
                {
                    DeviceNumber = index,
                    WaveFormat   = new NAudio.Wave.WaveFormat(44100, caps.Channels)
                };
            }

            return null;
        }
Exemplo n.º 14
0
        /// <summary>
        /// Creates the spectrogram and starts streaming microphone audio into it.
        /// </summary>
        /// <param name="DeviceIndex">Wave-in device to capture from.</param>
        /// <param name="sampleRate">Capture sample rate in Hz.</param>
        /// <param name="bitRate">Bits per sample.</param>
        /// <param name="channels">Number of input channels.</param>
        /// <param name="bufferMilliseconds">Size of each capture buffer in ms.</param>
        /// <param name="fftSize">FFT window size in samples.</param>
        /// <param name="step">Spectrogram segment step size in samples.</param>
        private void AudioMonitorInitialize(
            int DeviceIndex        = 0,
            int sampleRate         = 8000,
            int bitRate            = 16,
            int channels           = 1,
            int bufferMilliseconds = 10,
            int fftSize            = 1024,
            int step = 250
            )
        {
            spec = new Spectrogram.Spectrogram(sampleRate, fftSize, step);

            wvin = new NAudio.Wave.WaveInEvent
            {
                DeviceNumber       = DeviceIndex,
                WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, bitRate, channels),
                BufferMilliseconds = bufferMilliseconds
            };
            wvin.DataAvailable += OnDataAvailable;
            wvin.StartRecording();
        }
Exemplo n.º 15
0
        /// <summary>
        /// Locates the mute control on the default audio input device's mixer.
        /// </summary>
        /// <returns>The device's boolean mute control.</returns>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the mixer exposes no mute control.
        /// </exception>
        private BooleanMixerControl GetMuteControl()
        {
            // using-statement replaces the original try/finally + explicit Dispose.
            using (var waveInEvent = new NAudio.Wave.WaveInEvent())
            {
                //get mixer of default audio device
                var mixer = waveInEvent.GetMixerLine();
                var muter = mixer.Controls.FirstOrDefault(x => x.ControlType == NAudio.Mixer.MixerControlType.Mute) as BooleanMixerControl;
                if (muter == null)
                {
                    // BUG FIX: throw a specific exception type instead of bare Exception
                    // (still caught by any existing catch (Exception) handler).
                    throw new InvalidOperationException(BadMicrophoneMessage);
                }

                return muter;
            }
        }
Exemplo n.º 16
0
        /// <summary>
        /// Starts the spectrogram display: either pre-loads a WAV file or begins
        /// live 16-bit mono capture from the selected input device.
        /// </summary>
        /// <param name="deviceIndex">Wave-in device to record from (live mode only).</param>
        /// <param name="fftPower">FFT size is 2^fftPower samples.</param>
        /// <param name="preLoadWavFile">Optional WAV file to display instead of recording.</param>
        public void Start(int deviceIndex = 0, int fftPower = 14, string preLoadWavFile = null)
        {
            int sampleRate = 8000;
            int fftSize    = (int)(Math.Pow(2, fftPower));

            // Scale the display so ten minutes of audio spans 1000 pixels.
            int tenMinutePixelWidth = 1000;
            int samplesInTenMinutes = sampleRate * 10 * 60;
            int segmentSize         = samplesInTenMinutes / tenMinutePixelWidth;

            spec = new Spectrogram.Spectrogram(sampleRate, fftSize, segmentSize);
            spec.displaySettings.brightness     = 5;
            spec.displaySettings.freqLow        = 1000;
            spec.displaySettings.freqHigh       = 1500;
            spec.displaySettings.tickSpacingHz  = 50;
            spec.displaySettings.tickSpacingSec = 30;

            formBrightness = new FormBrightness(spec);
            formFreqRange  = new FormFreqRange(spec);
            formFFT        = new FormFFT(spec);

            pbSpec.Width  = tenMinutePixelWidth;
            pbSpec.Height = spec.displaySettings.height;

            if (preLoadWavFile != null)
            {
                // Display a previously recorded file instead of live audio.
                spec.AddExtend(Tools.ReadWav(preLoadWavFile));
                return;
            }

            // Live capture: 16-bit mono with a 20 ms buffer.
            wvin = new NAudio.Wave.WaveInEvent
            {
                DeviceNumber       = deviceIndex,
                WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, 16, 1),
                BufferMilliseconds = 20
            };
            wvin.DataAvailable += OnDataAvailable;
            wvin.StartRecording();
        }
        /// <summary>
        /// Toggles recording from the device selected in the source combo box.
        /// The button caption ("Start"/"Stop") doubles as the state flag.
        /// </summary>
        private void BtnStart_Click(object sender, EventArgs e)
        {
            if (cmbBox_source.SelectedIndex != -1 && btnStart.Text == "Start")
            {
                int deviceNum = cmbBox_source.SelectedIndex;

                sourceEvent = new NAudio.Wave.WaveInEvent();
                sourceEvent.DeviceNumber   = deviceNum;
                sourceEvent.DataAvailable += OnDataAvailable;
                sourceEvent.StartRecording();

                btnStart.Text         = "Stop";
                cmbBox_source.Enabled = false;
                btnRefresh.Enabled    = false;
            }
            else if (btnStart.Text == "Stop")
            {
                sourceEvent.StopRecording();
                // BUG FIX: dispose the device so its handle is released; the original
                // leaked one WaveInEvent per start/stop cycle (a new instance is
                // created on every Start).
                sourceEvent.Dispose();

                btnStart.Text         = "Start";
                cmbBox_source.Enabled = true;
                btnRefresh.Enabled    = true;
            }
        }
Exemplo n.º 18
0
        /// <summary>
        /// Opens the default microphone (16 kHz, 16-bit mono) and buffers incoming
        /// audio so a caller can asynchronously read fixed-size chunks: when a read
        /// is pending and enough bytes have accumulated, its TaskCompletionSource
        /// is completed with exactly readSize bytes.
        /// </summary>
        public WaveInAudioStream()
        {
            this.audioIn = new NAudio.Wave.WaveInEvent();
            this.audioIn.BufferMilliseconds = 64; // 1024 samples at 16kHz
            this.audioIn.NumberOfBuffers = 4;
            this.audioIn.WaveFormat = new NAudio.Wave.WaveFormat(rate: 16000, bits: 16, channels: 1);

            // Accumulates device callbacks; old data is dropped if the reader falls behind.
            this.sampleBuffer = new NAudio.Wave.BufferedWaveProvider(this.audioIn.WaveFormat);
            this.sampleBuffer.DiscardOnBufferOverflow = true;

            this.audioIn.DataAvailable += delegate(object sender, NAudio.Wave.WaveInEventArgs args)
            {
                bool shouldSignalReader = false;
                byte[] b = null;
                TaskCompletionSource<byte[]> currentReadSource = null;
                // NOTE(review): locks on `this`, which external code can also lock on;
                // a private gate object would be safer — confirm no external locking.
                lock (this)
                {
                    this.sampleBuffer.AddSamples(args.Buffer, 0, args.BytesRecorded);
                    if (this.readPending && this.sampleBuffer.BufferedBytes >= this.readSize)
                    {
                        this.readPending = false;
                        shouldSignalReader = true;
                        currentReadSource = this.readSource;
                        this.readSource = null;
                        b = new byte[this.readSize];
                        this.sampleBuffer.Read(b, 0, this.readSize);
                        // At this point (after the lock is released), we're ready for another ReadSamplesAsync.
                    }
                }
                // Complete the reader's task outside the lock so its continuation
                // cannot deadlock by re-entering this handler's critical section.
                if (shouldSignalReader)
                {
                    // We don't access this.readSource directly since another ReadSamplesAsync may have started after
                    // the lock was released.
                    currentReadSource.SetResult(b);
                }
            };
        }
        /// <summary>
        /// Streams microphone audio to the Google Cloud Speech API (language "pl")
        /// for the given number of seconds and dispatches final transcripts to the
        /// handler. The first recognized word selects the mode (text / sign /
        /// command, via localized resource strings); later utterances are routed
        /// to the matching handler method.
        /// </summary>
        /// <param name="seconds">How long to record and stream.</param>
        /// <returns>-1 when no microphone is present; otherwise 0.</returns>
        public async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                log.Debug("No microphone!");
                return(-1);
            }

            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "pl",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                // mode == -1 means "not yet chosen"; the first final transcript's
                // first word picks 1 = text, 2 = sign, 3 = command.
                int mode = -1;
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            if (result.IsFinal)
                            {
                                if (mode == -1)
                                {
                                    // First word of the first final result selects the mode.
                                    var firstWord = alternative.Transcript.IndexOf(" ") > -1
                                          ? alternative.Transcript.Substring(0, alternative.Transcript.IndexOf(" "))
                                          : alternative.Transcript;
                                    if (firstWord.ToLower() == resourceManager.GetString("text"))
                                    {
                                        mode = 1;
                                    }
                                    else if (firstWord.ToLower() == resourceManager.GetString("sign"))
                                    {
                                        mode = 2;
                                    }
                                    else if (firstWord.ToLower() == resourceManager.GetString("command"))
                                    {
                                        mode = 3;
                                    }
                                }
                                else
                                {
                                    log.Debug("Working in mode " + mode);
                                    switch (mode)
                                    {
                                    case 1:
                                        handler.InsertText(alternative.Transcript);
                                        break;

                                    case 2:
                                        handler.InsertSign(alternative.Transcript);
                                        break;

                                    case 3:
                                        handler.IssueCommand(alternative.Transcript);
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    // Once shutdown begins, drop any late audio callbacks.
                    if (!writeMore)
                    {
                        return;
                    }
                    // NOTE(review): .Wait() blocks NAudio's callback thread until the
                    // gRPC write completes; the lock also serializes writes — confirm
                    // this latency is acceptable for the buffer settings in use.
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            log.Debug("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Exemplo n.º 20
0
        /// <summary>
        /// Records from the default microphone for up to <paramref name="tiempo"/>
        /// seconds, streams the audio to the Google Cloud Speech API ("es-ES",
        /// single-utterance mode), and returns every transcript alternative received.
        /// </summary>
        /// <param name="tiempo">Maximum recording time in seconds.</param>
        /// <returns>Recognized transcripts; empty when no microphone is present.</returns>
        public static async Task <List <string> > reconocerVoz(int tiempo)
        {
            List <string> listaSoluciones = new List <string>();

            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Debug.Log("Sin microfono");
                return(listaSoluciones);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();


            // Initial request carrying the recognition configuration.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "es-ES",
                    },
                    InterimResults  = true,
                    SingleUtterance = true     // stop recognizing once the API detects the speaker has finished
                }
            }
                );

            // Log and collect responses as they arrive.
            Task pintaRespuestas = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream.Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Debug.Log(alternative.Transcript);
                            listaSoluciones.Add(alternative.Transcript);
                        }
                    }
                }
            });


            // Read from the microphone and send the audio to the API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable += (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    // Once shutdown begins, drop any late audio callbacks.
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }
                        ).Wait();
                }
            };
            waveIn.StartRecording();
            Debug.Log("Habla");
            grabando = true;
            await Task.Delay(TimeSpan.FromSeconds(tiempo));

            // Stop recording and finish the stream.
            waveIn.StopRecording();
            grabando = false;
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await pintaRespuestas;
            await SpeechClient.ShutdownDefaultChannelsAsync();

            return(listaSoluciones);
        }
Exemplo n.º 21
0
        /// <summary>
        /// Records from the default microphone (device 0) for
        /// <paramref name="seconds"/> seconds and streams the audio to the
        /// Google Cloud Speech API (en-GB, "phone_call" model, enhanced),
        /// printing every final transcript alternative to the console.
        /// </summary>
        /// <param name="seconds">How long to record, in seconds.</param>
        /// <returns>Always 0 (boxed).</returns>
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
/*            SpeechClientBuilder builder = new SpeechClientBuilder
 *          {
 *              CredentialsPath = credentialsFilePath
 *          };
 *          SpeechClient speech = builder.Build();*/
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en-GB",
                        // The `model` value must be one of the following:
                        // "video", "phone_call", "command_and_search", "default"
                        Model = "phone_call",
//                            EnableWordTimeOffsets = true,

/*                            DiarizationConfig = new SpeakerDiarizationConfig()
 *                          {
 *                              EnableSpeakerDiarization = true,
 *                              MinSpeakerCount = 2
 *                          }*/
                        UseEnhanced = true,
                    },
                    InterimResults = false, // only final results are delivered
                }
            });

            // Print responses as they arrive (runs until the server closes the stream).
            Task printResponses = Task.Run(async() =>
            {
                var responseStream = streamingCall.GetResponseStream();
                while (await responseStream.MoveNextAsync())
                {
                    StreamingRecognizeResponse response = responseStream.Current;
                    foreach (StreamingRecognitionResult result in response.Results)
                    {
                        foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true; // guarded by writeLock; set false when recording stops
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches the config above
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }

                    // NOTE(review): .Wait() blocks the NAudio callback thread per
                    // buffer (the official sample does the same) — capture can
                    // stall if the network write is slow.
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock)
            {
                writeMore = false;
            }

            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
        /// <summary>
        /// Streams microphone audio to the Google Cloud Speech API for
        /// <paramref name="seconds"/> seconds, logging a per-buffer decibel
        /// estimate and raising a VoiceRecognizedEvent for every (interim or
        /// final) response received.
        /// </summary>
        /// <param name="seconds">How long to record, in seconds.</param>
        /// <returns>0 on success, -1 when no capture device is present.</returns>
        public async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },
                    InterimResults = true, // interim hypotheses are forwarded too
                }
            });

            // Forward responses to the application's event handler as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    // Only the first result of each response is considered.
                    var result     = streamingCall.ResponseStream.Current.Results.FirstOrDefault();
                    // NOTE(review): OrderBy(Confidence) sorts ASCENDING, so the
                    // highest-confidence transcript ends up last in the list —
                    // confirm consumers of VoiceRecognizedEvent expect that order.
                    var transcript = result?.Alternatives.OrderBy(t => t.Confidence).Select(x => x.Transcript).ToList();
                    await Task.Run(() => eventHandler.eventSpeechRecognized(new VoiceRecognizedEvent
                    {
                        Transcripts = transcript
                    }));
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true; // guarded by writeLock
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches the config above
            waveIn.DataAvailable +=
                (sender, args) =>
            {
                // Rough input-level estimate; -95 dB stands in for "no data".
                double decibel = -95;
                if (args.Buffer != null)
                {
                    decibel = CalculateDecibels(args.Buffer);
                    Console.WriteLine($"Decibel level: {decibel}");
                }
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Exemplo n.º 23
0
 // Stops and releases the shared capture stream, then clears the field so a
 // later StartAudioDevice() starts from a clean state.
 private static void StopAudioDevice()
 {
     if (sourceStream == null)
     {
         sourceStream = null;
         return;
     }

     sourceStream.StopRecording();
     sourceStream.Dispose();
     sourceStream = null;
 }
Exemplo n.º 24
0
        /// <summary>
        /// Opens the default capture device (device 0) at 44.1 kHz, using the
        /// device's native channel count, and begins recording. Audio buffers
        /// are delivered to <c>sourceStream_DataAvailable</c>.
        /// </summary>
        private static void StartAudioDevice()
        {
            sourceStream = new NAudio.Wave.WaveInEvent();
            sourceStream.DeviceNumber = 0;
            // Match the device's reported channel count at CD sample rate.
            sourceStream.WaveFormat = new NAudio.Wave.WaveFormat(44100, NAudio.Wave.WaveIn.GetCapabilities(0).Channels);

            // BUG FIX: subscribe BEFORE StartRecording(). The original hooked
            // the handler after starting, so buffers raised before the
            // subscription were silently dropped. (Also removed the unused
            // capability list the original built and never read.)
            sourceStream.DataAvailable += sourceStream_DataAvailable;
            sourceStream.StartRecording();
        }
Exemplo n.º 25
0
        /// <summary>
        /// Streams microphone audio to the Speech API for
        /// <paramref name="seconds"/> seconds using explicit file-based
        /// credentials, printing Spanish ("es-Es") transcripts to the console
        /// as they arrive.
        /// </summary>
        /// <param name="seconds">How long to record, in seconds.</param>
        /// <returns>0 on success, -1 when no capture device is present.</returns>
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            //await Connect();

            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
            // NOTE(review): hard-coded absolute credentials path — should come
            // from configuration or GOOGLE_APPLICATION_CREDENTIALS instead.
            var credential = GoogleCredential.FromFile(@"D:\EsferaColor\TranscribingAudio\SpeakToText-c65312fe0200.json").CreateScoped(SpeechClient.DefaultScopes);
            // NOTE(review): this channel is never shut down; consider calling
            // channel.ShutdownAsync() after streaming completes.
            var channel    = new Grpc.Core.Channel(SpeechClient.DefaultEndpoint.ToString(), credential.ToChannelCredentials());

            var speech        = SpeechClient.Create(channel);
            var streamingCall = speech.StreamingRecognize();

            //var speech = SpeechClient.Create(); /*AuthExplicitComputeEngine("640f1acceb995a6bc4deb4e766e76dca6c5bb7d0");*/
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "es-Es",
                    },
                    InterimResults = true, // interim hypotheses are printed too
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true; // guarded by writeLock
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches the config above
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Exemplo n.º 26
0
        /// <summary>
        /// Streams microphone audio to the Speech API for
        /// <paramref name="seconds"/> seconds, appending newly recognized
        /// words (and "TRIGGER:" lines for new trigger words) to textBox1 as
        /// interim results evolve.
        /// </summary>
        /// <param name="seconds">How long to record, in seconds.</param>
        /// <returns>0 on success, -1 when no capture device is present.</returns>
        async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No Mic");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();

            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },
                    InterimResults = true, // word diffing below relies on interim updates
                }
            });

            // Diff each interim hypothesis against the previously seen sentence
            // (currentSentance / lastTrigger are instance fields) and surface
            // the newest word to the UI.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        // NOTE(review): takes the LAST alternative, not the most
                        // confident one — confirm this is intentional.
                        var sentanceResult  = result.Alternatives.Last().Transcript;
                        var newWordList     = sentanceResult.Split(' ');
                        var currentWordList = currentSentance.Split(' ');

                        // A hypothesis counts as "new" when it grew or its first
                        // word changed (i.e. the recognizer restarted the sentence).
                        if (newWordList.Length > currentWordList.Length ||
                            newWordList.First() != currentWordList.First())
                        {
                            currentSentance = sentanceResult;
                            textBox1.Invoke((MethodInvoker)(() => textBox1.AppendText(newWordList.Last() + Environment.NewLine)));
                            if (newWordList.Last() != lastTrigger)
                            {
                                textBox1.Invoke((MethodInvoker)(() => textBox1.AppendText("TRIGGER: " + newWordList.Last() + Environment.NewLine)));
                                lastTrigger = newWordList.Last();
                            }
                            Console.WriteLine(newWordList.Last());
                        }
                    }
                }
            });

            // Read from the microphone and stream to the API.
            object writeLock = new object();
            bool   writeMore = true; // guarded by writeLock
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches the config above
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Reconnecting stream");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Exemplo n.º 27
0
        /// <summary>
        /// Streams microphone audio to the Google Cloud Speech API for up to
        /// <paramref name="seconds"/> seconds (or until the shared
        /// cancelRecordingTokenSource is cancelled) and writes the combined
        /// final transcript into txtSpeech on the UI thread.
        /// </summary>
        /// <param name="seconds">Maximum recording duration in seconds.</param>
        /// <returns>Always string.Empty on the normal path (errors are traced,
        /// not rethrown); a message string when no microphone is present.</returns>
        private async Task <string> StreamingMicRecognizeAsync(int seconds)
        {
            string responses = string.Empty;

            try
            {
                if (NAudio.Wave.WaveIn.DeviceCount < 1)
                {
                    MessageBox.Show("No microphone!");
                    return("No micrphone found.");
                }
                var speech        = SpeechClient.Create();
                var streamingCall = speech.StreamingRecognize();
                // Write the initial request with the config.
                await streamingCall.WriteAsync(
                    new StreamingRecognizeRequest()
                {
                    StreamingConfig = new StreamingRecognitionConfig()
                    {
                        Config = new RecognitionConfig()
                        {
                            Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                            SampleRateHertz = 16000,
                            LanguageCode    = "en",
                        },
                        InterimResults = false, // only final results are accumulated
                    }
                });

                // Accumulate final transcripts, then push the full text to the
                // UI thread once the server closes the response stream.
                Task printResponses = Task.Run(async() =>
                {
                    StringBuilder builder = new StringBuilder();
                    while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                    {
                        foreach (var result in streamingCall.ResponseStream
                                 .Current.Results)
                        {
                            foreach (var alternative in result.Alternatives)
                            {
                                builder.Append(alternative.Transcript);
                            }
                        }
                    }

                    txtSpeech.Dispatcher.Invoke(() =>
                    {
                        txtSpeech.Text = builder.ToString();
                    });
                });

                // Read from the microphone and stream to the API.
                object writeLock = new object();
                bool   writeMore = true; // guarded by writeLock
                using (var waveIn = new NAudio.Wave.WaveInEvent()) // BUG FIX: was never disposed
                {
                    waveIn.DeviceNumber   = 0;
                    waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches the config above
                    waveIn.DataAvailable +=
                        (object sender, NAudio.Wave.WaveInEventArgs args) =>
                    {
                        lock (writeLock)
                        {
                            if (!writeMore)
                            {
                                return;
                            }
                            streamingCall.WriteAsync(
                                new StreamingRecognizeRequest()
                            {
                                AudioContent = Google.Protobuf.ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
                            }).Wait();
                        }
                    };

                    try
                    {
                        waveIn.StartRecording();
                        await Task.Delay(TimeSpan.FromSeconds(seconds), cancelRecordingTokenSource.Token);
                    }
                    catch (TaskCanceledException)
                    {
                        // Cancellation simply means "stop early"; fall through.
                    }
                    finally
                    {
                        // BUG FIX: the original only called StopRecording() in the
                        // cancellation path, so after a normal timeout the mic kept
                        // recording (with buffers silently discarded). Always stop.
                        waveIn.StopRecording();
                    }
                }

                lock (writeLock) writeMore = false;
                await streamingCall.WriteCompleteAsync();

                await printResponses;
            }
            catch (Exception ex)
            {
                Trace.WriteLine(ex);
            }
            return(responses);
        }
        /// <summary>
        /// Streams microphone audio to the Speech API for
        /// <paramref name="seconds"/> seconds, writing each (interim or final)
        /// transcript alternative to the debug output.
        /// </summary>
        /// <param name="seconds">How long to record, in seconds.</param>
        /// <returns>Always 0 (boxed).</returns>
        public static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the recognition config.
            await streamingCall.WriteAsync(new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en"
                    },
                    InterimResults = true
                }
            });

            // Print responses in real time as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                var responseStream = streamingCall.GetResponseStream();
                while (await responseStream.MoveNextAsync())
                {
                    StreamingRecognizeResponse response = responseStream.Current;
                    foreach (StreamingRecognitionResult result in response.Results)
                    {
                        foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                        {
                            Debug.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to the Google API.
            object writeLock = new object();
            bool   writeMore = true; // guarded by writeLock
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches the config above
            waveIn.DataAvailable += (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Debug.WriteLine("Parla ora!");
            await Task.Delay(TimeSpan.FromSeconds(seconds)); // recording runs for `seconds` seconds

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock)
            {
                writeMore = false;
            }
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Exemplo n.º 29
0
        /// <summary>
        /// Streams microphone audio to the Speech API (via the raw gRPC client
        /// — an older API surface: note SampleRate rather than SampleRateHertz)
        /// and returns the first final transcript received, waiting at most
        /// 100 seconds for one.
        /// </summary>
        /// <param name="languageCode">BCP-47 language code for recognition.</param>
        /// <returns>The first single-result transcript, or string.Empty if none
        /// arrived within the wait window.</returns>
        /// <exception cref="Exception">Thrown when no capture device exists.</exception>
        public static async Task <string> StreamingMicRecognizeAsync(string languageCode)
        {
            string result = string.Empty;

            // Signalled by the response loop when the first transcript lands.
            var manualResetEvent = new ManualResetEvent(false);

            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                throw new Exception("No microphone!");
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.GrpcClient.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.RequestStream.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRate   = 16000,
                        LanguageCode = languageCode
                    },
                    InterimResults = false, // only final results; the first one wins
                }
            });

            // Capture the first single-result response and release the waiter.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                {
                    if (streamingCall.ResponseStream.Current.Results.Count == 1)
                    {
                        // NOTE(review): Single() throws if a result carries zero or
                        // multiple alternatives — confirm the API guarantees one here.
                        result = streamingCall.ResponseStream.Current.Results.Single().Alternatives.Single().Transcript;
                        manualResetEvent.Set();
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true; // guarded by writeLock
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches the config above
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.RequestStream.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            // Block (off the caller's thread) until a transcript arrives or 100 s elapse.
            await Task.Run(() => { manualResetEvent.WaitOne(100000); });

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.RequestStream.CompleteAsync();

            await printResponses;

            return(result);
        }
Exemplo n.º 30
0
        // [END speech_streaming_recognize]

        // [START speech_streaming_mic_recognize]
        /// <summary>
        /// Streams microphone audio to the Speech API for
        /// <paramref name="seconds"/> seconds, printing every (interim or
        /// final) transcript alternative to the console.
        /// </summary>
        /// <param name="seconds">How long to record, in seconds.</param>
        /// <returns>0 on success, -1 when no capture device is present.</returns>
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },
                    InterimResults = true, // interim hypotheses are printed too
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true; // guarded by writeLock
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches the config above
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
        /// <summary>
        /// Streams Turkish microphone audio to the Speech API for
        /// <paramref name="seconds"/> seconds and types sufficiently confident
        /// transcripts into an open Notepad window via AutoIt.
        /// </summary>
        /// <param name="seconds">How long to record, in seconds.</param>
        /// <returns>Always 0 (boxed).</returns>
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "tr",
                    },
                    InterimResults = true,
                }
            });

            // Process responses as they arrive, forwarding confident text to Notepad.
            Task printResponses = Task.Run(async() =>
            {
                //AutoItX.Run("notepad.exe", null);
                int test = AutoItX.WinWaitActive("Untitled - Notepad");
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    var results = streamingCall.ResponseStream.Current.Results;

                    // BUG FIX: the original indexed Results[0].Alternatives[0]
                    // unconditionally; a streaming response can carry zero results
                    // (or a result with no alternatives), which threw at runtime.
                    if (results.Count > 0 && results[0].Alternatives.Count > 0)
                    {
                        double dogruluk_orani = results[0].Alternatives[0].Confidence;

                        // NOTE(review): Confidence is typically populated only on
                        // final results (interim results report 0), so interim text
                        // will hit the "else" branch — confirm that is intended.
                        if (dogruluk_orani > 0.60)
                        {
                            Console.WriteLine("D/O: " + dogruluk_orani + " | " + results[0].Alternatives[0].Transcript);
                            AutoItX.Send(results[0].Alternatives[0].Transcript + "\n");
                        }
                        else
                        {
                            Console.WriteLine("Anlaşılamadı...");
                        }
                    }

                    foreach (StreamingRecognitionResult result in results)
                    {
                        foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true; // guarded by writeLock
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches the config above
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
        // [START dialogflow_detect_intent_streaming]
        /// <summary>
        /// Streams microphone audio to Dialogflow and prints the query text
        /// and detected intent for each streaming response.
        /// </summary>
        /// <param name="projectId">GCP project that owns the Dialogflow agent.</param>
        /// <param name="sessionId">Dialogflow session identifier.</param>
        /// <param name="filePath">Unused; retained for signature compatibility
        /// with the file-based sample this was adapted from.</param>
        /// <returns>Always 0 (boxed).</returns>
        public static async Task <object> DetectIntentFromStreamAsync(
            string projectId,
            string sessionId,
            string filePath)
        {
            // BUG FIX: the original referenced an undeclared `seconds` variable
            // (a leftover from the microphone sample it was copied from), which
            // does not compile. Record for a fixed duration instead.
            const int seconds = 10;

            var sessionsClient = SessionsClient.Create();
            var sessionName    = SessionName.FromProjectSession(projectId, sessionId).ToString();

            // Initialize streaming call, retrieving the stream object.
            var streamingDetectIntent = sessionsClient.StreamingDetectIntent();

            // Define a task to process results from the API.
            var responseHandlerTask = Task.Run(async() =>
            {
                var responseStream = streamingDetectIntent.GetResponseStream();
                while (await responseStream.MoveNextAsync())
                {
                    var response    = responseStream.Current;
                    var queryResult = response.QueryResult;

                    if (queryResult != null)
                    {
                        Console.WriteLine($"Query text: {queryResult.QueryText}");
                        if (queryResult.Intent != null)
                        {
                            Console.Write("Intent detected:");
                            Console.WriteLine(queryResult.Intent.DisplayName);
                        }
                    }
                }
            });

            // Instructs the speech recognizer how to process the audio content.
            // Note: hard coding audioEncoding, sampleRateHertz for simplicity.
            var queryInput = new QueryInput
            {
                AudioConfig = new InputAudioConfig
                {
                    AudioEncoding   = AudioEncoding.Linear16,
                    LanguageCode    = "en-US",
                    SampleRateHertz = 16000
                }
            };

            // The first request must **only** contain the audio configuration:
            await streamingDetectIntent.WriteAsync(new StreamingDetectIntentRequest
            {
                QueryInput = queryInput,
                Session    = sessionName
            });

            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true; // guarded by writeLock
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1); // 16 kHz mono, matches queryInput above
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }

                    streamingDetectIntent.WriteAsync(
                        new StreamingDetectIntentRequest()
                    {
                        InputAudio = Google.Protobuf.ByteString
                                     .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock)
            {
                writeMore = false;
            }

            // Tell the service you are done sending data.
            await streamingDetectIntent.WriteCompleteAsync();

            // This will complete once all server responses have been processed.
            await responseHandlerTask;

            return(0);
        }
Exemplo n.º 33
0
        //Captures Speech and Converts to text; displays messages.
        /// <summary>
        /// Records 5 seconds of microphone audio, streams it to the Google
        /// Cloud Speech API (ja-JP), collects every returned transcript, and
        /// reports "Correct" when any transcript exactly matches
        /// <c>BackSide</c> or <c>FrontSide</c>. Updates <c>VoiceCapture</c>
        /// with status text and toggles <c>NotRunning</c> around the run.
        /// </summary>
        /// <param name="obj">Command parameter; not used.</param>
        // NOTE(review): async void is tolerable here only because this is a
        // UI command handler; any exception thrown is unobservable by callers.
        private async void CaptureVoice(object obj)
        {
            NotRunning   = false;
            VoiceCapture = "Status: Loading";

            List <string> words = new List <string>();

            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "ja-JP",
                    },
                    // Interim results mean partial transcripts are also
                    // collected into `words` below.
                    InterimResults = true,
                }
            });

            // Collect (and print) transcripts as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                            words.Add(alternative.Transcript);
                        }
                    }
                }
            });

            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;

            // FIX: WaveInEvent is IDisposable and was previously never
            // disposed, leaking the audio capture device handle.
            using (var waveIn = new NAudio.Wave.WaveInEvent())
            {
                waveIn.DeviceNumber = 0;
                // Mono 16 kHz PCM, matching the Linear16/16000 config above.
                waveIn.WaveFormat   = new NAudio.Wave.WaveFormat(16000, 1);
                waveIn.DataAvailable +=
                    (object sender, NAudio.Wave.WaveInEventArgs args) =>
                {
                    // The gRPC write stream permits only one in-flight write,
                    // so writes are serialized under the lock.
                    lock (writeLock)
                    {
                        if (!writeMore)
                        {
                            return;
                        }
                        streamingCall.WriteAsync(
                            new StreamingRecognizeRequest()
                        {
                            AudioContent = Google.Protobuf.ByteString
                                           .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                        }).Wait();
                    }
                };
                waveIn.StartRecording();
                VoiceCapture = "Status: Speak Now";
                Console.WriteLine("Speak now.");
                await Task.Delay(TimeSpan.FromSeconds(5));

                // Stop recording and shut down.
                waveIn.StopRecording();
                VoiceCapture = "Status: Processing";
                lock (writeLock) writeMore = false;
            }

            await streamingCall.WriteCompleteAsync();

            await printResponses;

            // Exact-match check against the card's two sides; interim
            // (partial) transcripts are included, which only widens matches.
            if (words.Contains(BackSide) == true || words.Contains(FrontSide) == true)
            {
                VoiceCapture = "Status: Correct";
                Console.WriteLine("True");
            }
            else
            {
                VoiceCapture = "Status: Incorrect";
                Console.WriteLine("False");
            }

            NotRunning = true;
        }