Example #1
        static LocalPCInformation()
        {
            void StartStopRecording()
            {
                // We must start recording to be able to capture audio in, but only do this if the user has the option set. Allowing them
                // to turn it off gives them peace of mind that we're not spying on them and stops the Windows 10 mic icon from appearing.
                try {
                    if (Global.Configuration.EnableAudioCapture)
                    {
                        waveInEvent.StartRecording();
                    }
                    else
                    {
                        waveInEvent.StopRecording();
                    }
                } catch { }
            }

            StartStopRecording();
            Global.Configuration.PropertyChanged += (sender, e) => {
                if (e.PropertyName == "EnableAudioCapture")
                {
                    StartStopRecording();
                }
            };
        }
Example #2
File: Audio.cs Project: BSalita/Woundify
        public static async System.Threading.Tasks.Task MicrophoneToFileAsync(string fileName, TimeSpan timeout)
        {
            Console.Write("Listening for " + timeout.TotalSeconds + " seconds ...");
            // create wave input using microphone
            using (NAudio.Wave.WaveInEvent waveIn = new NAudio.Wave.WaveInEvent())
            {
                waveIn.DeviceNumber       = Options.options.audio.NAudio.inputDeviceNumber;
                waveIn.BufferMilliseconds = Options.options.audio.NAudio.waveInBufferMilliseconds;
                waveIn.WaveFormat         = new NAudio.Wave.WaveFormat(Options.options.audio.samplingRate, (int)Options.options.audio.bitDepth, Options.options.audio.channels); // usually only mono (one channel) is supported
                waveIn.DataAvailable     += WaveIn_DataAvailable;                                                                                                                // use event to fill buffer
                using (waveInFile = new NAudio.Wave.WaveFileWriter(Options.options.tempFolderPath + fileName, waveIn.WaveFormat))
                {
                    waveIn.StartRecording();

                    //Console.WriteLine("Hit enter when finished recording.");
                    //Console.ReadKey();
                    await System.Threading.Tasks.Task.Delay(timeout); // await rather than .Wait() so the async method does not block its thread

                    waveIn.StopRecording();

                    waveInFile.Close();
                }
                Console.WriteLine("");
            }
        }
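This example wires a WaveIn_DataAvailable handler and a shared waveInFile writer, but shows neither. A minimal sketch of the missing pieces, assuming the handler simply appends each captured buffer to the writer:

        private static NAudio.Wave.WaveFileWriter waveInFile;

        private static void WaveIn_DataAvailable(object sender, NAudio.Wave.WaveInEventArgs e)
        {
            // WaveFileWriter.Write appends raw PCM bytes in the format declared on waveIn.
            waveInFile?.Write(e.Buffer, 0, e.BytesRecorded);
        }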
Example #3
        static LocalPCInformation()
        {
            try
            {
                _CPUCounter = new PerformanceCounter("Processor", "% Processor Time", "_Total");
            }
            catch (Exception exc)
            {
                Global.logger.LogLine("Failed to create PerformanceCounter. Try: https://stackoverflow.com/a/34615451 Exception: " + exc);
            }

            void StartStopRecording()
            {
                // We must start recording to be able to capture audio in, but only do this if the user has the option set. Allowing them
                // to turn it off gives them peace of mind that we're not spying on them and stops the Windows 10 mic icon from appearing.
                try {
                    if (Global.Configuration.EnableAudioCapture)
                    {
                        waveInEvent.StartRecording();
                    }
                    else
                    {
                        waveInEvent.StopRecording();
                    }
                } catch { }
            }

            StartStopRecording();
            Global.Configuration.PropertyChanged += (sender, e) => {
                if (e.PropertyName == "EnableAudioCapture")
                {
                    StartStopRecording();
                }
            };
        }
Example #4
                    public void Acquire(int input_device, Signal_Type ST, int output_device)
                    {
                        Running         = true;
                        Channels_in     = NAudio.Wave.WaveIn.GetCapabilities(input_device).Channels;
                        Response        = new List <short> [Channels_in];
                        block           = 2 * Channels_in;
                        WI              = new NAudio.Wave.WaveInEvent();
                        WI.WaveFormat   = new NAudio.Wave.WaveFormat(SampleFreq, 16, Channels_in);
                        WI.DeviceNumber = input_device;

                        WI.BufferMilliseconds = 100;
                        WI.NumberOfBuffers    = 3;
                        WI.RecordingStopped  += WI_RecordingStopped;
                        WI.DataAvailable     += WI_DataAvailable;
                        WO.DeviceNumber       = output_device;
                        for (int c = 0; c < Channels_in; c++)
                        {
                            Response[c] = new List <short>();
                        }

                        SignalProvider Signal;

                        switch (ST)
                        {
                        case Signal_Type.Pink_Noise:
                            Signal = new Noise_Provider(1, (int)CT_Averages, SampleFreq);
                            break;

                        case Signal_Type.MLS:
                            throw new NotImplementedException();

                        case Signal_Type.Swept_Sine:
                            Signal = new Sweep_Provider((float)Signal_Length, CT_Averages, 63, 20000, SampleFreq);
                            break;

                        case Signal_Type.Logarithmic_Swept_Sine:
                            throw new NotImplementedException();

                        default:
                            System.Windows.Forms.MessageBox.Show("Select a Signal...");
                            return;
                        }

                        TD_Signal = Signal.Signal;

                        WO.NumberOfBuffers = 1;
                        WO.DesiredLatency  = 3000 * CT_Averages;
                        WO.Volume          = 1.0f;
                        WO.Init(Signal);
                        WI.StartRecording();
                        WO.Play();
                        System.Threading.Thread.Sleep((int)(Signal_Time_s * (3 + CT_Averages) * 1000));
                        WO.Stop();
                        WI.StopRecording();
                        System.Threading.Thread.Sleep(100);
                        WI_RecordingStopped(this, null);
                    }
Example #5
 private void cbDevices_SelectedIndexChanged(object sender, EventArgs e)
 {
     wvin?.Dispose();
     wvin = new NAudio.Wave.WaveInEvent();
     wvin.DeviceNumber       = cbDevices.SelectedIndex;
     wvin.WaveFormat         = new NAudio.Wave.WaveFormat(rate: SAMPLE_RATE, bits: 16, channels: 1);
     wvin.DataAvailable     += OnDataAvailable;
     wvin.BufferMilliseconds = 20;
     wvin.StartRecording();
 }
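The handler above assumes cbDevices is populated so that its selected index matches the WaveIn device number. A minimal sketch of that setup, assuming a WinForms ComboBox:

 private void PopulateDeviceList()
 {
     cbDevices.Items.Clear();
     for (int i = 0; i < NAudio.Wave.WaveIn.DeviceCount; i++)
     {
         // GetCapabilities(i) reports each capture device's name and channel count.
         cbDevices.Items.Add(NAudio.Wave.WaveIn.GetCapabilities(i).ProductName);
     }
 }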
Example #6
 public Listener(int deviceIndex, int sampleRate)
 {
     SampleRate = sampleRate;
     wvin       = new NAudio.Wave.WaveInEvent
     {
         DeviceNumber       = deviceIndex,
         WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, bits: 16, channels: 1),
         BufferMilliseconds = 20
     };
     wvin.DataAvailable += OnNewAudioData;
     wvin.StartRecording();
 }
Example #7
        private void MainForm_Load(object sender, EventArgs args)
        {
            var waveIn = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object waveSender, NAudio.Wave.WaveInEventArgs e) =>
            {
                //Scale each sample down and center the result in a slightly larger buffer for display
                short[] waveValues = new short[(int)(e.BytesRecorded * .55)];
                for (int i = 0; i < e.BytesRecorded; i += 2)
                {
                    waveValues[(i / 2) + ((waveValues.Length - e.BytesRecorded / 2) / 2)] = (short)(BitConverter.ToInt16(e.Buffer, i) / 50);
                }
                try
                {
                    waveform.Invoke((MethodInvoker)(() => waveform.Series[0].Points.DataBindY(waveValues)));
                    UpdateVolumeMeter(Math.Abs(waveValues[waveValues.Length / 2]));
                }
                catch (Exception ex)
                {
                    Console.WriteLine(ex.Message);
                }
            };
            waveIn.StartRecording();



            new Thread((ThreadStart) delegate
            {
                while (true)
                {
                    StreamingMicRecognizeAsync(10).Wait();
                }
            }).Start();

            //Volume meter decrementer
            #region
            progressReset.Interval = 50;
            progressReset.Elapsed += (o, arg) =>
            {
                if (progressBar.Value > 0)
                {
                    progressBar.Invoke((MethodInvoker)(() => progressBar.Value--));
                }
            };
            progressReset.Start();
            #endregion

            wave.ChartType = SeriesChartType.Area;
            wave.Color     = Color.Blue;
        }
Example #8
        static void Main(string[] args)
        {
            var waveIn = new NAudio.Wave.WaveInEvent
            {
                DeviceNumber       = 0, // customize this to select your microphone device
                WaveFormat         = new NAudio.Wave.WaveFormat(rate: 44100, bits: 16, channels: 2),
                BufferMilliseconds = 50
            };

            waveIn.DataAvailable += ShowPeakStereo;
            waveIn.StartRecording();
            // Keep the process alive while capture runs on a background thread;
            // a blocking call such as Console.ReadLine() would avoid spinning a CPU core.
            while (true)
            {
            }
        }
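The ShowPeakStereo handler is referenced but not shown. A minimal sketch of one, assuming interleaved 16-bit little-endian stereo PCM:

        static void ShowPeakStereo(object sender, NAudio.Wave.WaveInEventArgs e)
        {
            int peakL = 0, peakR = 0;
            for (int i = 0; i + 3 < e.BytesRecorded; i += 4) // 2 bytes per sample, 2 channels
            {
                peakL = Math.Max(peakL, Math.Abs((int)BitConverter.ToInt16(e.Buffer, i)));
                peakR = Math.Max(peakR, Math.Abs((int)BitConverter.ToInt16(e.Buffer, i + 2)));
            }
            Console.WriteLine($"L: {peakL,6}  R: {peakR,6}");
        }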
Example #9
        public void StartListening(int deviceIndex = 0, int sampleRate = 8000, int fftSize = 1024)
        {
            spec = new Spectrogram.Spectrogram(sampleRate, fftSize, 250);

            int bitRate            = 16;
            int channels           = 1;
            int bufferMilliseconds = 10;

            wvin = new NAudio.Wave.WaveInEvent();
            wvin.DeviceNumber       = deviceIndex;
            wvin.WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, bitRate, channels);
            wvin.DataAvailable     += OnDataAvailable;
            wvin.BufferMilliseconds = bufferMilliseconds;
            wvin.StartRecording();
        }
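The OnDataAvailable handler is not shown here. A minimal sketch of one that converts the captured 16-bit PCM to doubles for the spectrogram, assuming the Spectrogram library exposes an Add method alongside the AddExtend used in Example #12:

        private void OnDataAvailable(object sender, NAudio.Wave.WaveInEventArgs args)
        {
            int sampleCount = args.BytesRecorded / 2; // 16-bit mono: 2 bytes per sample
            double[] values = new double[sampleCount];
            for (int i = 0; i < sampleCount; i++)
                values[i] = BitConverter.ToInt16(args.Buffer, i * 2);
            spec.Add(values); // assumed API; only AddExtend appears in these examples
        }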
Example #10
 private void AudioMonitorInitialize(int DeviceIndex, int sampleRate = 8000, int bitRate = 16,
                                     int channels = 1, int bufferMilliseconds            = 20, bool start = true)
 {
     if (wvin == null)
     {
         wvin = new NAudio.Wave.WaveInEvent();
         wvin.DeviceNumber       = DeviceIndex;
         wvin.WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, bitRate, channels);
         wvin.DataAvailable     += OnDataAvailable;
         wvin.BufferMilliseconds = bufferMilliseconds;
         if (start)
         {
             wvin.StartRecording();
         }
     }
 }
Example #11
        private void AudioMonitorInitialize(
            int DeviceIndex        = 0,
            int sampleRate         = 8000,
            int bitRate            = 16,
            int channels           = 1,
            int bufferMilliseconds = 10,
            int fftSize            = 1024,
            int step = 250
            )
        {
            spec = new Spectrogram.Spectrogram(sampleRate, fftSize, step);

            wvin = new NAudio.Wave.WaveInEvent();
            wvin.DeviceNumber       = DeviceIndex;
            wvin.WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, bitRate, channels);
            wvin.DataAvailable     += OnDataAvailable;
            wvin.BufferMilliseconds = bufferMilliseconds;
            wvin.StartRecording();
        }
Example #12
        public void Start(int deviceIndex = 0, int fftPower = 14, string preLoadWavFile = null)
        {
            int sampleRate = 8000;
            int fftSize    = (int)(Math.Pow(2, fftPower));

            int tenMinutePixelWidth = 1000;
            int samplesInTenMinutes = sampleRate * 10 * 60;
            int segmentSize         = samplesInTenMinutes / tenMinutePixelWidth;

            spec = new Spectrogram.Spectrogram(sampleRate, fftSize, segmentSize);
            spec.displaySettings.brightness     = 5;
            spec.displaySettings.freqLow        = 1000;
            spec.displaySettings.freqHigh       = 1500;
            spec.displaySettings.tickSpacingHz  = 50;
            spec.displaySettings.tickSpacingSec = 30;

            formBrightness = new FormBrightness(spec);
            formFreqRange  = new FormFreqRange(spec);
            formFFT        = new FormFFT(spec);

            pbSpec.Width  = tenMinutePixelWidth;
            pbSpec.Height = spec.displaySettings.height;

            if (preLoadWavFile != null)
            {
                spec.AddExtend(Tools.ReadWav(preLoadWavFile));
            }
            else
            {
                int bitRate            = 16;
                int channels           = 1;
                int bufferMilliseconds = 20;
                wvin = new NAudio.Wave.WaveInEvent();
                wvin.DeviceNumber       = deviceIndex;
                wvin.WaveFormat         = new NAudio.Wave.WaveFormat(sampleRate, bitRate, channels);
                wvin.DataAvailable     += OnDataAvailable;
                wvin.BufferMilliseconds = bufferMilliseconds;
                wvin.StartRecording();
            }
        }
Example #13
        private void BtnStart_Click(object sender, EventArgs e)
        {
            if (cmbBox_source.SelectedIndex != -1 && btnStart.Text == "Start")
            {
                int deviceNum = cmbBox_source.SelectedIndex;

                sourceEvent = new NAudio.Wave.WaveInEvent();
                sourceEvent.DeviceNumber   = deviceNum;
                sourceEvent.DataAvailable += OnDataAvailable;
                sourceEvent.StartRecording();

                btnStart.Text         = "Stop";
                cmbBox_source.Enabled = false;
                btnRefresh.Enabled    = false;
            }
            else if (btnStart.Text == "Stop")
            {
                sourceEvent.StopRecording();
                btnStart.Text         = "Start";
                cmbBox_source.Enabled = true;
                btnRefresh.Enabled    = true;
            }
        }
Example #14
 public void Start()
 {
     Console.WriteLine($"Starting recording...");
     wvin.StartRecording();
 }
Example #15
        public static async Task StreamingMicRecognizeAsync(int ms, ISpeechOutput output, Action finish, float minConfidence, int captureDeviceIndex)
        {
            var streamingCall = Client.StreamingRecognize();
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "ru"
                    },
                    InterimResults = false,
                }
            });

            var  normalSpeechDetected = false;
            Task printResponses       = Task.Run(async() =>
            {
                var outText = string.Empty;

                while (await streamingCall.ResponseStream.MoveNext())
                {
                    foreach (var result in streamingCall.ResponseStream.Current.Results)
                    {
                        var confidence = result.Alternatives.Max(x => x.Confidence);
                        Debug.WriteLine("SpeechRecognition");
                        foreach (var alt in result.Alternatives)
                        {
                            Debug.WriteLine(alt.Transcript + " " + alt.Confidence);
                        }

                        if (confidence >= minConfidence)
                        {
                            var alternative = result.Alternatives.LastOrDefault(x => x.Confidence == confidence);
                            output.IntermediateResult(alternative.Transcript);
                            outText = alternative.Transcript;
                            normalSpeechDetected = true;
                        }
                    }
                }

                if (normalSpeechDetected)
                {
                    output.Result(outText);
                }
            });

            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = captureDeviceIndex;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }

                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            output.RecordStarted();
            try
            {
                waveIn.StartRecording();
                await Task.Delay(TimeSpan.FromMilliseconds(ms));

                if (!normalSpeechDetected)
                {
                    output.RecordCanceled();
                }

                waveIn.StopRecording();
                lock (writeLock)
                {
                    writeMore = false;
                }

                await streamingCall.WriteCompleteAsync();

                await printResponses;
                finish?.Invoke();
                output.RecordFinished();
            }
            catch
            {
                output.RecordCanceled();
            }
        }
Example #16
        public async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                log.Debug("No microphone!");
                return(-1);
            }

            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "pl",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                int mode = -1;
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            if (result.IsFinal)
                            {
                                if (mode == -1)
                                {
                                    var firstWord = alternative.Transcript.IndexOf(" ") > -1
                                          ? alternative.Transcript.Substring(0, alternative.Transcript.IndexOf(" "))
                                          : alternative.Transcript;
                                    if (firstWord.ToLower() == resourceManager.GetString("text"))
                                    {
                                        mode = 1;
                                    }
                                    else if (firstWord.ToLower() == resourceManager.GetString("sign"))
                                    {
                                        mode = 2;
                                    }
                                    else if (firstWord.ToLower() == resourceManager.GetString("command"))
                                    {
                                        mode = 3;
                                    }
                                }
                                else
                                {
                                    log.Debug("Working in mode " + mode);
                                    switch (mode)
                                    {
                                    case 1:
                                        handler.InsertText(alternative.Transcript);
                                        break;

                                    case 2:
                                        handler.InsertSign(alternative.Transcript);
                                        break;

                                    case 3:
                                        handler.IssueCommand(alternative.Transcript);
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            log.Debug("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #17
        public static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "ko-KR",
                        Model           = "command_and_search",
                        UseEnhanced     = true,
                        SpeechContexts  = { new SpeechContext()
                                            {
                                                Phrases = { "티미야", "인터넷", "켜", "꺼" }
                                            } }
                    },
                    InterimResults = true
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            resultText = alternative.Transcript;

                            //Console.WriteLine(resultText);
                        }
                    }
                }
            });
            // Read from the microphone and stream to the API.
            object writeLock = new object();

            writeMore = true;
            var waveIn = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Beep(512, 50);
            Beep(640, 50);
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #18
        //Captures Speech and Converts to text; displays messages.
        private async void CaptureVoice(object obj)
        {
            NotRunning   = false;
            VoiceCapture = "Status: Loading";

            List <string> words = new List <string>();

            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "ja-JP",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                            words.Add(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            VoiceCapture = "Status: Speak Now";
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(5));

            // Stop recording and shut down.
            waveIn.StopRecording();
            VoiceCapture = "Status: Processing";
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            if (words.Contains(BackSide) == true || words.Contains(FrontSide) == true)
            {
                VoiceCapture = "Status: Correct";
                Console.WriteLine("True");
            }
            else
            {
                VoiceCapture = "Status: Incorrect";
                Console.WriteLine("False");
            }

            NotRunning = true;
        }
Example #19
        // [END speech_streaming_recognize]

        // [START speech_streaming_mic_recognize]
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #20
        private async Task <string> StreamingMicRecognizeAsync(int seconds)
        {
            string responses = string.Empty;

            try
            {
                if (NAudio.Wave.WaveIn.DeviceCount < 1)
                {
                    MessageBox.Show("No microphone!");
                    return("No micrphone found.");
                }
                var speech        = SpeechClient.Create();
                var streamingCall = speech.StreamingRecognize();
                // Write the initial request with the config.
                await streamingCall.WriteAsync(
                    new StreamingRecognizeRequest()
                {
                    StreamingConfig = new StreamingRecognitionConfig()
                    {
                        Config = new RecognitionConfig()
                        {
                            Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                            SampleRateHertz = 16000,
                            LanguageCode    = "en",
                        },
                        InterimResults = false,
                    }
                });

                // Print responses as they arrive.
                Task printResponses = Task.Run(async() =>
                {
                    StringBuilder builder = new StringBuilder();
                    while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                    {
                        foreach (var result in streamingCall.ResponseStream
                                 .Current.Results)
                        {
                            foreach (var alternative in result.Alternatives)
                            {
                                builder.Append(alternative.Transcript);
                            }
                        }
                    }

                    txtSpeech.Dispatcher.Invoke(() =>
                    {
                        txtSpeech.Text = builder.ToString();
                    });
                });

                // Read from the microphone and stream to API.
                object writeLock = new object();
                bool   writeMore = true;
                var    waveIn    = new NAudio.Wave.WaveInEvent();
                waveIn.DeviceNumber   = 0;
                waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
                waveIn.DataAvailable +=
                    (object sender, NAudio.Wave.WaveInEventArgs args) =>
                {
                    lock (writeLock)
                    {
                        if (!writeMore)
                        {
                            return;
                        }
                        streamingCall.WriteAsync(
                            new StreamingRecognizeRequest()
                        {
                            AudioContent = Google.Protobuf.ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
                        }).Wait();
                    }
                };

                try
                {
                    waveIn.StartRecording();
                    await Task.Delay(TimeSpan.FromSeconds(seconds), cancelRecordingTokenSource.Token);
                }
                catch (TaskCanceledException)
                {
                    // Recording was cancelled early; stopping is handled below.
                }
                finally
                {
                    waveIn.StopRecording(); // stop in every case, not only when the delay is cancelled
                }

                lock (writeLock) writeMore = false;
                await streamingCall.WriteCompleteAsync();

                await printResponses;
            }
            catch (Exception ex)
            {
                Trace.WriteLine(ex);
            }
            return(responses);
        }
Example #21
        public async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    var result     = streamingCall.ResponseStream.Current.Results.FirstOrDefault();
                    var transcript = result?.Alternatives.OrderBy(t => t.Confidence).Select(x => x.Transcript).ToList();
                    await Task.Run(() => eventHandler.eventSpeechRecognized(new VoiceRecognizedEvent
                    {
                        Transcripts = transcript
                    }));
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (sender, args) =>
            {
                double decibel = -95;
                if (args.Buffer != null)
                {
                    decibel = CalculateDecibels(args.Buffer);
                    Console.WriteLine($"Decibel level: {decibel}");
                }
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
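The CalculateDecibels helper called above is not shown. A minimal sketch, assuming interleaved 16-bit PCM and the -95 dB silence floor used as the default above:

        private static double CalculateDecibels(byte[] buffer)
        {
            double sumOfSquares = 0;
            int sampleCount = Math.Max(1, buffer.Length / 2); // 2 bytes per 16-bit sample
            for (int i = 0; i + 1 < buffer.Length; i += 2)
            {
                double sample = BitConverter.ToInt16(buffer, i) / 32768.0; // normalize to [-1, 1]
                sumOfSquares += sample * sample;
            }
            double rms = Math.Sqrt(sumOfSquares / sampleCount);
            return rms > 0 ? 20 * Math.Log10(rms) : -95; // dBFS, floored at -95 for silence
        }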
Example #22
        private static void StartAudioDevice()
        {
            List<NAudio.Wave.WaveInCapabilities> sources = new List<NAudio.Wave.WaveInCapabilities>();

            for (int i = 0; i < NAudio.Wave.WaveIn.DeviceCount; i++)
            {
                sources.Add(NAudio.Wave.WaveIn.GetCapabilities(i));
            }

            sourceStream = new NAudio.Wave.WaveInEvent();
            sourceStream.DeviceNumber = 0;
            sourceStream.WaveFormat = new NAudio.Wave.WaveFormat(44100, NAudio.Wave.WaveIn.GetCapabilities(0).Channels);
            sourceStream.DataAvailable += sourceStream_DataAvailable; // subscribe before starting so no early buffers are missed
            sourceStream.StartRecording();
        }
Example #23
        // [START dialogflow_detect_intent_streaming]
        public static async Task <object> DetectIntentFromStreamAsync(
            string projectId,
            string sessionId,
            string filePath)
        {
            var sessionsClient = SessionsClient.Create();
            var sessionName    = SessionName.FromProjectSession(projectId, sessionId).ToString();

            // Initialize streaming call, retrieving the stream object
            var streamingDetectIntent = sessionsClient.StreamingDetectIntent();

            // Define a task to process results from the API
            var responseHandlerTask = Task.Run(async() =>
            {
                var responseStream = streamingDetectIntent.GetResponseStream();
                while (await responseStream.MoveNextAsync())
                {
                    var response    = responseStream.Current;
                    var queryResult = response.QueryResult;

                    if (queryResult != null)
                    {
                        Console.WriteLine($"Query text: {queryResult.QueryText}");
                        if (queryResult.Intent != null)
                        {
                            Console.Write("Intent detected:");
                            Console.WriteLine(queryResult.Intent.DisplayName);
                        }
                    }
                }
            });

            // Instructs the speech recognizer how to process the audio content.
            // Note: hard coding audioEncoding, sampleRateHertz for simplicity.
            var queryInput = new QueryInput
            {
                AudioConfig = new InputAudioConfig
                {
                    AudioEncoding   = AudioEncoding.Linear16,
                    LanguageCode    = "en-US",
                    SampleRateHertz = 16000
                }
            };

            // The first request must **only** contain the audio configuration:
            await streamingDetectIntent.WriteAsync(new StreamingDetectIntentRequest
            {
                QueryInput = queryInput,
                Session    = sessionName
            });

            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }

                    streamingDetectIntent.WriteAsync(
                        new StreamingDetectIntentRequest()
                    {
                        InputAudio = Google.Protobuf.ByteString
                                     .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            const int seconds = 10; // assumed duration; the original snippet referenced an undefined `seconds` variable
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock)
            {
                writeMore = false;
            }

            // Tell the service you are done sending data
            await streamingDetectIntent.WriteCompleteAsync();

            // This will complete once all server responses have been processed.
            await responseHandlerTask;

            return(0);
        }
Example #24
        async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No Mic");
                return(-1);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();

            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en",
                    },
                    InterimResults = true,
                }
            });

            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        var sentanceResult  = result.Alternatives.Last().Transcript;
                        var newWordList     = sentanceResult.Split(' ');
                        var currentWordList = currentSentance.Split(' ');

                        if (newWordList.Length > currentWordList.Length ||
                            newWordList.First() != currentWordList.First())
                        {
                            currentSentance = sentanceResult;
                            textBox1.Invoke((MethodInvoker)(() => textBox1.AppendText(newWordList.Last() + Environment.NewLine)));
                            if (newWordList.Last() != lastTrigger)
                            {
                                textBox1.Invoke((MethodInvoker)(() => textBox1.AppendText("TRIGGER: " + newWordList.Last() + Environment.NewLine)));
                                lastTrigger = newWordList.Last();
                            }
                            Console.WriteLine(newWordList.Last());
                        }
                    }
                }
            });

            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Reconnecting stream");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #25
        public static async Task <List <string> > reconocerVoz(int tiempo)
        {
            List <string> listaSoluciones = new List <string>();

            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Debug.Log("Sin microfono");
                return(listaSoluciones);
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();


            //Initial request configuration
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "es-ES",
                    },
                    InterimResults  = true,
                    SingleUtterance = true     //stop recognizing once it detects the speaker has stopped talking
                }
            }
                );

            //Print responses as they arrive
            Task pintaRespuestas = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream.Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Debug.Log(alternative.Transcript);
                            listaSoluciones.Add(alternative.Transcript);
                        }
                    }
                }
            });


            //read from the microphone and send to the API
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable += (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }
                        ).Wait();
                }
            };
            waveIn.StartRecording();
            Debug.Log("Habla");
            grabando = true;
            await Task.Delay(TimeSpan.FromSeconds(tiempo));

            //stop recording and finish
            waveIn.StopRecording();
            grabando = false;
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await pintaRespuestas;
            await SpeechClient.ShutdownDefaultChannelsAsync();

            return(listaSoluciones);
        }
Example #26
        public static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            await streamingCall.WriteAsync(new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding        = RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en"
                    },
                    InterimResults = true
                }
            });

            //Print responses in real time
            Task printResponses = Task.Run(async() =>
            {
                var responseStream = streamingCall.GetResponseStream();
                while (await responseStream.MoveNextAsync())
                {
                    StreamingRecognizeResponse response = responseStream.Current;
                    foreach (StreamingRecognitionResult result in response.Results)
                    {
                        foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                        {
                            Debug.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            //Read from the microphone and stream to the Google APIs
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable += (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Debug.WriteLine("Parla ora!");
            await Task.Delay(TimeSpan.FromSeconds(seconds)); //the task is scheduled to finish after 'seconds' seconds

            //stop recording and shut down
            waveIn.StopRecording();
            lock (writeLock)
            {
                writeMore = false;
            }
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #27
        public static async Task <string> StreamingMicRecognizeAsync(string languageCode)
        {
            string result = string.Empty;

            var manualResetEvent = new ManualResetEvent(false);

            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                throw new Exception("No microphone!");
            }
            var speech        = SpeechClient.Create();
            var streamingCall = speech.GrpcClient.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.RequestStream.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRate   = 16000,
                        LanguageCode = languageCode
                    },
                    InterimResults = false,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(default(CancellationToken)))
                {
                    if (streamingCall.ResponseStream.Current.Results.Count == 1)
                    {
                        result = streamingCall.ResponseStream.Current.Results.Single().Alternatives.Single().Transcript;
                        manualResetEvent.Set();
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.RequestStream.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Run(() => { manualResetEvent.WaitOne(100000); }); // wait for a final result, up to 100 seconds

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.RequestStream.CompleteAsync();

            await printResponses;

            return(result);
        }
Example #28
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            // Alternative: build the client from an explicit credentials file.
            // SpeechClientBuilder builder = new SpeechClientBuilder
            // {
            //     CredentialsPath = credentialsFilePath
            // };
            // SpeechClient speech = builder.Build();
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "en-GB",
                        // The `model` value must be one of the following:
                        // "video", "phone_call", "command_and_search", "default"
                        Model = "phone_call",
                        // EnableWordTimeOffsets = true,
                        // DiarizationConfig = new SpeakerDiarizationConfig()
                        // {
                        //     EnableSpeakerDiarization = true,
                        //     MinSpeakerCount = 2
                        // },
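                        // Enhanced phone_call models generally improve accuracy on telephony
                        // audio, though they may be billed at a higher rate.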
                        UseEnhanced = true,
                    },
                    InterimResults = false,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
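                // Newer library versions expose the response stream via GetResponseStream(),
                // enumerated asynchronously with MoveNextAsync().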
                var responseStream = streamingCall.GetResponseStream();
                while (await responseStream.MoveNextAsync())
                {
                    StreamingRecognizeResponse response = responseStream.Current;
                    foreach (StreamingRecognitionResult result in response.Results)
                    {
                        foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }

                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock)
            {
                writeMore = false;
            }

            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #29
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            var speech        = SpeechClient.Create();
            var streamingCall = speech.StreamingRecognize();
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "tr",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                //AutoItX.Run("notepad.exe", null);
                int test = AutoItX.WinWaitActive("Untitled - Notepad");
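                // WinWaitActive blocks this task until a window titled "Untitled - Notepad"
                // has focus; recognized phrases are then typed into it with AutoItX.Send.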
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
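                    // Confidence is only populated on final results; interim results report 0.0,
                    // so with InterimResults = true most responses take the "else" branch below.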
                    if (streamingCall.ResponseStream.Current.Results.Count == 0)
                    {
                        continue;
                    }
                    // "dogruluk_orani" is Turkish for "accuracy ratio", i.e. the confidence score.
                    double dogruluk_orani = streamingCall.ResponseStream.Current.Results[0].Alternatives[0].Confidence;

                    if (dogruluk_orani > 0.60)
                    {
                        Console.WriteLine("D/O: " + dogruluk_orani + " | " + streamingCall.ResponseStream.Current.Results[0].Alternatives[0].Transcript);
                        AutoItX.Send(streamingCall.ResponseStream.Current.Results[0].Alternatives[0].Transcript + "\n");
                    }
                    else
                    {
                        Console.WriteLine("Not understood...");
                    }

                    foreach (StreamingRecognitionResult result in streamingCall.ResponseStream.Current.Results)
                    {
                        foreach (SpeechRecognitionAlternative alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #30
        static async Task <object> StreamingMicRecognizeAsync(int seconds)
        {
            //await Connect();

            if (NAudio.Wave.WaveIn.DeviceCount < 1)
            {
                Console.WriteLine("No microphone!");
                return(-1);
            }
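            // Builds the client from an explicit service-account key file instead of relying
            // on the GOOGLE_APPLICATION_CREDENTIALS environment variable.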
            var credential = GoogleCredential.FromFile(@"D:\EsferaColor\TranscribingAudio\SpeakToText-c65312fe0200.json").CreateScoped(SpeechClient.DefaultScopes);
            var channel    = new Grpc.Core.Channel(SpeechClient.DefaultEndpoint.ToString(), credential.ToChannelCredentials());

            var speech        = SpeechClient.Create(channel);
            var streamingCall = speech.StreamingRecognize();

            //var speech = SpeechClient.Create(); /*AuthExplicitComputeEngine("640f1acceb995a6bc4deb4e766e76dca6c5bb7d0");*/
            // Write the initial request with the config.
            await streamingCall.WriteAsync(
                new StreamingRecognizeRequest()
            {
                StreamingConfig = new StreamingRecognitionConfig()
                {
                    Config = new RecognitionConfig()
                    {
                        Encoding =
                            RecognitionConfig.Types.AudioEncoding.Linear16,
                        SampleRateHertz = 16000,
                        LanguageCode    = "es-ES",
                    },
                    InterimResults = true,
                }
            });

            // Print responses as they arrive.
            Task printResponses = Task.Run(async() =>
            {
                while (await streamingCall.ResponseStream.MoveNext(
                           default(CancellationToken)))
                {
                    foreach (var result in streamingCall.ResponseStream
                             .Current.Results)
                    {
                        foreach (var alternative in result.Alternatives)
                        {
                            Console.WriteLine(alternative.Transcript);
                        }
                    }
                }
            });
            // Read from the microphone and stream to API.
            object writeLock = new object();
            bool   writeMore = true;
            var    waveIn    = new NAudio.Wave.WaveInEvent();

            waveIn.DeviceNumber   = 0;
            waveIn.WaveFormat     = new NAudio.Wave.WaveFormat(16000, 1);
            waveIn.DataAvailable +=
                (object sender, NAudio.Wave.WaveInEventArgs args) =>
            {
                lock (writeLock)
                {
                    if (!writeMore)
                    {
                        return;
                    }
                    streamingCall.WriteAsync(
                        new StreamingRecognizeRequest()
                    {
                        AudioContent = Google.Protobuf.ByteString
                                       .CopyFrom(args.Buffer, 0, args.BytesRecorded)
                    }).Wait();
                }
            };
            waveIn.StartRecording();
            Console.WriteLine("Speak now.");
            await Task.Delay(TimeSpan.FromSeconds(seconds));

            // Stop recording and shut down.
            waveIn.StopRecording();
            lock (writeLock) writeMore = false;
            await streamingCall.WriteCompleteAsync();

            await printResponses;

            return(0);
        }
Example #31
        public void Start()
        {
            wvin.StartRecording();
        }