// Example #1
    // Desc: This is the "secret-sauce" of PocketSphinx. This method actually provides Sphinx with data and checks to see if we detect a keyphrase.
    private void ProcessAudio(AudioClip audio)
    {
        // Buffer sized to hold every sample across all channels of the clip.
        var newData = new float[audio.samples * audio.channels];

        // Copy the clip's PCM data into our buffer, starting at sample offset 0.
        audio.GetData(newData, 0);

        // Convert float samples into the raw byte format the decoder expects.
        byte[] byteData = ConvertToBytes(newData, audio.channels);

        // Process the raw byte data with our decoder
        // (false, false: not a full utterance, not search-only).
        d.ProcessRaw(byteData, byteData.Length, false, false);

        // Query the hypothesis exactly once. The original called d.Hyp() twice
        // (once for the null check, once for the text), which is redundant and
        // could observe different decoder state between the two calls.
        var hyp = d.Hyp();
        if (hyp != null)
        {
            // Null-conditional invoke: safe even if the last subscriber
            // detaches between the check and the call.
            OnSpeechRecognized?.Invoke(hyp.Hypstr);

            // End the current utterance and immediately start a new one so the
            // decoder is ready to detect the next keyphrase.
            d.EndUtt();
            d.StartUtt();
        }
    }
        // Handler for the speech engine's recognition event: dispatches the
        // mapped action when confidence clears the threshold, then notifies
        // listeners of the recognized value either way.
        void speechEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // The semantic value carries the recognized command keyword.
            string recognizedValue = e.Result.Semantics.Value.ToString();

            // Only dispatch a mapped action when confidence meets the sensitivity gate.
            if (e.Result.Confidence >= sensitivity)
            {
                if (speechActions.TryGetValue(recognizedValue, out Action mappedAction))
                {
                    mappedAction();
                }
            }

            // Raise the event regardless of the confidence gate.
            OnSpeechRecognized?.Invoke(recognizedValue, e.Result.Confidence);
        }
// Example #3
    // Handles a dictation result: forwards valid speech to listeners, then
    // restarts keyword listening once the dictation recognizer has fully stopped.
    IEnumerator OnSpeechResult(string speech)
    {
        // Guard against null as well as empty/error results. The original
        // evaluated speech.Length before any null check, so a null result
        // would throw a NullReferenceException.
        if (!string.IsNullOrEmpty(speech) && speech != "error")
        {
            speechRecognized?.Invoke(speech);
            yield return(new WaitForEndOfFrame());
        }
        else
        {
            // Log the error/empty result for debugging.
            print(speech);
        }

        m_DictationRecognizer.Stop();
        // Wait for dictation to stop, then listen for the keyword again.
        while (m_DictationRecognizer.Status == SpeechSystemStatus.Running)
        {
            yield return(new WaitForEndOfFrame());
        }
        ListenForKeyword();
    }
        /// <summary>
        /// Opens a streaming recognition session against the Google Cloud Speech
        /// API, starts a background task that raises <c>OnSpeechRecognized</c>
        /// for each final transcript, and activates the microphone to feed audio.
        /// </summary>
        /// <returns>0 on normal completion; -1 if any exception was caught.</returns>
        private async Task <int> StreamingMicRecognizeAsync()
        {
            try
            {
                // Allow the microphone callback (elsewhere in this class) to keep
                // writing audio to the stream. NOTE(review): _writeMore appears to
                // be a shared flag with the capture path — confirm its consumer.
                _writeMore = true;
                timer      = new Stopwatch();
                timer.Start();
                if (WaveIn.DeviceCount < 1)
                {
                    throw new ApplicationException("No microphone!");
                }

                _speechClient = SpeechClient.Create();
                var stream = _speechClient.StreamingRecognize();
                // Track the stream so it can be closed/managed elsewhere.
                streams.Add(stream);
                // Hint the recognizer with programming keywords (plus a Russian
                // word for "variable") to bias transcription toward code dictation.
                var speechContext = new SpeechContext();
                speechContext.Phrases.AddRange(new[]
                                               { "int", "for", "true", "false", "public", "private", "bool", "static", "void", "переменная" }
                                               /*.Concat(_variableProvider.GetVariables().Select(v => v.Name))*/);
                // Write the initial request with the config (must be the first
                // message on a streaming-recognize call).
                StreamingRecognizeRequest recognizeRequest = GetStreamingRecognizeRequest(speechContext);
                await stream.WriteAsync(recognizeRequest);

                // Consume responses on a background task as they arrive.

                Task printResponses = Task.Run(async() =>
                {
                    while (await stream.ResponseStream.MoveNext(default(CancellationToken)))
                    {
                        foreach (StreamingRecognitionResult streamingRecognitionResult in stream
                                 .ResponseStream
                                 .Current.Results)
                        {
                            // Only final results are surfaced; interim hypotheses are ignored.
                            if (streamingRecognitionResult.IsFinal)
                            {
                                // NOTE(review): Alternatives[0] assumes at least one
                                // alternative is present on a final result — confirm
                                // against the API contract.
                                var transcript = streamingRecognitionResult.Alternatives[0].Transcript;
                                OnSpeechRecognized?.Invoke(this, new SpeechRecognizerEventArgs(transcript));
                                // Restart the session once it has run longer than the
                                // threshold (streaming sessions have a server-side limit).
                                if (timer.Elapsed.TotalSeconds >= threshold)
                                {
                                    Restart();
                                }
                            }
                        }
                    }
                });
                // Read from the microphone and stream to API.
                ActivateMicrophone();
                Console.WriteLine("Speak now.");
                //await Task.Delay(TimeSpan.FromSeconds(seconds));
                // Stop recording and shut down.
                //StopRecognition();
                // Block until the response reader finishes (i.e. the stream ends).
                await printResponses;
                //await printResponses;
                return(0);
            }
            catch (Exception e)
            {
                // Best-effort: log and fall through to the failure return value.
                Debug.WriteLine(e);
            }

            return(-1);
        }