Example #1
        void Start()
        {
            AudioSource audioSource = gameObject.AddComponent<AudioSource>();

            Debug.Log(audioSource);
            audioSource.spatialize = spatialize;
            bufferedAudioStream = new BufferedAudioStream(audioSource);
            decoder = new Decoder();
        }
Example #2
        void Awake()
        {
            AudioSource audioSource = gameObject.AddComponent<AudioSource>();

            Debug.Log(audioSource);
            audioSource.spatialize = spatialize;
            bufferedAudioStream    = new BufferedAudioStream(audioSource);
            decoder = new Decoder();
        }
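Both snippets assume fields declared elsewhere in the enclosing MonoBehaviour. A plausible surrounding declaration, reconstructed from the names used above (the class name and serialization attribute are assumptions, not part of the snippet):

        using UnityEngine;

        // Hypothetical enclosing class; only the field names come from the snippet.
        public class StreamingAudioPlayer : MonoBehaviour
        {
            [SerializeField] private bool spatialize = true;

            private BufferedAudioStream bufferedAudioStream;
            private Decoder decoder;

            // Awake() as in Example #2 ...
        }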
Example #3
        public void BufferedAudioStream_TestFill()
        {
            BufferedAudioStream bas = new BufferedAudioStream(3);

            byte[] data = new byte[] { 0, 1, 2 };
            bas.Write(data, 0, data.Length);
            Assert.AreEqual(3, bas.BytesAvailable);
            CollectionAssert.AreEqual(data, bas.Read());
            Assert.AreEqual(0, bas.BytesAvailable);
        }
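This test pins down the buffer's API surface: a capacity-based constructor, a Write taking an offset and length, a Read that drains everything currently buffered, and a BytesAvailable counter. As an illustration only (not the actual Psi declaration; the real class is concrete), the implied contract could be written as:

        // Hypothetical interface implied by the tests.
        public interface IBufferedAudioStream
        {
            int BytesAvailable { get; }                       // bytes currently buffered
            void Write(byte[] buffer, int offset, int count); // append count bytes
            byte[] Read();                                    // drain and return all buffered bytes
        }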
Example #4
        void Awake()
        {
            audioSource         = gameObject.AddComponent<AudioSource>();
            bufferedAudioStream = new BufferedAudioStream(audioSource);

            const int frequency          = 48000;
            const int bufferSizeMS       = 150;
            const int bufferSizeElements = bufferSizeMS * frequency / 1000;

            scratchBuffer = new float[bufferSizeElements];
        }
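With these constants the scratch buffer works out to 150 * 48000 / 1000 = 7200 float elements, i.e. 150 ms of mono audio at 48 kHz (about 28 KB at 4 bytes per float).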
Example #5
        /// <summary>
        /// Initializes a new instance of the <see cref="MicrosoftSpeechRecognizer"/> class.
        /// </summary>
        /// <param name="pipeline">The pipeline to add the component to.</param>
        /// <param name="configuration">The component configuration.</param>
        public MicrosoftSpeechRecognizer(Pipeline pipeline, MicrosoftSpeechRecognizerConfiguration configuration)
            : base(pipeline)
        {
            this.Configuration = configuration ?? new MicrosoftSpeechRecognizerConfiguration();

            // create receiver of grammar updates
            this.ReceiveGrammars = pipeline.CreateReceiver<IEnumerable<string>>(this, this.SetGrammars, nameof(this.ReceiveGrammars), true);

            // create receiver of grammar updates by name
            this.ReceiveGrammarNames = pipeline.CreateReceiver<string[]>(this, this.EnableGrammars, nameof(this.ReceiveGrammarNames), true);

            // assign the default Out emitter to the RecognitionResults group
            this.originatingTimeConsistencyCheckGroup.Add(this.Out, EmitterGroup.RecognitionResults);

            // create the additional output streams
            this.PartialRecognitionResults = this.CreateEmitterInGroup<IStreamingSpeechRecognitionResult>(pipeline, nameof(this.PartialRecognitionResults), EmitterGroup.IntentData);
            this.IntentData = this.CreateEmitterInGroup<IntentData>(pipeline, nameof(this.IntentData), EmitterGroup.IntentData);

            // create output streams for speech event args
            this.SpeechDetected            = this.CreateEmitterInGroup<SpeechDetectedEventArgs>(pipeline, nameof(this.SpeechDetected), EmitterGroup.SpeechEvents);
            this.SpeechHypothesized        = this.CreateEmitterInGroup<SpeechHypothesizedEventArgs>(pipeline, nameof(this.SpeechHypothesized), EmitterGroup.SpeechEvents);
            this.SpeechRecognized          = this.CreateEmitterInGroup<SpeechRecognizedEventArgs>(pipeline, nameof(this.SpeechRecognized), EmitterGroup.SpeechEvents);
            this.SpeechRecognitionRejected = this.CreateEmitterInGroup<SpeechRecognitionRejectedEventArgs>(pipeline, nameof(this.SpeechRecognitionRejected), EmitterGroup.SpeechEvents);
            this.RecognizeCompleted        = this.CreateEmitterInGroup<RecognizeCompletedEventArgs>(pipeline, nameof(this.RecognizeCompleted), EmitterGroup.SpeechEvents);
            this.EmulateRecognizeCompleted = this.CreateEmitterInGroup<EmulateRecognizeCompletedEventArgs>(pipeline, nameof(this.EmulateRecognizeCompleted), EmitterGroup.SpeechEvents);

            // create output streams for audio state event args
            this.AudioSignalProblemOccurred = this.CreateEmitterInGroup<AudioSignalProblemOccurredEventArgs>(pipeline, nameof(this.AudioSignalProblemOccurred), EmitterGroup.AudioEvents);
            this.AudioStateChanged          = this.CreateEmitterInGroup<AudioStateChangedEventArgs>(pipeline, nameof(this.AudioStateChanged), EmitterGroup.AudioEvents);
            this.AudioLevelUpdated          = this.CreateEmitterInGroup<AudioLevelUpdatedEventArgs>(pipeline, nameof(this.AudioLevelUpdated), EmitterGroup.AudioEvents);

            // create output streams for the grammar and state-update event args
            this.LoadGrammarCompleted    = this.CreateEmitterInGroup<LoadGrammarCompletedEventArgs>(pipeline, nameof(this.LoadGrammarCompleted), EmitterGroup.StateUpdateEvents);
            this.RecognizerUpdateReached = this.CreateEmitterInGroup<RecognizerUpdateReachedEventArgs>(pipeline, nameof(this.RecognizerUpdateReached), EmitterGroup.StateUpdateEvents);

            // create table of last stream group originating times
            this.lastPostedOriginatingTimes = new Dictionary<EmitterGroup, DateTime>();

            // Create a BufferedAudioStream with an internal buffer large enough
            // to accommodate the specified number of milliseconds of audio data.
            this.inputAudioStream = new BufferedAudioStream(
                this.Configuration.InputFormat.AvgBytesPerSec * this.Configuration.BufferLengthInMs / 1000);

            this.recognizeCompleteManualReset = new ManualResetEvent(false);

            // create the recognition engine
            this.speechRecognitionEngine = this.CreateSpeechRecognitionEngine();
        }
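The BufferedAudioStream here is sized in bytes as AvgBytesPerSec * BufferLengthInMs / 1000. For example, with 16 kHz 16-bit mono PCM (AvgBytesPerSec = 32000) and a 1000 ms buffer length, the stream would be allocated 32000 bytes; the concrete format and length are illustrative, since both come from the configuration.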
Example #6
        /// <summary>
        /// Initializes a new instance of the <see cref="SystemVoiceActivityDetector"/> class.
        /// </summary>
        /// <param name="pipeline">The pipeline to add the component to.</param>
        /// <param name="configuration">The component configuration.</param>
        public SystemVoiceActivityDetector(Pipeline pipeline, SystemVoiceActivityDetectorConfiguration configuration)
            : base(pipeline)
        {
            this.configuration = configuration ?? new SystemVoiceActivityDetectorConfiguration();

            // Create a BufferedAudioStream with an internal buffer large enough
            // to accommodate the specified number of milliseconds of audio data.
            this.inputAudioStream = new BufferedAudioStream(
                this.Configuration.InputFormat.AvgBytesPerSec * this.Configuration.BufferLengthInMs / 1000);

            this.messageOriginatingTimes = new Queue<DateTime>();
            this.recognizeComplete       = new ManualResetEvent(false);

            // create the recognition engine
            this.speechRecognitionEngine = this.CreateSpeechRecognitionEngine();

            // start the speech recognition engine
            this.speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
Example #7
        /// <summary>
        /// Initializes a new instance of the <see cref="MicrosoftSpeechRecognizer"/> class.
        /// </summary>
        /// <param name="pipeline">The pipeline to add the component to.</param>
        /// <param name="configuration">The component configuration.</param>
        public MicrosoftSpeechRecognizer(Pipeline pipeline, MicrosoftSpeechRecognizerConfiguration configuration)
            : base(pipeline)
        {
            pipeline.RegisterPipelineStartHandler(this, this.OnPipelineStart);

            this.Configuration = configuration;

            // create receiver of grammar updates
            this.ReceiveGrammars = pipeline.CreateReceiver<IEnumerable<string>>(this, this.SetGrammars, nameof(this.ReceiveGrammars), true);

            // create receiver of grammar updates by name
            this.ReceiveGrammarNames = pipeline.CreateReceiver<string[]>(this, this.EnableGrammars, nameof(this.ReceiveGrammarNames), true);

            // create the additional output streams
            this.PartialRecognitionResults = pipeline.CreateEmitter<IStreamingSpeechRecognitionResult>(this, nameof(this.PartialRecognitionResults));
            this.IntentData = pipeline.CreateEmitter<IntentData>(this, nameof(this.IntentData));

            // create output streams for all the event args
            this.SpeechDetected             = pipeline.CreateEmitter<SpeechDetectedEventArgs>(this, nameof(SpeechDetectedEventArgs));
            this.SpeechHypothesized         = pipeline.CreateEmitter<SpeechHypothesizedEventArgs>(this, nameof(SpeechHypothesizedEventArgs));
            this.SpeechRecognized           = pipeline.CreateEmitter<SpeechRecognizedEventArgs>(this, nameof(SpeechRecognizedEventArgs));
            this.SpeechRecognitionRejected  = pipeline.CreateEmitter<SpeechRecognitionRejectedEventArgs>(this, nameof(SpeechRecognitionRejectedEventArgs));
            this.AudioSignalProblemOccurred = pipeline.CreateEmitter<AudioSignalProblemOccurredEventArgs>(this, nameof(AudioSignalProblemOccurredEventArgs));
            this.AudioStateChanged          = pipeline.CreateEmitter<AudioStateChangedEventArgs>(this, nameof(AudioStateChangedEventArgs));
            this.RecognizeCompleted         = pipeline.CreateEmitter<RecognizeCompletedEventArgs>(this, nameof(RecognizeCompletedEventArgs));
            this.AudioLevelUpdated          = pipeline.CreateEmitter<AudioLevelUpdatedEventArgs>(this, nameof(AudioLevelUpdatedEventArgs));
            this.EmulateRecognizeCompleted  = pipeline.CreateEmitter<EmulateRecognizeCompletedEventArgs>(this, nameof(EmulateRecognizeCompletedEventArgs));
            this.LoadGrammarCompleted       = pipeline.CreateEmitter<LoadGrammarCompletedEventArgs>(this, nameof(LoadGrammarCompletedEventArgs));
            this.RecognizerUpdateReached    = pipeline.CreateEmitter<RecognizerUpdateReachedEventArgs>(this, nameof(RecognizerUpdateReachedEventArgs));

            // create table of last stream originating times
            this.lastPostedOriginatingTimes = new Dictionary<IEmitter, DateTime>();

            // Create a BufferedAudioStream with an internal buffer large enough
            // to accommodate the specified number of milliseconds of audio data.
            this.inputAudioStream = new BufferedAudioStream(
                this.Configuration.InputFormat.AvgBytesPerSec * this.Configuration.BufferLengthInMs / 1000);

            this.recognizeCompleteManualReset = new ManualResetEvent(false);

            // create the recognition engine
            this.speechRecognitionEngine = this.CreateSpeechRecognitionEngine();
        }
Example #8
        public void BufferedAudioStream_TestBlockingRead()
        {
            BufferedAudioStream bas = new BufferedAudioStream(3);

            // This task should block until the write happens
            Task readTask = Task.Run(() =>
            {
                CollectionAssert.AreEqual(new byte[] { 0, 1, 2 }, bas.Read());
            });

            // We are writing more than the capacity, which should block the
            // write until the read frees up more space for it to complete.
            byte[] data = new byte[] { 0, 1, 2, 3, 4, 5 };
            bas.Write(data, 0, data.Length);

            // Wait on the read task so its assertion is observed by the test.
            readTask.Wait();

            Assert.AreEqual(3, bas.BytesAvailable);
            CollectionAssert.AreEqual(new byte[] { 3, 4, 5 }, bas.Read());
            Assert.AreEqual(0, bas.BytesAvailable);
        }
Example #9
        public void BufferedAudioStream_TestBlockingWrite()
        {
            BufferedAudioStream bas = new BufferedAudioStream(3);

            byte[] data = new byte[] { 0, 1, 2 };
            bas.Write(data, 0, data.Length);

            // This task should block until the read happens
            Task writeTask = Task.Run(() =>
            {
                byte[] moreData = new byte[] { 3, 4, 5 };
                bas.Write(moreData, 0, moreData.Length);
            });

            // Read in two chunks. The first chunk will be the data that was
            // pre-populated in the stream. Once that chunk is read the write
            // should unblock and the next chunk should get written to the stream.
            Assert.AreEqual(3, bas.BytesAvailable);
            CollectionAssert.AreEqual(new byte[] { 0, 1, 2 }, bas.Read());
            writeTask.Wait();
            Assert.AreEqual(3, bas.BytesAvailable);
            CollectionAssert.AreEqual(new byte[] { 3, 4, 5 }, bas.Read());
            Assert.AreEqual(0, bas.BytesAvailable);
        }
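Taken together, the three tests fix the blocking contract: Write blocks while the buffer is full until a read frees space, and Read blocks while the buffer is empty, then drains everything available. A minimal sketch of a circular buffer with these semantics, assuming nothing about the real Microsoft.Psi implementation beyond what the tests require:

        using System.Threading;

        // Illustrative only -- a blocking byte buffer consistent with the tests
        // above, not the actual Microsoft.Psi BufferedAudioStream source.
        public class BlockingByteBuffer
        {
            private readonly byte[] buffer;
            private readonly object gate = new object();
            private int head;  // next position to read from
            private int count; // bytes currently stored

            public BlockingByteBuffer(int capacity)
            {
                this.buffer = new byte[capacity];
            }

            public int BytesAvailable
            {
                get { lock (this.gate) { return this.count; } }
            }

            // Blocks until all requested bytes are written, waiting for reads
            // to free space whenever the buffer fills up.
            public void Write(byte[] data, int offset, int length)
            {
                lock (this.gate)
                {
                    int written = 0;
                    while (written < length)
                    {
                        while (this.count == this.buffer.Length)
                        {
                            Monitor.Wait(this.gate); // full: wait for a read
                        }

                        // Copy as much as currently fits.
                        while (written < length && this.count < this.buffer.Length)
                        {
                            int tail = (this.head + this.count) % this.buffer.Length;
                            this.buffer[tail] = data[offset + written];
                            this.count++;
                            written++;
                        }

                        Monitor.PulseAll(this.gate); // wake any blocked reader
                    }
                }
            }

            // Blocks until at least one byte is available, then drains the buffer.
            public byte[] Read()
            {
                lock (this.gate)
                {
                    while (this.count == 0)
                    {
                        Monitor.Wait(this.gate); // empty: wait for a write
                    }

                    byte[] result = new byte[this.count];
                    for (int i = 0; i < result.Length; i++)
                    {
                        result[i] = this.buffer[(this.head + i) % this.buffer.Length];
                    }

                    this.head = (this.head + this.count) % this.buffer.Length;
                    this.count = 0;
                    Monitor.PulseAll(this.gate); // wake any blocked writer
                    return result;
                }
            }
        }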