Code Example #1
        static void Main( string[] args )
        {
            // Create the instance that captures the audio source
            using ( KinectAudioSource source = new KinectAudioSource() ) {
                // SingleChannelAec: single-channel microphone with echo cancellation
                // OptibeamArrayOnly: multi-channel microphone array only (no echo cancellation)
                // OptibeamArrayAndAec: multi-channel microphone array with echo cancellation
                // SingleChannelNsAgc: single-channel microphone only (no echo cancellation)
                source.SystemMode = SystemMode.OptibeamArrayOnly;
                source.BeamChanged += new EventHandler<BeamChangedEventArgs>( source_BeamChanged );

                using ( Stream audioStream = source.Start() ) {
                    Console.WriteLine( "Start... Press any key" );

                    byte[] buffer = new byte[4096];
                    Win32.StreamingWavePlayer player = new Win32.StreamingWavePlayer( 16000, 16, 1, 100 );
                    while ( !Console.KeyAvailable ) {
                        int count = audioStream.Read( buffer, 0, buffer.Length );
                        player.Output( buffer );

                        // If the estimated sound source direction is reliable, print the updated direction
                        if ( source.SoundSourcePositionConfidence > 0.9 ) {
                            Console.Write( "Sound source position (estimated) : {0}\t\tBeam direction : {1}\r",
                                source.SoundSourcePosition, source.MicArrayBeamAngle );
                        }
                    }
                }
            }
        }
Code Example #2
        public SoundDataProcessor(KeyboardInputProcessor keyboard)
        {
            ConfigureLogManager();
            keyboardProcessor = keyboard;
            kinectAudioResourse = new KinectAudioSource();
            kinectAudioResourse.FeatureMode = true;
            kinectAudioResourse.AutomaticGainControl = false; //Important to turn this off for speech recognition
            kinectAudioResourse.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

            ri = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == recognizerId).FirstOrDefault();

            if (ri == null)
            {
                Trace.WriteLine(string.Format("Could not find speech recognizer: {0}. Please refer to the sample requirements.", recognizerId));
                throw new System.InvalidOperationException(string.Format("Could not find speech recognizer: {0}. Please refer to the sample requirements.", recognizerId));
            }

            sre = new SpeechRecognitionEngine(ri.Id);
            speechCommands = new Choices();

            speechCommands.Add("jump");
            speechCommands.Add("reload");
            speechCommands.Add("aim");
            speechCommands.Add("knife");
            speechCommands.Add("grenade");

            speechCommands.Add("menu");
            speechCommands.Add("pause");
            speechCommands.Add("select");
            speechCommands.Add("okay");
            speechCommands.Add("enter");
            speechCommands.Add("up");
            speechCommands.Add("down");
            speechCommands.Add("left");
            speechCommands.Add("right");

            gb = new GrammarBuilder();
            gb.Culture = ri.Culture;
            gb.Append(speechCommands);

            grammar = new Grammar(gb);
            sre.LoadGrammar(grammar);

            audioSourceStream = kinectAudioResourse.Start();
            sre.SetInputToAudioStream(audioSourceStream,
                                                  new SpeechAudioFormatInfo(
                                                      EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));
            sre.RecognizeAsync(RecognizeMode.Multiple);
            handleRequests = new Semaphore(0, (int)SemaphoreConstants.MAX_CONCURRENT_REQUESTS);
            requestSoundData = new Semaphore((int)SemaphoreConstants.MAX_CONCURRENT_REQUESTS, (int)SemaphoreConstants.MAX_CONCURRENT_REQUESTS);

            dataQueue = new Queue<SoundData>();
            threadExit = false;

            soundProcessorThread = new Thread(SoundProcessorModule);
            soundProcessorThread.Name = "SoundProcessorThread";
            soundProcessorThread.SetApartmentState(ApartmentState.MTA);
            soundProcessorThread.Start();
        }
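
The recognizerId field tested above (and the RecognizerId / RECOGNIZER_ID constants in several later examples) is not defined in this listing. In the Kinect for Windows SDK beta speech samples it is typically the ID of the Kinect acoustic model; the exact value below is an assumption taken from those samples, not from this project.

        // Assumed value: ID of the Kinect speech recognizer installed with the SDK beta samples
        private const string recognizerId = "SR_MS_en-US_Kinect_10.0";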
Code Example #3
File: SpeechRecognizer.cs Project: cdbean/CAGA
        public SpeechRecognizer(string file, KinectSensor sensor)
        {
            this.grammarFile = file;
            this.kinectSensor = sensor;
            audioSource = kinectSensor.AudioSource;
            audioSource.AutomaticGainControlEnabled = false;
            audioSource.BeamAngleMode = BeamAngleMode.Adaptive;

            Func<RecognizerInfo, bool> matchingFunc = r =>
            {
                string value;
                r.AdditionalInfo.TryGetValue("Kinect", out value);
                return "True".Equals(value, StringComparison.InvariantCultureIgnoreCase) && "en-US".Equals(r.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
            };
            var recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().Where(matchingFunc).FirstOrDefault();
            if (recognizerInfo == null)
                return;

            speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);

            var grammar = new Grammar(grammarFile);
            speechRecognitionEngine.LoadGrammar(grammar);

            audioStream = audioSource.Start();
            speechRecognitionEngine.SetInputToAudioStream(audioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

            speechRecognitionEngine.AudioStateChanged += onAudioStateChanged;
            speechRecognitionEngine.SpeechRecognized += onSpeechRecognized;
            speechRecognitionEngine.RecognizeCompleted += onSpeechRecognizeCompleted;
            speechRecognitionEngine.EmulateRecognizeCompleted += onEmulateRecognizeCompleted;
        }
Code Example #4
        public ComponentControl()
        {
            this.AudioSource = new KinectAudioSource();

            this.AudioSource.FeatureMode = true;
            this.AudioSource.AutomaticGainControl = false;
            this.AudioSource.SystemMode = SystemMode.OptibeamArrayOnly;
            this.AudioSource.BeamChanged += new EventHandler<BeamChangedEventArgs>(AudioSource_BeamChanged);

            this.Recognizer = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();

            if(this.Recognizer == null) {
                throw new Exception("Could not find Kinect speech recognizer");
            }

            this.Engine = new SpeechRecognitionEngine(Recognizer.Id);
            this.Engine.UnloadAllGrammars();

            this.LoadGrammer();

            this.AudioStream = this.AudioSource.Start();
            this.Engine.SetInputToAudioStream(this.AudioStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

            this.Engine.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(Engine_SpeechHypothesized);

            this.Engine.RecognizeAsync(RecognizeMode.Multiple);
            Console.WriteLine("Speech recognition initialized");
        }
Code Example #5
 private void CleanUpAudioResources()
 {
     if (source != null)
     {
         source.Stop();
         source = null;
     }
 }
Code Example #6
        static void Main()
        {
            using (var source = new KinectAudioSource())
            {
                source.FeatureMode = true;
                source.AutomaticGainControl = false;
                source.SystemMode = SystemMode.OptibeamArrayOnly;

                RecognizerInfo ri = GetKinectRecognizer();

                if (ri == null)
                {
                    Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                    return;
                }

                Console.WriteLine("Using: {0}", ri.Name);

                using (var sre = new SpeechRecognitionEngine(ri.Id))
                {
                    //declare commands to be used
                    var commands = new Choices();
                    commands.Add("activate");
                    commands.Add("off");
                    commands.Add("open");
                    commands.Add("manual");
                    commands.Add("hold");
                    commands.Add("land");
                    commands.Add("stabilize");

                    var gb = new GrammarBuilder {Culture = ri.Culture};
                    //Specify the culture to match the recognizer in case we are running in a different culture.                                 
                    gb.Append(commands);

                    // Create the actual Grammar instance, and then load it into the speech recognizer.
                    var g = new Grammar(gb);

                    sre.LoadGrammar(g);
                    sre.SpeechRecognized += SreSpeechRecognized;
                    sre.SpeechRecognitionRejected += SreSpeechRejected;

                    using (Stream s = source.Start())
                    {
                        sre.SetInputToAudioStream(s,
                                                  new SpeechAudioFormatInfo(
                                                      EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

                        Console.WriteLine("Recognizing... Press ENTER to stop");

                        sre.RecognizeAsync(RecognizeMode.Multiple);
                        Console.ReadLine();
                        Console.WriteLine("Stopping recognizer ...");
                        sre.RecognizeAsyncStop();
                    }
                }
            }
        }
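
Several of these examples (including the one above) call a GetKinectRecognizer() helper that is not shown. A minimal sketch, reusing the matching logic from Code Example #3 (look for the "Kinect" flag in RecognizerInfo.AdditionalInfo and the en-US culture), could be:

        private static RecognizerInfo GetKinectRecognizer()
        {
            // Match the first installed recognizer that is flagged for Kinect and uses the en-US culture
            Func<RecognizerInfo, bool> matchingFunc = r =>
            {
                string value;
                r.AdditionalInfo.TryGetValue("Kinect", out value);
                return "True".Equals(value, StringComparison.InvariantCultureIgnoreCase)
                    && "en-US".Equals(r.Culture.Name, StringComparison.InvariantCultureIgnoreCase);
            };
            return SpeechRecognitionEngine.InstalledRecognizers().Where(matchingFunc).FirstOrDefault();
        }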
Code Example #7
        public CommandRecognitionAdapter(Dictionary<string, Action> callbacks)
        {
            this.callbacks = callbacks;

            var source = new KinectAudioSource();
            source.FeatureMode = true;
            source.AutomaticGainControl = false;
            source.SystemMode = SystemMode.OptibeamArrayOnly;
        }
Code Example #8
 public AudioIn_KinectXbox(FIFOStream stream)
 {
     mStream = stream;
     mSrc = new KinectAudioSource();
     mSrc.SystemMode = SystemMode.OptibeamArrayOnly;
     mThread = new Thread(new ThreadStart(KinectThreadProc));
     mRunningEvent = new ManualResetEvent(false);
     mRunning = false;
 }
Code Example #9
File: Program.cs Project: thoschmidt/shoopdoup
        static void Main(string[] args)
        {                    
            using (var source = new KinectAudioSource())
            {
                source.FeatureMode = true;
                source.AutomaticGainControl = false; //Important to turn this off for speech recognition
                source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

                RecognizerInfo ri = GetKinectRecognizer();

                if (ri == null)
                {
                    Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                    return;
                }

                Console.WriteLine("Using: {0}", ri.Name);

                using (var sre = new SpeechRecognitionEngine(ri.Id))
                {                
                    var colors = new Choices();
                    colors.Add("red");
                    colors.Add("green");
                    colors.Add("blue");

                    var gb = new GrammarBuilder();
                    //Specify the culture to match the recognizer in case we are running in a different culture.                                 
                    gb.Culture = ri.Culture;
                    gb.Append(colors);
                    

                    // Create the actual Grammar instance, and then load it into the speech recognizer.
                    var g = new Grammar(gb);                    

                    sre.LoadGrammar(g);
                    sre.SpeechRecognized += SreSpeechRecognized;
                    sre.SpeechHypothesized += SreSpeechHypothesized;
                    sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                    using (Stream s = source.Start())
                    {
                        sre.SetInputToAudioStream(s,
                                                  new SpeechAudioFormatInfo(
                                                      EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

						Console.WriteLine("Recognizing. Say: 'red', 'green' or 'blue'. Press ENTER to stop");

                        sre.RecognizeAsync(RecognizeMode.Multiple);
                        Console.ReadLine();
                        Console.WriteLine("Stopping recognizer ...");
                        sre.RecognizeAsyncStop();                       
                    }
                }
            }
        }
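
The SreSpeechRecognized, SreSpeechHypothesized and SreSpeechRecognitionRejected handlers wired up above are not part of this excerpt. A minimal sketch that simply reports results to the console (the 0.7 confidence threshold is an assumption, not taken from the original project) might be:

        private static void SreSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // Only report reasonably confident results
            if (e.Result.Confidence >= 0.7)
                Console.WriteLine("Recognized: {0} (confidence {1:0.00})", e.Result.Text, e.Result.Confidence);
        }

        private static void SreSpeechHypothesized(object sender, SpeechHypothesizedEventArgs e)
        {
            Console.Write("Hypothesized: {0}\r", e.Result.Text);
        }

        private static void SreSpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
        {
            Console.WriteLine("Speech rejected");
        }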
Code Example #10
File: Program.cs Project: thoschmidt/shoopdoup
        static void Main(string[] args)
        {
            var buffer = new byte[4096];
            const int recordTime = 20; //seconds
            const int recordingLength = recordTime * 2 * 16000; //20 seconds, 16 bits (2 bytes) per sample, 16 kHz
            const string outputFileName = "out.wav";
            
            //We need to run in high priority to avoid dropping samples 
            Thread.CurrentThread.Priority = ThreadPriority.Highest;

            //Instantiate the KinectAudioSource to do audio capture
            using (var source = new KinectAudioSource())
            {
                source.SystemMode = SystemMode.OptibeamArrayOnly;

                //Register for beam tracking change notifications
                source.BeamChanged += source_BeamChanged;

                using (var fileStream = new FileStream(outputFileName, FileMode.Create))
                {
                    using (var sampleStream = new StreamWriter(new FileStream("samples.log", FileMode.Create)))
                    {
                        WriteWavHeader(fileStream, recordingLength);

                        Console.WriteLine("Recording for {0} seconds", recordTime);

                        //Start capturing audio                               
                        using (var audioStream = source.Start())
                        {
                            //Simply copy the data from the stream down to the file
                            int count, totalCount = 0;
                            while ((count = audioStream.Read(buffer, 0, buffer.Length)) > 0 && totalCount < recordingLength)
                            {
                                for (int i = 0; i < buffer.Length; i += 2)
                                {
                                    short sample = (short)(buffer[i] | (buffer[i + 1] << 8));
                                    sampleStream.WriteLine(sample);
                                }

                                fileStream.Write(buffer, 0, count);
                                totalCount += count;

                                //If we have high confidence, print the position
                                if (source.SoundSourcePositionConfidence > 0.9)
                                    Console.Write("Sound source position (radians): {0}\t\tBeam: {1}\r", source.SoundSourcePosition, source.MicArrayBeamAngle);
                            }
                        }
                    }
                }

                Console.WriteLine("Recording saved to {0}", outputFileName);
            }
        }
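
The WriteWavHeader helper used above (and again in Code Example #23) is not included in this listing. A minimal sketch that writes the standard 44-byte RIFF/WAVE header for the 16 kHz, 16-bit, mono PCM format used throughout these examples could be:

        private static void WriteWavHeader(Stream stream, int dataLength)
        {
            // Standard 44-byte RIFF/WAVE header: 16 kHz, 16-bit, mono PCM
            const int sampleRate = 16000;
            const short bitsPerSample = 16;
            const short channels = 1;
            const short blockAlign = channels * (bitsPerSample / 8);
            const int byteRate = sampleRate * blockAlign;

            using (var memStream = new MemoryStream(44))
            {
                var writer = new BinaryWriter(memStream);
                writer.Write(Encoding.ASCII.GetBytes("RIFF"));
                writer.Write(36 + dataLength);                  // RIFF chunk size
                writer.Write(Encoding.ASCII.GetBytes("WAVEfmt "));
                writer.Write(16);                               // fmt sub-chunk size
                writer.Write((short)1);                         // PCM
                writer.Write(channels);
                writer.Write(sampleRate);
                writer.Write(byteRate);
                writer.Write(blockAlign);
                writer.Write(bitsPerSample);
                writer.Write(Encoding.ASCII.GetBytes("data"));
                writer.Write(dataLength);                       // data sub-chunk size
                stream.Write(memStream.GetBuffer(), 0, (int)memStream.Length);
            }
        }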
Code Example #11
        public override void ConnectedToMaster()
        {
            Console.WriteLine("Starting...");
            sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();
            if (sensor == null)
            {
                Console.WriteLine("\n Kinect sensor not detected !\n");
                return;
            }

            source = sensor.AudioSource;
            source.AutomaticGainControlEnabled = true;

            try { sensor.Start(); }
            catch (Exception)
            {
                Console.WriteLine("Sensor cannot Start! \n");
                return;
            }

            source.Start();
            started = true;
            source.SoundSourceAngleChanged += delegate
            {
                if (!IsConnected) return;
                bool send = false;
                
                if (source.SoundSourceAngleConfidence > confidenceThreshold)
                {
                    if (SoundDirectionMutex.WaitOne(0))
                    {
                        if ((Math.Abs(lastSpeechDirection - source.SoundSourceAngle) < repeatedDirectionAngle))
                        {
                            if ((System.DateTime.Now - lastSpeechDirectionTime).TotalSeconds > repeatedDirectionInterval) send = true;
                        }
                        else
                        {
                            send = true;
                        }

                        if (send)
                        {
                            Console.WriteLine("SoundLocated{ angle: " + source.SoundSourceAngle + "; confidence: " + source.SoundSourceAngleConfidence);
                            kinectPublisher.SoundDirectionLocated(source.SoundSourceAngle, source.SoundSourceAngleConfidence);
                            lastSpeechDirectionTime = DateTime.Now;
                            lastSpeechDirection = source.SoundSourceAngle;
                        }
                        SoundDirectionMutex.ReleaseMutex();
                    }
                }
            };
            return;
        }
Code Example #12
File: SpeechRecognizer.cs Project: ardakara/Youmote
        public void Start(KinectAudioSource kinectSource)
        {
            this.CheckDisposed();

            this.kinectAudioSource = kinectSource;
            this.kinectAudioSource.AutomaticGainControlEnabled = false;
            this.kinectAudioSource.BeamAngleMode = BeamAngleMode.Adaptive;
            var kinectStream = this.kinectAudioSource.Start();
            this.sre.SetInputToAudioStream(
                kinectStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
            this.sre.RecognizeAsync(RecognizeMode.Multiple);
        }
Code Example #13
 private void Initialize()
 {
     if (_kinectSensor == null)
         return;
     _speechRecognizer = CreateSpeechRecognizer();
     _speechRecognizer.SetInputToDefaultAudioDevice();
     _speechRecognizer.RecognizeAsync(RecognizeMode.Multiple);
     _kinectSensor.AllFramesReady += KinectSensorAllFramesReady;
     _kinectSensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
     _kinectSensor.Start();
     _kinectAudioSource = _kinectSensor.AudioSource;
     _kinectAudioSource.Start();
     Message = "Kinect connected";
 }
Code Example #14
        static void Main(string[] args)
        {
            KinectSensor sensor = KinectSensor.KinectSensors[0];
            sensor.Start();

            kinectaudiosource = sensor.AudioSource;
            SoundTracking();

            Console.WriteLine("請按下空白建結束");
            while ( Console.ReadKey().Key != ConsoleKey.Spacebar)
            {
            }

            sensor.Stop();
        }
Code Example #15
File: SpeechRec.cs Project: thefoofighter/KinectTest
        public void initSpeech()
        {
            kinectSource = new KinectAudioSource();
            kinectSource.FeatureMode = true;
            kinectSource.AutomaticGainControl = false;
            kinectSource.SystemMode = SystemMode.OptibeamArrayOnly;

            var rec = (from r in SpeechRecognitionEngine.InstalledRecognizers() where r.Id == RecognizerId select r).FirstOrDefault();

            speechEngine = new SpeechRecognitionEngine(rec.Id);

            var choices = new Choices();
            choices.Add("select scalpal");
            choices.Add("select syringe");
            choices.Add("select suction");
            choices.Add("select hand");
            choices.Add("nurse scalpal");
            choices.Add("nurse syringe");
            choices.Add("nurse suction");
            choices.Add("nurse hand");
            choices.Add("show console");
            choices.Add("hide console");
            choices.Add("begin incision");
            choices.Add("end incision");

            choices.Add("inject");
            GrammarBuilder gb = new GrammarBuilder();
            gb.Culture = rec.Culture;
            gb.Append(choices);

            var g = new Grammar(gb);

            speechEngine.LoadGrammar(g);
            speechEngine.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(sre_SpeechHypothesized);
            speechEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
            speechEngine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(sre_SpeechRecognitionRejected);

            Console.WriteLine("Recognizing Speech");

            stream = kinectSource.Start();

            speechEngine.SetInputToAudioStream(stream,
                          new SpeechAudioFormatInfo(
                              EncodingFormat.Pcm, 16000, 16, 1,
                              32000, 2, null));

            speechEngine.RecognizeAsync(RecognizeMode.Multiple);
        }
Code Example #16
        public void Initialize()
        {
            if(initialized)
            {
                return;
            }

            KinectAudioSource audioSource = new KinectAudioSource(); // test Kinect

            ri = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RECOGNIZER_ID).FirstOrDefault();

            if (ri == null)
            {
                throw new Exception("Could not find speech recognizer " + RECOGNIZER_ID + ".");
            }

            initialized = true;
        }
Code Example #17
File: Speech.cs Project: NUIC/2-Kinects-1-Machine
        RecognizerInfo InitializeKinectAudio()
        {
            Console.Write("Getting audio source from Kinect...");
            source = new KinectAudioSource();

            source.FeatureMode = true;
            source.AutomaticGainControl = false; //Important to turn this off for speech recognition
            source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

            RecognizerInfo ri = GetKinectRecognizer();

            if (ri == null)
            {
                Console.WriteLine("Could not find Kinect speech recognizer. Please refer to the sample requirements.");
                return null;
            }

            Console.WriteLine("Audio source created");
            return ri;
        }
Code Example #18
        static void Main( string[] args )
        {
            try {
                using ( var source = new KinectAudioSource() ) {
                    source.FeatureMode = true;
                    source.AutomaticGainControl = false; //Important to turn this off for speech recognition
                    source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample

                    var colors = new Choices();
                    colors.Add( "red" );
                    colors.Add( "green" );
                    colors.Add( "blue" );
                    colors.Add( "end" );
                    colors.Add( "赤" );
                    colors.Add( "ミドリ" );
                    colors.Add( "あお" );

                    Recognizer r = new Recognizer( "ja-JP", colors );
                    r.SpeechRecognized += SreSpeechRecognized;
                    r.SpeechHypothesized += SreSpeechHypothesized;
                    r.SpeechRecognitionRejected += SreSpeechRecognitionRejected;
                    Console.WriteLine( "Using: {0}", r.Name );

                    using ( Stream s = source.Start() ) {
                        r.SetInputToAudioStream( s, new SpeechAudioFormatInfo(
                                                        EncodingFormat.Pcm, 16000, 16, 1,
                                                        32000, 2, null ) );

                        Console.WriteLine( "Recognizing. Say: 'red', 'green' or 'blue'. Press ENTER to stop" );

                        r.RecognizeAsync( RecognizeMode.Multiple );
                        Console.ReadLine();
                        Console.WriteLine( "Stopping recognizer ..." );
                        r.RecognizeAsyncStop();
                    }
                }
            }
            catch ( Exception ex ) {
                Console.WriteLine( ex.Message );
            }
        }
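
The Recognizer class used above is a project-specific wrapper that does not appear in this listing. A hypothetical sketch that matches the usage here (pick an installed recognizer by culture name, load the given Choices as a grammar, and forward the engine's members) could look like this:

        class Recognizer
        {
            private readonly SpeechRecognitionEngine engine;

            public Recognizer(string cultureName, Choices choices)
            {
                // Pick the first installed recognizer for the requested culture (e.g. "ja-JP")
                RecognizerInfo info = SpeechRecognitionEngine.InstalledRecognizers()
                    .FirstOrDefault(r => r.Culture.Name.Equals(cultureName, StringComparison.InvariantCultureIgnoreCase));
                if (info == null)
                    throw new InvalidOperationException("No recognizer installed for culture " + cultureName);

                Name = info.Name;
                engine = new SpeechRecognitionEngine(info.Id);

                var gb = new GrammarBuilder { Culture = info.Culture };
                gb.Append(choices);
                engine.LoadGrammar(new Grammar(gb));
            }

            public string Name { get; private set; }

            // Forward the engine's recognition events
            public event EventHandler<SpeechRecognizedEventArgs> SpeechRecognized
            {
                add { engine.SpeechRecognized += value; }
                remove { engine.SpeechRecognized -= value; }
            }

            public event EventHandler<SpeechHypothesizedEventArgs> SpeechHypothesized
            {
                add { engine.SpeechHypothesized += value; }
                remove { engine.SpeechHypothesized -= value; }
            }

            public event EventHandler<SpeechRecognitionRejectedEventArgs> SpeechRecognitionRejected
            {
                add { engine.SpeechRecognitionRejected += value; }
                remove { engine.SpeechRecognitionRejected -= value; }
            }

            public void SetInputToAudioStream(Stream stream, SpeechAudioFormatInfo format)
            {
                engine.SetInputToAudioStream(stream, format);
            }

            public void RecognizeAsync(RecognizeMode mode) { engine.RecognizeAsync(mode); }

            public void RecognizeAsyncStop() { engine.RecognizeAsyncStop(); }
        }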
Code Example #19
        public void StartAudioStream()
        {
            try
            {
                kinectAudio = kinect.AudioSource;
                kinectAudio.BeamAngleMode = BeamAngleMode.Manual;
                kinectAudio.ManualBeamAngle = Math.PI / 180.0 * 10.0; //angle in radians
                //kinectAudio.BeamAngleMode = BeamAngleMode.Adaptive;
                kinect.AudioSource.EchoCancellationMode = EchoCancellationMode.None;
                kinect.AudioSource.AutomaticGainControlEnabled = false;
                var stream = kinectAudio.Start();
                speechRecognitionEngine.SetInputToAudioStream(stream,
                                                              new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1,
                                                                                        32000, 2, null));
                speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch
            {

            }
        }
Code Example #20
        void Record()
        {
            using (KinectAudioSource source = new KinectAudioSource
            {
                FeatureMode = true,
                AutomaticGainControl = false,
                SystemMode = SystemMode.OptibeamArrayOnly
            })
            {
                RecognizerInfo recognizerInfo = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();

                if (recognizerInfo == null)
                    return;

                SpeechRecognitionEngine speechRecognitionEngine = new SpeechRecognitionEngine(recognizerInfo.Id);

                var gb = new GrammarBuilder {Culture = recognizerInfo.Culture};
                gb.Append(choices);

                var grammar = new Grammar(gb);

                speechRecognitionEngine.LoadGrammar(grammar);
                using (Stream sourceStream = source.Start())
                {
                    speechRecognitionEngine.SetInputToAudioStream(sourceStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));

                    isRunning = true;
                    while (isRunning)
                    {
                        RecognitionResult result = speechRecognitionEngine.Recognize();

                        if (result != null && OrderDetected != null && result.Confidence > 0.7)
                            OrderDetected(result.Text);
                    }
                }
            }
        }
Code Example #21
        static void Main( string[] args )
        {
            // Create the instance that captures the audio source
            using ( KinectAudioSource source = new KinectAudioSource() ) {
                // SingleChannelAec: single-channel microphone with echo cancellation
                // OptibeamArrayOnly: multi-channel microphone array only (no echo cancellation)
                // OptibeamArrayAndAec: multi-channel microphone array with echo cancellation
                // SingleChannelNsAgc: single-channel microphone only (no echo cancellation)
                source.SystemMode = SystemMode.SingleChannelAec;

                using ( Stream audioStream = source.Start() ) {
                    Console.WriteLine( "Start... Press any key" );

                    byte[] buffer = new byte[4096];
                    Win32.StreamingWavePlayer player = new Win32.StreamingWavePlayer( 16000, 16, 1, 100 );
                    while ( !Console.KeyAvailable ) {
                        var a = audioStream.Position;
                        var b = audioStream.Seek( 0, SeekOrigin.Current );
                        int count = audioStream.Read( buffer, 0, buffer.Length );
                        player.Output( buffer );
                    }
                }
            }
        }
Code Example #22
File: MainWindow.xaml.cs Project: KinAudio/Master
        protected void MainWindow_Loaded(object sender, RoutedEventArgs e)
        {

            // Set and start the first connected sensor
            this.StartSensor();
            
            if (this.sensor != null)
            {
             
                // Set the audio source
                this.audioSource = this.sensor.AudioSource;

                // Notify status changes
                KinectSensor.KinectSensors.StatusChanged += new System.EventHandler<StatusChangedEventArgs>(KinectSensors_StatusChanged);

                // Turn the streams on
                this.sensor.ColorStream.Enable();
                this.sensor.DepthStream.Enable();
                this.sensor.SkeletonStream.Enable();
                
                // Set the information via the MainWindowViewModel
                this.SetKinectInfo();
            }
        }
Code Example #23
    public static void WriteWavFile(KinectAudioSource source, FileStream fileStream, Stream audioStream)
    {
        var size = 0;
        //write wav header placeholder
        WriteWavHeader(fileStream, size);
        //using (var audioStream = source.Start())

        {
            //chunk audio stream to file, writing only the bytes actually read on each pass
            int count;
            while ((count = audioStream.Read(buffer, 0, buffer.Length)) > 0 && _isRecording)
            {
                fileStream.Write(buffer, 0, count);
                size += count;
            }
        }

        //write real wav header
        long prePosition = fileStream.Position;
        fileStream.Seek(0, SeekOrigin.Begin);
        WriteWavHeader(fileStream, size);
        fileStream.Seek(prePosition, SeekOrigin.Begin);
        fileStream.Flush();
    }
Code Example #24
        private void LoadContent()
        {
            kinectDevice = new Runtime();
            kinectDevice.Initialize(RuntimeOptions.UseDepthAndPlayerIndex | RuntimeOptions.UseSkeletalTracking | RuntimeOptions.UseColor);

            kinectDevice.SkeletonEngine.TransformSmooth = true;
            kinectDevice.VideoStream.Open(ImageStreamType.Video, 2, ImageResolution.Resolution640x480, ImageType.Color);
            kinectDevice.DepthStream.Open(ImageStreamType.Depth, 2, ImageResolution.Resolution320x240, ImageType.DepthAndPlayerIndex);

            kinectDevice.SkeletonFrameReady += new EventHandler<SkeletonFrameReadyEventArgs>(kinectDevice_SkeletonFrameReady);
            kinectDevice.VideoFrameReady += new EventHandler<ImageFrameReadyEventArgs>(kinectDevice_VideoFrameReady);            

            kinectAudio = new KinectAudioSource();

            kinectAudio.FeatureMode = true;
            kinectAudio.AutomaticGainControl = false;
            kinectAudio.SystemMode = SystemMode.OptibeamArrayOnly;

            ri = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();
            sre = new SpeechRecognitionEngine(ri.Id);
            audioChoices = new Choices();
            audioChoices.Add("stop");
            audioChoices.Add("start");
            audioChoices.Add("kinect shutdown");
            audioChoices.Add("reset time");
            audioChoices.Add("spree");
            audioChoices.Add("reset hand");
            audioChoices.Add("faster");
            audioChoices.Add("slower");
            grammerBuilder = new GrammarBuilder();
            grammerBuilder.Culture = ri.Culture;
            grammerBuilder.Append(audioChoices);
            grammer = new Grammar(grammerBuilder);

            sre.LoadGrammar(grammer);

            sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);

            sre.SetInputToAudioStream(kinectAudio.Start(), new SpeechAudioFormatInfo( EncodingFormat.Pcm, 16000,16,1,32000,2, null));
            sre.RecognizeAsync(RecognizeMode.Multiple);

            player = new NinjaPlayer(this);

            backGround = Content.Load<Texture2D>("wood_paneling");
            font = Content.Load<SpriteFont>("font");

            sound = new SoundPlayer();
        }
Code Example #25
File: MainWindow.xaml.cs Project: TheIronMarx/ARMS
        private void startAudioListening()
        {
            audioSource = new KinectAudioSource();
            audioSource.FeatureMode = true;
            audioSource.AutomaticGainControl = true;
            audioSource.SystemMode = SystemMode.OptibeamArrayOnly;

            Stream aStream = audioSource.Start();
            sre.SetInputToAudioStream(aStream,
                                        new SpeechAudioFormatInfo(
                                            EncodingFormat.Pcm, 16000, 16, 1,
                                            32000, 2, null));

            sre.RecognizeAsync(RecognizeMode.Multiple);
        }
Code Example #26
File: Speech.cs Project: rravisrinivas/KNUI-Maps
        public static void listen()
        {
            System.Windows.MessageBox.Show("Listening");
            System.Windows.MessageBox.Show(Thread.CurrentThread.GetApartmentState().ToString());

            using (var source = new KinectAudioSource())
            {
                source.FeatureMode = true;
                source.AutomaticGainControl = false; //Important to turn this off for speech recognition
                source.SystemMode = SystemMode.OptibeamArrayOnly; //No AEC for this sample
                //onMessage("Constructor is doing good");
                RecognizerInfo ri = SpeechRecognitionEngine.InstalledRecognizers().Where(r => r.Id == RecognizerId).FirstOrDefault();

                if (ri == null)
                {
                    //Console.WriteLine("Could not find speech recognizer: {0}. Please refer to the sample requirements.", RecognizerId);
                    //onMessage("Could not find speech recognizer: {0}. Please refer to the sample requirements." + RecognizerId);
                    System.Windows.MessageBox.Show("RI is null");
                    return;
                }

                //onMessage("Using: {0}" + ri.Name);
                using (var sre = new SpeechRecognitionEngine(ri.Id))
                {
                    sre_speech = sre;
                    var choices = new Choices();
                    choices.Add("microsoft");
                    choices.Add("google");
                    choices.Add("facebook");
                    choices.Add("bellevue");

                    var gb = new GrammarBuilder();
                    //Specify the culture to match the recognizer in case we are running in a different culture.
                    gb.Culture = ri.Culture;
                    gb.Append(choices);

                    // Create the actual Grammar instance, and then load it into the speech recognizer.
                    var g = new Grammar(gb);

                    sre.LoadGrammar(g);
                    sre.SpeechRecognized += SreSpeechRecognized;
                    sre.SpeechHypothesized += SreSpeechHypothesized;
                    sre.SpeechRecognitionRejected += SreSpeechRecognitionRejected;

                    using (Stream s = source.Start())
                    {
                        sre.SetInputToAudioStream(s,
                                                  new SpeechAudioFormatInfo(
                                                      EncodingFormat.Pcm, 16000, 16, 1,
                                                      32000, 2, null));

                        //Console.WriteLine("Recognizing. Say: 'red', 'green' or 'blue'. Press ENTER to stop");
                       sre.RecognizeAsync(RecognizeMode.Multiple);
                        //Console.ReadLine();
                        //Console.WriteLine("Stopping recognizer ...");
                        //sre.RecognizeAsyncStop();
                    }

                }
            }
        }
Code Example #27
File: Engine.cs Project: KayoticSully/SpeakEasy
        /// \brief Gets the Kinect sensor and initializes it
        /// 
        /// \return boolean. True if the sensor is initialized, false otherwise.
        private bool sensorInit()
        {
            // Obtain a KinectSensor if any are available
            sensor = (from sensorToCheck in KinectSensor.KinectSensors where sensorToCheck.Status == KinectStatus.Connected select sensorToCheck).FirstOrDefault();
            if (sensor == null)
            {
                Console.WriteLine("Could not connect to Kinect.");
                return false;
            }

            Console.Write("Sensor Starting ... ");
            sensor.Start();
            Console.WriteLine("Sensor Ready");

            source = sensor.AudioSource; // Obtain the KinectAudioSource to do audio capture and set options
            source.EchoCancellationMode = EchoCancellationMode.CancellationAndSuppression; // Enable echo cancellation and suppression
            source.AutomaticGainControlEnabled = false; // Important to turn this off for speech recognition

            recogInfo = GetKinectRecognizer();

            if (recogInfo == null)
            {
                Console.WriteLine("Could not find Kinect speech recognizer.");
                return false;
            }

            Console.WriteLine("Using: {0}", recogInfo.Name);

            // NOTE: Need to wait 4 seconds for device to be ready right after initialization
            int wait = 4;
            while (wait > 0)
            {
                Console.Write("Device will be ready for speech recognition in {0} second(s).\r", wait--);
                Thread.Sleep(1000);
            }

            Console.WriteLine(); // clear line
            return true;
        }
Code Example #28
        /// <summary>
        /// Starts speech recognition using audio stream from specified KinectAudioSource.
        /// </summary>
        /// <param name="audioSource">
        /// Audio source to use as input to speech recognizer.
        /// </param>
        public System.IO.Stream Start(KinectAudioSource audioSource)
        {
            if (null == audioSource)
            {
                return null;
            }
            this.kinectAudioSource = audioSource;
            this.kinectAudioSource.AutomaticGainControlEnabled = false;
            this.kinectAudioSource.NoiseSuppression = true;
            this.kinectAudioSource.BeamAngleMode = BeamAngleMode.Adaptive;

            this.kinectAudioSource.SoundSourceAngleChanged += this.SoundSourceChanged;
            this.speechEngine.SpeechRecognized += this.SreSpeechRecognized;
            this.speechEngine.SpeechRecognitionRejected += this.SreSpeechRecognitionRejected;

            var kinectStream = this.kinectAudioSource.Start();
            this.speechEngine.SetInputToAudioStream(
                kinectStream, new SpeechAudioFormatInfo(AudioFormat, AudioSamplesPerSecond, AudioBitsPerSample, AudioChannels, AudioAverageBytesPerSecond, AudioBlockAlign, null));
            this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
            return kinectStream;
        }
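
The AudioFormat, AudioSamplesPerSecond, AudioBitsPerSample, AudioChannels, AudioAverageBytesPerSecond and AudioBlockAlign constants referenced above are not defined in this excerpt. Assuming the same 16 kHz, 16-bit, mono PCM format the other examples pass explicitly, they might be declared as:

        // Assumed declarations matching SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null)
        private const EncodingFormat AudioFormat = EncodingFormat.Pcm;
        private const int AudioSamplesPerSecond = 16000;
        private const int AudioBitsPerSample = 16;
        private const int AudioChannels = 1;
        private const int AudioAverageBytesPerSecond = 32000;
        private const int AudioBlockAlign = 2;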
Code Example #29
        void InitializeSpeech()
        {
            try
            {
                kinectAudioSource = new KinectAudioSource();
                kinectAudioSource.SystemMode = SystemMode.OptibeamArrayOnly;
                kinectAudioSource.FeatureMode = true;
                kinectAudioSource.AutomaticGainControl = false;
                kinectAudioSource.NoiseSuppression = true;
                kinectAudioSource.MicArrayMode = MicArrayMode.MicArrayAdaptiveBeam;
                var kinectStream = kinectAudioSource.Start();

                speechRecognitionEngine.SetInputToAudioStream(kinectStream, new SpeechAudioFormatInfo(
                                                               EncodingFormat.Pcm, 16000, 16, 1,
                                                               32000, 2, null));
                speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);
            }
            catch (InvalidOperationException)
            {
                // kinect unplugged or something. s'okay
            }
        }
Code Example #30
        public void Cleanup()
        {
            if (speechRecognitionEngine != null)
            {
                speechRecognitionEngine.UnloadAllGrammars();
                speechRecognitionEngine.Dispose();
                speechRecognitionEngine = null;
            }

            if (kinectAudioSource != null)
            {
                kinectAudioSource.Stop();
                kinectAudioSource.Dispose();
                kinectAudioSource = null;
            }
        }
Code Example #31
        static void Main(string[] args)
        {
            try {
                // Check whether a Kinect is connected
                if (KinectSensor.KinectSensors.Count == 0)
                {
                    throw new Exception("Kinectを接続してください");
                }

                // List the installed recognizers and get the one to use
                ShowRecognizer();
                //RecognizerInfo info = GetRecognizer( "en-US" );
                RecognizerInfo info = GetRecognizer("ja-JP");
                Console.WriteLine("Using: {0}", info.Name);

                // Register the words to be recognized
                Choices colors = new Choices();
                colors.Add("red");
                colors.Add("green");
                colors.Add("blue");
                colors.Add("赤");
                colors.Add("ミドリ");
                colors.Add("あお");

                // Configure the grammar
                GrammarBuilder builder = new GrammarBuilder();
                builder.Culture = info.Culture;
                builder.Append(colors);
                Grammar grammar = new Grammar(builder);

                // Configure the recognition engine and register the handler called when a word is recognized
                SpeechRecognitionEngine engine = new SpeechRecognitionEngine(info.Id);
                engine.LoadGrammar(grammar);
                engine.SpeechRecognized +=
                    new EventHandler <SpeechRecognizedEventArgs>(engine_SpeechRecognized);

                // Start the Kinect sensor
                KinectSensor kinect = KinectSensor.KinectSensors[0];
                kinect.Start();

                // Get the audio interface and start it
                KinectAudioSource audio = kinect.AudioSource;
                using (Stream s = audio.Start()) {
                    // Feed the audio stream into the recognition engine
                    engine.SetInputToAudioStream(s, new SpeechAudioFormatInfo(
                                                     EncodingFormat.Pcm, 16000, 16, 1,
                                                     32000, 2, null));

                    Console.WriteLine("Recognizing. Press ENTER to stop");

                    // Start speech recognition asynchronously
                    engine.RecognizeAsync(RecognizeMode.Multiple);
                    Console.ReadLine();
                    Console.WriteLine("Stopping recognizer ...");

                    // Stop speech recognition
                    engine.RecognizeAsyncStop();
                }
            }
            catch (Exception ex) {
                Console.WriteLine(ex.Message);
            }
        }
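
The ShowRecognizer and GetRecognizer helpers called above are not part of this excerpt. A minimal sketch that lists the installed recognizers and picks one by culture name could be:

        // List the installed recognizers (culture and display name)
        static void ShowRecognizer()
        {
            foreach (RecognizerInfo info in SpeechRecognitionEngine.InstalledRecognizers())
            {
                Console.WriteLine("{0} : {1}", info.Culture.Name, info.Name);
            }
        }

        // Return the first recognizer whose culture matches the given name (e.g. "ja-JP")
        static RecognizerInfo GetRecognizer(string cultureName)
        {
            return SpeechRecognitionEngine.InstalledRecognizers()
                .FirstOrDefault(r => r.Culture.Name.Equals(cultureName, StringComparison.InvariantCultureIgnoreCase));
        }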