static void RunGame()
{
    // init picovoice platform
    string keywordPath = $"pico_chess_{_platform}.ppn";
    string contextPath = $"chess_{_platform}.rhn";
    using Picovoice picovoice = new Picovoice(keywordPath, WakeWordCallback, contextPath, InferenceCallback);

    DrawBoard("\n");

    // create and start recording
    short[] recordingBuffer = new short[picovoice.FrameLength];
    ALCaptureDevice captureDevice = ALC.CaptureOpenDevice(null, picovoice.SampleRate, ALFormat.Mono16, picovoice.FrameLength * 2);
    {
        ALC.CaptureStart(captureDevice);
        while (!_quitGame)
        {
            int samplesAvailable = ALC.GetAvailableSamples(captureDevice);
            if (samplesAvailable > picovoice.FrameLength)
            {
                ALC.CaptureSamples(captureDevice, ref recordingBuffer[0], picovoice.FrameLength);
                picovoice.Process(recordingBuffer);
            }
            Thread.Yield();
        }

        // stop and clean up resources
        Console.WriteLine("Bye!");
        ALC.CaptureStop(captureDevice);
        ALC.CaptureCloseDevice(captureDevice);
    }
}
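RunGame wires two handlers, WakeWordCallback and InferenceCallback, into the Picovoice platform, but their bodies are not shown above. A minimal sketch of what they could look like, assuming the SDK invokes the wake-word handler with no arguments and the inference handler with an `Inference` exposing `IsUnderstood`, `Intent`, and `Slots`; the "quit" and "move" intents and the move handling are hypothetical placeholders for the demo's own game logic:

// Hypothetical handlers for the chess demo above (not part of the original listing).
// Assumes Picovoice invokes WakeWordCallback when the wake word is heard and
// InferenceCallback with the parsed intent once the follow-on command ends.
static void WakeWordCallback()
{
    Console.WriteLine("Listening for a chess command...");
}

static void InferenceCallback(Inference inference)
{
    if (!inference.IsUnderstood)
    {
        Console.WriteLine("Didn't understand the move.");
        return;
    }

    if (inference.Intent == "quit")           // hypothetical intent name
    {
        _quitGame = true;                     // ends the capture loop in RunGame
    }
    else if (inference.Intent == "move")      // hypothetical intent name
    {
        // e.g. slots like { piece : 'knight', square : 'f3' } - apply them to the board here
        Console.WriteLine($"Move: {string.Join(", ", inference.Slots)}");
    }
}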
internal void PlatformStop()
{
    if (State == MicrophoneState.Started)
    {
        ALC.CaptureStop(_captureDevice);
        CheckALCError("Failed to stop capture.");
        Update(); // to ensure that BufferReady doesn't get invoked after Stop()
        ALC.CaptureCloseDevice(_captureDevice);
        CheckALCError("Failed to close capture device.");
        _captureDevice = IntPtr.Zero;
    }
    State = MicrophoneState.Stopped;
}
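CheckALCError is referenced here but not defined. A minimal sketch, assuming the OpenAL binding exposes an ALC error query for the capture device; the exact entry point and enum names vary by wrapper, so treat this as illustrative only:

// Hypothetical error-check helper (not part of the original listing).
// Assumes the binding exposes ALC.GetError for the capture device and an AlcError enum;
// adapt to whatever error API your OpenAL wrapper actually provides.
private void CheckALCError(string operation)
{
    AlcError error = ALC.GetError(_captureDevice);
    if (error != AlcError.NoError)
    {
        throw new InvalidOperationException($"{operation} OpenAL error: {error}");
    }
}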
private void DoRecording()
{
    int sampleRate = 44100;
    using MemoryStream stream = new MemoryStream();
    using BinaryWriter writer = new BinaryWriter(stream);
    short[] recording = new short[1024];
    int numSamples = 0;

    Thread.Sleep(200);
    ALCaptureDevice captureDevice = ALC.CaptureOpenDevice(null, sampleRate, ALFormat.Mono16, 1024);
    {
        ALC.CaptureStart(captureDevice);
        while (_record)
        {
            int current = 0;
            while (current < recording.Length)
            {
                int samplesAvailable = ALC.GetAvailableSamples(captureDevice);
                if (samplesAvailable > 512)
                {
                    int samplesToRead = Math.Min(samplesAvailable, recording.Length - current);
                    ALC.CaptureSamples(captureDevice, ref recording[current], samplesToRead);
                    current += samplesToRead;
                }
                Thread.Yield();
            }

            byte[] result = new byte[current * sizeof(short)];
            Buffer.BlockCopy(recording, 0, result, 0, result.Length);
            writer.Write(result);
            numSamples += current;
        }
        ALC.CaptureStop(captureDevice);
        ALC.CaptureCloseDevice(captureDevice);
    }
    writer.Flush();
    stream.Flush();

    WriteDataToFile(stream, numSamples, sampleRate);
}
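DoRecording hands the raw PCM off to WriteDataToFile, which is not shown. A minimal sketch, assuming 16-bit mono PCM and a standard 44-byte RIFF/WAVE header; the output path is a hypothetical placeholder:

// Hypothetical WAV writer for the captured samples (not part of the original listing).
// Prepends a standard 44-byte RIFF/WAVE header to the raw 16-bit mono PCM data.
private void WriteDataToFile(MemoryStream stream, int numSamples, int sampleRate)
{
    const short channels = 1;
    const short bitsPerSample = 16;
    int dataSize = numSamples * (bitsPerSample / 8);

    using FileStream file = File.Create("recording.wav");      // placeholder path
    using BinaryWriter writer = new BinaryWriter(file);

    writer.Write(Encoding.ASCII.GetBytes("RIFF"));
    writer.Write(36 + dataSize);
    writer.Write(Encoding.ASCII.GetBytes("WAVE"));
    writer.Write(Encoding.ASCII.GetBytes("fmt "));
    writer.Write(16);                                           // PCM fmt chunk size
    writer.Write((short)1);                                     // PCM format tag
    writer.Write(channels);
    writer.Write(sampleRate);
    writer.Write(sampleRate * channels * (bitsPerSample / 8));  // byte rate
    writer.Write((short)(channels * (bitsPerSample / 8)));      // block align
    writer.Write(bitsPerSample);
    writer.Write(Encoding.ASCII.GetBytes("data"));
    writer.Write(dataSize);
    writer.Write(stream.ToArray());                             // raw PCM captured above
}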
/// <summary>
/// Creates an input audio stream, instantiates an instance of Rhino object, and infers the intent from spoken commands.
/// </summary>
/// <param name="contextPath">
/// Absolute path to file containing context model (file with `.rhn` extension). A context represents the set of
/// expressions (spoken commands), intents, and intent arguments (slots) within a domain of interest.
/// </param>
/// <param name="modelPath">Absolute path to the file containing model parameters. If not set it will be set to the default location.</param>
/// <param name="sensitivity">
/// Inference sensitivity. It should be a number within [0, 1]. A higher sensitivity value results in
/// fewer misses at the cost of (potentially) increasing the erroneous inference rate. If not set, the default value of 0.5 will be used.
/// </param>
/// <param name="audioDeviceIndex">Optional argument. If provided, audio is recorded from this input device. Otherwise, the default audio input device is used.</param>
/// <param name="outputPath">Optional argument. If provided, recorded audio will be stored in this location at the end of the run.</param>
public static void RunDemo(string contextPath, string modelPath, float sensitivity, int? audioDeviceIndex = null, string outputPath = null)
{
    Rhino rhino = null;
    BinaryWriter outputFileWriter = null;
    int totalSamplesWritten = 0;
    try
    {
        // init rhino speech-to-intent engine
        rhino = Rhino.Create(contextPath, modelPath, sensitivity);

        // open stream to output file
        if (!string.IsNullOrWhiteSpace(outputPath))
        {
            outputFileWriter = new BinaryWriter(new FileStream(outputPath, FileMode.OpenOrCreate, FileAccess.Write));
            WriteWavHeader(outputFileWriter, 1, 16, 16000, 0);
        }

        // choose audio device
        string deviceName = null;
        if (audioDeviceIndex != null)
        {
            List<string> captureDeviceList = ALC.GetStringList(GetEnumerationStringList.CaptureDeviceSpecifier).ToList();
            if (captureDeviceList != null && audioDeviceIndex.Value < captureDeviceList.Count)
            {
                deviceName = captureDeviceList[audioDeviceIndex.Value];
            }
            else
            {
                throw new ArgumentException("No input device found with the specified index. Use --show_audio_devices to show " +
                                            "available inputs", "--audio_device_index");
            }
        }

        Console.WriteLine(rhino.ContextInfo);
        Console.WriteLine("Listening...\n");

        // create and start recording
        short[] recordingBuffer = new short[rhino.FrameLength];
        ALCaptureDevice captureDevice = ALC.CaptureOpenDevice(deviceName, 16000, ALFormat.Mono16, rhino.FrameLength * 2);
        {
            ALC.CaptureStart(captureDevice);
            while (!Console.KeyAvailable)
            {
                int samplesAvailable = ALC.GetAvailableSamples(captureDevice);
                if (samplesAvailable > rhino.FrameLength)
                {
                    ALC.CaptureSamples(captureDevice, ref recordingBuffer[0], rhino.FrameLength);
                    bool isFinalized = rhino.Process(recordingBuffer);
                    if (isFinalized)
                    {
                        Inference inference = rhino.GetInference();
                        if (inference.IsUnderstood)
                        {
                            Console.WriteLine("{");
                            Console.WriteLine($"  intent : '{inference.Intent}'");
                            Console.WriteLine("  slots : {");
                            foreach (KeyValuePair<string, string> slot in inference.Slots)
                            {
                                Console.WriteLine($"    {slot.Key} : '{slot.Value}'");
                            }
                            Console.WriteLine("  }");
                            Console.WriteLine("}");
                        }
                        else
                        {
                            Console.WriteLine("Didn't understand the command.");
                        }
                    }

                    if (outputFileWriter != null)
                    {
                        foreach (short sample in recordingBuffer)
                        {
                            outputFileWriter.Write(sample);
                        }
                        totalSamplesWritten += recordingBuffer.Length;
                    }
                }
                Thread.Yield();
            }

            // stop and clean up resources
            Console.WriteLine("Stopping...");
            ALC.CaptureStop(captureDevice);
            ALC.CaptureCloseDevice(captureDevice);
        }
    }
    finally
    {
        if (outputFileWriter != null)
        {
            // write size to header and clean up
            WriteWavHeader(outputFileWriter, 1, 16, 16000, totalSamplesWritten);
            outputFileWriter.Flush();
            outputFileWriter.Dispose();
        }
        rhino?.Dispose();
    }
}
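The error message above points the user at a `--show_audio_devices` flag. A possible helper behind that flag, reusing the same OpenTK enumeration call the demo already makes; the method name is an assumption:

// Hypothetical helper for the --show_audio_devices flag (not part of the original listing).
// Enumerates OpenAL capture devices so the user can pick a valid --audio_device_index.
public static void ShowAudioDevices()
{
    int index = 0;
    foreach (string device in ALC.GetStringList(GetEnumerationStringList.CaptureDeviceSpecifier))
    {
        Console.WriteLine($"index: {index}, device name: {device}");
        index++;
    }
}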
/// <summary>
/// Creates an input audio stream, instantiates an instance of Porcupine object, and monitors the audio stream for
/// occurrences of the wake word(s). It prints the time of detection for each occurrence and the wake word.
/// </summary>
/// <param name="modelPath">Absolute path to the file containing model parameters. If not set it will be set to the default location.</param>
/// <param name="keywordPaths">Absolute paths to keyword model files. If not set it will be populated from `keywords` argument.</param>
/// <param name="keywords">
/// List of keywords (phrases) for detection. The list of available (default) keywords can be retrieved
/// using `Porcupine.KEYWORDS`. If `keywordPaths` is set then this argument will be ignored.
/// </param>
/// <param name="sensitivities">
/// Sensitivities for detecting keywords. Each value should be a number within [0, 1]. A higher sensitivity results in fewer
/// misses at the cost of increasing the false alarm rate. If not set, 0.5 will be used.
/// </param>
/// <param name="audioDeviceIndex">Optional argument. If provided, audio is recorded from this input device. Otherwise, the default audio input device is used.</param>
/// <param name="outputPath">Optional argument. If provided, recorded audio will be stored in this location at the end of the run.</param>
public static void RunDemo(string modelPath, List<string> keywordPaths, List<string> keywords, List<float> sensitivities, int? audioDeviceIndex = null, string outputPath = null)
{
    Porcupine porcupine = null;
    BinaryWriter outputFileWriter = null;
    int totalSamplesWritten = 0;
    try
    {
        // init porcupine wake word engine
        porcupine = Porcupine.Create(modelPath, keywordPaths, keywords, sensitivities);

        // get keyword names for labeling detection results
        if (keywords == null)
        {
            keywords = keywordPaths.Select(k => Path.GetFileNameWithoutExtension(k).Split("_")[0]).ToList();
        }

        // open stream to output file
        if (!string.IsNullOrWhiteSpace(outputPath))
        {
            outputFileWriter = new BinaryWriter(new FileStream(outputPath, FileMode.OpenOrCreate, FileAccess.Write));
            WriteWavHeader(outputFileWriter, 1, 16, 16000, 0);
        }

        // choose audio device
        string deviceName = null;
        if (audioDeviceIndex != null)
        {
            List<string> captureDeviceList = ALC.GetStringList(GetEnumerationStringList.CaptureDeviceSpecifier).ToList();
            if (captureDeviceList != null && audioDeviceIndex.Value < captureDeviceList.Count)
            {
                deviceName = captureDeviceList[audioDeviceIndex.Value];
            }
            else
            {
                throw new ArgumentException("No input device found with the specified index. Use --show_audio_devices to show " +
                                            "available inputs", "--audio_device_index");
            }
        }

        Console.Write("Listening for {");
        for (int i = 0; i < keywords.Count; i++)
        {
            Console.Write($" {keywords[i]}({sensitivities[i]})");
        }
        Console.Write(" }\n");

        // create and start recording
        short[] recordingBuffer = new short[porcupine.FrameLength];
        ALCaptureDevice captureDevice = ALC.CaptureOpenDevice(deviceName, 16000, ALFormat.Mono16, porcupine.FrameLength * 2);
        {
            ALC.CaptureStart(captureDevice);
            while (!Console.KeyAvailable)
            {
                int samplesAvailable = ALC.GetAvailableSamples(captureDevice);
                if (samplesAvailable > porcupine.FrameLength)
                {
                    ALC.CaptureSamples(captureDevice, ref recordingBuffer[0], porcupine.FrameLength);
                    int result = porcupine.Process(recordingBuffer);
                    if (result >= 0)
                    {
                        Console.WriteLine($"[{DateTime.Now.ToLongTimeString()}] Detected '{keywords[result]}'");
                    }

                    if (outputFileWriter != null)
                    {
                        foreach (short sample in recordingBuffer)
                        {
                            outputFileWriter.Write(sample);
                        }
                        totalSamplesWritten += recordingBuffer.Length;
                    }
                }
                Thread.Yield();
            }

            // stop and clean up resources
            Console.WriteLine("Stopping...");
            ALC.CaptureStop(captureDevice);
            ALC.CaptureCloseDevice(captureDevice);
        }
    }
    finally
    {
        if (outputFileWriter != null)
        {
            // write size to header and clean up
            WriteWavHeader(outputFileWriter, 1, 16, 16000, totalSamplesWritten);
            outputFileWriter.Flush();
            outputFileWriter.Dispose();
        }
        porcupine?.Dispose();
    }
}
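Both demos call WriteWavHeader twice: once with a zero sample count before capture starts, and again from the finally block with the real count, so the helper has to seek back to the start of the file before rewriting the 44-byte header. A minimal sketch of such a helper, assuming standard PCM WAV layout:

// Hypothetical WriteWavHeader implementation (not part of the original listing).
// Seeks to the start of the output file and (re)writes a standard 44-byte PCM WAV header;
// called once with totalSampleCount = 0 and again with the final count on shutdown.
private static void WriteWavHeader(BinaryWriter writer, ushort channelCount, ushort bitDepth, int sampleRate, int totalSampleCount)
{
    if (writer == null)
    {
        return;
    }

    writer.Seek(0, SeekOrigin.Begin);
    writer.Write(Encoding.ASCII.GetBytes("RIFF"));
    writer.Write((bitDepth / 8 * totalSampleCount) + 36);
    writer.Write(Encoding.ASCII.GetBytes("WAVE"));
    writer.Write(Encoding.ASCII.GetBytes("fmt "));
    writer.Write(16);                                        // PCM fmt chunk size
    writer.Write((ushort)1);                                 // PCM format tag
    writer.Write(channelCount);
    writer.Write(sampleRate);
    writer.Write(sampleRate * channelCount * bitDepth / 8);  // byte rate
    writer.Write((ushort)(channelCount * bitDepth / 8));     // block align
    writer.Write(bitDepth);
    writer.Write(Encoding.ASCII.GetBytes("data"));
    writer.Write(bitDepth / 8 * totalSampleCount);
}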