Example #1
        /// <summary>
        /// Lists available audio input devices.
        /// </summary>
        public static void ShowAudioDevices()
        {
            Console.WriteLine("Available audio devices: \n");
            List<string> captureDeviceList = ALC.GetStringList(GetEnumerationStringList.CaptureDeviceSpecifier).ToList();

            for (int i = 0; i < captureDeviceList.Count; i++)
            {
                Console.WriteLine($"\tDevice {i}: {captureDeviceList[i]}");
            }
        }
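The index printed next to each device is what the demos below accept as an audio device index. A minimal, hypothetical usage sketch (the 16 kHz mono format and ring-buffer size are illustrative choices, mirroring the capture code in the later examples):

            // Hypothetical usage: open the capture device at a printed index.
            var captureDevices = ALC.GetStringList(GetEnumerationStringList.CaptureDeviceSpecifier).ToList();
            int deviceIndex    = 0; // e.g. an index shown by ShowAudioDevices()
            ALCaptureDevice capture = ALC.CaptureOpenDevice(captureDevices[deviceIndex], 16000, ALFormat.Mono16, 4096);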
Example #2
    private static string findDeviceName()
    {
        // Start with the default device.
        var deviceName = ALC.GetString(ALDevice.Null, AlcGetString.DefaultDeviceSpecifier);

        // Find all remaining devices. Prefer OpenAL Soft if it exists.
        var devices = ALC.GetStringList(GetEnumerationStringList.DeviceSpecifier);

        foreach (var d in devices)
        {
            if (d.Contains("OpenAL Soft"))
            {
                deviceName = d;
            }
        }

        return deviceName;
    }
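A caller would typically pass the returned name straight to ALC.OpenDevice. A short sketch (the null attribute list mirrors Example #6 below):

        // Sketch: open the preferred device and make a default context current.
        var device  = ALC.OpenDevice(findDeviceName());
        var context = ALC.CreateContext(device, (int[])null);
        ALC.MakeContextCurrent(context);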
Example #3
        public static OpenALHost Open(Vector3 forward, Vector3 up)
        {
            var devices = ALC.GetStringList(GetEnumerationStringList.DeviceSpecifier);

            // Get the default device, then go through all devices and select the OpenAL Soft device if it exists.
            string deviceName = ALC.GetString(ALDevice.Null, AlcGetString.DefaultDeviceSpecifier);

            foreach (var d in devices)
            {
                if (d.Contains("OpenAL Soft"))
                {
                    deviceName = d;
                }
            }

            var device  = ALC.OpenDevice(deviceName);
            var context = ALC.CreateContext(device, new int[] {
                AlSoft.HRTF, AlSoft.Enable
            });

            ALC.MakeContextCurrent(context);

            return new OpenALHost(device, context, forward, up);
        }
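AlSoft is not an OpenTK type; it is presumably a project-local constants class for the ALC_SOFT_HRTF extension. A plausible definition, assuming the attribute values from that extension's spec (note that the ALC spec expects attribute lists to be zero-terminated, so depending on the binding a trailing 0 may also be needed):

        // Assumed constants for ALC_SOFT_HRTF; values taken from the extension spec.
        internal static class AlSoft
        {
            public const int HRTF   = 0x1992; // ALC_HRTF_SOFT attribute key
            public const int Enable = 1;      // ALC_TRUE: request HRTF mixing
        }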
Example #4
        /// <summary>
        /// Creates an input audio stream, instantiates a Rhino object, and infers the intent from spoken commands.
        /// </summary>
        /// <param name="contextPath">
        /// Absolute path to file containing context model (file with `.rhn` extension). A context represents the set of
        /// expressions (spoken commands), intents, and intent arguments (slots) within a domain of interest.
        /// </param>
        /// <param name="modelPath">Absolute path to the file containing model parameters. If not set it will be set to the default location.</param>
        /// <param name="sensitivity">
        /// Inference sensitivity. It should be a number within [0, 1]. A higher sensitivity value results in
        /// fewer misses at the cost of (potentially) increasing the erroneous inference rate. If not set, the default value of 0.5 will be used.
        /// </param>
        /// <param name="audioDeviceIndex">Optional argument. If provided, audio is recorded from this input device. Otherwise, the default audio input device is used.</param>
        /// <param name="outputPath">Optional argument. If provided, recorded audio will be stored in this location at the end of the run.</param>
        public static void RunDemo(string contextPath, string modelPath, float sensitivity, int? audioDeviceIndex = null, string outputPath = null)
        {
            Rhino        rhino               = null;
            BinaryWriter outputFileWriter    = null;
            int          totalSamplesWritten = 0;

            try
            {
                // init rhino speech-to-intent engine
                rhino = Rhino.Create(contextPath, modelPath, sensitivity);

                // open stream to output file
                if (!string.IsNullOrWhiteSpace(outputPath))
                {
                    outputFileWriter = new BinaryWriter(new FileStream(outputPath, FileMode.OpenOrCreate, FileAccess.Write));
                    WriteWavHeader(outputFileWriter, 1, 16, 16000, 0);
                }

                // choose audio device
                string deviceName = null;
                if (audioDeviceIndex != null)
                {
                    List<string> captureDeviceList = ALC.GetStringList(GetEnumerationStringList.CaptureDeviceSpecifier).ToList();
                    if (captureDeviceList != null && audioDeviceIndex.Value < captureDeviceList.Count)
                    {
                        deviceName = captureDeviceList[audioDeviceIndex.Value];
                    }
                    else
                    {
                        throw new ArgumentException("No input device found with the specified index. Use --show_audio_devices to show " +
                                                    "available inputs", "--audio_device_index");
                    }
                }

                Console.WriteLine(rhino.ContextInfo);
                Console.WriteLine("Listening...\n");

                // create and start recording
                short[]         recordingBuffer = new short[rhino.FrameLength];
                ALCaptureDevice captureDevice   = ALC.CaptureOpenDevice(deviceName, 16000, ALFormat.Mono16, rhino.FrameLength * 2);
                {
                    ALC.CaptureStart(captureDevice);
                    while (!Console.KeyAvailable)
                    {
                        int samplesAvailable = ALC.GetAvailableSamples(captureDevice);
                        if (samplesAvailable > rhino.FrameLength)
                        {
                            ALC.CaptureSamples(captureDevice, ref recordingBuffer[0], rhino.FrameLength);
                            bool isFinalized = rhino.Process(recordingBuffer);
                            if (isFinalized)
                            {
                                Inference inference = rhino.GetInference();
                                if (inference.IsUnderstood)
                                {
                                    Console.WriteLine("{");
                                    Console.WriteLine($"  intent : '{inference.Intent}'");
                                    Console.WriteLine("  slots : {");
                                    foreach (KeyValuePair<string, string> slot in inference.Slots)
                                    {
                                        Console.WriteLine($"    {slot.Key} : '{slot.Value}'");
                                    }
                                    Console.WriteLine("  }");
                                    Console.WriteLine("}");
                                }
                                else
                                {
                                    Console.WriteLine("Didn't understand the command.");
                                }
                            }

                            if (outputFileWriter != null)
                            {
                                foreach (short sample in recordingBuffer)
                                {
                                    outputFileWriter.Write(sample);
                                }
                                totalSamplesWritten += recordingBuffer.Length;
                            }
                        }
                        Thread.Yield();
                    }

                    // stop and clean up resources
                    Console.WriteLine("Stopping...");
                    ALC.CaptureStop(captureDevice);
                    ALC.CaptureCloseDevice(captureDevice);
                }
            }
            finally
            {
                if (outputFileWriter != null)
                {
                    // write size to header and clean up
                    WriteWavHeader(outputFileWriter, 1, 16, 16000, totalSamplesWritten);
                    outputFileWriter.Flush();
                    outputFileWriter.Dispose();
                }
                rhino?.Dispose();
            }
        }
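WriteWavHeader is not included in this snippet. A minimal sketch consistent with the call sites above (mono, 16-bit, 16 kHz; the header is rewritten with the final sample count in the finally block) might look like this, assuming `using System.Text;` for Encoding; it is an assumption, not necessarily the demo's actual helper:

        // Assumed helper: writes a 44-byte PCM WAV header at the start of the stream.
        private static void WriteWavHeader(BinaryWriter writer, ushort channelCount, ushort bitDepth, int sampleRate, int totalSampleCount)
        {
            if (writer == null)
            {
                return;
            }

            writer.Seek(0, SeekOrigin.Begin);
            writer.Write(Encoding.ASCII.GetBytes("RIFF"));
            writer.Write((bitDepth / 8 * totalSampleCount) + 36);   // remaining file size
            writer.Write(Encoding.ASCII.GetBytes("WAVE"));
            writer.Write(Encoding.ASCII.GetBytes("fmt "));
            writer.Write(16);                                        // PCM fmt chunk size
            writer.Write((ushort)1);                                 // audio format: PCM
            writer.Write(channelCount);
            writer.Write(sampleRate);
            writer.Write(sampleRate * channelCount * bitDepth / 8);  // byte rate
            writer.Write((ushort)(channelCount * bitDepth / 8));     // block align
            writer.Write(bitDepth);
            writer.Write(Encoding.ASCII.GetBytes("data"));
            writer.Write(bitDepth / 8 * totalSampleCount);           // data chunk size
        }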
Example #5
        /// <summary>
        /// Creates an input audio stream, instantiates a Porcupine object, and monitors the audio stream for
        /// occurrences of the wake word(s). It prints the time of detection for each occurrence and the wake word.
        /// </summary>
        /// <param name="modelPath">Absolute path to the file containing model parameters. If not set it will be set to the default location.</param>
        /// <param name="keywordPaths">Absolute paths to keyword model files. If not set it will be populated from `keywords` argument.</param>
        /// <param name="keywordPaths">Absolute paths to keyword model files. If not set it will be populated from `keywords` argument.</param>
        /// <param name="sensitivities">
        /// Sensitivities for detecting keywords. Each value should be a number within [0, 1]. A higher sensitivity results in fewer
        /// misses at the cost of increasing the false alarm rate. If not set, 0.5 will be used.
        /// </param>
        /// <param name="keywords">
        /// List of keywords (phrases) for detection. The list of available (default) keywords can be retrieved
        /// using `Porcupine.KEYWORDS`. If `keywordPaths` is set, then this argument will be ignored.
        /// </param>
        /// <param name="audioDeviceIndex">Optional argument. If provided, audio is recorded from this input device. Otherwise, the default audio input device is used.</param>
        /// <param name="outputPath">Optional argument. If provided, recorded audio will be stored in this location at the end of the run.</param>
        public static void RunDemo(string modelPath, List<string> keywordPaths, List<string> keywords, List<float> sensitivities,
                                   int? audioDeviceIndex = null, string outputPath = null)
        {
            Porcupine    porcupine           = null;
            BinaryWriter outputFileWriter    = null;
            int          totalSamplesWritten = 0;

            try
            {
                // init porcupine wake word engine
                porcupine = Porcupine.Create(modelPath, keywordPaths, keywords, sensitivities);

                // get keyword names for labeling detection results
                if (keywords == null)
                {
                    keywords = keywordPaths.Select(k => Path.GetFileNameWithoutExtension(k).Split("_")[0]).ToList();
                }

                // open stream to output file
                if (!string.IsNullOrWhiteSpace(outputPath))
                {
                    outputFileWriter = new BinaryWriter(new FileStream(outputPath, FileMode.OpenOrCreate, FileAccess.Write));
                    WriteWavHeader(outputFileWriter, 1, 16, 16000, 0);
                }

                // choose audio device
                string deviceName = null;
                if (audioDeviceIndex != null)
                {
                    List<string> captureDeviceList = ALC.GetStringList(GetEnumerationStringList.CaptureDeviceSpecifier).ToList();
                    if (captureDeviceList != null && audioDeviceIndex.Value < captureDeviceList.Count)
                    {
                        deviceName = captureDeviceList[audioDeviceIndex.Value];
                    }
                    else
                    {
                        throw new ArgumentException("No input device found with the specified index. Use --show_audio_devices to show " +
                                                    "available inputs", "--audio_device_index");
                    }
                }

                Console.Write("Listening for {");
                for (int i = 0; i < keywords.Count; i++)
                {
                    Console.Write($" {keywords[i]}({sensitivities[i]})");
                }
                Console.Write("  }\n");

                // create and start recording
                short[]         recordingBuffer = new short[porcupine.FrameLength];
                ALCaptureDevice captureDevice   = ALC.CaptureOpenDevice(deviceName, 16000, ALFormat.Mono16, porcupine.FrameLength * 2);
                {
                    ALC.CaptureStart(captureDevice);
                    while (!Console.KeyAvailable)
                    {
                        int samplesAvailable = ALC.GetAvailableSamples(captureDevice);
                        if (samplesAvailable > porcupine.FrameLength)
                        {
                            ALC.CaptureSamples(captureDevice, ref recordingBuffer[0], porcupine.FrameLength);
                            int result = porcupine.Process(recordingBuffer);
                            if (result >= 0)
                            {
                                Console.WriteLine($"[{DateTime.Now.ToLongTimeString()}] Detected '{keywords[result]}'");
                            }

                            if (outputFileWriter != null)
                            {
                                foreach (short sample in recordingBuffer)
                                {
                                    outputFileWriter.Write(sample);
                                }
                                totalSamplesWritten += recordingBuffer.Length;
                            }
                        }
                        Thread.Yield();
                    }

                    // stop and clean up resources
                    Console.WriteLine("Stopping...");
                    ALC.CaptureStop(captureDevice);
                    ALC.CaptureCloseDevice(captureDevice);
                }
            }
            finally
            {
                if (outputFileWriter != null)
                {
                    // write size to header and clean up
                    WriteWavHeader(outputFileWriter, 1, 16, 16000, totalSamplesWritten);
                    outputFileWriter.Flush();
                    outputFileWriter.Dispose();
                }
                porcupine?.Dispose();
            }
        }
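An illustrative invocation of this demo (every argument value here is a placeholder):

            // Placeholder arguments: listen for the built-in "porcupine" keyword on the
            // default input device and save the captured audio to recording.wav.
            RunDemo(
                modelPath: null,
                keywordPaths: null,
                keywords: new List<string> { "porcupine" },
                sensitivities: new List<float> { 0.5f },
                audioDeviceIndex: null,
                outputPath: "recording.wav");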
Example #6
        static void Main(string[] args)
        {
            Console.WriteLine("Starting OpenAL!");
            var devices = ALC.GetStringList(GetEnumerationStringList.DeviceSpecifier);

            // Get the default device, then go through all devices and select the OpenAL Soft device if it exists.
            string deviceName = ALC.GetString(ALDevice.Null, AlcGetString.DefaultDeviceSpecifier);

            foreach (var d in devices)
            {
                if (d.Contains("OpenAL Soft"))
                {
                    deviceName = d;
                }
            }

            var device  = ALC.OpenDevice(deviceName);
            var context = ALC.CreateContext(device, (int[])null);

            ALC.MakeContextCurrent(context);

            CheckALError("Start");

            // Playback the recorded data
            CheckALError("Before data");
            AL.GenBuffer(out int alBuffer);
            AL.GenBuffer(out int backBuffer);
            AL.Listener(ALListenerf.Gain, 0.1f);

            AL.GenSource(out int alSource);
            AL.Source(alSource, ALSourcef.Gain, 1f);

            // Get sound samples from the map
            var map     = @"D:\H2vMaps\01a_tutorial.map";
            var factory = new MapFactory(Path.GetDirectoryName(map));
            var h2map   = factory.Load(Path.GetFileName(map));

            if (h2map is not H2vMap scene)
            {
                throw new NotSupportedException("Only Vista maps are supported in this tool");
            }

            var soundMapping = scene.GetTag(scene.Globals.SoundInfos[0].SoundMap);

            var soundTags = scene.GetLocalTagsOfType<SoundTag>();

            var maxSoundId = soundTags.Max(s => s.SoundEntryIndex);

            // TODO: multiplayer sounds are referencing the wrong ugh! tag

            var i = 0;

            foreach (var snd in soundTags)
            {
                var enc = snd.Encoding switch
                {
                    EncodingType.ImaAdpcmMono => AudioEncoding.MonoImaAdpcm,
                    EncodingType.ImaAdpcmStereo => AudioEncoding.StereoImaAdpcm,
                    _ => AudioEncoding.Mono16,
                };

                var sr = snd.SampleRate switch
                {
                    Core.Tags.SampleRate.hz22k05 => Audio.SampleRate._22k05,
                    Core.Tags.SampleRate.hz44k1 => Audio.SampleRate._44k1,
                    _ => Audio.SampleRate._44k1
                };

                if (enc != AudioEncoding.Mono16)
                {
                    continue;
                }

                var name = snd.Name.Substring(snd.Name.LastIndexOf("\\", snd.Name.LastIndexOf("\\") - 1) + 1).Replace('\\', '_');

                Console.WriteLine($"[{i++}] {snd.Option1}-{snd.Option2}-{snd.Option3}-{snd.SampleRate}-{snd.Encoding}-{snd.Format2}-{snd.Unknown}-{snd.UsuallyMaxValue}-{snd.UsuallyZero} {name}");

                var filenameFormat = $"{name}.{snd.SampleRate}-{snd.Encoding}-{snd.Format2}-{snd.Unknown}-{snd.UsuallyZero}-{snd.UsuallyMaxValue}.{{0}}.sound";

                var soundEntry = soundMapping.SoundEntries[snd.SoundEntryIndex];

                for (var s = 0; s < soundEntry.NamedSoundClipCount; s++)
                {
                    var clipIndex = soundEntry.NamedSoundClipIndex + s;

                    var clipInfo = soundMapping.NamedSoundClips[clipIndex];

                    var clipFilename = string.Format(filenameFormat, s);

                    var clipSize = 0;
                    for (var c = 0; c < clipInfo.SoundDataChunkCount; c++)
                    {
                        var chunk = soundMapping.SoundDataChunks[clipInfo.SoundDataChunkIndex + c];
                        clipSize += (int)(chunk.Length & 0x3FFFFFFF);
                    }

                    Span<byte> clipData        = new byte[clipSize];
                    var        clipDataCurrent = 0;

                    for (var c = 0; c < clipInfo.SoundDataChunkCount; c++)
                    {
                        var chunk = soundMapping.SoundDataChunks[clipInfo.SoundDataChunkIndex + c];

                        var len       = (int)(chunk.Length & 0x3FFFFFFF);
                        var chunkData = scene.ReadData(chunk.Offset.Location, chunk.Offset, len);

                        chunkData.Span.CopyTo(clipData.Slice(clipDataCurrent));
                        clipDataCurrent += len;
                    }

                    // Swap alBuffer and backBuffer so the upload targets the buffer not currently attached to the source.
                    Interlocked.Exchange(ref backBuffer, Interlocked.Exchange(ref alBuffer, backBuffer));
                    AL.SourceStop(alSource);
                    BufferData(enc, sr, clipData.Slice(96), alBuffer);
                    CheckALError("After buffer");
                    AL.Source(alSource, ALSourcei.Buffer, alBuffer);
                    AL.SourcePlay(alSource);

                    while (AL.GetSourceState(alSource) == ALSourceState.Playing)
                    {
                        Thread.Sleep(100);
                    }

                    // Only play first variant
                    break;
                }
            }

            ALC.MakeContextCurrent(ALContext.Null);
            ALC.DestroyContext(context);
            ALC.CloseDevice(device);

            Console.WriteLine("done");
            Console.ReadLine();
        }
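CheckALError and BufferData are helpers defined elsewhere in this program; BufferData presumably wraps AL.BufferData for the project's AudioEncoding/SampleRate enums. A plausible sketch of the error check, built on OpenTK's AL.GetError:

        // Assumed helper: print (and thereby clear) any pending AL error, tagged with a label.
        private static void CheckALError(string context)
        {
            ALError error = AL.GetError();
            if (error != ALError.NoError)
            {
                Console.WriteLine($"AL error at '{context}': {error}");
            }
        }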