Code Example #1
File: Program.cs Project: jedijosh920/KeyboardAudio
        /// <summary>
        /// Entry point: captures microphone audio, runs an FFT over each
        /// block, and writes the spectrum to the keyboard and console.
        /// </summary>
        /// <param name="args">
        /// Command-line arguments (unused).
        /// </param>
        static void Main(string[] args)
        {
            var audioBuffer = new byte[256];
            var fftData = new byte[256];
            var fft = new double[256];
            double fftavg = 0;
            float amplitude = 10.0f;

            var fftTransformer = new LomontFFT();

            var writers = new List<IWriter>();
            writers.Add(new KeyboardWriter());
            writers.Add(new ConsoleWriter());

            var audioCapture = new AudioCapture(AudioCapture.DefaultDevice, 8000, ALFormat.Mono8, 256);
            audioCapture.Start();
            audioCapture.ReadSamples(audioBuffer, 256);
           
            while (true)
            {
                for (int j = 0; j < 92; j++)
                {
                    // reset the accumulator and buffers
                    fftavg = 0;
                    for (int i = 0; i < 256; i++)
                    {
                        audioBuffer[i] = 0;
                        fftData[i] = 0;
                        fft[i] = 0;
                    }

                    audioCapture.ReadSamples(audioBuffer, 256);

                    for (int i = 0; i < 256; i++)
                    {
                        fft[i] = (audioBuffer[i] - 128) * amplitude;
                    }

                    fftTransformer.TableFFT(fft, true);
                    
                    for (int i = 0; i < 256; i += 2)
                    {
                        double fftmag = Math.Sqrt((fft[i] * fft[i]) + (fft[i + 1] * fft[i + 1]));
                        fftavg += fftmag;
                        fftData[i] = (byte)fftmag;
                        fftData[i + 1] = fftData[i];
                    }

                    fftavg /= 10;

                    writers.ForEach(x => x.Write(j, fftData));

                    //Thread.Sleep(15);
                    Thread.Sleep(20);
                }
            }
        }
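
A note on the loop above: ReadSamples is asked for a fixed 256 samples on every pass, whether or not that many have actually been captured. A more defensive polling loop (a minimal sketch against OpenTK's AudioCapture; the device, rate, and pass count are placeholder assumptions) drains only what AvailableSamples reports:

using System;
using System.Threading;
using OpenTK.Audio;
using OpenTK.Audio.OpenAL;

class CapturePolling
{
    static void Main()
    {
        var buffer = new byte[256];
        using (var capture = new AudioCapture(AudioCapture.DefaultDevice, 8000, ALFormat.Mono8, buffer.Length))
        {
            capture.Start();
            for (int pass = 0; pass < 100; pass++)
            {
                // Read only the samples the driver has actually captured so far.
                int available = Math.Min(capture.AvailableSamples, buffer.Length);
                if (available > 0)
                    capture.ReadSamples(buffer, available);
                Thread.Sleep(20);
            }
            capture.Stop();
        }
    }
}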
Code Example #2
File: Game1.cs Project: Aztherion/WASAPI.Net
        /// <summary>
        /// Allows the game to perform any initialization it needs to before starting to run.
        /// This is where it can query for any required services and load any non-graphic
        /// related content.  Calling base.Initialize will enumerate through any components
        /// and initialize them as well.
        /// </summary>
        protected override void Initialize()
        {
            // TODO: Add your initialization logic here
            _dynamicSound = new DynamicSoundEffectInstance(48000, AudioChannels.Mono, 32);
            _audioCapture = new AudioCapture();
            _audioCapture.BufferReady += BufferReady;
            _audioCapture.Start();

            base.Initialize();
        }
Code Example #3
File: Sounds.cs Project: kmsimproject/OpenBVE
        // --- initialization and deinitialization ---

        /// <summary>Initializes audio. A call to Deinitialize must be made when terminating the program.</summary>
        public void Initialize(HostInterface host, SoundRange range)
        {
            Deinitialize();

            CurrentHost = host;

            switch (range)
            {
            case SoundRange.Low:
                OuterRadiusFactorMinimum      = 2.0;
                OuterRadiusFactorMaximum      = 8.0;
                OuterRadiusFactorMaximumSpeed = 1.0;
                break;

            case SoundRange.Medium:
                OuterRadiusFactorMinimum      = 4.0;
                OuterRadiusFactorMaximum      = 16.0;
                OuterRadiusFactorMaximumSpeed = 2.0;
                break;

            case SoundRange.High:
                OuterRadiusFactorMinimum      = 6.0;
                OuterRadiusFactorMaximum      = 24.0;
                OuterRadiusFactorMaximumSpeed = 3.0;
                break;
            }
            OuterRadiusFactor      = Math.Sqrt(OuterRadiusFactorMinimum * OuterRadiusFactorMaximum);
            OuterRadiusFactorSpeed = 0.0;
            OpenAlDevice           = Alc.OpenDevice(null);
            string deviceName = Alc.GetString(OpenAlDevice, AlcGetString.DefaultDeviceSpecifier);

            if ((Environment.OSVersion.Platform == PlatformID.Win32S | Environment.OSVersion.Platform == PlatformID.Win32Windows | Environment.OSVersion.Platform == PlatformID.Win32NT) && deviceName == "Generic Software")
            {
                /*
                 * Creative OpenAL implementation on Windows seems to be limited to max 16 simultaneous sounds
                 * Now shipping OpenAL Soft, but detect this and don't glitch
                 * Further note that the current version of OpenAL Soft (1.20.0 at the time of writing) does not like OpenTK
                 * The version in use is 1.17.0 found here: https://openal-soft.org/openal-binaries/
                 */
                systemMaxSounds = 16;
            }
            try
            {
                OpenAlMic = new AudioCapture(AudioCapture.DefaultDevice, SamplingRate, ALFormat.Mono16, BufferSize);
            }
            catch
            {
                // no capture device available; microphone input is simply disabled
                OpenAlMic = null;
            }

            if (OpenAlDevice != IntPtr.Zero)
            {
                OpenAlContext = Alc.CreateContext(OpenAlDevice, (int[])null);
                if (OpenAlContext != ContextHandle.Zero)
                {
                    Alc.MakeContextCurrent(OpenAlContext);
                    try
                    {
                        AL.SpeedOfSound(343.0f);
                    }
                    catch
                    {
                        MessageBox.Show(Translations.GetInterfaceString("errors_sound_openal_version"), Translations.GetInterfaceString("program_title"), MessageBoxButtons.OK, MessageBoxIcon.Hand);
                    }
                    AL.DistanceModel(ALDistanceModel.None);
                    return;
                }
                Alc.CloseDevice(OpenAlDevice);
                OpenAlDevice = IntPtr.Zero;
                if (OpenAlMic != null)
                {
                    OpenAlMic.Dispose();
                    OpenAlMic = null;
                }
                MessageBox.Show(Translations.GetInterfaceString("errors_sound_openal_context"), Translations.GetInterfaceString("program_title"), MessageBoxButtons.OK, MessageBoxIcon.Hand);
                return;
            }
            OpenAlContext = ContextHandle.Zero;
            MessageBox.Show(Translations.GetInterfaceString("errors_sound_openal_device"), Translations.GetInterfaceString("program_title"), MessageBoxButtons.OK, MessageBoxIcon.Hand);
        }
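
Stripped of the message boxes, capture setup, and version workarounds, the successful path through this initializer reduces to a short ALC sequence. A minimal sketch of that device/context lifecycle (assuming OpenTK's Alc/AL bindings):

using System;
using OpenTK;
using OpenTK.Audio.OpenAL;

class OpenAlLifecycle
{
    static void Main()
    {
        IntPtr device = Alc.OpenDevice(null); // null selects the default playback device
        if (device == IntPtr.Zero)
            return; // no usable device

        ContextHandle context = Alc.CreateContext(device, (int[])null);
        if (context == ContextHandle.Zero)
        {
            Alc.CloseDevice(device); // context creation failed: release the device
            return;
        }

        Alc.MakeContextCurrent(context);
        AL.DistanceModel(ALDistanceModel.None); // as in the initializer above

        // ... create sources and play sounds here ...

        Alc.MakeContextCurrent(ContextHandle.Zero);
        Alc.DestroyContext(context);
        Alc.CloseDevice(device);
    }
}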
Code Example #4
File: Sounds.cs Project: tsdworks/RAGLINK
        // --- initialization and deinitialization ---

        /// <summary>Initializes audio. A call to Deinitialize must be made when terminating the program.</summary>
        internal static void Initialize()
        {
            Deinitialize();
            switch (Interface.CurrentOptions.SoundRange)
            {
            case Interface.SoundRange.Low:
                OuterRadiusFactorMinimum      = 2.0;
                OuterRadiusFactorMaximum      = 8.0;
                OuterRadiusFactorMaximumSpeed = 1.0;
                break;

            case Interface.SoundRange.Medium:
                OuterRadiusFactorMinimum      = 4.0;
                OuterRadiusFactorMaximum      = 16.0;
                OuterRadiusFactorMaximumSpeed = 2.0;
                break;

            case Interface.SoundRange.High:
                OuterRadiusFactorMinimum      = 6.0;
                OuterRadiusFactorMaximum      = 24.0;
                OuterRadiusFactorMaximumSpeed = 3.0;
                break;
            }
            OuterRadiusFactor      = Math.Sqrt(OuterRadiusFactorMinimum * OuterRadiusFactorMaximum);
            OuterRadiusFactorSpeed = 0.0;
            OpenAlDevice           = Alc.OpenDevice(null);
            try
            {
                OpenAlMic = new AudioCapture(AudioCapture.DefaultDevice, SamplingRate, ALFormat.Mono16, BufferSize);
            }
            catch
            {
                // no capture device available; microphone input is simply disabled
                OpenAlMic = null;
            }

            if (OpenAlDevice != IntPtr.Zero)
            {
                OpenAlContext = Alc.CreateContext(OpenAlDevice, (int[])null);
                if (OpenAlContext != ContextHandle.Zero)
                {
                    Alc.MakeContextCurrent(OpenAlContext);
                    try
                    {
                        AL.SpeedOfSound(343.0f);
                    }
                    catch
                    {
                        MessageBox.Show(Translations.GetInterfaceString("errors_sound_openal_version"), Translations.GetInterfaceString("program_title"), MessageBoxButtons.OK, MessageBoxIcon.Hand);
                    }
                    AL.DistanceModel(ALDistanceModel.None);
                    return;
                }
                Alc.CloseDevice(OpenAlDevice);
                OpenAlDevice = IntPtr.Zero;
                if (OpenAlMic != null)
                {
                    OpenAlMic.Dispose();
                    OpenAlMic = null;
                }
                MessageBox.Show(Translations.GetInterfaceString("errors_sound_openal_context"), Translations.GetInterfaceString("program_title"), MessageBoxButtons.OK, MessageBoxIcon.Hand);
                return;
            }
            OpenAlContext = ContextHandle.Zero;
            MessageBox.Show(Translations.GetInterfaceString("errors_sound_openal_device"), Translations.GetInterfaceString("program_title"), MessageBoxButtons.OK, MessageBoxIcon.Hand);
        }
Code Example #5
File: Capture.cs Project: feliwir/SharpAudio
 public void CreateMediaFoundation()
 {
     TestCapture(AudioCapture.CreateDefault());
 }
Code Example #6
File: OpenALRecorder.cs Project: Nirklav/TCPChat
        private void Initialize(string deviceName, AudioQuality quality)
        {
            try
            {
                this.quality = quality;
                this.samplesSize = DefaultBufferSize;

                ALFormat format;

                if (quality.Channels == 1)
                    format = quality.Bits == 8 ? ALFormat.Mono8 : ALFormat.Mono16;
                else
                    format = quality.Bits == 8 ? ALFormat.Stereo8 : ALFormat.Stereo16;

                lock (syncObj)
                {
                    buffer = new byte[quality.Channels * (quality.Bits / 8) * samplesSize * 2];

                    if (string.IsNullOrEmpty(deviceName))
                        deviceName = AudioCapture.DefaultDevice;

                    if (!AudioCapture.AvailableDevices.Contains(deviceName))
                        deviceName = AudioCapture.DefaultDevice;

                    capture = new AudioCapture(deviceName, quality.Frequency, format, samplesSize);
                }
            }
            catch (Exception e)
            {
                if (capture != null)
                    capture.Dispose();

                capture = null;

                ClientModel.Logger.Write(e);
                throw new ModelException(ErrorCode.AudioNotEnabled, "Audio recorder was not initialized.", e, deviceName);
            }
        }
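
The capture buffer above is sized as channels × bytes-per-sample × block size, doubled for headroom. A worked example (the 16-bit mono values and the 4096-sample DefaultBufferSize are assumptions, since the constant is not shown in this excerpt):

using System;

class BufferSizing
{
    static void Main()
    {
        int channels = 1, bits = 16, samplesSize = 4096;
        int bytes = channels * (bits / 8) * samplesSize * 2; // double-buffered headroom
        Console.WriteLine(bytes); // 16384
    }
}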
Code Example #7
File: Program.cs Project: areilly711/psi
        /// <summary>
        /// Builds and runs a speech recognition pipeline using the Bing speech recognizer. Requires a valid Cognitive Services
        /// subscription key. See https://docs.microsoft.com/en-us/azure/cognitive-services/cognitive-services-apis-create-account.
        /// </summary>
        /// <remarks>
        /// If you are getting a <see cref="System.InvalidOperationException"/> with the message 'BingSpeechRecognizer returned
        /// OnConversationError with error code: LoginFailed. Original error text: Transport error', this most likely is due to
        /// an invalid subscription key. Please check your Azure portal at https://portal.azure.com and ensure that you have
        /// added a subscription to the Bing Speech API on your account.
        /// </remarks>
        /// <param name="outputLogPath">The path under which to write log data.</param>
        /// <param name="inputLogPath">The path from which to read audio input data.</param>
        public static void RunBingSpeech(string outputLogPath = null, string inputLogPath = null)
        {
            // Create the pipeline object.
            using (Pipeline pipeline = Pipeline.Create())
            {
                // Use either live audio from the microphone or audio from a previously saved log
                IProducer<AudioBuffer> audioInput = null;
                if (inputLogPath != null)
                {
                    // Open the MicrophoneAudio stream from the last saved log
                    var store = Store.Open(pipeline, Program.AppName, inputLogPath);
                    audioInput = store.OpenStream<AudioBuffer>($"{Program.AppName}.MicrophoneAudio");
                }
                else
                {
                    // Create the AudioCapture component to capture audio from the default device in 16 kHz 1-channel
                    // PCM format as required by both the voice activity detector and speech recognition components.
                    audioInput = new AudioCapture(pipeline, new AudioCaptureConfiguration()
                    {
                        OutputFormat = WaveFormat.Create16kHz1Channel16BitPcm()
                    });
                }

                // Perform voice activity detection using the voice activity detector component
                var vad = new SystemVoiceActivityDetector(pipeline);
                audioInput.PipeTo(vad);

                // Create Bing speech recognizer component
                var recognizer = new BingSpeechRecognizer(pipeline, new BingSpeechRecognizerConfiguration()
                {
                    SubscriptionKey = Program.bingSubscriptionKey, RecognitionMode = SpeechRecognitionMode.Interactive
                });

                // The input audio to the Bing speech recognizer needs to be annotated with a voice activity flag.
                // This can be constructed by using the Psi Join() operator to combine the audio and VAD streams.
                var annotatedAudio = audioInput.Join(vad);

                // Subscribe the recognizer to the annotated audio
                annotatedAudio.PipeTo(recognizer);

                // Partial and final speech recognition results are posted on the same stream. Here
                // we use Psi's Where() operator to filter out only the final recognition results.
                var finalResults = recognizer.Out.Where(result => result.IsFinal);

                // Print the recognized text of the final recognition result to the console.
                finalResults.Do(result => Console.WriteLine(result.Text));

                // Create a data store to log the data to if necessary. A data store is necessary
                // only if output logging is enabled.
                var dataStore = CreateDataStore(pipeline, outputLogPath);

                // For disk logging only
                if (dataStore != null)
                {
                    // Log the microphone audio and recognition results
                    audioInput.Write($"{Program.AppName}.MicrophoneAudio", dataStore);
                    finalResults.Write($"{Program.AppName}.FinalRecognitionResults", dataStore);
                    vad.Write($"{Program.AppName}.VoiceActivity", dataStore);
                }

                // Register an event handler to catch pipeline errors
                pipeline.PipelineCompletionEvent += PipelineCompletionEvent;

                // Run the pipeline
                pipeline.RunAsync();

                // Bing speech transcribes speech to text
                Console.WriteLine("Say anything");

                Console.WriteLine("Press any key to exit...");
                Console.ReadKey(true);
            }
        }
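
Stripped of recognition and logging, the capture setup shared by these Psi samples is quite small. A minimal sketch (assuming the Microsoft.Psi and Microsoft.Psi.Audio packages; the Do handler is purely illustrative):

using System;
using Microsoft.Psi;
using Microsoft.Psi.Audio;

class MinimalPsiCapture
{
    static void Main()
    {
        using (var pipeline = Pipeline.Create())
        {
            // Capture from the default device in the 16 kHz 1-channel PCM
            // format the speech components above require.
            var audio = new AudioCapture(pipeline, new AudioCaptureConfiguration()
            {
                OutputFormat = WaveFormat.Create16kHz1Channel16BitPcm()
            });

            // Print the size of each captured audio buffer as it arrives.
            audio.Do(buffer => Console.WriteLine($"{buffer.Data.Length} bytes"));

            pipeline.RunAsync();
            Console.WriteLine("Capturing; press any key to stop...");
            Console.ReadKey(true);
        }
    }
}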
Code Example #8
        public RecorderDiagnostic()
        {
            Trace.WriteLine("--- AudioCapture related errors ---");
            IsDeviceAvailable = false;

            try
            {
                r = new AudioCapture(AudioCapture.DefaultDevice, 16000, ALFormat.Mono16, 4096);
            }
            catch (AudioDeviceException ade)
            {
                Trace.WriteLine("AudioCapture Exception caught: " + ade.Message);
                return;
            }
            IsDeviceAvailable = true;
            DeviceName = r.CurrentDevice;
            CheckRecorderError("Alc.CaptureOpenDevice");

            r.Start();
            CheckRecorderError("Alc.CaptureStart");
            Thread.Sleep(100);
            r.Stop();
            CheckRecorderError("Alc.CaptureStop");

            byte[] Buffer = new byte[8192];

            Thread.Sleep(10);  // Wait for a few samples to become available.
            int SamplesBefore = r.AvailableSamples;

            CheckRecorderError("Alc.GetInteger(...CaptureSamples...)");
            r.ReadSamples(Buffer, (SamplesBefore > 4096 ? 4096 : SamplesBefore));
            CheckRecorderError("Alc.CaptureSamples");

            int SamplesCaptured = SamplesBefore - r.AvailableSamples;

            uint ZeroCounter = 0;
            for (int i = 0; i < SamplesCaptured * 2; i++)
            {
                if (Buffer[i] == 0)
                    ZeroCounter++;
            }

            for (int i = 0; i < SamplesCaptured; i++)
            {
                short sample = BitConverter.ToInt16(Buffer, i * 2);
                if (sample > MaxSample)
                    MaxSample = sample;
                if (sample < MinSample)
                    MinSample = sample;
            }

            if (ZeroCounter < SamplesCaptured * 2 && SamplesCaptured > 0)
                BufferContentsAllZero = false;
            else
                BufferContentsAllZero = true;

            r.Dispose();
            CheckRecorderError("Alc.CaptureCloseDevice");

            // no playback test needed due to Parrot test app.
            /*
            uint buf;
            AL.GenBuffer(out buf);
            AL.BufferData(buf, ALFormat.Mono16, BufferPtr, SamplesCaptured * 2, 16000);
            uint src;
            AL.GenSource(out src);
            AL.BindBufferToSource(src, buf);
            AL.Listener(ALListenerf.Gain, 16.0f);
            AL.SourcePlay(src);
            while (AL.GetSourceState(src) == ALSourceState.Playing)
            {
                Thread.Sleep(0);
            }
            AL.SourceStop(src);

            AL.DeleteSource(ref src);
            AL.DeleteBuffer(ref buf);
            */
        }
Code Example #9
        static void Main(string[] args)
        {
            MessageFactory = new MessageFactory(true);

            CallSocket = new EasySocket(() => new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp));
            ApiSocket  = new EasySocket(() => new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp));
            ApiSocket.PreparePacket += ApiReceived;

            AudioController = new AudioController(new AudioFormat(), CallSocket, format => new PcmCodec(format));
            AudioController.Registration(format => new AudioPlayback(format));
            AudioController.Registration(format => AudioCapture = new AudioCapture(format));

            CallController = new CallController(AudioController);

            var commandParser = new CommandParser('!')
            {
                new CommandBuilder<ConnectCommand>("connect")
                .Parameter("ip", (cmd, value) => cmd.Address = IPAddress.Parse(value))
                .Parameter("p", (cmd, value) => cmd.Port     = ushort.Parse(value))
                .Build(ConnectionHandle),

                new CommandBuilder<DisconnectCommand>("disconnect")
                .Build(DisconnectHandle),

                new CommandBuilder<LoginCommand>("login")
                .Parameter("u", (cmd, value) => cmd.User = value)
                .Build(LoginHandle),

                new CommandBuilder<LogoutCommand>("logout")
                .Build(LogoutHandle),

                new CommandBuilder<MessageCommand>("send")
                .Parameter("u", (cmd, value) => cmd.Target  = value)
                .Parameter("m", (cmd, value) => cmd.Message = value)
                .Build(MessageHandle),

                new CommandBuilder<CallCommand>("call")
                .Parameter("u", (cmd, value) => cmd.Target = value)
                .Build(CallHandle),

                new CommandBuilder<CallInviteCommand>("invite")
                .Parameter("s", (cmd, value) => cmd.SessionId = int.Parse(value))
                .Build(CallInviteHandle),

                new CommandBuilder<HangUpCommand>("hangup")
                .Parameter("s", (cmd, value) => cmd.SessionId = int.Parse(value))
                .Build(CallHangUpHandle),

                new CommandBuilder<MuteCommand>("mute")
                .Parameter("r", (cmd, value) => cmd.RouteId = int.Parse(value))
                .Build(MuteHandle),

                new CommandBuilder<UnmuteCommand>("unmute")
                .Parameter("r", (cmd, value) => cmd.RouteId = int.Parse(value))
                .Build(UnMuteHandle),
            };

            while (true)
            {
                var line = Console.ReadLine();

                _ = commandParser
                    .HandleAsync(line)
                    .ContinueWith(s =>
                {
                    if (!s.IsFaulted)
                    {
                        return;
                    }

                    Console.WriteLine($"Command is failed: {s.Exception.InnerException}");
                });
            }
        }
Code Example #10
        public async Task TranscribeConversationsAsync(IEnumerable<string> voiceSignatureStringUsers)
        {
            uint samplesPerSecond = 16000;
            byte bitsPerSample    = 16;
            byte channels         = 8; // 7 + 1 channels

            var config = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);

            config.SetProperty("ConversationTranscriptionInRoomAndOnline", "true");
            var stopRecognition = new TaskCompletionSource<int>();

            using (var audioInput = AudioInputStream.CreatePushStream(AudioStreamFormat.GetWaveFormatPCM(samplesPerSecond, bitsPerSample, channels)))
            {
                var meetingID = Guid.NewGuid().ToString();
                using (var conversation = await Conversation.CreateConversationAsync(config, meetingID))
                {
                    // create a conversation transcriber using audio stream input
                    using (this.conversationTranscriber = new ConversationTranscriber(AudioConfig.FromStreamInput(audioInput)))
                    {
                        conversationTranscriber.Transcribing += (s, e) =>
                        {
                            this.SetText($"TRANSCRIBING: Text={e.Result.Text} SpeakerId={e.Result.UserId}");
                        };

                        conversationTranscriber.Transcribed += (s, e) =>
                        {
                            if (e.Result.Reason == ResultReason.RecognizedSpeech)
                            {
                                this.SetText($"TRANSCRIBED: Text={e.Result.Text} SpeakerId={e.Result.UserId}");
                            }
                            else if (e.Result.Reason == ResultReason.NoMatch)
                            {
                                this.SetText($"NOMATCH: Speech could not be recognized.");
                            }
                        };

                        conversationTranscriber.Canceled += (s, e) =>
                        {
                            this.SetText($"CANCELED: Reason={e.Reason}");

                            if (e.Reason == CancellationReason.Error)
                            {
                                this.SetText($"CANCELED: ErrorCode={e.ErrorCode}");
                                this.SetText($"CANCELED: ErrorDetails={e.ErrorDetails}");
                                this.SetText($"CANCELED: Did you update the subscription info?");
                                stopRecognition.TrySetResult(0);
                            }
                        };

                        conversationTranscriber.SessionStarted += (s, e) =>
                        {
                            this.SetText($"\nSession started event. SessionId={e.SessionId}");
                        };

                        conversationTranscriber.SessionStopped += (s, e) =>
                        {
                            this.SetText($"\nSession stopped event. SessionId={e.SessionId}");
                            this.SetText("\nStop recognition.");
                            stopRecognition.TrySetResult(0);
                        };

                        // Add participants to the conversation.
                        int i = 1;
                        foreach (var voiceSignatureStringUser in voiceSignatureStringUsers)
                        {
                            var speaker = Participant.From($"User{i++}", "en-US", voiceSignatureStringUser);
                            await conversation.AddParticipantAsync(speaker);
                        }

                        // Join to the conversation and start transcribing
                        await conversationTranscriber.JoinConversationAsync(conversation);

                        await conversationTranscriber.StartTranscribingAsync().ConfigureAwait(false);

                        using (var p = Pipeline.Create())
                        {
                            var store   = PsiStore.Create(p, "Transcribe", @"D:\Temp");
                            var capture = new AudioCapture(p, WaveFormat.CreatePcm((int)samplesPerSecond, bitsPerSample, channels)).Write("Audio", store);
                            capture.Do(audio => audioInput.Write(audio.Data));
                            p.RunAsync();

                            // waits for completion, then stop transcription
                            await stopRecognition.Task;
                        }

                        await conversationTranscriber.StopTranscribingAsync().ConfigureAwait(false);
                    }
                }
            }
        }
Code Example #11
        public void Start()
        {
            if (null != _audioContext)
                return;

            _audioContext = new AudioContext();
            AL.Listener(ALListenerf.Gain, 1.0f);
            _audioSource  = AL.GenSource();
            _audioCapture = new AudioCapture(String.Empty, _samplingRate, OpenTK.Audio.OpenAL.ALFormat.Mono16, _readBuffer.Length);

            _audioCapture.Start();
        }
Code Example #12
        public void Stop()
        {
            if (null == _audioContext)
                return;

            if (null != _audioCapture)
            {
                _audioCapture.Stop();
                _audioCapture.Dispose();
                _audioCapture = null;
            }

            if (null != _audioContext)
            {
                int r;
                AL.GetSource(_audioSource, ALGetSourcei.BuffersQueued, out r);
                clearBuffer(r);

                AL.DeleteSource(_audioSource);

                _audioContext.Dispose();
                _audioContext = null;
            }
        }
Code Example #13
        /// <summary>
        /// Check for the OpenAL Error.
        /// </summary>
        public static void CheckError(AudioCapture device)
        {
            AlcError errorCode = GetError(device);
            int      frame     = Logger.Instance.StackFrame;

            if (errorCode == AlcError.NoError)
            {
                if (VerboseLevel == VerboseFlags.All)
                {
                    Logger.Instance.StackFrame = @checked ? 2 : 3;
                    Logger.Instance.Log("NoError: AL Operation Success", Logger.Level.Information);
                    Logger.Instance.StackFrame = frame;
                }

                @checked = true;
                return;
            }

            string error       = "Unknown Error.";
            string description = "No Description available.";

            // Decode the error code
            switch (errorCode)
            {
            case AlcError.InvalidDevice:
            {
                error       = "AL_INVALID_DEVICE";
                description = "A bad device name has been specified.";
                break;
            }

            case AlcError.InvalidEnum:
            {
                error       = "AL_INVALID_ENUM";
                description = "An unacceptable value has been specified for an enumerated argument.";
                break;
            }

            case AlcError.InvalidValue:
            {
                error       = "AL_INVALID_VALUE";
                description = "A numeric argument is out of range.";
                break;
            }

            case AlcError.InvalidContext:
            {
                error       = "AL_INVALID_CONTEXT";
                description = "The specified operation is not allowed in the current state of audio context of this thread.";
                break;
            }

            case AlcError.OutOfMemory:
            {
                error       = "AL_OUT_OF_MEMORY";
                description = "There is not enough memory left to execute the command.";
                break;
            }

            default:
            {
                error = errorCode.ToString();
                break;
            }
            }

            Logger.Instance.StackFrame = @checked ? 2 : 3;
            Logger.Instance.Log(error + ": " + description, Logger.Level.Error);
            Logger.Instance.StackFrame = frame;

            @checked = true;
        }
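
OpenTK's AudioCapture also surfaces the device's error state directly, so a caller can make the same check without this helper. A minimal sketch (assuming the CurrentError property exposed by OpenTK 1.x):

using System;
using OpenTK.Audio;
using OpenTK.Audio.OpenAL;

class CaptureErrorCheck
{
    static void Main()
    {
        using (var mic = new AudioCapture(AudioCapture.DefaultDevice, 16000, ALFormat.Mono16, 4096))
        {
            mic.Start();
            AlcError err = mic.CurrentError; // polls the ALC error state for the capture device
            Console.WriteLine(err == AlcError.NoError ? "capture OK" : "ALC error: " + err);
            mic.Stop();
        }
    }
}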
Code Example #14
File: Program.cs Project: aiv01/aiv-audio
        static void Main(string[] args)
        {
            foreach (string device in AudioDevice.Devices)
            {
                Console.WriteLine(device);
            }

            foreach (string device in AudioDevice.CaptureDevices)
            {
                Console.WriteLine(device);
            }

            AudioDevice playerEar = new AudioDevice();


            Console.WriteLine(AudioDevice.CurrentDevice.Name);

            AudioClip clip = new AudioClip("Assets/jumping.ogg");

            AudioClip laser = new AudioClip("Assets/laser.wav");

            AudioClip backgroundMusic = new AudioClip("Assets/test_wikipedia_mono.ogg");

            Console.WriteLine("--- Assets/jumping.ogg ---");
            Console.WriteLine(clip.Channels);
            Console.WriteLine(clip.Frequency);
            Console.WriteLine(clip.Samples);
            Console.WriteLine(clip.Duration);
            Console.WriteLine(clip.BitsPerSample);

            Console.WriteLine("--- Assets/laser.wav ---");
            Console.WriteLine(laser.Channels);
            Console.WriteLine(laser.Frequency);
            Console.WriteLine(laser.Samples);
            Console.WriteLine(laser.Duration);
            Console.WriteLine(laser.BitsPerSample);

            AudioSource source = new AudioSource();

            source.Play(clip);

            AudioCapture microphone = new AudioCapture(22050, 1, 5f);
            AudioBuffer  micBuffer  = new AudioBuffer();

            microphone.Start();

            AudioSource background = new AudioSource();


            Window window = new Window(1024, 576, "Aiv.Audio Example");

            background.Position          = new OpenTK.Vector3(window.Width / 2, window.Height / 2, 0);
            background.ReferenceDistance = 50;
            background.MaxDistance       = 100;
            background.RolloffFactor     = 1f;

            Sprite sprite = new Sprite(100, 100);

            while (window.opened)
            {
                background.Stream(backgroundMusic, window.deltaTime);

                if (window.GetKey(KeyCode.Space))
                {
                    source.Play(clip);
                }

                if (window.GetKey(KeyCode.Return))
                {
                    source.Play(laser);
                }

                if (window.GetKey(KeyCode.ShiftRight))
                {
                    microphone.Read(micBuffer);
                    source.Play(micBuffer);
                }

                if (window.GetKey(KeyCode.Right))
                {
                    sprite.position.X += 100 * window.deltaTime;
                }

                if (window.GetKey(KeyCode.Left))
                {
                    sprite.position.X -= 100 * window.deltaTime;
                }

                if (window.GetKey(KeyCode.Up))
                {
                    sprite.position.Y -= 100 * window.deltaTime;
                }

                if (window.GetKey(KeyCode.Down))
                {
                    sprite.position.Y += 100 * window.deltaTime;
                }

                playerEar.Position = new OpenTK.Vector3(sprite.position.X, sprite.position.Y, 0);
                source.Position    = playerEar.Position;

                sprite.DrawSolidColor(1f, 0, 0);

                window.Update();
            }
        }
Code Example #15
File: Parrot.cs Project: jpespartero/OpenGlobe
        void StartRecording()
        {
            try
            {
                audio_context = new AudioContext();
            }
            catch (AudioException ae)
            {
                MessageBox.Show("Fatal: Cannot continue without a playback device.\nException caught when opening playback device.\n" + ae.Message);
                Application.Exit();
            }

            AL.Listener(ALListenerf.Gain, (float)numericUpDown_PlaybackGain.Value);
            src = AL.GenSource();

            int sampling_rate = (int)numericUpDown_Frequency.Value;
            double buffer_length_ms = (double)numericUpDown_BufferLength.Value;
            int buffer_length_samples = (int)((double)numericUpDown_BufferLength.Value * sampling_rate * 0.001 / BlittableValueType.StrideOf(buffer));

            try
            {
                audio_capture = new AudioCapture((string)comboBox_RecorderSelection.SelectedItem,
                    sampling_rate, ALFormat.Mono16, buffer_length_samples);
            }
            catch (AudioDeviceException ade)
            {
                MessageBox.Show("Exception caught when opening recording device.\n" + ade.Message);
                audio_capture = null;
            }

            if (audio_capture == null)
                return;

            audio_capture.Start();

            timer_GetSamples.Start();
            timer_GetSamples.Interval = (int)(buffer_length_ms / 2 + 0.5);   // Tick when half the buffer is full.
        }
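
For concreteness, the buffer-length arithmetic above works out as follows (a worked example mirroring the code, with assumed values: a 300 ms buffer at 44100 Hz and a short[] capture buffer, whose stride is 2 bytes):

using System;

class ParrotBufferMath
{
    static void Main()
    {
        int samplingRate = 44100;
        double bufferLengthMs = 300.0;
        int stride = 2; // BlittableValueType.StrideOf of a short[] element
        int bufferLengthSamples = (int)(bufferLengthMs * samplingRate * 0.001 / stride);
        Console.WriteLine(bufferLengthSamples);             // 6615
        Console.WriteLine((int)(bufferLengthMs / 2 + 0.5)); // timer ticks at half the buffer: 150 ms
    }
}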
Code Example #16
        public void Update(LayerModel layerModel, ModuleDataModel dataModel, bool isPreview = false)
        {
            layerModel.ApplyProperties(true);
            var newProperties = (AudioPropertiesModel)layerModel.Properties;

            if (_properties == null)
            {
                _properties = newProperties;
            }

            SubscribeToAudioChange();

            if (_audioCapture == null || newProperties.Device != _properties.Device ||
                newProperties.DeviceType != _properties.DeviceType)
            {
                var device = GetMmDevice();
                if (device != null)
                {
                    _audioCapture = _audioCaptureManager.GetAudioCapture(device, newProperties.DeviceType);
                }
            }

            _properties = newProperties;

            if (_audioCapture == null)
            {
                return;
            }

            _audioCapture.Pulse();

            var direction = ((AudioPropertiesModel)layerModel.Properties).Direction;

            int    currentLines;
            double currentHeight;

            if (direction == Direction.BottomToTop || direction == Direction.TopToBottom)
            {
                currentLines  = (int)layerModel.Width;
                currentHeight = layerModel.Height;
            }
            else
            {
                currentLines  = (int)layerModel.Height;
                currentHeight = layerModel.Width;
            }

            if (_lines != currentLines || _lineSpectrum == null)
            {
                _lines        = currentLines;
                _lineSpectrum = _audioCapture.GetLineSpectrum(_lines, ScalingStrategy.Decibel);
                if (_lineSpectrum == null)
                {
                    return;
                }
            }

            var newLineValues = _lineSpectrum?.GetLineValues(currentHeight);

            if (newLineValues != null)
            {
                _lineValues = newLineValues;
            }
        }
Code Example #17
File: Parrot.cs Project: jpespartero/OpenGlobe
        void StopRecording()
        {
            timer_GetSamples.Stop();

            if (audio_capture != null)
            {
                audio_capture.Stop();
                audio_capture.Dispose();
                audio_capture = null;
            }

            if (audio_context != null)
            {
                int r;
                AL.GetSource(src, ALGetSourcei.BuffersQueued, out r);
                ClearBuffers(r);

                AL.DeleteSource(src);

                audio_context.Dispose();
                audio_context = null;
            }
        }
Code Example #18
        public void Update(LayerModel layerModel, ModuleDataModel dataModel, bool isPreview = false)
        {
            layerModel.ApplyProperties(true);
            var newProperties = (AudioPropertiesModel)layerModel.Properties;

            if (_properties == null)
            {
                _properties = newProperties;
            }

            SubscribeToAudioChange();

            if (_audioCapture == null || newProperties.Device != _properties.Device || newProperties.DeviceType != _properties.DeviceType)
            {
                var device = GetMmDevice();
                if (device != null)
                {
                    _audioCapture = _audioCaptureManager.GetAudioCapture(device, newProperties.DeviceType);
                }
            }

            _properties = newProperties;

            if (_audioCapture == null)
            {
                return;
            }

            _audioCapture.Pulse();

            var direction = ((AudioPropertiesModel)layerModel.Properties).Direction;

            int    currentLines;
            double currentHeight;

            if (direction == Direction.BottomToTop || direction == Direction.TopToBottom)
            {
                currentLines  = (int)layerModel.Width;
                currentHeight = layerModel.Height;
            }
            else
            {
                currentLines  = (int)layerModel.Height;
                currentHeight = layerModel.Width;
            }

            // Get a new line spectrum if the lines changed, it is null or the layer hasn't rendered for a few frames
            if (_lines != currentLines || _lineSpectrum == null || DateTime.Now - _lastRender > TimeSpan.FromMilliseconds(100))
            {
                _lines        = currentLines;
                _lineSpectrum = _audioCapture.GetLineSpectrum(_lines, ScalingStrategy.Decibel);
            }

            var newLineValues = _audioCapture.GetLineSpectrum(_lines, ScalingStrategy.Decibel)?.GetLineValues(currentHeight);

            if (newLineValues != null)
            {
                _lineValues = newLineValues;
                _lastRender = DateTime.Now;
            }
        }
Code Example #19
File: VideoCaptureDebug.cs Project: savik-games/Cave
    void Update()
    {
        if (Input.GetKeyDown(startVideoKey) && (videoCaptureCtrl.status == VideoCaptureCtrlBase.StatusType.NOT_START || videoCaptureCtrl.status == VideoCaptureCtrlBase.StatusType.FINISH))
        {
            if (vc == null)
            {
                vc                  = TemplateGameManager.Instance.Camera.gameObject.AddComponent<VideoCapture>();
                vc.customPath       = false;
                vc.customPathFolder = "";

                vc.isDedicated = false;

                vc.frameSize        = frameSize;
                vc.offlineRender    = isOfflineRenderer;
                vc.encodeQuality    = encodeQuality;
                vc._antiAliasing    = antiAliasing;
                vc._targetFramerate = targetFramerate;

                videoCaptureCtrl.videoCaptures[0] = vc;
            }
            if (ac == null)
            {
                ac = TemplateGameManager.Instance.Camera.gameObject.AddComponent<AudioCapture>();

                videoCaptureCtrl.audioCapture = ac;
            }

            LeanTween.cancel(gameObject, false);
            recordingText.alpha = 1;
            recordingImg.color  = recordingImg.color.SetA(1);
            recordingImg.gameObject.SetActive(true);
            recordingText.gameObject.SetActive(true);
            recordingText.text = "Recoring";
            isProcessFinish    = false;
            videoCaptureCtrl.StartCapture();
        }
        else if (Input.GetKeyDown(pauseVideoKey) && videoCaptureCtrl != null && (videoCaptureCtrl.status == VideoCaptureCtrlBase.StatusType.STARTED || videoCaptureCtrl.status == VideoCaptureCtrlBase.StatusType.PAUSED))
        {
            videoCaptureCtrl.ToggleCapture();

            if (videoCaptureCtrl.status == VideoCaptureCtrlBase.StatusType.PAUSED)
            {
                recordingImg.gameObject.SetActive(false);
                recordingText.gameObject.SetActive(true);
                recordingText.text = "Paused";
            }
            else
            {
                recordingImg.gameObject.SetActive(true);
                recordingText.gameObject.SetActive(true);
                recordingText.text = "Recoring";
            }
        }
        else if (Input.GetKeyDown(stopVideoKey) && videoCaptureCtrl != null && (videoCaptureCtrl.status == VideoCaptureCtrlBase.StatusType.STARTED || videoCaptureCtrl.status == VideoCaptureCtrlBase.StatusType.PAUSED))
        {
            recordingImg.gameObject.SetActive(false);
            recordingText.gameObject.SetActive(true);
            recordingText.text = "Stopped. (Save in progress)";

            videoCaptureCtrl.StopCapture();
        }
        else if (!isProcessFinish && videoCaptureCtrl != null && videoCaptureCtrl.status == VideoCaptureCtrlBase.StatusType.FINISH)
        {
            isProcessFinish = true;

            recordingImg.gameObject.SetActive(false);
            recordingText.gameObject.SetActive(true);
            recordingText.text = $"Completed. (Press {openVideoFolderKey} to open)";

            LeanTween.delayedCall(gameObject, 2.0f, () => {
                LeanTween.value(gameObject, recordingText.alpha, 0.0f, 3.0f)
                .setOnUpdate((float a) => {
                    recordingText.alpha = a;
                    recordingImg.color  = recordingImg.color.SetA(a);
                })
                .setOnComplete(() => {
                    recordingImg.gameObject.SetActive(false);
                    recordingText.gameObject.SetActive(false);
                });
            });

            Debug.Log($"End saving video. {savePath}");
        }
        else if (Input.GetKeyDown(openVideoFolderKey))
        {
            string dir = PathConfig.SaveFolder;

            var file = Directory.EnumerateFiles(dir).FirstOrDefault();
            if (!string.IsNullOrEmpty(file))
            {
                ShowExplorer(Path.Combine(dir, file));
            }
            else
            {
                ShowExplorer(dir);
            }
        }
    }
Code Example #20
File: Program.cs Project: areilly711/psi
        /// <summary>
        /// Builds and runs a speech recognition pipeline using the .NET System.Speech recognizer and a set of fixed grammars.
        /// </summary>
        /// <param name="outputLogPath">The path under which to write log data.</param>
        /// <param name="inputLogPath">The path from which to read audio input data.</param>
        public static void RunSystemSpeech(string outputLogPath = null, string inputLogPath = null)
        {
            // Create the pipeline object.
            using (Pipeline pipeline = Pipeline.Create())
            {
                // Use either live audio from the microphone or audio from a previously saved log
                IProducer<AudioBuffer> audioInput = null;
                if (inputLogPath != null)
                {
                    // Open the MicrophoneAudio stream from the last saved log
                    var store = Store.Open(pipeline, Program.AppName, inputLogPath);
                    audioInput = store.OpenStream<AudioBuffer>($"{Program.AppName}.MicrophoneAudio");
                }
                else
                {
                    // Create the AudioCapture component to capture audio from the default device in 16 kHz 1-channel
                    // PCM format as required by both the voice activity detector and speech recognition components.
                    audioInput = new AudioCapture(pipeline, new AudioCaptureConfiguration()
                    {
                        OutputFormat = WaveFormat.Create16kHz1Channel16BitPcm()
                    });
                }

                // Create System.Speech recognizer component
                var recognizer = new SystemSpeechRecognizer(
                    pipeline,
                    new SystemSpeechRecognizerConfiguration()
                {
                    Language = "en-US",
                    Grammars = new GrammarInfo[]
                    {
                        new GrammarInfo()
                        {
                            Name = Program.AppName, FileName = "SampleGrammar.grxml"
                        }
                    }
                });

                // Subscribe the recognizer to the input audio
                audioInput.PipeTo(recognizer);

                // Partial and final speech recognition results are posted on the same stream. Here
                // we use Psi's Where() operator to filter out only the final recognition results.
                var finalResults = recognizer.Out.Where(result => result.IsFinal);

                // Print the final recognition result to the console.
                finalResults.Do(result =>
                {
                    Console.WriteLine($"{result.Text} (confidence: {result.Confidence})");
                });

                // Create a data store to log the data to if necessary. A data store is necessary
                // only if output logging is enabled.
                var dataStore = CreateDataStore(pipeline, outputLogPath);

                // For disk logging only
                if (dataStore != null)
                {
                    // Log the microphone audio and recognition results
                    audioInput.Write($"{Program.AppName}.MicrophoneAudio", dataStore);
                    finalResults.Write($"{Program.AppName}.FinalRecognitionResults", dataStore);
                }

                // Register an event handler to catch pipeline errors
                pipeline.PipelineCompletionEvent += PipelineCompletionEvent;

                // Run the pipeline
                pipeline.RunAsync();

                // The file SampleGrammar.grxml defines a grammar to transcribe numbers
                Console.WriteLine("Say any number between 0 and 100");

                Console.WriteLine("Press any key to exit...");
                Console.ReadKey(true);
            }
        }
Code Example #21
File: Sounds.cs Project: zbx1425/OpenBVE
        // --- initialization and deinitialization ---

        /// <summary>Initializes audio. A call to Deinitialize must be made when terminating the program.</summary>
        public void Initialize(HostInterface host, SoundRange range)
        {
            if (host.Platform == HostPlatform.MicrosoftWindows)
            {
                /*
                 *  If shipping an AnyCPU build and OpenALSoft / SDL, these are architecture specific PInvokes
                 *  Add the appropriate search path so this will work (common convention)
                 */
                string path = Path.GetDirectoryName(Assembly.GetEntryAssembly()?.Location);
                if (path != null)
                {
                    path = Path.Combine(path, IntPtr.Size == 4 ? "x86" : "x64");
                    bool ok = SetDllDirectory(path);
                    if (!ok)
                    {
                        throw new System.ComponentModel.Win32Exception();
                    }
                }
            }
            Deinitialize();

            CurrentHost = host;

            switch (range)
            {
            case SoundRange.Low:
                OuterRadiusFactorMinimum      = 2.0;
                OuterRadiusFactorMaximum      = 8.0;
                OuterRadiusFactorMaximumSpeed = 1.0;
                break;

            case SoundRange.Medium:
                OuterRadiusFactorMinimum      = 4.0;
                OuterRadiusFactorMaximum      = 16.0;
                OuterRadiusFactorMaximumSpeed = 2.0;
                break;

            case SoundRange.High:
                OuterRadiusFactorMinimum      = 6.0;
                OuterRadiusFactorMaximum      = 24.0;
                OuterRadiusFactorMaximumSpeed = 3.0;
                break;
            }
            OuterRadiusFactor      = Math.Sqrt(OuterRadiusFactorMinimum * OuterRadiusFactorMaximum);
            OuterRadiusFactorSpeed = 0.0;
            OpenAlDevice           = Alc.OpenDevice(null);
            string deviceName = Alc.GetString(OpenAlDevice, AlcGetString.DefaultDeviceSpecifier);

            if ((Environment.OSVersion.Platform == PlatformID.Win32S | Environment.OSVersion.Platform == PlatformID.Win32Windows | Environment.OSVersion.Platform == PlatformID.Win32NT) && deviceName == "Generic Software")
            {
                /*
                 * Creative OpenAL implementation on Windows seems to be limited to max 16 simultaneous sounds
                 * Now shipping OpenAL Soft, but detect this and don't glitch
                 * Further note that the current version of OpenAL Soft (1.20.0 at the time of writing) does not like OpenTK
                 * The version in use is 1.17.0 found here: https://openal-soft.org/openal-binaries/
                 */
                systemMaxSounds = 16;
            }
            try
            {
                OpenAlMic = new AudioCapture(AudioCapture.DefaultDevice, SamplingRate, ALFormat.Mono16, BufferSize);
            }
            catch
            {
                OpenAlMic = null;
            }

            if (OpenAlDevice != IntPtr.Zero)
            {
                OpenAlContext = Alc.CreateContext(OpenAlDevice, (int[])null);
                if (OpenAlContext != ContextHandle.Zero)
                {
                    Alc.MakeContextCurrent(OpenAlContext);
                    try
                    {
                        AL.SpeedOfSound(343.0f);
                    }
                    catch
                    {
                        MessageBox.Show(Translations.GetInterfaceString("errors_sound_openal_version"), Translations.GetInterfaceString("program_title"), MessageBoxButtons.OK, MessageBoxIcon.Hand);
                    }
                    AL.DistanceModel(ALDistanceModel.None);
                    return;
                }
                Alc.CloseDevice(OpenAlDevice);
                OpenAlDevice = IntPtr.Zero;
                if (OpenAlMic != null)
                {
                    OpenAlMic.Dispose();
                    OpenAlMic = null;
                }
                MessageBox.Show(Translations.GetInterfaceString("errors_sound_openal_context"), Translations.GetInterfaceString("program_title"), MessageBoxButtons.OK, MessageBoxIcon.Hand);
                return;
            }
            OpenAlContext = ContextHandle.Zero;
            MessageBox.Show(Translations.GetInterfaceString("errors_sound_openal_device"), Translations.GetInterfaceString("program_title"), MessageBoxButtons.OK, MessageBoxIcon.Hand);
        }
Code Example #22
File: OpenALRecorder.cs Project: Nirklav/TCPChat
        public void Dispose()
        {
            if (disposed)
                return;

            disposed = true;
            recorded = null;

            lock (syncObj)
            {
                if (captureTimer != null)
                    captureTimer.Dispose();

                captureTimer = null;

                if (capture != null)
                {
                    capture.Stop();
                    capture.Dispose();
                }

                capture = null;
            }
        }
Code Example #23
        public RecorderDiagnostic()
        {
            Trace.WriteLine("--- AudioCapture related errors ---");
            IsDeviceAvailable = false;

            try
            {
                r = new AudioCapture(AudioCapture.DefaultDevice, 16000, ALFormat.Mono16, 4096);
            }
            catch (AudioDeviceException ade)
            {
                Trace.WriteLine("AudioCapture Exception caught: " + ade.Message);
                return;
            }
            IsDeviceAvailable = true;
            DeviceName        = r.CurrentDevice;
            CheckRecorderError("Alc.CaptureOpenDevice");

            r.Start();
            CheckRecorderError("Alc.CaptureStart");
            Thread.Sleep(100);
            r.Stop();
            CheckRecorderError("Alc.CaptureStop");

            byte[] Buffer = new byte[8192];

            Thread.Sleep(10);  // Wait for a few samples to become available.
            int SamplesBefore = r.AvailableSamples;

            CheckRecorderError("Alc.GetInteger(...CaptureSamples...)");
            r.ReadSamples(Buffer, (SamplesBefore > 4096 ? 4096 : SamplesBefore));
            CheckRecorderError("Alc.CaptureSamples");

            int SamplesCaptured = SamplesBefore - r.AvailableSamples;

            uint ZeroCounter = 0;

            for (int i = 0; i < SamplesCaptured * 2; i++)
            {
                if (Buffer[i] == 0)
                {
                    ZeroCounter++;
                }
            }

            for (int i = 0; i < SamplesCaptured; i++)
            {
                short sample = BitConverter.ToInt16(Buffer, i * 2);
                if (sample > MaxSample)
                {
                    MaxSample = sample;
                }
                if (sample < MinSample)
                {
                    MinSample = sample;
                }
            }

            if (ZeroCounter < SamplesCaptured * 2 && SamplesCaptured > 0)
            {
                BufferContentsAllZero = false;
            }
            else
            {
                BufferContentsAllZero = true;
            }

            r.Dispose();
            CheckRecorderError("Alc.CaptureCloseDevice");

            // no playback test needed due to Parrot test app.

            /*
             * uint buf;
             * AL.GenBuffer(out buf);
             * AL.BufferData(buf, ALFormat.Mono16, BufferPtr, SamplesCaptured * 2, 16000);
             * uint src;
             * AL.GenSource(out src);
             * AL.BindBufferToSource(src, buf);
             * AL.Listener(ALListenerf.Gain, 16.0f);
             * AL.SourcePlay(src);
             * while (AL.GetSourceState(src) == ALSourceState.Playing)
             * {
             *  Thread.Sleep(0);
             * }
             * AL.SourceStop(src);
             *
             * AL.DeleteSource(ref src);
             * AL.DeleteBuffer(ref buf);
             */
        }
Code Example #24
File: Capture.cs Project: feliwir/SharpAudio
 void TestCapture(AudioCapture capture)
 {
     // wait for samples to accumulate, since capture runs asynchronously
     Thread.Sleep(1000);
 }
Code Example #25
File: Musique.cs Project: ortue/UWPLedMatrix
        /// <summary>
        /// VuMeter: renders a VU meter on the LED matrix from live microphone capture.
        /// </summary>
        public static void VuMeter()
        {
            // Initialize the led strip
            Util.Setup();
            int task = Util.StartTask();

            Couleur couleur      = Couleur.Get(0, 0, 8);
            Random  ra           = new Random();
            bool    whiteBgColor = true;

            if (ra.Next(1, 3) == 1)
            {
                couleur      = Couleur.Get(63, 63, 127);
                whiteBgColor = false;
            }

            double        max        = 0;
            CaractereList caracteres = new CaractereList(Util.Context.Largeur);

            byte[] audioBuffer = new byte[256];
            using AudioCapture audioCapture = new AudioCapture(AudioCapture.AvailableDevices[1], 22000, ALFormat.Mono8, audioBuffer.Length);
            audioCapture.Start();

            while (Util.TaskWork(task))
            {
                max -= 0.5;

                double[] fft = Capture(audioCapture, audioBuffer);

                double peak = fft.Max(a => Math.Abs(a));
                if (peak > max)
                {
                    max = peak;
                }

                if (whiteBgColor)
                {
                    foreach (Pixel pixel in Util.Context.Pixels)
                    {
                        pixel.Set(127, 127, 127);
                    }
                }

                caracteres.SetText("VU");
                Util.Context.Pixels.Print(caracteres.GetCaracteres(), 5, 12, couleur);

                Couleur couleurMax = couleur;

                // peak light
                if (max > 75)
                {
                    couleurMax = Couleur.Get(127, 0, 0);
                }

                Util.Context.Pixels.GetCoordonnee(17, 13).SetColor(couleurMax);
                Util.Context.Pixels.GetCoordonnee(18, 13).SetColor(couleurMax);
                Util.Context.Pixels.GetCoordonnee(17, 14).SetColor(couleurMax);
                Util.Context.Pixels.GetCoordonnee(18, 14).SetColor(couleurMax);

                // dial drawing
                Util.Context.Pixels.GetCoordonnee(1, 10).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(2, 10).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(3, 10).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(4, 9).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(5, 9).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(6, 9).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(7, 9).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(8, 8).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(9, 8).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(10, 8).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(11, 8).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(12, 9).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(13, 9).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(14, 9).SetColor(Couleur.Get(127, 0, 0));
                Util.Context.Pixels.GetCoordonnee(15, 9).SetColor(Couleur.Get(127, 0, 0));

                Util.Context.Pixels.GetCoordonnee(16, 10).SetColor(Couleur.Get(127, 0, 0));
                Util.Context.Pixels.GetCoordonnee(17, 10).SetColor(Couleur.Get(127, 0, 0));
                Util.Context.Pixels.GetCoordonnee(18, 10).SetColor(Couleur.Get(127, 0, 0));

                // Minus
                Util.Context.Pixels.GetCoordonnee(1, 4).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(2, 4).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(3, 4).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(2, 8).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(2, 9).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(6, 7).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(6, 8).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(9, 6).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(9, 7).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(11, 6).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(11, 7).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(13, 7).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(13, 8).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(15, 7).SetColor(Couleur.Get(127, 0, 0));
                Util.Context.Pixels.GetCoordonnee(15, 8).SetColor(Couleur.Get(127, 0, 0));

                Util.Context.Pixels.GetCoordonnee(17, 8).SetColor(Couleur.Get(127, 0, 0));
                Util.Context.Pixels.GetCoordonnee(17, 9).SetColor(Couleur.Get(127, 0, 0));

                // Plus
                Util.Context.Pixels.GetCoordonnee(17, 3).SetColor(Couleur.Get(127, 0, 0));
                Util.Context.Pixels.GetCoordonnee(16, 4).SetColor(Couleur.Get(127, 0, 0));
                Util.Context.Pixels.GetCoordonnee(17, 4).SetColor(Couleur.Get(127, 0, 0));
                Util.Context.Pixels.GetCoordonnee(18, 4).SetColor(Couleur.Get(127, 0, 0));
                Util.Context.Pixels.GetCoordonnee(17, 5).SetColor(Couleur.Get(127, 0, 0));

                // base
                Util.Context.Pixels.GetCoordonnee(8, 18).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(9, 18).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(10, 18).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(11, 18).SetColor(couleur);

                Util.Context.Pixels.GetCoordonnee(7, 19).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(8, 19).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(9, 19).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(10, 19).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(11, 19).SetColor(couleur);
                Util.Context.Pixels.GetCoordonnee(12, 19).SetColor(couleur);

                // needle
                for (int r = 2; r < 18; r++)
                {
                    Util.Context.Pixels.GetCoordonnee(GetCercleCoord(max + 315, r)).SetColor(couleur);
                }

                Util.SetLeds();
                Util.Context.Pixels.Reset();
            }
        }
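
VuMeter calls a Capture(audioCapture, audioBuffer) helper that this listing does not include. Below is a minimal sketch of what it plausibly does; only the signature and return shape are taken from the call site, and the body (centering the Mono8 samples and running a LomontFFT-style forward transform) is an assumption.

        private static double[] Capture(AudioCapture audioCapture, byte[] audioBuffer)
        {
            // Assumed implementation: pull fresh samples and return their spectrum.
            var fft = new double[audioBuffer.Length];

            audioCapture.ReadSamples(audioBuffer, audioBuffer.Length);

            // Center the unsigned 8-bit samples around zero before transforming.
            for (int i = 0; i < audioBuffer.Length; i++)
            {
                fft[i] = audioBuffer[i] - 128;
            }

            // In-place forward FFT (LomontFFT-style API).
            new LomontFFT().TableFFT(fft, true);
            return fft;
        }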
コード例 #26
0
        /// <summary>
        /// Builds and runs a speech recognition pipeline using the Azure speech recognizer. Requires a valid Cognitive Services
        /// subscription key. See https://docs.microsoft.com/en-us/azure/cognitive-services/cognitive-services-apis-create-account.
        /// </summary>
        /// <remarks>
        /// If you are getting a <see cref="System.InvalidOperationException"/> with the message 'AzureSpeechRecognizer returned
        /// OnConversationError with error code: LoginFailed. Original error text: Transport error', this most likely is due to
        /// an invalid subscription key. Please check your Azure portal at https://portal.azure.com and ensure that you have
        /// added a subscription to the Azure Speech API on your account.
        /// </remarks>
        public static void RunAzureSpeech()
        {
            // Get the device name to record audio from
            Console.Write("Enter Device Name (default: plughw:0,0): ");
            string deviceName = Console.ReadLine();

            // Fall back to the default device when no name is entered.
            if (string.IsNullOrWhiteSpace(deviceName))
            {
                deviceName = "plughw:0,0";
            }

            // Create the pipeline object.
            using (Pipeline pipeline = Pipeline.Create())
            {
                // Create the AudioSource component to capture audio from the default device in 16 kHz 1-channel
                // PCM format as required by both the voice activity detector and speech recognition components.
                IProducer <AudioBuffer> audioInput = new AudioCapture(pipeline, new AudioCaptureConfiguration()
                {
                    DeviceName = deviceName, Format = WaveFormat.Create16kHz1Channel16BitPcm()
                });

                // Perform voice activity detection using the voice activity detector component
                var vad = new SimpleVoiceActivityDetector(pipeline);
                audioInput.PipeTo(vad);

                // Create Azure speech recognizer component
                var recognizer = new AzureSpeechRecognizer(pipeline, new AzureSpeechRecognizerConfiguration()
                {
                    SubscriptionKey = Program.azureSubscriptionKey, Region = Program.azureRegion
                });

                // The input audio to the Azure speech recognizer needs to be annotated with a voice activity flag.
                // This can be constructed by using the Psi Join() operator to combine the audio and VAD streams.
                var annotatedAudio = audioInput.Join(vad);

                // Subscribe the recognizer to the annotated audio
                annotatedAudio.PipeTo(recognizer);

                // Partial and final speech recognition results are posted on the same stream. Here
                // we use Psi's Where() operator to filter out only the final recognition results.
                var finalResults = recognizer.Out.Where(result => result.IsFinal);

                // Print the recognized text of the final recognition result to the console.
                finalResults.Do(result => Console.WriteLine(result.Text));

                // Register an event handler to catch pipeline errors
                pipeline.PipelineExceptionNotHandled += Pipeline_PipelineException;

                // Register an event handler to be notified when the pipeline completes
                pipeline.PipelineCompleted += Pipeline_PipelineCompleted;

                // Run the pipeline
                pipeline.RunAsync();

                // Azure speech transcribes speech to text
                Console.WriteLine("Say anything");

                Console.WriteLine("Press any key to exit...");
                Console.ReadKey(true);
            }
        }
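
Because partial and final hypotheses arrive on the same output stream, the same Where()/Do() operators used above can also surface in-progress results. A small sketch, reusing only the operators already shown in this example:

                // Print in-progress (partial) hypotheses as they stream in.
                var partialResults = recognizer.Out.Where(result => !result.IsFinal);
                partialResults.Do(result => Console.WriteLine($"(partial) {result.Text}"));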
コード例 #27
0
        public Task <string> RecordToWav()
        {
            Directory.CreateDirectory($"./{_configuration.WavFilesFolderName}");

            var wavFile = $"./{_configuration.WavFilesFolderName}/{Guid.NewGuid()}.wav";

            var recorders = AudioCapture.AvailableDevices;

            for (int i = 0; i < recorders.Count; i++)
            {
                Console.WriteLine(recorders[i]);
            }
            Console.WriteLine("-----");

            const int samplingRate = 44100;     // Samples per second

            const ALFormat alFormat      = ALFormat.Mono16;
            const ushort   bitsPerSample = 16;  // Mono16 has 16 bits per sample
            const ushort   numChannels   = 1;   // Mono16 has 1 channel

            using (var f = File.OpenWrite(wavFile))
                using (var sw = new BinaryWriter(f))
                {
                    // Read This: http://soundfile.sapp.org/doc/WaveFormat/

                    sw.Write(new char[] { 'R', 'I', 'F', 'F' });
                    sw.Write(0); // will fill in later
                    sw.Write(new char[] { 'W', 'A', 'V', 'E' });
                    // "fmt " chunk (Google: WAVEFORMATEX structure)
                    sw.Write(new char[] { 'f', 'm', 't', ' ' });
                    sw.Write(16);                                               // chunkSize (in bytes)
                    sw.Write((ushort)1);                                        // wFormatTag (PCM = 1)
                    sw.Write(numChannels);                                      // wChannels
                    sw.Write(samplingRate);                                     // dwSamplesPerSec
                    sw.Write(samplingRate * numChannels * (bitsPerSample / 8)); // dwAvgBytesPerSec
                    sw.Write((ushort)(numChannels * (bitsPerSample / 8)));      // wBlockAlign
                    sw.Write(bitsPerSample);                                    // wBitsPerSample
                                                                                // "data" chunk
                    sw.Write(new char[] { 'd', 'a', 't', 'a' });
                    sw.Write(0);                                                // will fill in later

                    // 10 seconds of data; overblown, but it gets the job done.
                    const int bufferLength = samplingRate * 10;
                    int       samplesWrote = 0;

                    Console.WriteLine($"Recording from: {recorders[0]}");

                    using (var audioCapture = new AudioCapture(
                               recorders[0], samplingRate, alFormat, bufferLength))
                    {
                        var buffer = new short[bufferLength];

                        audioCapture.Start();
                        for (int i = 0; i < _configuration.SecondsToRecord; ++i)
                        {
                            Thread.Sleep(1000); // give it some time to collect samples

                            // Clamp to the buffer length to avoid reading past the end of 'buffer'.
                            var samplesAvailable = Math.Min(audioCapture.AvailableSamples, bufferLength);
                            audioCapture.ReadSamples(buffer, samplesAvailable);
                            for (var x = 0; x < samplesAvailable; ++x)
                            {
                                sw.Write(buffer[x]);
                            }

                            samplesWrote += samplesAvailable;

                            Console.WriteLine($"Wrote {samplesAvailable}/{samplesWrote} samples...");
                        }
                        audioCapture.Stop();
                    }

                    sw.Seek(4, SeekOrigin.Begin);  // seek to overall size
                    sw.Write(36 + samplesWrote * (bitsPerSample / 8) * numChannels);
                    sw.Seek(40, SeekOrigin.Begin); // seek to data size position
                    sw.Write(samplesWrote * (bitsPerSample / 8) * numChannels);
                }

            return Task.FromResult(wavFile);
        }
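
The two back-patched size fields follow from the standard 44-byte RIFF/WAVE header layout (see the WaveFormat link in the code). A quick check of the arithmetic for this Mono16 stream:

        int dataSize = samplesWrote * (bitsPerSample / 8) * numChannels; // PCM payload bytes (2 per sample here)
        int riffSize = 4 + (8 + 16) + (8 + dataSize);                    // "WAVE" + "fmt " chunk + "data" chunk = 36 + dataSize
        // The total file size is riffSize + 8 ("RIFF" tag plus its own size field) = 44 + dataSize bytes.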