/// <summary>
/// Wraps <paramref name="baseStream"/> in a buffered stream (one second of audio:
/// sample rate * bytes per frame) and creates a PortAudio output pump on
/// <paramref name="dev"/> matching the stream's sample format.
/// </summary>
/// <param name="baseStream">Source audio stream to play.</param>
/// <param name="dev">Output device the pump writes to.</param>
/// <exception cref="ArgumentException">
/// Thrown when the stream's number format is not floating-point, signed or unsigned.
/// </exception>
public PortAudioPlaybackHandle(AudioStream baseStream, PortAudioDevice dev)
{
    // Buffer one second of audio so the device callback never starves.
    BaseStream = new BufferedAudioStream(baseStream, baseStream.Format.SampleRate * baseStream.Format.BytesPerFrame);

    // Map the stream's number format onto PortAudio's equivalent.
    var numberFormat = baseStream.Format.DataFormat.NumberFormat switch
    {
        NumberFormat.FloatingPoint => PortAudioSampleFormat.PortAudioNumberFormat.FloatingPoint,
        NumberFormat.Signed => PortAudioSampleFormat.PortAudioNumberFormat.Signed,
        NumberFormat.Unsigned => PortAudioSampleFormat.PortAudioNumberFormat.Unsigned,
        // Previously threw a parameterless ArgumentException with no diagnostic.
        _ => throw new ArgumentException("Unsupported sample number format.", nameof(baseStream)),
    };

    _pump = new PortAudioDevicePump(dev, baseStream.Format.Channels,
                                    new PortAudioSampleFormat(numberFormat, baseStream.Format.DataFormat.BytesPerSample),
                                    dev.DefaultHighOutputLatency, baseStream.Format.SampleRate, DataCallback);

    _pump.StreamFinished += PumpOnStreamFinished;
}
// --- Example 2 ---
        /// <summary>
        /// Plays raw PCM audio from <paramref name="audioStream"/> on the configured
        /// output device, serialized through <c>semaphore</c> so only one playback runs
        /// at a time. Raises <c>Playing</c> with <c>true</c> before playback starts and
        /// <c>false</c> once it ends (including on cancellation).
        /// </summary>
        /// <param name="audioStream">Stream of PCM data consumed by the pump's read callback.</param>
        /// <param name="token">Cancels waiting for the device and for stream completion.</param>
        public async Task PlayAudioStream(Stream audioStream, CancellationToken token = default)
        {
            // Track whether WE acquired the semaphore. The previous code released based on
            // "CurrentCount == 0", which can release a slot held by ANOTHER caller when this
            // call is cancelled while still waiting in WaitAsync.
            var acquired = false;
            try
            {
                Playing?.Invoke(this, true);
                await semaphore.WaitAsync(token);
                acquired = true;

                using var pump       = new PortAudioDevicePump(OutputDevice, 2, outputFormat, OutputDevice.DefaultLowOutputLatency, 48000, ReadCallback);
                using var handle     = new ManualResetEventSlim(false);
                pump.StreamFinished += FinishedHandler;
                pump.Start();
                // Honor cancellation while waiting: previously handle.Wait() ignored the token,
                // so a cancelled caller stayed blocked until the stream finished on its own.
                handle.Wait(token);
                pump.StreamFinished -= FinishedHandler;

                int ReadCallback(byte[] buffer, int offset, int count) => audioStream.Read(buffer, offset, count);
                void FinishedHandler(object sender, EventArgs eventArgs) => handle.Set();
            }
            catch (OperationCanceledException)
            {
                // Cancellation is an expected way to stop playback; not an error.
            }
            finally
            {
                if (acquired)
                {
                    semaphore.Release();
                }
                Playing?.Invoke(this, false);
            }
        }
// --- Example 3 ---
        /// <summary>
        /// Starts the media capturing/source devices: resolves the platform's PortAudio
        /// host API (ALSA on Unix, DirectSound elsewhere), builds the output and input
        /// device pumps from its default devices, and starts both pumps. No-op when
        /// already started.
        /// </summary>
        public override async Task Start()
        {
            if (!_isStarted)
            {
                await base.Start();

                // ALSA on Unix-like systems, DirectSound otherwise (Windows).
                var apiType = Environment.OSVersion.Platform == PlatformID.Unix
                    ? PortAudioHostApiType.Alsa
                    : PortAudioHostApiType.DirectSound;

                // Resolve the host API once and reuse it for both devices; the original
                // enumerated SupportedHostApis twice for the same lookup.
                var hostApi = PortAudioHostApi.SupportedHostApis.First(x => x.HostApiType == apiType);

                _portAudioOutputDevice = hostApi.DefaultOutputDevice;

                _outputDevicePump = new PortAudioDevicePump(_portAudioOutputDevice, AUDIO_CHANNEL_COUNT,
                                                            new PortAudioSampleFormat(PortAudioSampleFormat.PortAudioNumberFormat.Signed, AUDIO_BYTES_PER_SAMPLE),
                                                            TimeSpan.FromMilliseconds(SAMPLING_PERIOD_MILLISECONDS), AUDIO_SAMPLING_RATE, ReadAudioDataCalback);

                _portAudioInputDevice = hostApi.DefaultInputDevice;

                _inputDevicePump = new PortAudioDevicePump(_portAudioInputDevice, AUDIO_CHANNEL_COUNT,
                                                           new PortAudioSampleFormat(PortAudioSampleFormat.PortAudioNumberFormat.Signed, AUDIO_BYTES_PER_SAMPLE),
                                                           TimeSpan.FromMilliseconds(SAMPLING_PERIOD_MILLISECONDS), AUDIO_SAMPLING_RATE, WriteDataCallback);

                _outputDevicePump.Start();
                _inputDevicePump.Start();
            }
        }
// --- Example 4 ---
#pragma warning restore CS0067

        /// <summary>
        /// Creates a new basic RTP session that captures and renders audio to/from the
        /// default system devices, using the platform's PortAudio host API (ALSA on
        /// Unix, DirectSound elsewhere).
        /// </summary>
        /// <param name="audioEncoder">Encoder providing the supported audio formats.</param>
        public PortAudioEndPoint(IAudioEncoder audioEncoder)
        {
            _audioEncoder       = audioEncoder;
            _audioFormatManager = new MediaFormatManager <AudioFormat>(_audioEncoder.SupportedFormats);

            // ALSA on Unix-like systems, DirectSound otherwise (Windows).
            var apiType = Environment.OSVersion.Platform == PlatformID.Unix
                ? PortAudioHostApiType.Alsa
                : PortAudioHostApiType.DirectSound;

            // Resolve the host API once and reuse it for both devices; the original
            // enumerated SupportedHostApis twice for the same lookup.
            var hostApi = PortAudioHostApi.SupportedHostApis.First(x => x.HostApiType == apiType);

            _portAudioOutputDevice = hostApi.DefaultOutputDevice;

            _outputDevicePump = new PortAudioDevicePump(_portAudioOutputDevice, AUDIO_CHANNEL_COUNT,
                                                        new PortAudioSampleFormat(PortAudioSampleFormat.PortAudioNumberFormat.Signed, AUDIO_BYTES_PER_SAMPLE),
                                                        TimeSpan.FromMilliseconds(SAMPLING_PERIOD_MILLISECONDS), AUDIO_SAMPLING_RATE, ReadAudioDataCalback);

            _portAudioInputDevice = hostApi.DefaultInputDevice;

            _inputDevicePump = new PortAudioDevicePump(_portAudioInputDevice, AUDIO_CHANNEL_COUNT,
                                                       new PortAudioSampleFormat(PortAudioSampleFormat.PortAudioNumberFormat.Signed, AUDIO_BYTES_PER_SAMPLE),
                                                       TimeSpan.FromMilliseconds(SAMPLING_PERIOD_MILLISECONDS), AUDIO_SAMPLING_RATE, WriteDataCallback);
        }
// --- Example 5 ---
        /// <summary>
        /// Interactive console loop: picks a PortAudio host API and output device, asks
        /// for a language code and voice gender, then repeatedly reads a line of text,
        /// synthesizes it with Google Cloud TTS (Ogg/Opus) and plays the decoded PCM on
        /// the chosen device. Ctrl+C cancels the loop.
        /// </summary>
        async Task Run()
        {
            using (var cts = new CancellationTokenSource())
            {
                Console.InputEncoding   = Encoding.Unicode;
                Console.OutputEncoding  = Encoding.Unicode;
                // Turn Ctrl+C into a cooperative cancellation instead of process kill.
                Console.CancelKeyPress += (s, e) =>
                {
                    e.Cancel = true;
                    cts.Cancel();
                };

                try
                {
                    using var input  = new StreamReader(Console.OpenStandardInput(), Encoding.Unicode);
                    using var api    = PickBestApi(input, PortAudioHostApi.SupportedHostApis, cts.Token);
                    using var device = GetOutputDevice(input, api.Devices, cts.Token);

                    Console.Write("Saisir le code de langue voulu (défaut : fr-FR): ");
                    if (!ReadLine(input, cts.Token, out string language))
                    {
                        return;
                    }
                    if (string.IsNullOrWhiteSpace(language))
                    {
                        language = "fr-FR";
                    }

                    Console.WriteLine("Choisir le type de voix (femme/homme):");
                    Console.WriteLine("[0] - Non spécifié");
                    Console.WriteLine("[1] - Homme");
                    Console.WriteLine("[2] - Femme");
                    // BUGFIX: this option was printed as "[2]", colliding with "Femme".
                    // SsmlVoiceGender.Neutral has value 3, so the correct label is "[3]".
                    Console.WriteLine("[3] - Neutre");

                    if (!ReadLine(input, cts.Token, out string voiceGenderStr) || !int.TryParse(voiceGenderStr, out int voiceGenderInt))
                    {
                        return;
                    }

                    var voiceGender = (SsmlVoiceGender)voiceGenderInt;

                    Console.WriteLine($"Prêt à parler sur {device.Name}");

                    while (!cts.IsCancellationRequested)
                    {
                        Console.Write("->");

                        // Handle user input; EOF or cancellation ends the loop.
                        if (!ReadLine(input, cts.Token, out string line) || line is null)
                        {
                            return;
                        }

                        Console.WriteLine("Récupération du son pour le texte : " + line);
                        Console.WriteLine("Son récupéré, lecture...");

                        using var audioStream = await TextToAudioStreamAsync(line, language, voiceGender, cts.Token);

                        // 48 kHz stereo signed 16-bit PCM, matching the Opus decode below.
                        using var pump = new PortAudioDevicePump(
                                  device,
                                  2,
                                  new PortAudioSampleFormat(PortAudioSampleFormat.PortAudioNumberFormat.Signed, 2),
                                  device.DefaultLowOutputLatency,
                                  48000,
                                  (buffer, offset, count) => audioStream.Read(buffer, offset, count));

                        // Block until the pump reports the stream finished.
                        using var handle     = new ManualResetEventSlim(false);
                        pump.StreamFinished += FinishedHandler;
                        pump.Start();
                        handle.Wait();
                        pump.StreamFinished -= FinishedHandler;

                        void FinishedHandler(object sender, EventArgs eventArgs) => handle.Set();

                        Console.WriteLine("Lecture terminée");
                    }
                }
                catch (OperationCanceledException)
                {
                    // Expected on Ctrl+C; exit quietly.
                }
            }

            // Synthesizes <paramref name="text"/> via Google Cloud TTS as Ogg/Opus, then
            // decodes it to a seekable 48 kHz stereo 16-bit little-endian PCM MemoryStream
            // positioned at 0. Caller owns (and must dispose) the returned stream.
            async Task <Stream> TextToAudioStreamAsync(string text, string language, SsmlVoiceGender voiceGender, CancellationToken token)
            {
                var request = new SynthesizeSpeechRequest
                {
                    AudioConfig = new AudioConfig
                    {
                        AudioEncoding = AudioEncoding.OggOpus,
                    },
                    Input = new SynthesisInput
                    {
                        Text = text
                    },
                    Voice = new VoiceSelectionParams
                    {
                        LanguageCode = language,
                        SsmlGender   = voiceGender,
                    },
                };

                var response = await ttsClient.SynthesizeSpeechAsync(request, token);

                using (var opusStream = new MemoryStream())
                {
                    response.AudioContent.WriteTo(opusStream);
                    opusStream.Position = 0;

                    // Decode the whole Ogg/Opus payload to interleaved 16-bit PCM.
                    var opusDecoder = new OpusDecoder(48000, 2);
                    var oggIn       = new OpusOggReadStream(opusDecoder, opusStream);

                    var pcmStream = new MemoryStream();
                    while (oggIn.HasNextPacket)
                    {
                        short[] packet = oggIn.DecodeNextPacket();
                        if (packet != null)
                        {
                            for (int i = 0; i < packet.Length; i++)
                            {
                                // BitConverter emits platform endianness (little-endian on
                                // all supported targets), matching the pump's signed format.
                                var bytes = BitConverter.GetBytes(packet[i]);
                                pcmStream.Write(bytes, 0, bytes.Length);
                            }
                        }
                    }

                    pcmStream.Position = 0;
                    return(pcmStream);
                }
            }
        }