Example #1
        /// <summary>
        /// Sends audio samples read from a file.
        /// </summary>
        private void SendMusicSample(object state)
        {
            if (!_streamSendInProgress)
            {
                lock (_audioStreamTimer)
                {
                    int    sampleRate = MUSIC_FILE_SAMPLE_RATE;
                    uint   sampleSize = (uint)(sampleRate / 1000 * AUDIO_SAMPLE_PERIOD_MILLISECONDS);
                    byte[] sample     = new byte[sampleSize * 2];   // 16 bit PCM, so 2 bytes per sample.

                    int bytesRead = _audioStreamReader.BaseStream.Read(sample, 0, sample.Length);

                    if (bytesRead > 0)
                    {
                        byte[] encodedSample = _audioEncoder.EncodeAudio(sample, _sendingFormat, AudioSamplingRatesEnum.Rate8KHz);
                        OnAudioSourceEncodedSample?.Invoke((uint)encodedSample.Length, encodedSample);
                    }

                    if (bytesRead == 0 || _audioStreamReader.EndOfStream)
                    {
                        _audioStreamReader.BaseStream.Position = 0;
                    }
                }
            }
        }
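For context, a handler like this is normally driven by a periodic timer. A minimal sketch of the surrounding setup, assuming System.Threading.Timer and the constant and field names used above (the values shown are illustrative, and the music file is assumed to contain raw 16 bit PCM):

        // Illustrative setup only: the constants and the raw PCM file format are assumptions.
        private const int MUSIC_FILE_SAMPLE_RATE = 8000;
        private const int AUDIO_SAMPLE_PERIOD_MILLISECONDS = 20;

        private StreamReader _audioStreamReader;
        private Timer _audioStreamTimer;

        private void StartMusicStream(string musicFilePath)
        {
            _audioStreamReader = new StreamReader(musicFilePath);

            // Invoke SendMusicSample once every sample period.
            _audioStreamTimer = new Timer(SendMusicSample, null, 0, AUDIO_SAMPLE_PERIOD_MILLISECONDS);
        }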
Example #2
        private unsafe void AudioDecoder_OnAudioFrame(ref AVFrame avFrame)
        {
            if (OnAudioSourceEncodedSample == null)
            {
                return;
            }

            // Avoid recreating the buffer when the frame size hasn't changed.
            if (_currentNbSamples != avFrame.nb_samples)
            {
                bufferSize        = ffmpeg.av_samples_get_buffer_size(null, avFrame.channels, avFrame.nb_samples, AVSampleFormat.AV_SAMPLE_FMT_S16, 1);
                buffer            = new byte[bufferSize];
                _currentNbSamples = avFrame.nb_samples;
            }

            // Convert audio
            int dstSampleCount;

            fixed (byte* pBuffer = buffer)
            {
                dstSampleCount = ffmpeg.swr_convert(_audioDecoder._swrContext, &pBuffer, bufferSize, avFrame.extended_data, avFrame.nb_samples).ThrowExceptionIfError();
            }

            Console.WriteLine($"nb_samples:{avFrame.nb_samples} - bufferSize:{bufferSize} - dstSampleCount:{dstSampleCount}");

            if (dstSampleCount > 0)
            {
                // FFmpeg AV_SAMPLE_FMT_S16 stores the bytes in the correct endianness for the
                // underlying platform, so the buffer can be block copied straight into a short array.
                short[] pcm = new short[dstSampleCount];
                Buffer.BlockCopy(buffer, 0, pcm, 0, dstSampleCount * 2);
                var encodedSample = _audioEncoder.EncodeAudio(pcm, _audioFormatManager.SelectedFormat);

                OnAudioSourceEncodedSample?.Invoke((uint)encodedSample.Length, encodedSample);
            }
        }
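The `ThrowExceptionIfError` call above is an extension method from the FFmpeg.AutoGen sample helpers rather than part of FFmpeg itself. A minimal sketch of what it typically looks like (the exact implementation used by this codebase may differ):

        // Modelled on the FFmpeg.AutoGen sample helper; shown here as an assumption.
        internal static class FFmpegErrorExtensions
        {
            public static unsafe int ThrowExceptionIfError(this int error)
            {
                if (error < 0)
                {
                    const int bufferSize = 1024;
                    byte* buffer = stackalloc byte[bufferSize];
                    ffmpeg.av_strerror(error, buffer, (ulong)bufferSize);
                    string message = System.Runtime.InteropServices.Marshal.PtrToStringAnsi((IntPtr)buffer);
                    throw new ApplicationException(message);
                }

                return error;
            }
        }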
Example #3
 /// <summary>
 /// Event handler for audio sample being supplied by local capture device.
 /// </summary>
 private void LocalAudioSampleAvailable(object sender, WaveInEventArgs args)
 {
     //WaveBuffer wavBuffer = new WaveBuffer(args.Buffer.Take(args.BytesRecorded).ToArray());
     //byte[] encodedSample = _audioEncoder.EncodeAudio(wavBuffer.ShortBuffer, _selectedSourceFormat, AudioSourceSamplingRate);
     byte[] encodedSample = _audioEncoder.EncodeAudio(args.Buffer.Take(args.BytesRecorded).ToArray(), _selectedSourceFormat, AudioSourceSamplingRate);
     OnAudioSourceEncodedSample?.Invoke((uint)encodedSample.Length, encodedSample);
 }
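This handler is wired to NAudio's `WaveInEvent.DataAvailable` event. A minimal wiring sketch, assuming 8 kHz 16 bit mono capture from the default device (Example #8 below shows a fuller setup):

 // Illustrative wiring only; the device index and wave format are assumptions.
 var waveInEvent = new WaveInEvent
 {
     BufferMilliseconds = 20,
     DeviceNumber = 0,
     WaveFormat = new WaveFormat(8000, 16, 1), // 8 kHz, 16 bit, mono.
 };
 waveInEvent.DataAvailable += LocalAudioSampleAvailable;
 waveInEvent.StartRecording();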
Example #4
        /// <summary>
        /// Sends audio samples read from a file containing 16 bit PCM samples.
        /// </summary>
        private void SendStreamSample(object state)
        {
            lock (_streamSourceTimer)
            {
                if (_streamSourceReader?.BaseStream?.CanRead == true)
                {
                    int     sampleRate  = (_streamSourceSampleRate == AudioSamplingRatesEnum.Rate8KHz) ? 8000 : 16000;
                    int     sampleSize  = sampleRate / 1000 * AUDIO_SAMPLE_PERIOD_MILLISECONDS;
                    short[] sample      = new short[sampleSize];
                    int     samplesRead = 0;

                    for (int i = 0; i < sampleSize && _streamSourceReader.BaseStream.Position < _streamSourceReader.BaseStream.Length; i++)
                    {
                        sample[samplesRead++] = _streamSourceReader.ReadInt16();
                    }

                    if (samplesRead > 0)
                    {
                        //Log.LogDebug($"Audio stream reader bytes read {samplesRead}, sample rate{_streamSourceSampleRate}, sending codec {_sendingFormat}.");

                        if (samplesRead < sample.Length)
                        {
                            // If the sending codec supports it fill up any short samples with silence.
                            if (_sendingFormat == AudioCodecsEnum.PCMU ||
                                _sendingFormat == AudioCodecsEnum.PCMA)
                            {
                                SetSilenceBuffer(sample, samplesRead);
                            }
                        }

                        //OnAudioSourceRawSample?.Invoke(_streamSourceSampleRate, AUDIO_SAMPLE_PERIOD_MILLISECONDS, sample);
                        byte[] encodedSample = _audioEncoder.EncodeAudio(sample, _sendingFormat, _streamSourceSampleRate);
                        OnAudioSourceEncodedSample?.Invoke((uint)encodedSample.Length, encodedSample);

                        if (_streamSourceReader.BaseStream.Position >= _streamSourceReader.BaseStream.Length)
                        {
                            Log.LogDebug("Send audio from stream completed.");
                            StopSendFromAudioStream();
                        }
                    }
                    else
                    {
                        Log.LogWarning("Failed to read from audio stream source.");
                        StopSendFromAudioStream();
                    }
                }
                else
                {
                    Log.LogWarning("Failed to read from audio stream source, stream null or closed.");
                    StopSendFromAudioStream();
                }
            }
        }
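A hedged usage sketch: this example appears to come from SIPSorcery's `AudioExtrasSource`, where a raw PCM stream is handed to the source and a timer then drives `SendStreamSample`; treat the method and type names as assumptions if this code lives elsewhere:

        // Assumed caller, based on the SIPSorcery AudioExtrasSource API.
        using (var pcmStream = new FileStream("speech.raw", FileMode.Open, FileAccess.Read))
        {
            // Streams raw 16 bit PCM samples until the end of the stream is reached.
            await audioSource.SendAudioFromStream(pcmStream, AudioSamplingRatesEnum.Rate16KHz);
        }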
Example #5
        /// <summary>
        /// Event handler for audio sample being supplied by local capture device.
        /// </summary>
        private void LocalAudioSampleAvailable(object sender, WaveInEventArgs args)
        {
            // Note NAudio.Wave.WaveBuffer.ShortBuffer does not take into account little endian.
            // https://github.com/naudio/NAudio/blob/master/NAudio/Wave/WaveOutputs/WaveBuffer.cs
            // WaveBuffer wavBuffer = new WaveBuffer(args.Buffer.Take(args.BytesRecorded).ToArray());
            // byte[] encodedSample = _audioEncoder.EncodeAudio(wavBuffer.ShortBuffer, _audioFormatManager.SelectedFormat);

            byte[] buffer = args.Buffer.Take(args.BytesRecorded).ToArray();
            short[] pcm = new short[buffer.Length / 2];
            Buffer.BlockCopy(buffer, 0, pcm, 0, pcm.Length * 2);
            byte[] encodedSample = _audioEncoder.EncodeAudio(pcm, _audioFormatManager.SelectedFormat);
            OnAudioSourceEncodedSample?.Invoke((uint)encodedSample.Length, encodedSample);
        }
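If the capture byte order ever needs to be explicit rather than taken from the host, `System.Buffers.Binary.BinaryPrimitives` offers an alternative to the block copy above. A sketch, assuming the same `buffer` variable:

            // Endian-explicit variant: always reads each 16 bit sample as little endian,
            // regardless of the host architecture. Requires using System.Buffers.Binary.
            short[] pcm = new short[buffer.Length / 2];
            for (int i = 0; i < pcm.Length; i++)
            {
                pcm[i] = BinaryPrimitives.ReadInt16LittleEndian(buffer.AsSpan(i * 2, 2));
            }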
Example #6
        void WriteDataCallback(byte[] buffer, int offset, int count)
        {
            var copy = new byte[count];

            Buffer.BlockCopy(buffer, offset, copy, 0, count);

            // Assemble each 16 bit sample high byte first (big endian byte order).
            short[] pcm = new short[count / 2];
            for (int i = 0; i < pcm.Length; i++)
            {
                pcm[i] = (short)(copy[i * 2] << 8 | copy[i * 2 + 1]);
            }

            byte[] encodedSample = _audioEncoder.EncodeAudio(pcm, _audioFormatManager.SelectedFormat);

            OnAudioSourceEncodedSample?.Invoke((uint)encodedSample.Length, encodedSample);
        }
Example #7
        private void Update()
        {
#if UNITY_ANDROID && !UNITY_EDITOR
            // Wait for microphone permissions before processing any audio
            if (!microphoneAuthorized)
            {
                microphoneAuthorized = Permission.HasUserAuthorizedPermission(Permission.Microphone);

                if (!microphoneAuthorized)
                {
                    return;
                }
            }
#endif

            // Run queued tasks synchronously
            while (true)
            {
                Task task = null;
                lock (taskLock)
                {
                    if (mainThreadTasks.Count == 0)
                    {
                        break;
                    }

                    task = mainThreadTasks.Dequeue();
                }

                task.RunSynchronously();
            }

            // Send samples if we have them
            while (microphoneListener.Advance())
            {
                // TODO pool buffers to avoid runtime GC
                var pcmSamples = new short[microphoneListener.samples.Length];
                for (int i = 0; i < microphoneListener.samples.Length; i++)
                {
                    var floatSample = microphoneListener.samples[i];
                    floatSample   = Mathf.Clamp(floatSample * gain, -.999f, .999f);
                    pcmSamples[i] = (short)(floatSample * short.MaxValue);
                }

                var encoded = audioEncoder.Encode(pcmSamples);
                OnAudioSourceEncodedSample?.Invoke((uint)pcmSamples.Length, encoded);
            }
        }
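The drain loop above implies a producer side. A minimal sketch of the enqueue helper, assuming the `taskLock` and `mainThreadTasks` fields it reads (everything else is illustrative):

        // Hypothetical producer: lets background threads schedule work that must run
        // on Unity's main thread, where Update() executes it synchronously.
        private readonly object taskLock = new object();
        private readonly Queue<Task> mainThreadTasks = new Queue<Task>();

        private Task RunOnMainThread(Action action)
        {
            var task = new Task(action);

            lock (taskLock)
            {
                mainThreadTasks.Enqueue(task);
            }

            return task; // Callers can await completion of the queued work.
        }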
Example #8
        /// <summary>
        /// Creates a new basic RTP session that captures and renders audio to/from the default system devices.
        /// </summary>
        /// <param name="audioEncoder">A 3rd party audio encoder that can be used to encode and decode
        /// specific audio codecs.</param>
        /// <param name="externalSource">Optional. An external source to use in combination with the source
        /// provided by this end point. The application will need to signal which source is active.</param>
        /// <param name="disableSource">Set to true to disable the use of the audio source functionality, i.e.
        /// don't capture input from the microphone.</param>
        /// <param name="disableSink">Set to true to disable the use of the audio sink functionality, i.e.
        /// don't playback audio to the speaker.</param>
        public WindowsAudioEndPoint(IAudioEncoder audioEncoder, IAudioSource externalSource = null, bool disableSource = false, bool disableSink = false)
        {
            _audioEncoder = audioEncoder;

            _disableSource = disableSource;
            _disableSink   = disableSink;

            if (externalSource != null)
            {
                _externalSource = externalSource;

                // Pass the encoded audio sample to the RTP transport. If this class ever supported additional codecs,
                // such as Opus, the idea would be to change to receive raw samples from the external source and then
                // do the custom encoding before handing over to the transport.
                _externalSource.OnAudioSourceEncodedSample += (audioFormat, durationRtpUnits, sample)
                                                              => OnAudioSourceEncodedSample?.Invoke(audioFormat, durationRtpUnits, sample);
            }

            if (!_disableSink)
            {
                // Render device.
                _waveOutEvent = new WaveOutEvent();
                _waveOutEvent.DeviceNumber = AUDIO_OUTPUTDEVICE_INDEX;
                _waveProvider = new BufferedWaveProvider(_waveFormat);
                _waveProvider.DiscardOnBufferOverflow = true;
                _waveOutEvent.Init(_waveProvider);
            }

            if (!_disableSource)
            {
                if (WaveInEvent.DeviceCount > 0)
                {
                    _waveInEvent = new WaveInEvent();
                    _waveInEvent.BufferMilliseconds = AUDIO_SAMPLE_PERIOD_MILLISECONDS;
                    _waveInEvent.NumberOfBuffers    = INPUT_BUFFERS;
                    _waveInEvent.DeviceNumber       = AUDIO_INPUTDEVICE_INDEX;
                    _waveInEvent.WaveFormat         = _waveFormat;
                    _waveInEvent.DataAvailable     += LocalAudioSampleAvailable;
                }
                else
                {
                    throw new ApplicationException("No audio capture devices are available.");
                }
            }
        }
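A typical usage sketch, following the SIPSorcery getting-started pattern this constructor belongs to; `AudioEncoder` and `VoIPMediaSession` come from the wider SIPSorcery libraries and are assumptions here:

        // Illustrative only: wires the Windows audio end point into a VoIP media session.
        var windowsAudio = new WindowsAudioEndPoint(new AudioEncoder());
        var voipSession = new VoIPMediaSession(windowsAudio.ToMediaEndPoints());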
Example #9
        /// <summary>
        /// Sends a sample from a signal generator generated waveform.
        /// </summary>
        private void SendSignalGeneratorSample(object state)
        {
            if (!_streamSendInProgress)
            {
                lock (_audioStreamTimer)
                {
                    int  sourceSampleRate = _sourceAudioSampleRate == AudioSamplingRatesEnum.Rate8KHz ? 8000 : 16000;
                    int  inputBufferSize  = sourceSampleRate / 1000 * AUDIO_SAMPLE_PERIOD_MILLISECONDS;
                    uint outputBufferSize = (uint)(_sendingAudioRtpRate / 1000 * AUDIO_SAMPLE_PERIOD_MILLISECONDS);

                    // Get the signal generator to generate the samples and then convert from
                    // signed linear to PCM.
                    float[] linear = new float[inputBufferSize];
                    _signalGenerator.Read(linear, 0, inputBufferSize);
                    short[] pcm = linear.Select(x => (short)(x * LINEAR_MAXIMUM)).ToArray();

                    byte[] encodedSample = _audioEncoder.EncodeAudio(pcm, _sendingFormat, _sourceAudioSampleRate);
                    OnAudioSourceEncodedSample?.Invoke(outputBufferSize, encodedSample);
                }
            }
        }
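For reference, `_signalGenerator` is an NAudio-style signal generator. A plausible setup sketch (the constructor arguments and properties follow NAudio's `SignalGenerator`; the actual source may use its own implementation):

        // Assumed setup: 8 kHz mono sine test tone at 25% gain.
        _signalGenerator = new SignalGenerator(8000, 1)
        {
            Type = SignalGeneratorType.Sin,
            Frequency = 440,
            Gain = 0.25,
        };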
Example #10
        /// <summary>
        /// Sends the sounds of silence.
        /// </summary>
        private void SendSilenceSample(object state)
        {
            if (!_streamSendInProgress)
            {
                lock (_audioStreamTimer)
                {
                    uint outputBufferSize = (uint)(_sendingAudioRtpRate / 1000 * AUDIO_SAMPLE_PERIOD_MILLISECONDS);
                    int  sourceSampleRate = _sourceAudioSampleRate == AudioSamplingRatesEnum.Rate8KHz ? 8000 : 16000;

                    if (_sendingFormat == AudioCodecsEnum.G722)
                    {
                        int     inputBufferSize = sourceSampleRate / 1000 * AUDIO_SAMPLE_PERIOD_MILLISECONDS;
                        short[] silencePcm      = new short[inputBufferSize];

                        //OnAudioSourceRawSample?.Invoke(AudioSamplingRatesEnum.Rate8KHz, AUDIO_SAMPLE_PERIOD_MILLISECONDS, silencePcm);
                        byte[] encodedSample = _audioEncoder.EncodeAudio(silencePcm, AudioCodecsEnum.G722, _sourceAudioSampleRate);
                        OnAudioSourceEncodedSample?.Invoke(outputBufferSize, encodedSample);
                    }
                    else if (_sendingFormat == AudioCodecsEnum.PCMU ||
                             _sendingFormat == AudioCodecsEnum.PCMA)
                    {
                        if (_silenceBuffer == null || _silenceBuffer.Length != outputBufferSize)
                        {
                            _silenceBuffer = new byte[outputBufferSize];
                            SetSilenceBuffer(_silenceBuffer, 0);
                        }

                        // No encoding required for PCMU/PCMA silence.
                        OnAudioSourceEncodedSample?.Invoke(outputBufferSize, _silenceBuffer);
                    }
                    else
                    {
                        Log.LogWarning($"SendSilenceSample does not know how to encode {_sendingFormat}.");
                    }
                }
            }
        }
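`SetSilenceBuffer` fills the buffer with the G.711 digital-silence pattern: 0xFF for PCMU (µ-law zero amplitude) and 0xD5 for PCMA (A-law zero amplitude). A minimal sketch of the byte buffer overload assumed by this example; the helper in the original source may differ:

        private const byte PCMU_SILENCE_BYTE = 0xFF; // u-law encoding of zero amplitude.
        private const byte PCMA_SILENCE_BYTE = 0xD5; // A-law encoding of zero amplitude.

        // A plausible implementation, filling from startPosn to the end of the buffer.
        private void SetSilenceBuffer(byte[] buffer, int startPosn)
        {
            byte silence = (_sendingFormat == AudioCodecsEnum.PCMA) ? PCMA_SILENCE_BYTE : PCMU_SILENCE_BYTE;

            for (int i = startPosn; i < buffer.Length; i++)
            {
                buffer[i] = silence;
            }
        }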
Example #11
 private void _FFmpegAudioSource_OnAudioSourceEncodedSample(uint durationRtpUnits, byte[] sample)
 {
     OnAudioSourceEncodedSample?.Invoke(durationRtpUnits, sample);
 }
Example #12
 void WriteDataCallback(byte[] buffer, int offset, int count)
 {
     byte[] encodedSample = _audioEncoder.EncodeAudio(buffer.Skip(offset).Take(count).ToArray(), _selectedSourceFormat, AudioSourceSamplingRate);
     OnAudioSourceEncodedSample?.Invoke((uint)encodedSample.Length, encodedSample);
 }