Code Example #1
        // private int _packetsSent;

        private void PrepareAndSendAudioPacket(AudioContext audioContext, short[] audioBuffer)
        {
            // Apply any custom filters.
            ApplyAudioInputFilter(audioBuffer);

            //if (++_packetsSent % 100 == 0)
            //{
            //    DebugHelper.AnalyzeAudioFrame("MediaController_PrepareAndSendAudioPacket", audioBuffer, 0, audioBuffer.Length);
            //}

            if (MediaConfig.EnableDenoise)
            {
                audioContext.DtxFilter.Filter(audioBuffer);
            }

            // Check to see if we've succeeded in sending some audio.
            if (!AudioSentSuccessfully)
            {
                AudioSentSuccessfully = !audioContext.DtxFilter.IsSilent;
            }

            // Set the volume.
            if (MediaConfig.ApplyVolumeFilterToRecordedSound)
            {
                ApplyVolumeFilter(MicrophoneVolume, audioBuffer, 0, audioBuffer.Length);
            }

            // Compress the audio into the context's send buffer.
            LastAudioEncoder = audioContext.Encoder.CodecType;
            int length = audioContext.Encoder.Encode(audioBuffer, 0, audioBuffer.Length, audioContext.SendBuffer, audioContext.DtxFilter.IsSilent);

            // Send the packet
            MediaConnection.SendAudioPacket(audioContext.SendBuffer, length, audioContext.Encoder.CodecType, audioContext.DtxFilter.IsSilent, (int)_mediaEnvironment.LocalProcessorLoad);
            Logger.LogAudioFrameTransmitted(audioContext.DtxFilter.IsSilent);
        }
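
ApplyAudioInputFilter and ApplyVolumeFilter are not shown above. As an illustration only, here is a minimal sketch of what a volume filter like the one invoked above might look like; the signature is taken from the call site, while the gain-and-clamp body and the double type for the volume are assumptions.

        // Hypothetical sketch of the volume filter called above. The signature matches the
        // call site; the scale-and-clamp implementation and the volume type are assumptions.
        private static void ApplyVolumeFilter(double volume, short[] buffer, int offset, int length)
        {
            for (int i = offset; i < offset + length; i++)
            {
                // Scale each 16-bit sample and clamp to avoid wrap-around distortion.
                int scaled = (int)(buffer[i] * volume);
                if (scaled > short.MaxValue) scaled = short.MaxValue;
                if (scaled < short.MinValue) scaled = short.MinValue;
                buffer[i] = (short)scaled;
            }
        }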
Code Example #2
        protected override void OnSamples(long sampleTime, long sampleDuration, byte[] sampleData)
        {
            try
            {
                // Raise an event if we managed to successfully capture real audio.
                if (!_dataReceived && HasRealAudio(sampleData))
                {
                    ClientLogger.Debug("The AudioSinkAdapter has detected that there's real audio coming in.");
                    _dataReceived = true;
                    if (CaptureSuccessful != null)
                    {
                        CaptureSuccessful(this, new EventArgs());
                    }
                }

                if (_audioContextFactory != null && AudioController != null && AudioController.IsConnected)
                {
                    var ctx = _audioContextFactory.GetAudioContext();
                    if (ctx != _lastAudioContext)
                    {
                        ClientLogger.Debug("Changed audio context: \r\nFrom: {0}\r\nTo: {1}", _lastAudioContext == null ? "" : _lastAudioContext.ToString(), ctx.ToString());
                        _lastAudioContext = ctx;
                    }
                    ctx.Resampler.Write(sampleData);
                    _logger.LogRawFrame(sampleTime, sampleDuration, sampleData);

                    bool moreFrames;
                    do
                    {
                        if (ctx.Resampler.Read(ctx.ResampleBuffer, out moreFrames))
                        {
                            SubmitFrame(ctx, ctx.ResampleBuffer);
                        }
                    } while (moreFrames);
                }
            }
            catch (Exception ex)
            {
                ClientLogger.Debug(ex.Message);
            }
        }
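
HasRealAudio is referenced in OnSamples but not shown. Presumably it scans the captured buffer for any sample that rises above silence. A minimal sketch, assuming little-endian 16-bit PCM and an arbitrary noise-floor threshold:

        // Hypothetical sketch of HasRealAudio: returns true once any 16-bit sample exceeds
        // a small noise floor. The sample format and threshold are assumptions.
        private static bool HasRealAudio(byte[] sampleData)
        {
            const int noiseFloor = 10;
            for (int i = 0; i + 1 < sampleData.Length; i += 2)
            {
                short sample = (short)(sampleData[i] | (sampleData[i + 1] << 8));
                if (Math.Abs((int)sample) > noiseFloor)
                {
                    return true;
                }
            }
            return false;
        }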
Code Example #3
        /// <summary>
        /// Queues an audio frame to be processed and transmitted to the media server. Typically called by the RtpAudioSink class.
        /// </summary>
        /// <param name="audioContext">The audio context for the frame in question</param>
        /// <param name="frame">A byte[] array representing the samples received from the local microphone.</param>
        public void SubmitRecordedFrame(AudioContext audioContext, byte[] frame)
        {
            Logger.LogAudioFrameSet();
            _microphoneStatistics.LatestFrame = frame;

            if (!IsConnected || !OtherMembersInRoom)
            {
                return;
            }

            if (IsMicrophoneMuted)
            {
                frame = _silentBytes;
            }

            lock (_audioFrames)
            {
                _audioFrames.Enqueue(new AudioFrame(audioContext, frame));
            }
            _audioEncodeResetEvent.Set();
        }
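
The _audioEncodeResetEvent signal suggests a producer/consumer hand-off: SubmitRecordedFrame enqueues frames, and a dedicated encode thread drains the queue and passes each frame to PrepareAndSendAudioPacket (Code Example #1). The following is only a rough sketch of that consumer loop; the shutdown flag, the byte-to-short conversion, and the dequeue helper are assumptions, since only the queue, the reset event, and PrepareAndSendAudioPacket appear in the examples.

        // Hypothetical sketch of the encode-thread loop that consumes _audioFrames.
        private void EncodeLoop()
        {
            while (!_stopRequested) // hypothetical shutdown flag
            {
                // Wait until SubmitRecordedFrame signals that at least one frame is queued.
                _audioEncodeResetEvent.WaitOne();

                AudioFrame audioFrame;
                while (TryDequeueFrame(out audioFrame))
                {
                    // Convert the raw byte[] frame to 16-bit samples before encoding.
                    var samples = new short[audioFrame.Samples.Length / sizeof(short)];
                    Buffer.BlockCopy(audioFrame.Samples, 0, samples, 0, audioFrame.Samples.Length);
                    PrepareAndSendAudioPacket(audioFrame.AudioContext, samples);
                }
            }
        }

        // Hypothetical helper: dequeue under the same lock used by SubmitRecordedFrame.
        private bool TryDequeueFrame(out AudioFrame frame)
        {
            lock (_audioFrames)
            {
                if (_audioFrames.Count > 0)
                {
                    frame = _audioFrames.Dequeue();
                    return true;
                }
            }
            frame = null;
            return false;
        }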
Code Example #4
 public AudioFrame(AudioContext audioContext, byte[] samples)
 {
     AudioContext = audioContext;
     Samples      = samples;
 }
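
The constructor assigns two members that are not declared in this snippet; presumably AudioFrame exposes them as simple read-only properties along these lines (an assumption inferred from the assignments above):

 // Hypothetical properties on AudioFrame, inferred from the constructor's assignments.
 public AudioContext AudioContext { get; private set; }
 public byte[]       Samples      { get; private set; }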
Code Example #5
        /// <summary>
        /// Creates a new instance of the AudioContextFactory.
        /// </summary>
        /// <param name="rawAudioFormat">The format in which the audio coming directly from the microphone is recorded</param>
        /// <param name="playedAudioFormat">The format in which the audio will be played back on the far end (typically 16Khz)</param>
        /// <param name="config">The currently active MediaConfig instance</param>
        /// <param name="mediaEnvironment">An IMediaEnvironment instance which can be used to make decisions about which context to return, for instance,
        /// if the CPU is running too hot, or multiple people have joined the conference.</param>
        public AudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
        {
            RawAudioFormat    = rawAudioFormat;
            PlayedAudioFormat = playedAudioFormat;
            MediaConfig       = config;
            MediaEnvironment  = mediaEnvironment;

            // What we should use when there's only one other person, and CPU is OK:
            // 16Khz, Speex, WebRtc at full strength
            var directAudioFormat = new AudioFormat();
            var directResampler   = new ResampleFilter(rawAudioFormat, directAudioFormat);

            directResampler.InstanceName = "High Quality Direct Resampler";
            var directEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, directAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            directEnhancer.InstanceName = "High";
            var directDtx     = new DtxFilter(directAudioFormat);
            var directEncoder = new SpeexEncoder(directAudioFormat);

            HighQualityDirectCtx             = new AudioContext(directAudioFormat, directResampler, directDtx, directEnhancer, directEncoder);
            HighQualityDirectCtx.Description = "High Quality Direct";

            // What we should use when there are multiple people (and hence the audio will need to be decoded and mixed), but CPU is OK:
            // 8Khz, G711, WebRtc at full strength
            var conferenceAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var conferenceResampler   = new ResampleFilter(rawAudioFormat, conferenceAudioFormat);

            conferenceResampler.InstanceName = "High Quality Conference Resampler";
            var conferenceEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, conferenceAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            conferenceEnhancer.InstanceName = "Medium";
            var conferenceDtx     = new DtxFilter(conferenceAudioFormat);
            var conferenceEncoder = new G711MuLawEncoder(conferenceAudioFormat);

            HighQualityConferenceCtx             = new AudioContext(conferenceAudioFormat, conferenceResampler, conferenceDtx, conferenceEnhancer, conferenceEncoder);
            HighQualityConferenceCtx.Description = "High Quality Conference";

            // What we should use when one or more remote CPU's isn't keeping up (regardless of how many people are in the room):
            // 8Khz, G711, WebRtc at full-strength
            var remoteFallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var remoteFallbackResampler   = new ResampleFilter(rawAudioFormat, remoteFallbackAudioFormat);

            remoteFallbackResampler.InstanceName = "Low Quality Remote CPU Resampler";
            var remoteFallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, remoteFallbackAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            remoteFallbackEnhancer.InstanceName = "Medium";
            var remoteFallbackDtx     = new DtxFilter(remoteFallbackAudioFormat);
            var remoteFallbackEncoder = new G711MuLawEncoder(remoteFallbackAudioFormat);

            LowQualityForRemoteCpuCtx             = new AudioContext(remoteFallbackAudioFormat, remoteFallbackResampler, remoteFallbackDtx, remoteFallbackEnhancer, remoteFallbackEncoder);
            LowQualityForRemoteCpuCtx.Description = "Fallback for remote high CPU";

            // What we should use when the local CPU isn't keeping up (regardless of how many people are in the room):
            // 8Khz, G711, WebRtc at half-strength
            var fallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var fallbackResampler   = new ResampleFilter(rawAudioFormat, fallbackAudioFormat);

            fallbackResampler.InstanceName = "Low Quality Local CPU Resampler";
            var fallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatencyFallback, config.FilterLengthFallback, fallbackAudioFormat, playedAudioFormat, config.EnableAec, false, false);

            fallbackEnhancer.InstanceName = "Low";
            var fallbackDtx     = new DtxFilter(fallbackAudioFormat);
            var fallbackEncoder = new G711MuLawEncoder(fallbackAudioFormat);

            LowQualityForLocalCpuCtx             = new AudioContext(fallbackAudioFormat, fallbackResampler, fallbackDtx, fallbackEnhancer, fallbackEncoder);
            LowQualityForLocalCpuCtx.Description = "Fallback for local high CPU";

            _audioContextAdapter = new EnvironmentAdapter<AudioContext>(mediaEnvironment,
                                                                        HighQualityDirectCtx,
                                                                        HighQualityConferenceCtx,
                                                                        LowQualityForRemoteCpuCtx,
                                                                        LowQualityForLocalCpuCtx);
        }
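
Code Example #2 calls GetAudioContext() on the factory, which is not shown here. Presumably it just delegates selection to the _audioContextAdapter built above, which chooses among the four contexts based on the current media environment. A one-line sketch; the GetItem() method name on EnvironmentAdapter is an assumption.

        /// <summary>
        /// Hypothetical sketch of GetAudioContext(): delegates to the EnvironmentAdapter
        /// constructed above. The GetItem() method name is an assumption.
        /// </summary>
        public AudioContext GetAudioContext()
        {
            return _audioContextAdapter.GetItem();
        }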
Code Example #6
 protected virtual void SubmitFrame(AudioContext audioContext, byte[] frame)
 {
     AudioController.SubmitRecordedFrame(audioContext, frame);
     _logger.AudioFormat = audioContext.AudioFormat;
     _logger.LogResampledFrame(frame);
 }