Example No. 1
        protected override void StartNextTest()
        {
            // Set everything up.
            base.StartNextTest();

            // Tweak a few parameters.
            mPlayer.VisualizationRate   = 10;
            mRecorder.VisualizationRate = 10;

            // Run the test. Because the player's media element is null, manually queuing the frames here is what drives the echo cancellation and the rest of the pipeline.
            ThreadPool.QueueUserWorkItem(o =>
            {
                var resampler = new ResampleFilter(audioFormat, audioFormat);
                var dtx       = new DtxFilter(audioFormat);
                var encoder   = new G711MuLawEncoder(audioFormat);
                var ctx       = new AudioContext(audioFormat, resampler, dtx, mEchoCancelFilter, encoder);
                for (int i = 0; i < SourceFrames.Count && i < SpeakerFrames.Count; i++)
                {
                    if (mStopRequested)
                    {
                        mStopRequested = false;
                        return;
                    }
                    int index = i;

                    // This has the (necessary) side-effect of registering the (virtually) played frame.
                    mPlayer.GetNextAudioFrame(ms => mRecorder.SubmitRecordedFrame(ctx, SpeakerFrames[index]));
                }

                // Stopping everything has the (necessary) side-effect of starting the next test, if there is one.
                mRecorder.StopRecording();
                mPlayer.StopPlaying();
            });
        }
Example No. 2
        public void Base_Read_OneChannelToTwo()
        {
            var inputFormat   = new AudioFormat();
            var outputFormat  = new AudioFormat(channels: 2);
            var resampler     = new ResampleFilter(inputFormat, outputFormat);
            var inboundFrame  = new short[inputFormat.SamplesPerFrame];
            var outboundFrame = new short[outputFormat.SamplesPerFrame];

            // Populate one channel
            for (short i = 0; i < inputFormat.SamplesPerFrame; i++)
            {
                inboundFrame[i] = i;
            }
            var temp = new byte[Buffer.ByteLength(inboundFrame)];

            Buffer.BlockCopy(inboundFrame, 0, temp, 0, temp.Length);
            resampler.Write(temp);

            bool moreFrames;
            bool successful = resampler.Read(outboundFrame, out moreFrames);

            Assert.IsTrue(successful);
            Assert.IsFalse(moreFrames);

            // We should get two channels' worth of data back. Because the filter interpolates and smooths,
            // the result isn't a simple doubling of the original data, but some patterns can still be tested.
            for (int i = 0; i < (inputFormat.SamplesPerFrame * 2) - 4; i++)
            {
                Assert.IsTrue(outboundFrame[i] <= outboundFrame[i + 4]);
            }
        }
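A note on the assertions above: because the filter interpolates and smooths during the one-to-two channel conversion, the test can only check a monotonic trend, not exact values. For reference, here is a hedged sketch of the simplest possible upmix (duplicating each mono sample into an interleaved stereo pair); this is an illustration, not the library's actual algorithm:

        // Naive mono-to-stereo upmix: duplicate each sample into an L/R pair.
        // Illustration only; ResampleFilter additionally interpolates and smooths.
        static short[] NaiveMonoToStereo(short[] mono)
        {
            var stereo = new short[mono.Length * 2];

            for (int i = 0; i < mono.Length; i++)
            {
                stereo[2 * i]     = mono[i]; // left
                stereo[2 * i + 1] = mono[i]; // right
            }
            return stereo;
        }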
Example No. 3
        public void Base_Read_32KhzTo8Khz()
        {
            var inputFormat   = new AudioFormat(32000);
            var outputFormat  = new AudioFormat(8000);
            var resampler     = new ResampleFilter(inputFormat, outputFormat);
            var inboundFrame  = new short[inputFormat.SamplesPerFrame];
            var outboundFrame = new short[outputFormat.SamplesPerFrame];

            for (short i = 0; i < inputFormat.SamplesPerFrame; i++)
            {
                inboundFrame[i] = i;
            }
            var temp = new byte[Buffer.ByteLength(inboundFrame)];

            Buffer.BlockCopy(inboundFrame, 0, temp, 0, temp.Length);
            resampler.Write(temp);

            bool moreFrames;
            bool successful = resampler.Read(outboundFrame, out moreFrames);

            Assert.IsTrue(successful);
            Assert.IsFalse(moreFrames);

            for (int i = 0; i < outputFormat.SamplesPerFrame; i++)
            {
                Assert.AreEqual(i * 4, outboundFrame[i]);
            }
        }
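The expected values follow from the 32000 / 8000 = 4 downsampling ratio: with the 0, 1, 2, ... ramp as input, keeping every fourth sample leaves i * 4 at index i. A minimal decimation sketch (assuming plain sample-dropping, which is what the ramp assertion implies; the real filter may also low-pass first):

        // Naive decimation: keep every factor-th sample. Illustration only.
        static short[] NaiveDecimate(short[] input, int factor)
        {
            var output = new short[input.Length / factor];

            for (int i = 0; i < output.Length; i++)
            {
                output[i] = input[i * factor];
            }
            return output;
        }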
Example No. 4
        public void Base_Read_TwoChannelsToOne()
        {
            var inputFormat   = new AudioFormat(channels: 2);
            var outputFormat  = new AudioFormat();
            var resampler     = new ResampleFilter(inputFormat, outputFormat);
            var inboundFrame  = new short[inputFormat.SamplesPerFrame];
            var outboundFrame = new short[outputFormat.SamplesPerFrame];

            // Give both channels the same values.
            int index = 0;

            for (short i = 0; i < outputFormat.SamplesPerFrame; i++)
            {
                inboundFrame[index++] = i;
                inboundFrame[index++] = i;
            }
            var temp = new byte[Buffer.ByteLength(inboundFrame)];

            Buffer.BlockCopy(inboundFrame, 0, temp, 0, temp.Length);
            resampler.Write(temp);

            bool moreFrames;
            bool successful = resampler.Read(outboundFrame, out moreFrames);

            Assert.IsTrue(successful);
            Assert.IsFalse(moreFrames);

            // We should get just one channel's values back.
            for (int i = 0; i < outputFormat.SamplesPerFrame; i++)
            {
                Assert.AreEqual(i, outboundFrame[i]);
            }
        }
Example No. 5
        public void Base_Read_96KhzTo16Khz_And_TwoChannelsToOne()
        {
            var inputFormat   = new AudioFormat(96000, channels: 2);
            var outputFormat  = new AudioFormat();
            var resampler     = new ResampleFilter(inputFormat, outputFormat);
            var inboundFrame  = new short[inputFormat.SamplesPerFrame];
            var outboundFrame = new short[outputFormat.SamplesPerFrame];

            int index = 0;

            for (short i = 0; i < inputFormat.SamplesPerFrame / inputFormat.Channels; i++)
            {
                inboundFrame[index++] = i;
                inboundFrame[index++] = i;
            }
            var temp = new byte[Buffer.ByteLength(inboundFrame)];

            Buffer.BlockCopy(inboundFrame, 0, temp, 0, temp.Length);
            resampler.Write(temp);

            bool moreFrames;
            bool successful = resampler.Read(outboundFrame, out moreFrames);

            Assert.IsTrue(successful);
            Assert.IsFalse(moreFrames);

            for (int i = 0; i < outputFormat.SamplesPerFrame; i++)
            {
                Assert.AreEqual(i * 6, outboundFrame[i]);
            }
        }
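Here the factor of 6 comes from 96000 / 16000, and because both channels carry the same ramp, the stereo-to-mono downmix leaves the values untouched; only the rate conversion shows up in the assertion. A combined naive sketch (an illustration under those assumptions, not the filter's internals):

        // Naive stereo-to-mono downmix plus decimation. Illustration only.
        static short[] NaiveStereoToMonoDecimate(short[] interleaved, int factor)
        {
            int monoLength = interleaved.Length / 2;
            var output     = new short[monoLength / factor];

            for (int i = 0; i < output.Length; i++)
            {
                int m = i * factor * 2; // index of the L sample of the i-th surviving pair

                output[i] = (short)((interleaved[m] + interleaved[m + 1]) / 2); // average L and R
            }
            return output;
        }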
Example No. 6
        public void Base_Read_44KhzTo16Khz_And_2048To640()
        {
            // This tests the scenario we run into on some Macs, where the amount of data submitted is always on 2048-byte boundaries.

            // 44100 / 16000 = 2.75625
            const int writeFrameSize   = 1024;                           // in shorts
            const int readFrameSize    = 320;                            // in shorts
            const int inboundFrameSize = (int)(readFrameSize * 2.75625); //  = 882
            const int frameCount       = 100;
            var       inputFormat      = new AudioFormat(44100);
            var       outputFormat     = new AudioFormat();
            var       resampler        = new ResampleFilter(inputFormat, outputFormat);
            var       inboundFrames    = new short[inboundFrameSize * frameCount];
            var       outboundFrame    = new byte[readFrameSize * sizeof(short)];

            // Fill the inbound buffer.
            for (short frame = 0; frame < frameCount; frame++)
            {
                for (int i = 0; i < inboundFrameSize; i++)
                {
                    inboundFrames[frame * inboundFrameSize + i] = frame;
                }
            }

            int   index     = 0;
            short readFrame = 0;

            while (index + writeFrameSize < inboundFrames.Length)
            {
                // Write data to the frame in 1024-sample/2048-byte chunks (we'll be reading from it in 320-sample chunks).
                var tmpWrite = new byte[writeFrameSize * sizeof(short)];
                Buffer.BlockCopy(inboundFrames, index * sizeof(short), tmpWrite, 0, writeFrameSize * sizeof(short));
                resampler.Write(tmpWrite);
                index += writeFrameSize;

                bool moreFrames;
                do
                {
                    if (resampler.Read(outboundFrame, out moreFrames))
                    {
                        // Copy the byte array to a short array so we can check the values.
                        var tmpRead = new short[readFrameSize];
                        Buffer.BlockCopy(outboundFrame, 0, tmpRead, 0, readFrameSize * sizeof(short));

                        // There's some expected leakage around the ends of the buffers,
                        // so ignore the values there.
                        for (int i = 0; i < tmpRead.Length - (readFrame / 2); i++)
                        {
                            Assert.AreEqual(readFrame, tmpRead[i]);
                        }
                        readFrame++;
                    }
                } while (moreFrames);
            }
        }
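The constants in this test encode the buffering arithmetic: one 320-sample output frame consumes 882 input samples, while writes arrive in 1024-sample chunks, so Read() has to be polled until moreFrames goes false. A quick worked check of those numbers (illustration only):

        // Worked check of the constants above. 44100 / 16000 = 2.75625, so one
        // 320-sample output frame consumes 320 * 2.75625 = 882 input samples, and a
        // single 1024-sample write delivers 1024 / 882 ≈ 1.16 output frames: enough
        // to satisfy one Read() and leave a partial frame buffered for the next pass.
        const int inboundPerRead = (int)(320 * 2.75625); // = 882

Example No. 7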
        public SingleAudioContextFactory(AudioContext audioContext, AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig mediaConfig, IMediaEnvironment mediaEnvironment)
        {
            RawAudioFormat    = rawAudioFormat;
            PlayedAudioFormat = playedAudioFormat;
            MediaConfig       = mediaConfig;
            MediaEnvironment  = mediaEnvironment;

            // Hack!!!! We need to make a copy of the audioContext, but with a few tweaks.
            // When the audio context is first created, we don't know what the rawAudioFormat will be,
            // but it should be accurate by this point, so we need to recreate the AudioContext.
            var resampler = new ResampleFilter(rawAudioFormat, playedAudioFormat);

            resampler.InstanceName    = audioContext.Resampler.InstanceName;
            _audioContext             = new AudioContext(playedAudioFormat, resampler, audioContext.DtxFilter, audioContext.SpeechEnhancementStack, audioContext.Encoder);
            _audioContext.Description = audioContext.Description;
        }
Example No. 8
        public void Base_Read_NoChanges()
        {
            var resampler     = new ResampleFilter(AudioFormat.Default, AudioFormat.Default);
            var inboundFrame  = new byte[AudioFormat.Default.BytesPerFrame];
            var outboundFrame = new byte[AudioFormat.Default.BytesPerFrame];

            for (int i = 0; i < inboundFrame.Length; i++)
            {
                inboundFrame[i] = (byte)i;
            }
            resampler.Write(inboundFrame);
            bool moreFrames;
            bool successful = resampler.Read(outboundFrame, out moreFrames);

            Assert.IsTrue(successful);
            Assert.IsFalse(moreFrames);
            for (int i = 0; i < AudioFormat.Default.BytesPerFrame; i++)
            {
                Assert.AreEqual(inboundFrame[i], outboundFrame[i]);
            }
        }
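Example No. 9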
        public AudioContext GetAudioContext()
        {
            var resampler     = new ResampleFilter(rawAudioFormat, transmittedAudioFormat);
            var conferenceDtx = new DtxFilter(transmittedAudioFormat);

            IAudioTwoWayFilter enhancer = null;

            switch (enhancementStack)
            {
            case SpeechEnhancementStack.None:
                enhancer = new NullEchoCancelFilter(mediaConfig.ExpectedAudioLatency, mediaConfig.FilterLength, transmittedAudioFormat, AudioFormat.Default);
                break;

            case SpeechEnhancementStack.Speex:
                enhancer = new SpeexEchoCanceller2(mediaConfig, transmittedAudioFormat, AudioFormat.Default);
                break;

            case SpeechEnhancementStack.WebRtc:
                enhancer = new WebRtcFilter(mediaConfig.ExpectedAudioLatency, mediaConfig.FilterLength, transmittedAudioFormat, AudioFormat.Default, mediaConfig.EnableAec, mediaConfig.EnableDenoise, mediaConfig.EnableAgc);
                break;

            default:
                throw new InvalidOperationException("Unsupported speech enhancement stack: " + enhancementStack);
            }

            IAudioEncoder encoder = null;

            switch (codecType)
            {
            case AudioCodecType.G711M:
                encoder = new G711MuLawEncoder(transmittedAudioFormat);
                break;

            case AudioCodecType.Speex:
                encoder = new SpeexEncoder(transmittedAudioFormat);
                break;

            default:
                throw new InvalidOperationException("Unsupported codec type: " + codecType);
            }

            var ctx = new AudioContext(transmittedAudioFormat, resampler, conferenceDtx, enhancer, encoder);

            return ctx;
        }
Example No. 10
        protected virtual void StartNextTest()
        {
            Deployment.Current.Dispatcher.BeginInvoke(() => Status = string.Format("Executing test for latency {0} and filter length {1}", ExpectedLatency, FilterLength));

            mTestStartTime = DateTime.Now;
            IAudioFilter playedResampler;
            IAudioFilter recordedResampler;

            mCancelledFrames = new List<byte[]>();

            // Decide whether to synchronize the audio or not.
            if (AecIsSynchronized)
            {
                playedResampler   = new ResampleFilter(audioFormat, audioFormat);
                recordedResampler = new ResampleFilter(audioFormat, audioFormat);
            }
            else
            {
                playedResampler   = new NullAudioFilter(audioFormat.BytesPerFrame);
                recordedResampler = new NullAudioFilter(audioFormat.BytesPerFrame);
            }

            // Initialize the echo canceller
            playedResampler.InstanceName     = "EchoCanceller_played";
            recordedResampler.InstanceName   = "EchoCanceller_recorded";
            mediaConfig.ExpectedAudioLatency = ExpectedLatency;
            mediaConfig.FilterLength         = FilterLength;

            switch (mEchoCancellerType)
            {
            case EchoCancellerType.Speex2:
                mEchoCancelFilter = new SpeexEchoCanceller2(mediaConfig, audioFormat, AudioFormat.Default);
                break;

            case EchoCancellerType.WebRtc:
                mEchoCancelFilter = new WebRtcFilter(mediaConfig.ExpectedAudioLatency,
                                                     mediaConfig.FilterLength,
                                                     audioFormat,
                                                     AudioFormat.Default,
                                                     true,
                                                     true,
                                                     false,
                                                     playedResampler, recordedResampler);
                break;

            case EchoCancellerType.TimeDomain:
                mEchoCancelFilter = new TimeDomainEchoCancelFilter(ExpectedLatency, FilterLength, audioFormat, AudioFormat.Default, playedResampler, recordedResampler);
                break;

            case EchoCancellerType.Speex:
                mEchoCancelFilter = new SpeexEchoCancelFilter(ExpectedLatency, FilterLength, audioFormat, AudioFormat.Default, playedResampler, recordedResampler);
                break;

            default:
                throw new InvalidOperationException("Unknown echo canceller type: " + mEchoCancellerType);
            }

            mPlayer   = GetPlayer();
            mRecorder = GetRecorder();

            mRecorder.StartRecording(SpeakerFrames, FinishTest);
            mPlayer.StartPlaying(SourceFrames, mRecorder.StopRecording);
        }
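A note on the synchronization switch above: when AecIsSynchronized is set, same-format ResampleFilters serve purely as buffering/synchronization stages in front of the echo canceller; otherwise NullAudioFilters pass frames through untouched. A hedged sketch of the pass-through contract this implies (a hypothetical class, not the library's NullAudioFilter source), matching the Write/Read pattern used throughout these tests:

        // Hypothetical pass-through filter: echoes written frames back unchanged.
        public class PassThroughFilter
        {
            private readonly Queue<byte[]> _frames = new Queue<byte[]>();

            public string InstanceName { get; set; }

            public void Write(byte[] frame)
            {
                _frames.Enqueue(frame);
            }

            public bool Read(byte[] outputFrame, out bool moreFrames)
            {
                if (_frames.Count == 0)
                {
                    moreFrames = false;
                    return false; // nothing buffered
                }
                Buffer.BlockCopy(_frames.Dequeue(), 0, outputFrame, 0, outputFrame.Length);
                moreFrames = _frames.Count > 0;
                return true;
            }
        }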
Example No. 11
        private void btnPlay_Click(object sender, RoutedEventArgs e)
        {
            if (btnPlay.Content.ToString() == "Start")
            {
                try
                {
                    // Setup the parameters.
                    playedFrameIndex = 0;
                    int playbackDelay = Int32.Parse(txtPlaybackDelay.Text);
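                    // Presumably one frame is 20 ms (AudioFormat.Default.MillisecondsPerFrame), so this turns a millisecond delay into a frame count.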
                    framePlaybackInterval = playbackDelay / 20;
                    expectedLatency       = Int32.Parse(txtExpectedLatency.Text);
                    filterLength          = Int32.Parse(txtTailSize.Text);

                    IAudioFilter playedResampler;
                    IAudioFilter recordedResampler;

                    // Decide whether to synchronize the audio or not.
                    if (chkSynchronize.IsChecked == true)
                    {
                        playedResampler = new ResampleFilter(AudioFormat.Default, AudioFormat.Default);

                        recordedResampler = new ResampleFilter(AudioFormat.Default, AudioFormat.Default);
                    }
                    else
                    {
                        playedResampler   = new NullAudioFilter(AudioFormat.Default.BytesPerFrame);
                        recordedResampler = new NullAudioFilter(AudioFormat.Default.BytesPerFrame);
                    }

                    // Initialize the echo canceller
                    playedResampler.InstanceName   = "EchoCanceller_played";
                    recordedResampler.InstanceName = "EchoCanceller_recorded";
                    echoCanceller = new SpeexEchoCancelFilter(expectedLatency, filterLength, AudioFormat.Default, AudioFormat.Default, playedResampler, recordedResampler);

                    if (chkPlaySilently.IsChecked == true)
                    {
                        PerformEchoCancellation();
                        return;
                    }
                    if (chkPlayWithAEC.IsChecked == true)
                    {
                        playbackFunction = GetNextAECFrame;
                    }
                    else
                    {
                        playbackFunction = GetNextRawFrame;
                    }

                    // Start playing the audio.
                    mediaElement.Play();
                    btnPlay.Content = "Stop";
                }
                catch (Exception ex)
                {
                    MessageBox.Show(ex.ToString());
                }
            }
            else
            {
                btnPlay.Content = "Start";
                mediaElement.Stop();
            }
        }
Example No. 12
        /// <summary>
        /// Creates a new instance of the AudioContextFactory.
        /// </summary>
        /// <param name="rawAudioFormat">The format in which the audio coming directly from the microphone is recorded</param>
        /// <param name="playedAudioFormat">The format in which the audio will be played back on the far end (typically 16Khz)</param>
        /// <param name="config">The currently active MediaConfig instance</param>
        /// <param name="mediaEnvironment">An IMediaEnvironment instance which can be used to make decisions about which context to return, for instance,
        /// if the CPU is running too hot, or multiple people have joined the conference.</param>
        public AudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
        {
            RawAudioFormat    = rawAudioFormat;
            PlayedAudioFormat = playedAudioFormat;
            MediaConfig       = config;
            MediaEnvironment  = mediaEnvironment;

            // What we should use when there's only one other person, and CPU is OK:
            // 16 kHz, Speex, WebRtc at full strength
            var directAudioFormat = new AudioFormat();
            var directResampler   = new ResampleFilter(rawAudioFormat, directAudioFormat);

            directResampler.InstanceName = "High Quality Direct Resampler";
            var directEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, directAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            directEnhancer.InstanceName = "High";
            var directDtx     = new DtxFilter(directAudioFormat);
            var directEncoder = new SpeexEncoder(directAudioFormat);

            HighQualityDirectCtx             = new AudioContext(directAudioFormat, directResampler, directDtx, directEnhancer, directEncoder);
            HighQualityDirectCtx.Description = "High Quality Direct";

            // What we should use when there are multiple people (and hence the audio will need to be decoded and mixed), but CPU is OK:
            // 8 kHz, G711, WebRtc at full strength
            var conferenceAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var conferenceResampler   = new ResampleFilter(rawAudioFormat, conferenceAudioFormat);

            conferenceResampler.InstanceName = "High Quality Conference Resampler";
            var conferenceEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, conferenceAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            conferenceEnhancer.InstanceName = "Medium";
            var conferenceDtx     = new DtxFilter(conferenceAudioFormat);
            var conferenceEncoder = new G711MuLawEncoder(conferenceAudioFormat);

            HighQualityConferenceCtx             = new AudioContext(conferenceAudioFormat, conferenceResampler, conferenceDtx, conferenceEnhancer, conferenceEncoder);
            HighQualityConferenceCtx.Description = "High Quality Conference";

            // What we should use when one or more remote CPUs aren't keeping up (regardless of how many people are in the room):
            // 8 kHz, G711, WebRtc at full strength
            var remoteFallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var remoteFallbackResampler   = new ResampleFilter(rawAudioFormat, remoteFallbackAudioFormat);

            remoteFallbackResampler.InstanceName = "Low Quality Remote CPU Resampler";
            var remoteFallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, remoteFallbackAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            remoteFallbackEnhancer.InstanceName = "Medium";
            var remoteFallbackDtx     = new DtxFilter(remoteFallbackAudioFormat);
            var remoteFallbackEncoder = new G711MuLawEncoder(remoteFallbackAudioFormat);

            LowQualityForRemoteCpuCtx             = new AudioContext(remoteFallbackAudioFormat, remoteFallbackResampler, remoteFallbackDtx, remoteFallbackEnhancer, remoteFallbackEncoder);
            LowQualityForRemoteCpuCtx.Description = "Fallback for remote high CPU";

            // What we should use when the local CPU isn't keeping up (regardless of how many people are in the room):
            // 8 kHz, G711, WebRtc at half strength
            var fallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var fallbackResampler   = new ResampleFilter(rawAudioFormat, fallbackAudioFormat);

            fallbackResampler.InstanceName = "Low Quality Local CPU Resampler";
            var fallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatencyFallback, config.FilterLengthFallback, fallbackAudioFormat, playedAudioFormat, config.EnableAec, false, false);

            fallbackEnhancer.InstanceName = "Low";
            var fallbackDtx     = new DtxFilter(fallbackAudioFormat);
            var fallbackEncoder = new G711MuLawEncoder(fallbackAudioFormat);

            LowQualityForLocalCpuCtx             = new AudioContext(fallbackAudioFormat, fallbackResampler, fallbackDtx, fallbackEnhancer, fallbackEncoder);
            LowQualityForLocalCpuCtx.Description = "Fallback for local high CPU";

            _audioContextAdapter = new EnvironmentAdapter<AudioContext>(mediaEnvironment,
                                                                        HighQualityDirectCtx,
                                                                        HighQualityConferenceCtx,
                                                                        LowQualityForRemoteCpuCtx,
                                                                        LowQualityForLocalCpuCtx);
        }
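The four contexts map onto a simple selection policy, spelled out in the comments above; the actual switching is delegated to the EnvironmentAdapter. A hedged sketch of that policy as a standalone helper (hypothetical method and parameters, for illustration only):

        // Hypothetical illustration of the selection policy implied by the comments above.
        private AudioContext ChooseContext(bool localCpuHot, bool remoteCpuHot, int remoteParticipants)
        {
            if (localCpuHot)
            {
                return LowQualityForLocalCpuCtx;   // the local CPU isn't keeping up
            }
            if (remoteCpuHot)
            {
                return LowQualityForRemoteCpuCtx;  // a remote CPU isn't keeping up
            }
            if (remoteParticipants > 1)
            {
                return HighQualityConferenceCtx;   // audio must be decoded and mixed
            }
            return HighQualityDirectCtx;           // one other person and CPU is OK
        }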
Example No. 13
        public static extern void FilterPixelate(int id, int pixelSize, bool useResample, ResampleFilter filter);
Example No. 14
        public void CreateAudioContexts()
        {
            _captureSource.VideoCaptureDevice = null;
            if (_captureSource.AudioCaptureDevice == null)
            {
                _captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();
                if (_captureSource.AudioCaptureDevice == null)
                {
                    throw new InvalidOperationException("No suitable audio capture device was found");
                }
            }
            MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);
            _captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame;             // 20 milliseconds
            var desiredFormat  = _captureSource.AudioCaptureDevice.DesiredFormat;
            var rawAudioFormat = new AudioFormat(desiredFormat.SamplesPerSecond, AudioFormat.Default.MillisecondsPerFrame, desiredFormat.Channels, desiredFormat.BitsPerSample);

            var playedAudioFormat = new AudioFormat();
            var config            = MediaConfig.Default;

            // Absolute bare-minimum processing: doesn't process the sound at all.
            var nullAudioFormat = new AudioFormat();
            var nullResampler   = new ResampleFilter(rawAudioFormat, nullAudioFormat);

            nullResampler.InstanceName = "Null resample filter";
            var nullEnhancer = new NullEchoCancelFilter(config.ExpectedAudioLatency, config.FilterLength, nullAudioFormat, playedAudioFormat);

            nullEnhancer.InstanceName = "Null";
            var nullDtx          = new NullAudioInplaceFilter();
            var nullEncoder      = new NullAudioEncoder();
            var nullAudioContext = new AudioContext(nullAudioFormat, nullResampler, nullDtx, nullEnhancer, nullEncoder);

            nullAudioContext.Description = "Null";

            // What we should use when there's only one other person, and CPU is OK:
            // 16 kHz, Speex, WebRtc at full strength
            var directAudioFormat = new AudioFormat();
            var directResampler   = new ResampleFilter(rawAudioFormat, directAudioFormat);

            directResampler.InstanceName = "Direct high quality resample filter";
            var directEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, directAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            directEnhancer.InstanceName = "High";
            var directDtx          = new DtxFilter(directAudioFormat);
            var directEncoder      = new SpeexEncoder(directAudioFormat);
            var directAudioContext = new AudioContext(directAudioFormat, directResampler, directDtx, directEnhancer, directEncoder);

            directAudioContext.Description = "High Quality Direct";

            // What we should use when there are multiple people (and hence the audio will need to be decoded and mixed), but CPU is OK:
            // 8 kHz, G711, WebRtc at full strength
            var conferenceAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var conferenceResampler   = new ResampleFilter(rawAudioFormat, conferenceAudioFormat);

            conferenceResampler.InstanceName = "Conference high quality resample filter";
            var conferenceEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, conferenceAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            conferenceEnhancer.InstanceName = "Medium";
            var conferenceDtx          = new DtxFilter(conferenceAudioFormat);
            var conferenceEncoder      = new G711MuLawEncoder(conferenceAudioFormat);
            var conferenceAudioContext = new AudioContext(conferenceAudioFormat, conferenceResampler, conferenceDtx, conferenceEnhancer, conferenceEncoder);

            conferenceAudioContext.Description = "High Quality Conference";

            // What we should use when one or more remote CPUs aren't keeping up (regardless of how many people are in the room):
            // 8 kHz, G711, WebRtc at full strength
            var remoteFallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var remoteFallbackResampler   = new ResampleFilter(rawAudioFormat, remoteFallbackAudioFormat);

            remoteFallbackResampler.InstanceName = "Fallback remote high cpu resample filter";
            var remoteFallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, remoteFallbackAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            remoteFallbackEnhancer.InstanceName = "Medium";
            var remoteFallbackDtx          = new DtxFilter(remoteFallbackAudioFormat);
            var remoteFallbackEncoder      = new G711MuLawEncoder(remoteFallbackAudioFormat);
            var remoteFallbackAudioContext = new AudioContext(remoteFallbackAudioFormat, remoteFallbackResampler, remoteFallbackDtx, remoteFallbackEnhancer, remoteFallbackEncoder);

            remoteFallbackAudioContext.Description = "Fallback for remote high CPU";

            // What we should use when the local CPU isn't keeping up (regardless of how many people are in the room):
            // 8 kHz, G711, WebRtc at half strength
            var fallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var fallbackResampler   = new ResampleFilter(rawAudioFormat, fallbackAudioFormat);

            fallbackResampler.InstanceName = "Fallback resample filter";
            var fallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatencyFallback, config.FilterLengthFallback, fallbackAudioFormat, playedAudioFormat, config.EnableAec, false, false);

            fallbackEnhancer.InstanceName = "Low";
            var fallbackDtx          = new DtxFilter(fallbackAudioFormat);
            var fallbackEncoder      = new G711MuLawEncoder(fallbackAudioFormat);
            var fallbackAudioContext = new AudioContext(fallbackAudioFormat, fallbackResampler, fallbackDtx, fallbackEnhancer, fallbackEncoder);

            fallbackAudioContext.Description = "Fallback for local high CPU";

            AudioContextCollection.Clear();
            AudioContextCollection.Add(nullAudioContext);
            AudioContextCollection.Add(directAudioContext);
            AudioContextCollection.Add(conferenceAudioContext);
            AudioContextCollection.Add(remoteFallbackAudioContext);
            AudioContextCollection.Add(fallbackAudioContext);

            CurrentAudioContext = nullAudioContext;
        }