Example 1
        public MediaController(MediaConfig config, AudioFormat playedAudioFormat, MediaStatistics mediaStats, IMediaEnvironment mediaEnvironment, IMediaConnection mediaConnection, IVideoQualityController videoQualityController)
        {
            // Initialize the class variables.
            _mediaEnvironment                  = mediaEnvironment;
            MediaConfig                        = config;
            MediaStats                         = mediaStats;
            MediaConnection                    = mediaConnection;
            VideoQualityController             = videoQualityController;
            MediaConnection.AudioPacketHandler = HandleAudioPacket;
            MediaConnection.VideoPacketHandler = HandleVideoPacket;

            Logger         = new MediaControllerLogger(VideoQualityController, MediaStats);
            _localSsrcId   = config.LocalSsrcId;
            RemoteSessions = new Dictionary<ushort, VideoThreadData>();
            VideoQualityController.RemoteSessions = RemoteSessions;
            PlayedAudioFormat = playedAudioFormat;

            _silentBytes        = new byte[PlayedAudioFormat.BytesPerFrame];
            _decodedFrameBuffer = new short[PlayedAudioFormat.SamplesPerFrame * 10];             // Make room for 10 frames.

            _codecFactory = config.CodecFactory;
            _videoEncoder = _codecFactory.GetVideoEncoder(VideoQualityController, MediaStats);

            // Instantiate the audio jitter queue and listen for codec changes.
            _audioJitter = new AudioJitterQueue(_codecFactory, VideoQualityController, MediaStats);
            _audioJitter.CodecTypeChanged += audioJitter_CodecTypeChanged;

            _audioDecodeBuffer = new byte[VideoConstants.MaxPayloadSize];
            _audioSendBuffer   = new ByteStream(RtpPacketData.DataPacketMaxLength);

            // Spin up the various audio and video encoding threads.
            // On multiprocessor machines, these can spread the load, but even on single-processor machines it helps a great deal
            // if the various audio and video sinks can return immediately.
            _audioEncodeResetEvent  = new ManualResetEvent(false);
            _audioEncodeThread      = new Thread(TransmitAudio);
            _audioEncodeThread.Name = "MediaController.TransmitAudio";
            _audioEncodeThread.Start();
            _videoEncodeResetEvent    = new ManualResetEvent(false);
            _videoTransmitThread      = new Thread(TransmitVideo);
            _videoTransmitThread.Name = "MediaController.TransmitVideo";
            _videoTransmitThread.Start();

            // Create the object pools that will help us reduce time spent in garbage collection.
            _videoBufferPool  = new ObjectPool<ByteStream>(() => new ByteStream(VideoConstants.MaxPayloadSize * 2), bs => bs.Reset());
            _packetBufferPool = new ObjectPool<ByteStream>(() => new ByteStream(RtpPacketData.DataPacketMaxLength), bs => bs.Reset());
            _videoChunkPool   = new ObjectPool<Chunk>(() => new Chunk {
                Payload = new ByteStream(VideoConstants.MaxPayloadSize * 2)
            }, chunk => { chunk.SsrcId = 0; chunk.Payload.Reset(); });

            AudioStats = new ObservableCollection<AudioStatistics>();

            _speakerStatistics    = new AudioStatistics("Volume:Sent to Speaker", MediaStats);
            _microphoneStatistics = new AudioStatistics("Volume:Received from Microphone", MediaStats);
            _cancelledStatistics  = new AudioStatistics("Volume:Echo Cancelled", MediaStats);

            AudioStats.Add(_speakerStatistics);
            AudioStats.Add(_microphoneStatistics);
            AudioStats.Add(_cancelledStatistics);
        }
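
The `ObjectPool<T>` instances above recycle `ByteStream` and `Chunk` buffers so the per-packet hot path allocates as little as possible. Below is a minimal sketch of a pool matching the factory-delegate/reset-delegate usage in the constructor; it is an illustration only, not the library's actual `ObjectPool` implementation, and the `GetNext`/`Recycle` method names are assumptions.

using System;
using System.Collections.Generic;

// A minimal sketch of an object pool matching the usage above.
// Illustration only: this is not the library's actual ObjectPool,
// and the GetNext/Recycle method names are assumptions.
public class ObjectPool<T>
{
    private readonly Queue<T> _items = new Queue<T>();
    private readonly Func<T> _factory;
    private readonly Action<T> _reset;

    public ObjectPool(Func<T> factory, Action<T> reset)
    {
        _factory = factory;
        _reset   = reset;
    }

    // Hand out a recycled instance, or build a fresh one when the pool is empty.
    public T GetNext()
    {
        lock (_items)
        {
            return _items.Count > 0 ? _items.Dequeue() : _factory();
        }
    }

    // Clean the instance and put it back for reuse.
    public void Recycle(T item)
    {
        _reset(item);
        lock (_items)
        {
            _items.Enqueue(item);
        }
    }
}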
Example 2
 public AudioSinkAdapter(CaptureSource captureSource, IAudioController audioController, MediaConfig mediaConfig, IMediaEnvironment mediaEnvironment, AudioFormat playedAudioFormat)
 {
     ClientLogger.Debug(GetType().Name + " created.");
     CaptureSource      = captureSource;
     AudioController    = audioController;
     _mediaConfig       = mediaConfig;
     _mediaEnvironment  = mediaEnvironment;
     _playedAudioFormat = playedAudioFormat;
     _logger            = new AudioSinkAdapterLogger();
 }
 public FromFileAudioSinkAdapter(
     CaptureSource captureSource,
     IAudioController audioController,
     MediaConfig mediaConfig,
     IMediaEnvironment mediaEnvironment,
     AudioFormat playedAudioFormat,
     List<byte[]> testFrames)
     : base(captureSource, audioController, mediaConfig, mediaEnvironment, playedAudioFormat)
 {
     _testFrames = testFrames;
 }
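
FromFileAudioSinkAdapter substitutes a list of canned frames for live microphone input, which makes the audio pipeline testable without real capture hardware. A construction sketch under that assumption follows; the captureSource, audioController, mediaConfig, mediaEnvironment and playedAudioFormat instances are presumed to come from the surrounding test fixture.

 // Sketch only: drive the pipeline from canned frames in a test.
 // All collaborator instances are assumed to exist in the test fixture.
 var testFrames = new List<byte[]>();
 for (int i = 0; i < 100; i++)
 {
     testFrames.Add(new byte[playedAudioFormat.BytesPerFrame]); // silent frames
 }

 var sinkAdapter = new FromFileAudioSinkAdapter(
     captureSource, audioController, mediaConfig, mediaEnvironment,
     playedAudioFormat, testFrames);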
Example 4
 public MediaSinkFactory(IAudioController audioController,
                         IVideoController videoController,
                         MediaConfig mediaConfig,
                         IMediaEnvironment mediaEnvironment,
                         IVideoQualityController videoQualityController)
 {
     _audioController        = audioController;
     _videoController        = videoController;
     _mediaConfig            = mediaConfig;
     _mediaEnvironment       = mediaEnvironment;
     _videoQualityController = videoQualityController;
 }
 public TimingAudioSinkAdapter(
     AudioContext audioContext,
     CaptureSource captureSource,
     IAudioController audioController,
     MediaConfig mediaConfig,
     IMediaEnvironment mediaEnvironment,
     AudioFormat playedAudioFormat)
     : base(captureSource, audioController, mediaConfig, mediaEnvironment, playedAudioFormat)
 {
     _audioContext = audioContext;
     ClientLogger.Debug(GetType().Name + " created.");
 }
        public SingleAudioContextFactory(AudioContext audioContext, AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig mediaConfig, IMediaEnvironment mediaEnvironment)
        {
            RawAudioFormat    = rawAudioFormat;
            PlayedAudioFormat = playedAudioFormat;
            MediaConfig       = mediaConfig;
            MediaEnvironment  = mediaEnvironment;

            // Hack!!!! We need to make a copy of the audioContext, but with a few tweaks.
            // When the audio context is first created, we don't know what the rawAudioFormat will be,
            // but it should be accurate by this point, so we need to recreate the AudioContext.
            var resampler = new ResampleFilter(rawAudioFormat, playedAudioFormat);

            resampler.InstanceName    = audioContext.Resampler.InstanceName;
            _audioContext             = new AudioContext(playedAudioFormat, resampler, audioContext.DtxFilter, audioContext.SpeechEnhancementStack, audioContext.Encoder);
            _audioContext.Description = audioContext.Description;
        }
Example 7
        /// <summary>
        /// Creates a new instance of the EnvironmentAdapter class
        /// </summary>
        /// <param name="mediaEnvironment">The IMediaEnvironment instance that the EnvironmentAdapter can use to gather information about its environment</param>
        /// <param name="directInstance">The instance of type T for when the CPU is low and there's only one remote session</param>
        /// <param name="conferenceInstance">The instance of type T for when the CPU is low and there are multiple remote sessions</param>
        /// <param name="remoteFallbackInstance">The instance of type T that should be returned when one or more remote CPU's is running too hot</param>
        /// <param name="fallbackInstance">The instance of type T that should be returned when the local CPU is running too hot</param>
        public EnvironmentAdapter(IMediaEnvironment mediaEnvironment,
                                  T directInstance,
                                  T conferenceInstance,
                                  T remoteFallbackInstance,
                                  T fallbackInstance)
        {
            DirectInstance         = directInstance;
            ConferenceInstance     = conferenceInstance;
            RemoteFallbackInstance = remoteFallbackInstance;
            FallbackInstance       = fallbackInstance;

            currentInstance           = directInstance;
            this.mediaEnvironment     = mediaEnvironment;
            MaxRecommendedLoad        = 70;
            MaxSafeLoad               = 60;
            MinimumTimeUntilDowngrade = TimeSpan.FromSeconds(5);
            MinimumTimeUntilUpgrade   = TimeSpan.FromSeconds(15);
        }
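
The constructor only stores the four candidate instances and the load thresholds; the <param> documentation above implies the selection rule. Here is a hypothetical sketch of that rule; the IMediaEnvironment member names used are assumptions for illustration, not the library's confirmed API.

        // Hypothetical sketch of the selection rule implied by the documentation.
        // LocalProcessorLoad, RemoteProcessorLoad and RemoteSessionCount are
        // assumed member names, not confirmed API.
        private T SelectInstance()
        {
            if (mediaEnvironment.LocalProcessorLoad > MaxRecommendedLoad)
            {
                return FallbackInstance;       // local CPU running too hot
            }
            if (mediaEnvironment.RemoteProcessorLoad > MaxRecommendedLoad)
            {
                return RemoteFallbackInstance; // a remote CPU running too hot
            }
            return mediaEnvironment.RemoteSessionCount > 1
                ? ConferenceInstance           // multiple remote sessions
                : DirectInstance;              // single remote session
        }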
        /// <summary>
        /// Creates a new instance of the AudioContextFactory.
        /// </summary>
        /// <param name="rawAudioFormat">The format in which the audio coming directly from the microphone is recorded</param>
        /// <param name="playedAudioFormat">The format in which the audio will be played back on the far end (typically 16Khz)</param>
        /// <param name="config">The currently active MediaConfig instance</param>
        /// <param name="mediaEnvironment">An IMediaEnvironment instance which can be used to make decisions about which context to return, for instance,
        /// if the CPU is running too hot, or multiple people have joined the conference.</param>
        public AudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
        {
            RawAudioFormat    = rawAudioFormat;
            PlayedAudioFormat = playedAudioFormat;
            MediaConfig       = config;
            MediaEnvironment  = mediaEnvironment;

            // What we should use when there's only one other person, and CPU is OK:
            // 16 kHz, Speex, WebRtc at full strength
            var directAudioFormat = new AudioFormat();
            var directResampler   = new ResampleFilter(rawAudioFormat, directAudioFormat);

            directResampler.InstanceName = "High Quality Direct Resampler";
            var directEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, directAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            directEnhancer.InstanceName = "High";
            var directDtx     = new DtxFilter(directAudioFormat);
            var directEncoder = new SpeexEncoder(directAudioFormat);

            HighQualityDirectCtx             = new AudioContext(directAudioFormat, directResampler, directDtx, directEnhancer, directEncoder);
            HighQualityDirectCtx.Description = "High Quality Direct";

            // What we should use when there are multiple people (and hence the audio will need to be decoded and mixed), but CPU is OK:
            // 8 kHz, G711, WebRtc at full strength
            var conferenceAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var conferenceResampler   = new ResampleFilter(rawAudioFormat, conferenceAudioFormat);

            conferenceResampler.InstanceName = "High Quality Conference Resampler";
            var conferenceEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, conferenceAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            conferenceEnhancer.InstanceName = "Medium";
            var conferenceDtx     = new DtxFilter(conferenceAudioFormat);
            var conferenceEncoder = new G711MuLawEncoder(conferenceAudioFormat);

            HighQualityConferenceCtx             = new AudioContext(conferenceAudioFormat, conferenceResampler, conferenceDtx, conferenceEnhancer, conferenceEncoder);
            HighQualityConferenceCtx.Description = "High Quality Conference";

            // What we should use when one or more remote CPUs aren't keeping up (regardless of how many people are in the room):
            // 8 kHz, G711, WebRtc at full strength
            var remoteFallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var remoteFallbackResampler   = new ResampleFilter(rawAudioFormat, remoteFallbackAudioFormat);

            remoteFallbackResampler.InstanceName = "Low Quality Remote CPU Resampler";
            var remoteFallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, remoteFallbackAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            remoteFallbackEnhancer.InstanceName = "Medium";
            var remoteFallbackDtx     = new DtxFilter(remoteFallbackAudioFormat);
            var remoteFallbackEncoder = new G711MuLawEncoder(remoteFallbackAudioFormat);

            LowQualityForRemoteCpuCtx             = new AudioContext(remoteFallbackAudioFormat, remoteFallbackResampler, remoteFallbackDtx, remoteFallbackEnhancer, remoteFallbackEncoder);
            LowQualityForRemoteCpuCtx.Description = "Fallback for remote high CPU";

            // What we should use when the local CPU isn't keeping up (regardless of how many people are in the room):
            // 8 kHz, G711, WebRtc at half strength
            var fallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var fallbackResampler   = new ResampleFilter(rawAudioFormat, fallbackAudioFormat);

            fallbackResampler.InstanceName = "Low Quality Local CPU Resampler";
            var fallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatencyFallback, config.FilterLengthFallback, fallbackAudioFormat, playedAudioFormat, config.EnableAec, false, false);

            fallbackEnhancer.InstanceName = "Low";
            var fallbackDtx     = new DtxFilter(fallbackAudioFormat);
            var fallbackEncoder = new G711MuLawEncoder(fallbackAudioFormat);

            LowQualityForLocalCpuCtx             = new AudioContext(fallbackAudioFormat, fallbackResampler, fallbackDtx, fallbackEnhancer, fallbackEncoder);
            LowQualityForLocalCpuCtx.Description = "Fallback for local high CPU";

            _audioContextAdapter = new EnvironmentAdapter<AudioContext>(mediaEnvironment,
                                                                        HighQualityDirectCtx,
                                                                        HighQualityConferenceCtx,
                                                                        LowQualityForRemoteCpuCtx,
                                                                        LowQualityForLocalCpuCtx);
        }
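
Everything the factory builds funnels into the EnvironmentAdapter<AudioContext>, so retrieving the context appropriate for current conditions presumably amounts to delegating to the adapter. A hedged sketch follows; GetAudioContext() and GetItem() are assumed names, not confirmed API.

        // Hypothetical sketch: expose the context matching current conditions
        // by delegating to the environment adapter. GetAudioContext() and
        // GetItem() are assumed names, not confirmed API.
        public AudioContext GetAudioContext()
        {
            return _audioContextAdapter.GetItem();
        }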
 /// <summary>
 /// This overrides the normal audio context retrieval process. The TimingAudioContextFactory returns only the specified audio context, so that we can test it.
 /// </summary>
 protected override IAudioContextFactory GetAudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
 {
     ClientLogger.Debug("TimingAudioContextFactory created.");
     return new SingleAudioContextFactory(_audioContext, rawAudioFormat, playedAudioFormat, config, mediaEnvironment);
 }
Example 10
 protected virtual IAudioContextFactory GetAudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
 {
     return new AudioContextFactory(rawAudioFormat, playedAudioFormat, config, mediaEnvironment);
 }
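
Keeping GetAudioContextFactory virtual is the seam that the override in Example 7 exploits: a test subclass can return a SingleAudioContextFactory pinned to one known AudioContext, instead of the production AudioContextFactory, whose EnvironmentAdapter swaps contexts as CPU load and session count change.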