Example No. 1
        public MediaController(MediaConfig config, AudioFormat playedAudioFormat, MediaStatistics mediaStats, IMediaEnvironment mediaEnvironment, IMediaConnection mediaConnection, IVideoQualityController videoQualityController)
        {
            // Initialize the class variables.
            _mediaEnvironment                  = mediaEnvironment;
            MediaConfig                        = config;
            MediaStats                         = mediaStats;
            MediaConnection                    = mediaConnection;
            VideoQualityController             = videoQualityController;
            MediaConnection.AudioPacketHandler = HandleAudioPacket;
            MediaConnection.VideoPacketHandler = HandleVideoPacket;

            Logger         = new MediaControllerLogger(VideoQualityController, MediaStats);
            _localSsrcId   = config.LocalSsrcId;
            RemoteSessions = new Dictionary<ushort, VideoThreadData>();
            VideoQualityController.RemoteSessions = RemoteSessions;
            PlayedAudioFormat = playedAudioFormat;

            _silentBytes        = new byte[PlayedAudioFormat.BytesPerFrame];
            _decodedFrameBuffer = new short[PlayedAudioFormat.SamplesPerFrame * 10];             // Make room for 10 frames.

            _codecFactory = config.CodecFactory;
            _videoEncoder = _codecFactory.GetVideoEncoder(VideoQualityController, MediaStats);

            // Instantiate the audio jitter queue.
            _audioJitter = new AudioJitterQueue(_codecFactory, VideoQualityController, MediaStats);
            _audioJitter.CodecTypeChanged += audioJitter_CodecTypeChanged;

            _audioDecodeBuffer = new byte[VideoConstants.MaxPayloadSize];
            _audioSendBuffer   = new ByteStream(RtpPacketData.DataPacketMaxLength);

            // Spin up the various audio and video encoding threads.
            // On multiprocessor machines, these can spread the load, but even on single-processor machines it helps a great deal
            // if the various audio and video sinks can return immediately.
            _audioEncodeResetEvent  = new ManualResetEvent(false);
            _audioEncodeThread      = new Thread(TransmitAudio);
            _audioEncodeThread.Name = "MediaController.TransmitAudio";
            _audioEncodeThread.Start();
            _videoEncodeResetEvent    = new ManualResetEvent(false);
            _videoTransmitThread      = new Thread(TransmitVideo);
            _videoTransmitThread.Name = "MediaController.TransmitVideo";
            _videoTransmitThread.Start();

            // Create the object pools that will help us reduce time spent in garbage collection.
            _videoBufferPool  = new ObjectPool<ByteStream>(() => new ByteStream(VideoConstants.MaxPayloadSize * 2), bs => bs.Reset());
            _packetBufferPool = new ObjectPool<ByteStream>(() => new ByteStream(RtpPacketData.DataPacketMaxLength), bs => bs.Reset());
            _videoChunkPool   = new ObjectPool<Chunk>(() => new Chunk {
                Payload = new ByteStream(VideoConstants.MaxPayloadSize * 2)
            }, chunk => { chunk.SsrcId = 0; chunk.Payload.Reset(); });

            AudioStats = new ObservableCollection<AudioStatistics>();

            _speakerStatistics    = new AudioStatistics("Volume:Sent to Speaker", MediaStats);
            _microphoneStatistics = new AudioStatistics("Volume:Received from Microphone", MediaStats);
            _cancelledStatistics  = new AudioStatistics("Volume:Echo Cancelled", MediaStats);

            AudioStats.Add(_speakerStatistics);
            AudioStats.Add(_microphoneStatistics);
            AudioStats.Add(_cancelledStatistics);
        }
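
The object pools above are what keep the media pipeline allocation-free on its hot paths. The ObjectPool implementation itself isn't shown in these examples; a minimal sketch consistent with the constructor calls above (a factory delegate plus an optional reset action applied on recycle) might look like the following. The Get/Return method names are assumptions, not the library's actual API.

 using System;
 using System.Collections.Generic;

 // Hypothetical minimal ObjectPool<T> matching the usage above; Get/Return are placeholder names.
 public class ObjectPool<T>
 {
     private readonly Queue<T> _items = new Queue<T>();
     private readonly Func<T> _factory;
     private readonly Action<T> _reset;
     private readonly object _lock = new object();

     public ObjectPool(Func<T> factory, Action<T> reset = null)
     {
         _factory = factory;
         _reset   = reset;
     }

     // Reuse a pooled instance when one is available; otherwise allocate via the factory.
     public T Get()
     {
         lock (_lock)
         {
             if (_items.Count > 0)
             {
                 return _items.Dequeue();
             }
         }
         return _factory();
     }

     // Apply the reset action (if any) and make the instance available for reuse.
     public void Return(T item)
     {
         if (_reset != null)
         {
             _reset(item);
         }
         lock (_lock)
         {
             _items.Enqueue(item);
         }
     }
 }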
Example No. 2
 public AudioSinkAdapter(CaptureSource captureSource, IAudioController audioController, MediaConfig mediaConfig, IMediaEnvironment mediaEnvironment, AudioFormat playedAudioFormat)
 {
     ClientLogger.Debug(GetType().Name + " created.");

     // Store the injected dependencies and create a logger for this sink.
     CaptureSource      = captureSource;
     AudioController    = audioController;
     _mediaConfig       = mediaConfig;
     _mediaEnvironment  = mediaEnvironment;
     _playedAudioFormat = playedAudioFormat;
     _logger            = new AudioSinkAdapterLogger();
 }
Example No. 3
 public RtpMediaConnection(MediaConfig config, MediaStatistics mediaStats)
 {
     // Create network clients for the media server's control and streaming ports.
     _controlClient         = new NetClient(config.MediaServerHost, config.MediaServerControlPort);
     _rtpClient             = new NetClient(config.MediaServerHost, config.MediaServerStreamingPort, mediaStats);
     _rtpConnect            = new RtpPacketConnect();
     _rtpData               = new RtpPacketData();
     _rtpConnect.SsrcId     = config.LocalSsrcId;
     _dataReceiveBuffer     = new ByteStream(RtpPacketData.DataPacketMaxLength * 10);         // Leave room for at least 10 packets.
     _rtpPacketDataListPool = new ObjectPool<List<RtpPacketData>>(() => new List<RtpPacketData>(), list => list.Clear());
     _rtpPacketDataPool     = new ObjectPool<RtpPacketData>(() => new RtpPacketData());      // No reset action needed.
     _packetBufferPool      = new ObjectPool<ByteStream>(() => new ByteStream(RtpPacketData.DataPacketMaxLength), bs => bs.Reset());
 }
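
The paired pools here (a pool of List&lt;RtpPacketData&gt; plus a pool of individual RtpPacketData instances) suggest a receive path that parses several packets per socket read without allocating. A purely illustrative sketch, assuming hypothetical Get/Return pool methods and a hypothetical parsing helper; none of these names are confirmed by the examples:

 // Illustrative only: drain the receive buffer into pooled packet objects.
 private void OnRtpDataReceived()
 {
     List<RtpPacketData> packets = _rtpPacketDataListPool.Get();   // arrives pre-cleared via the reset action
     try
     {
         RtpPacketData packet;
         // Hypothetical parser that draws its instances from _rtpPacketDataPool.
         while ((packet = TryParsePacket(_dataReceiveBuffer)) != null)
         {
             packets.Add(packet);
         }
         DispatchToHandlers(packets);   // hypothetical: hand audio/video payloads to their handlers
     }
     finally
     {
         foreach (RtpPacketData packet in packets)
         {
             _rtpPacketDataPool.Return(packet);
         }
         _rtpPacketDataListPool.Return(packets);   // Clear() runs via the pool's reset action
     }
 }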
Example No. 4
 public MediaSinkFactory(IAudioController audioController,
                         IVideoController videoController,
                         MediaConfig mediaConfig,
                         IMediaEnvironment mediaEnvironment,
                         IVideoQualityController videoQualityController)
 {
     // Store the injected controllers, configuration, and environment for later sink creation.
     _audioController        = audioController;
     _videoController        = videoController;
     _mediaConfig            = mediaConfig;
     _mediaEnvironment       = mediaEnvironment;
     _videoQualityController = videoQualityController;
 }
Example No. 5
        /// <summary>
        /// Creates a new instance of the AudioContextFactory.
        /// </summary>
        /// <param name="rawAudioFormat">The format in which the audio coming directly from the microphone is recorded</param>
        /// <param name="playedAudioFormat">The format in which the audio will be played back on the far end (typically 16Khz)</param>
        /// <param name="config">The currently active MediaConfig instance</param>
        /// <param name="mediaEnvironment">An IMediaEnvironment instance which can be used to make decisions about which context to return, for instance,
        /// if the CPU is running too hot, or multiple people have joined the conference.</param>
        public AudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
        {
            RawAudioFormat    = rawAudioFormat;
            PlayedAudioFormat = playedAudioFormat;
            MediaConfig       = config;
            MediaEnvironment  = mediaEnvironment;

            // What we should use when there's only one other person, and CPU is OK:
            // 16 kHz, Speex, WebRtc at full strength
            var directAudioFormat = new AudioFormat();
            var directResampler   = new ResampleFilter(rawAudioFormat, directAudioFormat);

            directResampler.InstanceName = "High Quality Direct Resampler";
            var directEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, directAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            directEnhancer.InstanceName = "High";
            var directDtx     = new DtxFilter(directAudioFormat);
            var directEncoder = new SpeexEncoder(directAudioFormat);

            HighQualityDirectCtx             = new AudioContext(directAudioFormat, directResampler, directDtx, directEnhancer, directEncoder);
            HighQualityDirectCtx.Description = "High Quality Direct";

            // What we should use when there are multiple people (and hence the audio will need to be decoded and mixed), but CPU is OK:
            // 8 kHz, G711, WebRtc at full strength
            var conferenceAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var conferenceResampler   = new ResampleFilter(rawAudioFormat, conferenceAudioFormat);

            conferenceResampler.InstanceName = "High Quality Conference Resampler";
            var conferenceEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, conferenceAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            conferenceEnhancer.InstanceName = "Medium";
            var conferenceDtx     = new DtxFilter(conferenceAudioFormat);
            var conferenceEncoder = new G711MuLawEncoder(conferenceAudioFormat);

            HighQualityConferenceCtx             = new AudioContext(conferenceAudioFormat, conferenceResampler, conferenceDtx, conferenceEnhancer, conferenceEncoder);
            HighQualityConferenceCtx.Description = "High Quality Conference";

            // What we should use when one or more remote CPUs aren't keeping up (regardless of how many people are in the room):
            // 8 kHz, G711, WebRtc at full strength
            var remoteFallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var remoteFallbackResampler   = new ResampleFilter(rawAudioFormat, remoteFallbackAudioFormat);

            remoteFallbackResampler.InstanceName = "Low Quality Remote CPU Resampler";
            var remoteFallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, remoteFallbackAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            remoteFallbackEnhancer.InstanceName = "Medium";
            var remoteFallbackDtx     = new DtxFilter(remoteFallbackAudioFormat);
            var remoteFallbackEncoder = new G711MuLawEncoder(remoteFallbackAudioFormat);

            LowQualityForRemoteCpuCtx             = new AudioContext(remoteFallbackAudioFormat, remoteFallbackResampler, remoteFallbackDtx, remoteFallbackEnhancer, remoteFallbackEncoder);
            LowQualityForRemoteCpuCtx.Description = "Fallback for remote high CPU";

            // What we should use when the local CPU isn't keeping up (regardless of how many people are in the room):
            // 8 kHz, G711, WebRtc at half strength
            var fallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var fallbackResampler   = new ResampleFilter(rawAudioFormat, fallbackAudioFormat);

            fallbackResampler.InstanceName = "Low Quality Local CPU Resampler";
            var fallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatencyFallback, config.FilterLengthFallback, fallbackAudioFormat, playedAudioFormat, config.EnableAec, false, false);

            fallbackEnhancer.InstanceName = "Low";
            var fallbackDtx     = new DtxFilter(fallbackAudioFormat);
            var fallbackEncoder = new G711MuLawEncoder(fallbackAudioFormat);

            LowQualityForLocalCpuCtx             = new AudioContext(fallbackAudioFormat, fallbackResampler, fallbackDtx, fallbackEnhancer, fallbackEncoder);
            LowQualityForLocalCpuCtx.Description = "Fallback for local high CPU";

            _audioContextAdapter = new EnvironmentAdapter<AudioContext>(mediaEnvironment,
                                                                        HighQualityDirectCtx,
                                                                        HighQualityConferenceCtx,
                                                                        LowQualityForRemoteCpuCtx,
                                                                        LowQualityForLocalCpuCtx);
        }
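
The EnvironmentAdapter&lt;AudioContext&gt; is what ultimately decides which of the four contexts to hand out. Its real members and thresholds aren't shown in these examples; a plausible sketch of the tiering described by the comments above, with every property name hypothetical:

 // Illustrative decision ladder only; the actual EnvironmentAdapter logic is not shown here.
 private AudioContext SelectContext(IMediaEnvironment environment)
 {
     if (environment.LocalCpuIsOverloaded)        // hypothetical property
     {
         return LowQualityForLocalCpuCtx;         // 8 kHz, G711, WebRtc at half strength
     }
     if (environment.AnyRemoteCpuIsOverloaded)    // hypothetical property
     {
         return LowQualityForRemoteCpuCtx;        // 8 kHz, G711, WebRtc at full strength
     }
     if (environment.RemoteSessionCount > 1)      // hypothetical property
     {
         return HighQualityConferenceCtx;         // 8 kHz, G711 (cheap to decode and mix)
     }
     return HighQualityDirectCtx;                 // 16 kHz, Speex, WebRtc at full strength
 }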
Example No. 6
 // Virtual so subclasses (e.g., test doubles) can substitute a different IAudioContextFactory.
 protected virtual IAudioContextFactory GetAudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
 {
     return new AudioContextFactory(rawAudioFormat, playedAudioFormat, config, mediaEnvironment);
 }
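
Because GetAudioContextFactory is protected virtual, a subclass can swap in its own factory, which is handy in tests where building real resamplers, WebRtc filters, and encoders is unwanted. A minimal sketch; the declaring type isn't shown in this example, and StubAudioContextFactory is a hypothetical test double:

 // Hypothetical override in a test subclass of whichever class declares the hook above.
 protected override IAudioContextFactory GetAudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
 {
     // Avoid constructing the real filter/encoder chains during unit tests.
     return new StubAudioContextFactory();   // hypothetical IAudioContextFactory implementation
 }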