Example 1
        private void InitializeCaptureSource()
        {
            if (captureSource == null)
            {
                mediaElement      = new MediaElement();
                audioStreamSource = new TestAudioStreamSource(this);
                mediaElement.SetSource(audioStreamSource);

                // Set the audio properties.
                captureSource = new CaptureSource();
                captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();
                if (captureSource.AudioCaptureDevice != null)
                {
                    MediaDeviceConfig.SelectBestAudioFormat(captureSource.AudioCaptureDevice);
                    if (captureSource.AudioCaptureDevice.DesiredFormat != null)
                    {
                        captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds
                        audioSink = new TestAudioSinkAdapter(captureSource);
                        audioSink.ProcessedFrameAvailable += audioSink_FrameArrived;
                        ClientLogger.Debug("CaptureSource initialized.");
                    }
                    else
                    {
                        ClientLogger.Debug("No suitable audio format was found.");
                    }
                }
                else
                {
                    // Do something more here eventually, once we figure out what the user experience should be.
                    ClientLogger.Debug("No audio capture device was found.");
                }
            }
        }
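
All of these examples attach a custom sink to the capture source. In Silverlight, such a sink derives from the built-in System.Windows.Media.AudioSink, which delivers raw PCM frames through OnSamples on a capture thread once the attached CaptureSource starts. A minimal sketch of what an adapter like TestAudioSinkAdapter might look like (the class name and the FrameAvailable event are hypothetical; the overridden members are the real AudioSink contract):

        using System;
        using System.Windows.Media;

        public class MinimalAudioSinkAdapter : AudioSink
        {
            // Hypothetical event, standing in for ProcessedFrameAvailable above.
            public event Action<byte[]> FrameAvailable;

            public MinimalAudioSinkAdapter(CaptureSource captureSource)
            {
                CaptureSource = captureSource; // wires the sink to the capture source
            }

            protected override void OnCaptureStarted() { }
            protected override void OnCaptureStopped() { }

            protected override void OnFormatChange(AudioFormat audioFormat)
            {
                // The negotiated capture format arrives here; a real adapter would
                // validate sample rate, channel count, and bits per sample.
            }

            protected override void OnSamples(long sampleTime, long sampleDuration, byte[] sampleData)
            {
                // Runs on a capture thread, so keep per-frame work minimal.
                var handler = FrameAvailable;
                if (handler != null)
                {
                    handler(sampleData);
                }
            }
        }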
Example 2
        private void InitializeCaptureSource()
        {
            if (_captureSource != null)
            {
                return;
            }

            // Setup the capture source (for recording audio)
            _captureSource = new CaptureSource();
            _captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();
            if (_captureSource.AudioCaptureDevice != null)
            {
                MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);
                if (_captureSource.AudioCaptureDevice.DesiredFormat != null)
                {
                    var mediaStats       = new MediaStatistics();
                    var mediaEnvironment = new MediaEnvironment(mediaStats);
                    _captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds
                    _audioSinkAdapter  = new MultipleControllerAudioSinkAdapter(GetMediaConfig(), _captureSource, 2000);
                    _mediaStreamSource = new MultipleControllerAudioMediaStreamSource(2000);
                    ClientLogger.Debug("CaptureSource initialized.");
                }
                else
                {
                    ClientLogger.Debug("No suitable audio format was found.");
                }
            }
            else
            {
                // Do something more here eventually, once we figure out what the user experience should be.
                ClientLogger.Debug("No audio capture device was found.");
            }
        }
Example 3
 private void InitializeCaptureSource()
 {
     if (_captureSource == null)
     {
         // Setup the capture source (for recording audio)
         _captureSource = new CaptureSource();
         _captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();
         if (_captureSource.AudioCaptureDevice != null)
         {
             MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);
             if (_captureSource.AudioCaptureDevice.DesiredFormat != null)
             {
                 _captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds
                 _audioSink = new AudioSinkAdapter(_captureSource, null, MediaConfig.Default, new TestMediaEnvironment(), AudioFormat.Default);
                 _recorder  = new RecorderBase(_captureSource, _audioSink, speakersAudioVisualizer);
                 chkSynchronizeRecording.DataContext = _audioSink;
                 ClientLogger.Debug("CaptureSource initialized.");
             }
             else
             {
                 ClientLogger.Debug("No suitable audio format was found.");
             }
             panelMicrophone.DataContext = _captureSource;
         }
         else
         {
             // Do something more here eventually, once we figure out what the user experience should be.
             ClientLogger.Debug("No audio capture device was found.");
         }
     }
 }
Example 4
        private void InitializeCaptureSource()
        {
            if (captureSource != null)
            {
                captureSource.Stop();
            }
            captureSource = new CaptureSource();
            captureSource.AudioCaptureDevice = (AudioCaptureDevice)listBoxAudioSources.SelectedItem;

            MediaDeviceConfig.SelectBestAudioFormat(captureSource.AudioCaptureDevice);

            // Note: First() throws InvalidOperationException if no supported
            // format matches these constraints.
            captureSource.AudioCaptureDevice.DesiredFormat = captureSource.AudioCaptureDevice.SupportedFormats
                                                             .First(format => format.BitsPerSample == AudioConstants.BitsPerSample &&
                                                                    format.WaveFormat == WaveFormatType.Pcm &&
                                                                    format.Channels == 1 &&
                                                                    format.SamplesPerSecond == sampleRate);
            captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds

            audioSink = new TestAudioSinkAdapter(captureSource, new NullAudioController());
            audioSink.RawFrameAvailable       += audioSink_RawFrameAvailable;
            audioSink.ProcessedFrameAvailable += audioSink_FrameArrived;

            ClientLogger.Debug("Checking device access.");
            if (CaptureDeviceConfiguration.AllowedDeviceAccess || CaptureDeviceConfiguration.RequestDeviceAccess())
            {
                savedFramesForDebug = new List<byte[]>();
                captureSource.Start();
                ClientLogger.Debug("CaptureSource started.");
            }
        }
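
One detail worth noting in this variant: Silverlight only shows the webcam/microphone consent dialog when CaptureDeviceConfiguration.RequestDeviceAccess() is called from a user-initiated event, so a method like this is typically invoked from a button click handler. A minimal sketch of that wiring (the handler and button names are hypothetical):

        // RequestDeviceAccess() inside InitializeCaptureSource() will only prompt
        // the user because this handler runs in a user-initiated event.
        private void btnStartCapture_Click(object sender, RoutedEventArgs e)
        {
            InitializeCaptureSource();
        }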
Example 5
        private void InitializeCaptureSource()
        {
            ClientLogger.Debug("AudioLoopbackTest:InitializeCaptureSource()");
            if (_captureSource != null)
            {
                _captureSource.Stop();
            }
            _captureSource = new CaptureSource();
            _captureSource.AudioCaptureDevice = (AudioCaptureDevice)lstAudioInputDevices.SelectedItem;
            _captureSource.VideoCaptureDevice = (VideoCaptureDevice)lstVideoInputDevices.SelectedItem;
            _mediaElement      = new MediaElement();
            _audioStreamSource = new TestAudioStreamSource(this);

            // Set the audio properties.
            if (_captureSource.AudioCaptureDevice != null)
            {
                MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);
                if (_captureSource.AudioCaptureDevice.DesiredFormat != null)
                {
                    _captureSource.AudioCaptureDevice.AudioFrameSize = _audioFormat.MillisecondsPerFrame; // 20 milliseconds
                    _audioSink = new TestAudioSinkAdapter(_captureSource, new NullAudioController());
                    _audioSink.RawFrameAvailable       += audioSink_RawFrameAvailable;
                    _audioSink.ProcessedFrameAvailable += audioSink_FrameArrived;
                    ClientLogger.Debug("CaptureSource initialized.");
                }
                else
                {
                    ClientLogger.Debug("No suitable audio format was found.");
                }

                ClientLogger.Debug("Checking device access.");
                if (CaptureDeviceConfiguration.AllowedDeviceAccess || CaptureDeviceConfiguration.RequestDeviceAccess())
                {
                    var videoDeviceName = _captureSource.VideoCaptureDevice != null
                                          ? _captureSource.VideoCaptureDevice.FriendlyName
                                          : "(no video device)";
                    ClientLogger.Debug("AudioLoopbackTest CaptureSource starting with audio device {0}, video device {1}.",
                                       _captureSource.AudioCaptureDevice.FriendlyName, videoDeviceName);
                    _captureSource.Start();
                    ClientLogger.Debug("CaptureSource started; setting media element source.");
                    _mediaElement.SetSource(_audioStreamSource);
                    ClientLogger.Debug("MediaElement source set; telling media element to play.");
                    _mediaElement.Play();
                }
            }
            else
            {
                // Do something more here eventually, once we figure out what the user experience should be.
                ClientLogger.Debug("No audio capture device was found.");
            }
        }
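
A loopback test like this also needs a matching teardown so the capture devices are released and playback stops. A minimal sketch of such a counterpart, assuming the same fields as above (the method name is hypothetical; Stop() on CaptureSource and MediaElement is the real Silverlight API):

        private void CloseCaptureSource()
        {
            if (_captureSource != null && _captureSource.State == CaptureState.Started)
            {
                _captureSource.Stop(); // releases the microphone and camera
            }
            if (_mediaElement != null)
            {
                _mediaElement.Stop(); // halts playback of the loopback stream
            }
        }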
Example 6
        public void CreateAudioContexts()
        {
            _captureSource.VideoCaptureDevice = null;
            if (_captureSource.AudioCaptureDevice == null)
            {
                _captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();
                if (_captureSource.AudioCaptureDevice == null)
                {
                    throw new InvalidOperationException("No suitable audio capture device was found");
                }
            }
            MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);
            _captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds
            var desiredFormat  = _captureSource.AudioCaptureDevice.DesiredFormat;
            var rawAudioFormat = new AudioFormat(desiredFormat.SamplesPerSecond, AudioFormat.Default.MillisecondsPerFrame, desiredFormat.Channels, desiredFormat.BitsPerSample);

            var playedAudioFormat = new AudioFormat();
            var config            = MediaConfig.Default;

            // Absolutely bare minimum processing - doesn't process sound at all.
            var nullAudioFormat = new AudioFormat();
            var nullResampler   = new ResampleFilter(rawAudioFormat, nullAudioFormat);

            nullResampler.InstanceName = "Null resample filter";
            var nullEnhancer = new NullEchoCancelFilter(config.ExpectedAudioLatency, config.FilterLength, nullAudioFormat, playedAudioFormat);

            nullEnhancer.InstanceName = "Null";
            var nullDtx          = new NullAudioInplaceFilter();
            var nullEncoder      = new NullAudioEncoder();
            var nullAudioContext = new AudioContext(nullAudioFormat, nullResampler, nullDtx, nullEnhancer, nullEncoder);

            nullAudioContext.Description = "Null";

            // What we should use when there's only one other person, and CPU is OK:
            // 16Khz, Speex, WebRtc at full strength
            var directAudioFormat = new AudioFormat();
            var directResampler   = new ResampleFilter(rawAudioFormat, directAudioFormat);

            directResampler.InstanceName = "Direct high quality resample filter";
            var directEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, directAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            directEnhancer.InstanceName = "High";
            var directDtx          = new DtxFilter(directAudioFormat);
            var directEncoder      = new SpeexEncoder(directAudioFormat);
            var directAudioContext = new AudioContext(directAudioFormat, directResampler, directDtx, directEnhancer, directEncoder);

            directAudioContext.Description = "High Quality Direct";

            // What we should use when there are multiple people (and hence the audio will need to be decoded and mixed), but CPU is OK:
            // 8Khz, G711, WebRtc at full strength
            var conferenceAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var conferenceResampler   = new ResampleFilter(rawAudioFormat, conferenceAudioFormat);

            conferenceResampler.InstanceName = "Conference high quality resample filter";
            var conferenceEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, conferenceAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            conferenceEnhancer.InstanceName = "Medium";
            var conferenceDtx          = new DtxFilter(conferenceAudioFormat);
            var conferenceEncoder      = new G711MuLawEncoder(conferenceAudioFormat);
            var conferenceAudioContext = new AudioContext(conferenceAudioFormat, conferenceResampler, conferenceDtx, conferenceEnhancer, conferenceEncoder);

            conferenceAudioContext.Description = "High Quality Conference";

            // What we should use when one or more remote CPU's isn't keeping up (regardless of how many people are in the room):
            // 8Khz, G711, WebRtc at full-strength
            var remoteFallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var remoteFallbackResampler   = new ResampleFilter(rawAudioFormat, remoteFallbackAudioFormat);

            remoteFallbackResampler.InstanceName = "Fallback remote high cpu resample filter";
            var remoteFallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, remoteFallbackAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);

            remoteFallbackEnhancer.InstanceName = "Medium";
            var remoteFallbackDtx          = new DtxFilter(remoteFallbackAudioFormat);
            var remoteFallbackEncoder      = new G711MuLawEncoder(remoteFallbackAudioFormat);
            var remoteFallbackAudioContext = new AudioContext(remoteFallbackAudioFormat, remoteFallbackResampler, remoteFallbackDtx, remoteFallbackEnhancer, remoteFallbackEncoder);

            remoteFallbackAudioContext.Description = "Fallback for remote high CPU";

            // What we should use when the local CPU isn't keeping up (regardless of how many people are in the room):
            // 8Khz, G711, WebRtc at half-strength
            var fallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
            var fallbackResampler   = new ResampleFilter(rawAudioFormat, fallbackAudioFormat);

            fallbackResampler.InstanceName = "Fallback resample filter";
            var fallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatencyFallback, config.FilterLengthFallback, fallbackAudioFormat, playedAudioFormat, config.EnableAec, false, false);

            fallbackEnhancer.InstanceName = "Low";
            var fallbackDtx          = new DtxFilter(fallbackAudioFormat);
            var fallbackEncoder      = new G711MuLawEncoder(fallbackAudioFormat);
            var fallbackAudioContext = new AudioContext(fallbackAudioFormat, fallbackResampler, fallbackDtx, fallbackEnhancer, fallbackEncoder);

            fallbackAudioContext.Description = "Fallback for local high CPU";

            AudioContextCollection.Clear();
            AudioContextCollection.Add(nullAudioContext);
            AudioContextCollection.Add(directAudioContext);
            AudioContextCollection.Add(conferenceAudioContext);
            AudioContextCollection.Add(remoteFallbackAudioContext);
            AudioContextCollection.Add(fallbackAudioContext);

            CurrentAudioContext = nullAudioContext;
        }
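
The method above only builds the ladder of contexts; something else has to move CurrentAudioContext between rungs as conditions change. A hedged sketch of what that selection might look like, keyed off the Description strings assigned above and assuming AudioContextCollection is enumerable (the method, its parameters, and the selection criteria are hypothetical; the real logic presumably weighs CPU load and participant count elsewhere in the media stack):

        using System.Linq;

        public void SelectAudioContext(int remoteParticipants, bool localCpuOverloaded)
        {
            if (localCpuOverloaded)
            {
                // Local CPU can't keep up: cheapest real pipeline, regardless of room size.
                CurrentAudioContext = AudioContextCollection.First(c => c.Description == "Fallback for local high CPU");
            }
            else if (remoteParticipants > 1)
            {
                // Multiple remote parties: audio must be decoded and mixed, so use G711.
                CurrentAudioContext = AudioContextCollection.First(c => c.Description == "High Quality Conference");
            }
            else
            {
                // One-on-one call with CPU headroom: 16 kHz Speex.
                CurrentAudioContext = AudioContextCollection.First(c => c.Description == "High Quality Direct");
            }
        }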
Example 7
        public void StartSendingAudioToRoom(string ownerUserTag, string host, List<byte[]> testFrames, bool sendLive, OperationCallback callback)
        {
            // What we should use when there's only one other person, and CPU is OK:
            // 16Khz, Speex, WebRtc at full strength
            var config = MediaConfig.Default;

            config.LocalSsrcId           = (ushort)rnd.Next(ushort.MinValue, ushort.MaxValue);
            config.AudioContextSelection = AudioContextSelection.HighQualityDirect;
            config.MediaServerHost       = host;

            // Create the media controller
            var playedAudioFormat = new AudioFormat();
            var mediaStatistics   = new TimingMediaStatistics();
            var mediaEnvironment  = new TestMediaEnvironment();
            var mediaConnection   = new RtpMediaConnection(config, mediaStatistics);
            var vqc = new VideoQualityController(MediaConfig.Default.LocalSsrcId);

            _mediaController = new MediaController(MediaConfig.Default, playedAudioFormat, mediaStatistics, mediaEnvironment, mediaConnection, vqc);

            // Create the audio sink adapter.
            _captureSource = new CaptureSource();
            _captureSource.VideoCaptureDevice = null;
            if (_captureSource.AudioCaptureDevice == null)
            {
                _captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();
                if (_captureSource.AudioCaptureDevice == null)
                {
                    throw new InvalidOperationException("No suitable audio capture device was found");
                }
            }
            MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);
            _captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds
            _audioSinkAdapter = sendLive
                                ? new AudioSinkAdapter(_captureSource, _mediaController, config, mediaEnvironment, playedAudioFormat)
                                : new FromFileAudioSinkAdapter(_captureSource, _mediaController, config, mediaEnvironment, playedAudioFormat, testFrames);

            var roomService = new RoomServiceAdapter();

            roomService.CreateClient();
            roomService.GetRoomId(Constants.DefaultCompanyTag, Constants.DefaultAuthenticationGroupTag, ownerUserTag, Constants.DefaultRoomName, (getRoomError, result) =>
            {
                if (getRoomError != null)
                {
                    callback(getRoomError);
                }
                else
                {
                    // Connect.
                    _mediaController.Connect(result.RoomId.ToString(), connectError => Deployment.Current.Dispatcher.BeginInvoke(() =>
                    {
                        if (connectError == null)
                        {
                            ClientLogger.Debug("MacTestViewModel connected to MediaController");
                            _captureSource.Start();
                        }
                        _mediaController.RegisterRemoteSession((ushort)(config.LocalSsrcId + 1));
                        callback(connectError);
                    }));
                }
            });
        }
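
A hedged usage sketch, assuming OperationCallback takes a single error argument as the body above implies (the viewModel variable, tag, and host values are placeholders):

        // Hypothetical call site: testFrames is null because sendLive is true,
        // so the live AudioSinkAdapter path is taken rather than FromFileAudioSinkAdapter.
        viewModel.StartSendingAudioToRoom("someOwnerUserTag", "media.example.com", null /* testFrames */, true /* sendLive */, error =>
        {
            if (error != null)
            {
                ClientLogger.Debug("Failed to start sending audio: {0}", error);
            }
            else
            {
                ClientLogger.Debug("Connected; now sending live microphone audio to the room.");
            }
        });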