/// <summary>
/// Lazily sets up the audio capture pipeline: picks the default capture device,
/// selects its best format, and wires up the audio sink, recorder, and data contexts.
/// Safe to call repeatedly; subsequent calls are no-ops once initialized.
/// </summary>
private void InitializeCaptureSource()
{
    // Already initialized — nothing to do.
    if (_captureSource != null)
    {
        return;
    }

    // Setup the capture source (for recording audio).
    _captureSource = new CaptureSource();
    _captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();

    if (_captureSource.AudioCaptureDevice == null)
    {
        // Do something more here eventually, once we figure out what the user experience should be.
        ClientLogger.Debug("No audio capture device was found.");
        return;
    }

    MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);

    if (_captureSource.AudioCaptureDevice.DesiredFormat != null)
    {
        _captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds
        _audioSink = new AudioSinkAdapter(_captureSource, null, MediaConfig.Default, new TestMediaEnvironment(), AudioFormat.Default);
        _recorder = new RecorderBase(_captureSource, _audioSink, speakersAudioVisualizer);
        chkSynchronizeRecording.DataContext = _audioSink;
        ClientLogger.Debug("CaptureSource initialized.");
    }
    else
    {
        ClientLogger.Debug("No suitable audio format was found.");
    }

    // Bound regardless of whether a usable format was found, so the UI reflects the device.
    panelMicrophone.DataContext = _captureSource;
}
/// <summary>
/// Creates a recorder that taps the supplied capture source / sink pair and
/// drives the given visualizer while audio is being recorded.
/// </summary>
/// <param name="captureSource">Capture source supplying the audio to record.</param>
/// <param name="audioSinkAdapter">Sink adapter receiving the captured audio frames.</param>
/// <param name="audioVisualizer">Visualizer updated during recording.</param>
public RecorderBase(CaptureSource captureSource, AudioSinkAdapter audioSinkAdapter, AudioVisualizer audioVisualizer)
{
    mAudioVisualizer = audioVisualizer;
    mAudioSinkAdapter = audioSinkAdapter;
    mCaptureSource = captureSource;

    // Default rate; presumably one visualization update per frame — confirm against the visualizer's contract.
    VisualizationRate = 1;
}
/// <summary>
/// Tears down the outbound audio pipeline: stops the capture source, disposes the
/// media controller, and releases the sink adapter.
/// </summary>
/// <remarks>
/// Idempotent: the original unconditionally dereferenced <c>_captureSource</c> and
/// <c>_mediaController</c>, so a second call (or a call before streaming ever started)
/// threw <see cref="NullReferenceException"/>. Each field is now checked before use.
/// </remarks>
public void StopSendingAudioToRoom()
{
    // Stop capturing first so no more frames flow into the controller while it is disposed.
    if (_captureSource != null)
    {
        _captureSource.Stop();
        _captureSource = null;
    }
    if (_mediaController != null)
    {
        _mediaController.Dispose();
        _mediaController = null;
    }
    _audioSinkAdapter = null;
}
/// <summary>
/// Creates a recorder that additionally runs captured audio through an echo-cancellation
/// filter, visualizing and collecting the cancelled frames alongside the base recording.
/// </summary>
/// <param name="captureSource">Capture source supplying the audio to record.</param>
/// <param name="audioSinkAdapter">Sink adapter receiving the captured audio frames.</param>
/// <param name="speakerAudioVisualizer">Visualizer for the raw speaker-side audio (passed to the base recorder).</param>
/// <param name="cancelledAudioVisualizer">Visualizer for the echo-cancelled audio.</param>
/// <param name="echoCancelFilter">Filter that performs the echo cancellation.</param>
/// <param name="cancelledFrames">Collection that accumulates the cancelled frames.</param>
public RecorderAec(
    CaptureSource captureSource,
    AudioSinkAdapter audioSinkAdapter,
    AudioVisualizer speakerAudioVisualizer,
    AudioVisualizer cancelledAudioVisualizer,
    EchoCancelFilter echoCancelFilter,
    List<byte[]> cancelledFrames)
    : base(captureSource, audioSinkAdapter, speakerAudioVisualizer)
{
    CancelledFrames = cancelledFrames;
    mCancelledAudioVisualizer = cancelledAudioVisualizer;
    mEchoCancelFilter = echoCancelFilter;
}
/// <summary>
/// Runs a loopback timing test for the currently selected audio context: captures
/// microphone audio through a <c>TimingAudioSinkAdapter</c>, routes it through a
/// <c>MediaController</c> over a loopback connection, and plays it back via a
/// <c>MediaElement</c>.
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown when no audio capture device or suitable format exists, when device access
/// is not granted, or when the capture source fails to enter the Started state.
/// </exception>
public void StartTimingTest()
{
    Status = "Executing timing test for context '" + CurrentAudioContext.Description + "'";
    _mediaElement = new MediaElement();
    // NOTE(review): first argument (the media element) is null here; the element is
    // wired up via SetSource below — confirm AudioMediaStreamSource tolerates this.
    _audioStreamSource = new AudioMediaStreamSource(null, AudioFormat.Default);

    // Audio-only test: explicitly disable video capture.
    _captureSource.VideoCaptureDevice = null;

    // Make sure we can get at the devices.
    if (_captureSource.AudioCaptureDevice == null)
    {
        throw new InvalidOperationException("No audio capture device was found");
    }
    if (_captureSource.AudioCaptureDevice.DesiredFormat == null)
    {
        throw new InvalidOperationException("No suitable audio format was found");
    }
    if (!CaptureDeviceConfiguration.AllowedDeviceAccess && !CaptureDeviceConfiguration.RequestDeviceAccess())
    {
        throw new InvalidOperationException("Device access not granted.");
    }

    // Create the audio sink. A fixed SSRC id is fine here because the test is loopback-only.
    MediaConfig.Default.LocalSsrcId = 1000;
    MediaStatistics = new TimingMediaStatistics();
    var mediaEnvironment = new TestMediaEnvironment();

    // Create the media controller, backed by a loopback connection so no server is needed.
    _mediaConnection = new LoopbackMediaConnection(MediaConfig.Default.LocalSsrcId);
    var vqc = new VideoQualityController(MediaConfig.Default.LocalSsrcId);
    _mediaController = new MediaController(MediaConfig.Default, AudioFormat.Default, MediaStatistics, mediaEnvironment, _mediaConnection, vqc);

    // Create the audio sink to grab data from the microphone and send it to the media controller.
    _audioSink = new TimingAudioSinkAdapter(CurrentAudioContext, _captureSource, _mediaController, MediaConfig.Default, new TestMediaEnvironment(), CurrentAudioContext.AudioFormat);
    _audioSink.CaptureSuccessful += _audioSink_CaptureSuccessful;

    // Create the media stream source to play data from the media controller.
    _audioStreamSource.AudioController = _mediaController;
    _mediaElement.SetSource(_audioStreamSource);

    // Connect; the completion callback is marshalled back onto the UI dispatcher.
    _mediaController.Connect("test", ex => Deployment.Current.Dispatcher.BeginInvoke(() =>
    {
        if (ex != null)
        {
            StopTimingTest();
            MessageBox.Show(ex.ToString());
        }
        else
        {
            ClientLogger.Debug("TimingViewModel connected to MediaController");
        }
    }));
    // Register the loopback peer (local ssrc 1000 + 1).
    _mediaController.RegisterRemoteSession(1001);

    // Start capturing (which should trigger the audio sink).
    _captureSource.Start();
    if (_captureSource.State != CaptureState.Started)
    {
        throw new InvalidOperationException("Unable to capture microphone");
    }

    // Start playing.
    _mediaElement.Play();
    ClientLogger.Debug("CaptureSource initialized; captureSource.State={0}; captureSource.AudioCaptureDevice={1}; mediaElement.CurrentState={2}; ",
        _captureSource.State, _captureSource.AudioCaptureDevice.FriendlyName, _mediaElement.CurrentState);
}
/// <summary>
/// Looks up the room for the given owner, connects the media controller to the media
/// server, and starts streaming audio to the room — either live microphone input or a
/// pre-recorded set of frames.
/// </summary>
/// <param name="ownerUserTag">Tag of the room owner, used to resolve the room id.</param>
/// <param name="host">Media server host to connect to.</param>
/// <param name="testFrames">Pre-encoded audio frames sent when <paramref name="sendLive"/> is false.</param>
/// <param name="sendLive">True to capture and send live microphone audio; false to replay <paramref name="testFrames"/>.</param>
/// <param name="callback">Invoked with null on success, or with the error that occurred.</param>
/// <exception cref="InvalidOperationException">Thrown when no audio capture device can be found.</exception>
public void StartSendingAudioToRoom(string ownerUserTag, string host, List<byte[]> testFrames, bool sendLive, OperationCallback callback)
{
    // What we should use when there's only one other person, and CPU is OK:
    // 16Khz, Speex, WebRtc at full strength
    var config = MediaConfig.Default;
    // Random SSRC; note Random.Next's upper bound is exclusive, so ushort.MaxValue itself is never chosen.
    config.LocalSsrcId = (ushort)rnd.Next(ushort.MinValue, ushort.MaxValue);
    config.AudioContextSelection = AudioContextSelection.HighQualityDirect;
    config.MediaServerHost = host;

    // Create the media controller.
    var playedAudioFormat = new AudioFormat();
    var mediaStatistics = new TimingMediaStatistics();
    var mediaEnvironment = new TestMediaEnvironment();
    var mediaConnection = new RtpMediaConnection(config, mediaStatistics);
    var vqc = new VideoQualityController(MediaConfig.Default.LocalSsrcId);
    _mediaController = new MediaController(MediaConfig.Default, playedAudioFormat, mediaStatistics, mediaEnvironment, mediaConnection, vqc);

    // Create the audio sink adapter; audio-only, so video capture is explicitly disabled.
    _captureSource = new CaptureSource();
    _captureSource.VideoCaptureDevice = null;
    if (_captureSource.AudioCaptureDevice == null)
    {
        _captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();
        if (_captureSource.AudioCaptureDevice == null)
        {
            throw new InvalidOperationException("No suitable audio capture device was found");
        }
    }
    MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);
    _captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds

    // Live: pull frames from the microphone; otherwise replay the supplied test frames.
    _audioSinkAdapter = sendLive ?
        new AudioSinkAdapter(_captureSource, _mediaController, config, mediaEnvironment, playedAudioFormat) :
        new FromFileAudioSinkAdapter(_captureSource, _mediaController, config, mediaEnvironment, playedAudioFormat, testFrames);

    // Resolve the room id, then connect and start capturing.
    var roomService = new RoomServiceAdapter();
    roomService.CreateClient();
    roomService.GetRoomId(Constants.DefaultCompanyTag, Constants.DefaultAuthenticationGroupTag, ownerUserTag, Constants.DefaultRoomName, (getRoomError, result) =>
    {
        if (getRoomError != null)
        {
            callback(getRoomError);
        }
        else
        {
            // Connect; completion is marshalled back onto the UI dispatcher.
            _mediaController.Connect(result.RoomId.ToString(), connectError => Deployment.Current.Dispatcher.BeginInvoke(() =>
            {
                if (connectError == null)
                {
                    ClientLogger.Debug("MacTestViewModel connected to MediaController");
                    _captureSource.Start();
                }
                // NOTE(review): the remote session is registered even when the connect failed —
                // confirm whether RegisterRemoteSession should live inside the success branch.
                _mediaController.RegisterRemoteSession((ushort)(config.LocalSsrcId + 1));
                callback(connectError);
            }));
        }
    });
}