public MultipleControllerAudioSinkAdapter(MediaConfig mediaConfig, CaptureSource captureSource, int frequency)
{
    ClientLogger.Debug("MultipleControllerAudioSinkAdapter created");
    this.mediaConfig = mediaConfig;
    AudioControllers = new List<IAudioController>();
    AudioContexts = new List<AudioContext>();
    CaptureSource = captureSource;
    RawAudioFormat = new AudioFormat(CaptureSource.AudioCaptureDevice.DesiredFormat.SamplesPerSecond);
    oscillator = new Oscillator();
    oscillator.Frequency = frequency;
}
public FromFileAudioSinkAdapter(
    CaptureSource captureSource,
    IAudioController audioController,
    MediaConfig mediaConfig,
    IMediaEnvironment mediaEnvironment,
    AudioFormat playedAudioFormat,
    List<byte[]> testFrames)
    : base(captureSource, audioController, mediaConfig, mediaEnvironment, playedAudioFormat)
{
    _testFrames = testFrames;
}
public TimingAudioSinkAdapter(
    AudioContext audioContext,
    CaptureSource captureSource,
    IAudioController audioController,
    MediaConfig mediaConfig,
    IMediaEnvironment mediaEnvironment,
    AudioFormat playedAudioFormat)
    : base(captureSource, audioController, mediaConfig, mediaEnvironment, playedAudioFormat)
{
    _audioContext = audioContext;
    ClientLogger.Debug(GetType().Name + " created.");
}
protected override void OnFormatChange(wmAudioFormat audioFormat)
{
    // We may need to do more with this.
    ClientLogger.Debug("The audio format was changed: BitsPerSample = {0}, Channels = {1}, SamplesPerSecond = {2}",
        AudioConstants.BitsPerSample, AudioConstants.Channels, audioFormat.SamplesPerSecond);
    if (audioFormat.WaveFormat != WaveFormatType.Pcm || audioFormat.BitsPerSample != AudioConstants.BitsPerSample)
    {
        throw new ArgumentException("The audio format was not supported.");
    }
    wmAudioFormat = audioFormat;
    RawAudioFormat = new AudioFormat(audioFormat.SamplesPerSecond);
}
public void AnalyzeSingleFrame(List<byte[]> testFrames)
{
    // Create the audio context.
    var config = MediaConfig.Default;
    config.LocalSsrcId = 1000;
    var rawAudioFormat = new AudioFormat(); // This will be overwritten later
    var playedAudioFormat = new AudioFormat();
    var mediaEnvironment = new TestMediaEnvironment();
    var audioContextFactory = new AudioContextFactory(rawAudioFormat, playedAudioFormat, config, mediaEnvironment);
    var audioContext = audioContextFactory.HighQualityDirectCtx;

    // Create the media controller.
    var mediaStatistics = new TimingMediaStatistics();
    var mediaConnection = new SingleFrameMediaConnection(MediaConfig.Default.LocalSsrcId);
    mediaConnection.FirstPacketReceived += mediaConnection_FirstPacketReceived;
    var vqc = new VideoQualityController(MediaConfig.Default.LocalSsrcId);
    var mediaController = new MediaController(MediaConfig.Default, playedAudioFormat, mediaStatistics, mediaEnvironment, mediaConnection, vqc);

    // Connect.
    mediaController.Connect("test", ex => Deployment.Current.Dispatcher.BeginInvoke(() =>
    {
        if (ex != null)
        {
            MessageBox.Show(ex.ToString());
        }
        else
        {
            ClientLogger.Debug("TimingViewModel connected to MediaController");
        }
    }));
    mediaController.RegisterRemoteSession(1001);
    foreach (var frame in testFrames)
    {
        mediaController.SubmitRecordedFrame(audioContext, frame);
    }
}
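// Illustrative sketch (not from the original source): a minimal caller for
// AnalyzeSingleFrame() above. The 640-byte frame size is an assumption based on the
// 16 kHz, 16-bit mono, 20 ms-per-frame defaults described elsewhere in this listing,
// and the method name AnalyzeSilentFrame is hypothetical.
public void AnalyzeSilentFrame()
{
    // 16000 samples/s * 0.02 s * 2 bytes/sample = 640 bytes of silence.
    var silentFrame = new byte[640];
    AnalyzeSingleFrame(new List<byte[]> { silentFrame });
}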
private void btnStart_Click(object sender, RoutedEventArgs e)
{
    try
    {
        if (!(CaptureDeviceConfiguration.AllowedDeviceAccess || CaptureDeviceConfiguration.RequestDeviceAccess()))
        {
            MessageBox.Show("Unable to capture microphone");
            return;
        }
        _audioSinkAdapter.UseGeneratedTone = chkUseGeneratedTone.IsChecked ?? true;
        _mediaStreamSource.UseGeneratedTone = chkUseGeneratedTone.IsChecked ?? true;

        // Create the context factory.
        var rootMediaConfig = GetMediaConfig();
        var connectionSelection = (ComboBoxItem)cboConnection.SelectedItem;
        var connectionType = (MediaConnectionType)Enum.Parse(typeof(MediaConnectionType), (string)connectionSelection.Content, true);
        var formatSelection = (ComboBoxItem)cboAudioFormat.SelectedItem;
        var audioFormat = new AudioFormat(int.Parse((string)formatSelection.Content));
        var enhancerSelection = (ComboBoxItem)cboEnhancer.SelectedValue;
        var enhancerType = (SpeechEnhancementStack)Enum.Parse(typeof(SpeechEnhancementStack), (string)enhancerSelection.Content, true);
        var encoderSelection = (ComboBoxItem)cboEncoder.SelectedValue;
        var encoderType = (AudioCodecType)Enum.Parse(typeof(AudioCodecType), (string)encoderSelection.Content, true);
        var ctxFactory = new TestAudioContextFactory(rootMediaConfig, _audioSinkAdapter.RawAudioFormat, audioFormat, enhancerType, encoderType);
        _audioSinkAdapter.RootAudioContext = ctxFactory.GetAudioContext();

        _mediaServerVms.Clear();
        _audioSinkAdapter.AudioControllers.Clear();
        _audioSinkAdapter.AudioContexts.Clear();
        _mediaStreamSource.AudioControllers.Clear();

        var connections = (int)txtConnections.Value;
        var rooms = (int)txtRooms.Value;
        _audioSinkAdapter.Rooms = rooms;
        _audioSinkAdapter.ConnectionsPerRoom = connections;

        for (int room = 0; room < rooms; room++)
        {
            string roomId = string.Format("__alantaTestRoom{0}__", room);
            var mediaStats = new MediaStatistics();
            var mediaEnvironment = new MediaEnvironment(mediaStats);

            // Register each room on the remote server.
            for (int connection = 0; connection < connections; connection++)
            {
                var connectionMediaConfig = GetMediaConfig();
                IMediaConnection mediaConnection;
                if (connectionType == MediaConnectionType.MediaServer)
                {
                    mediaConnection = new RtpMediaConnection(connectionMediaConfig, mediaStats);
                }
                else
                {
                    mediaConnection = new LoopbackMediaConnection(connectionMediaConfig.LocalSsrcId);
                }
                var vqc = new VideoQualityController(connectionMediaConfig.LocalSsrcId);
                var vm = new MediaServerViewModel(connectionMediaConfig, AudioFormat.Default, mediaStats, mediaEnvironment, mediaConnection, vqc, roomId);
                _audioSinkAdapter.AudioControllers.Add(vm.MediaController);
                _audioSinkAdapter.AudioContexts.Add(ctxFactory.GetAudioContext());
                _mediaStreamSource.AudioControllers.Add(new AudioControllerEntry(vm.MediaController));
                _mediaServerVms.Add(vm);
                vm.Connect();
            }

            // Make sure each session knows about all the others in the same room.
            var localVms = _mediaServerVms.Where(x => x.RoomId == roomId).ToList();
            foreach (var localVm in localVms)
            {
                var vm = localVm;
                var remoteVms = localVms.Where(x => x.SsrcId != vm.SsrcId).ToList();
                foreach (var remoteVm in remoteVms)
                {
                    vm.MediaController.RegisterRemoteSession(remoteVm.SsrcId);
                }
            }
        }

        if (mediaElement.CurrentState == MediaElementState.Closed)
        {
            mediaElement.SetSource(_mediaStreamSource);
        }
        _captureSource.Start();
        mediaElement.Play();
        btnStop.IsEnabled = true;
        btnStart.IsEnabled = false;
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.ToString());
    }
}
public void CreateAudioContexts()
{
    _captureSource.VideoCaptureDevice = null;
    if (_captureSource.AudioCaptureDevice == null)
    {
        _captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();
        if (_captureSource.AudioCaptureDevice == null)
        {
            throw new InvalidOperationException("No suitable audio capture device was found");
        }
    }
    MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);
    _captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds
    var desiredFormat = _captureSource.AudioCaptureDevice.DesiredFormat;
    var rawAudioFormat = new AudioFormat(desiredFormat.SamplesPerSecond, AudioFormat.Default.MillisecondsPerFrame, desiredFormat.Channels, desiredFormat.BitsPerSample);
    var playedAudioFormat = new AudioFormat();
    var config = MediaConfig.Default;

    // Absolutely bare minimum processing - doesn't process sound at all.
    var nullAudioFormat = new AudioFormat();
    var nullResampler = new ResampleFilter(rawAudioFormat, nullAudioFormat);
    nullResampler.InstanceName = "Null resample filter";
    var nullEnhancer = new NullEchoCancelFilter(config.ExpectedAudioLatency, config.FilterLength, nullAudioFormat, playedAudioFormat);
    nullEnhancer.InstanceName = "Null";
    var nullDtx = new NullAudioInplaceFilter();
    var nullEncoder = new NullAudioEncoder();
    var nullAudioContext = new AudioContext(nullAudioFormat, nullResampler, nullDtx, nullEnhancer, nullEncoder);
    nullAudioContext.Description = "Null";

    // What we should use when there's only one other person, and CPU is OK:
    // 16Khz, Speex, WebRtc at full strength
    var directAudioFormat = new AudioFormat();
    var directResampler = new ResampleFilter(rawAudioFormat, directAudioFormat);
    directResampler.InstanceName = "Direct high quality resample filter";
    var directEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, directAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);
    directEnhancer.InstanceName = "High";
    var directDtx = new DtxFilter(directAudioFormat);
    var directEncoder = new SpeexEncoder(directAudioFormat);
    var directAudioContext = new AudioContext(directAudioFormat, directResampler, directDtx, directEnhancer, directEncoder);
    directAudioContext.Description = "High Quality Direct";

    // What we should use when there are multiple people (and hence the audio will need to be decoded and mixed), but CPU is OK:
    // 8Khz, G711, WebRtc at full strength
    var conferenceAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
    var conferenceResampler = new ResampleFilter(rawAudioFormat, conferenceAudioFormat);
    conferenceResampler.InstanceName = "Conference high quality resample filter";
    var conferenceEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, conferenceAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);
    conferenceEnhancer.InstanceName = "Medium";
    var conferenceDtx = new DtxFilter(conferenceAudioFormat);
    var conferenceEncoder = new G711MuLawEncoder(conferenceAudioFormat);
    var conferenceAudioContext = new AudioContext(conferenceAudioFormat, conferenceResampler, conferenceDtx, conferenceEnhancer, conferenceEncoder);
    conferenceAudioContext.Description = "High Quality Conference";

    // What we should use when one or more remote CPUs aren't keeping up (regardless of how many people are in the room):
    // 8Khz, G711, WebRtc at full strength
    var remoteFallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
    var remoteFallbackResampler = new ResampleFilter(rawAudioFormat, remoteFallbackAudioFormat);
    remoteFallbackResampler.InstanceName = "Fallback remote high cpu resample filter";
    var remoteFallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength, remoteFallbackAudioFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);
    remoteFallbackEnhancer.InstanceName = "Medium";
    var remoteFallbackDtx = new DtxFilter(remoteFallbackAudioFormat);
    var remoteFallbackEncoder = new G711MuLawEncoder(remoteFallbackAudioFormat);
    var remoteFallbackAudioContext = new AudioContext(remoteFallbackAudioFormat, remoteFallbackResampler, remoteFallbackDtx, remoteFallbackEnhancer, remoteFallbackEncoder);
    remoteFallbackAudioContext.Description = "Fallback for remote high CPU";

    // What we should use when the local CPU isn't keeping up (regardless of how many people are in the room):
    // 8Khz, G711, WebRtc at half-strength
    var fallbackAudioFormat = new AudioFormat(AudioConstants.NarrowbandSamplesPerSecond);
    var fallbackResampler = new ResampleFilter(rawAudioFormat, fallbackAudioFormat);
    fallbackResampler.InstanceName = "Fallback resample filter";
    var fallbackEnhancer = new WebRtcFilter(config.ExpectedAudioLatencyFallback, config.FilterLengthFallback, fallbackAudioFormat, playedAudioFormat, config.EnableAec, false, false);
    fallbackEnhancer.InstanceName = "Low";
    var fallbackDtx = new DtxFilter(fallbackAudioFormat);
    var fallbackEncoder = new G711MuLawEncoder(fallbackAudioFormat);
    var fallbackAudioContext = new AudioContext(fallbackAudioFormat, fallbackResampler, fallbackDtx, fallbackEnhancer, fallbackEncoder);
    fallbackAudioContext.Description = "Fallback for local high CPU";

    AudioContextCollection.Clear();
    AudioContextCollection.Add(nullAudioContext);
    AudioContextCollection.Add(directAudioContext);
    AudioContextCollection.Add(conferenceAudioContext);
    AudioContextCollection.Add(remoteFallbackAudioContext);
    AudioContextCollection.Add(fallbackAudioContext);
    CurrentAudioContext = nullAudioContext;
}
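// Illustrative sketch (not from the original source): the WebRtc-based contexts built in
// CreateAudioContexts() above follow the same construction pattern, so a helper along these
// lines could cut the repetition. It only uses constructors and properties already shown
// above (ResampleFilter, WebRtcFilter, DtxFilter, AudioContext, InstanceName, Description);
// the helper name and the IAudioEncoder parameter type are assumptions.
private static AudioContext BuildWebRtcAudioContext(
    AudioFormat rawAudioFormat,
    AudioFormat playedAudioFormat,
    AudioFormat targetFormat,
    MediaConfig config,
    IAudioEncoder encoder,
    string description)
{
    var resampler = new ResampleFilter(rawAudioFormat, targetFormat);
    resampler.InstanceName = description + " resample filter";
    var enhancer = new WebRtcFilter(config.ExpectedAudioLatency, config.FilterLength,
        targetFormat, playedAudioFormat, config.EnableAec, config.EnableDenoise, config.EnableAgc);
    enhancer.InstanceName = description;
    var dtx = new DtxFilter(targetFormat);
    var audioContext = new AudioContext(targetFormat, resampler, dtx, enhancer, encoder);
    audioContext.Description = description;
    return audioContext;
}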
public TestTimingAudioSink(CaptureSource captureSource, AudioFormat outputAudioFormat)
{
    CaptureSource = captureSource;
    this.outputAudioFormat = outputAudioFormat;
}
/// <summary>
/// Overrides the normal audio context retrieval process. The returned SingleAudioContextFactory
/// hands back only the specified audio context, so that we can test it in isolation.
/// </summary>
protected override IAudioContextFactory GetAudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
{
    ClientLogger.Debug("TimingAudioContextFactory created.");
    return new SingleAudioContextFactory(_audioContext, rawAudioFormat, playedAudioFormat, config, mediaEnvironment);
}
protected virtual IAudioContextFactory GetAudioContextFactory(AudioFormat rawAudioFormat, AudioFormat playedAudioFormat, MediaConfig config, IMediaEnvironment mediaEnvironment)
{
    return new AudioContextFactory(rawAudioFormat, playedAudioFormat, config, mediaEnvironment);
}
public AudioSinkAdapter(CaptureSource captureSource, IAudioController audioController, MediaConfig mediaConfig, IMediaEnvironment mediaEnvironment, AudioFormat playedAudioFormat)
{
    ClientLogger.Debug(GetType().Name + " created.");
    CaptureSource = captureSource;
    AudioController = audioController;
    _mediaConfig = mediaConfig;
    _mediaEnvironment = mediaEnvironment;
    _playedAudioFormat = playedAudioFormat;
    _logger = new AudioSinkAdapterLogger();
}
public void StartSendingAudioToRoom(string ownerUserTag, string host, List<byte[]> testFrames, bool sendLive, OperationCallback callback)
{
    // What we should use when there's only one other person, and CPU is OK:
    // 16Khz, Speex, WebRtc at full strength
    var config = MediaConfig.Default;
    config.LocalSsrcId = (ushort)rnd.Next(ushort.MinValue, ushort.MaxValue);
    config.AudioContextSelection = AudioContextSelection.HighQualityDirect;
    config.MediaServerHost = host;

    // Create the media controller.
    var playedAudioFormat = new AudioFormat();
    var mediaStatistics = new TimingMediaStatistics();
    var mediaEnvironment = new TestMediaEnvironment();
    var mediaConnection = new RtpMediaConnection(config, mediaStatistics);
    var vqc = new VideoQualityController(MediaConfig.Default.LocalSsrcId);
    _mediaController = new MediaController(MediaConfig.Default, playedAudioFormat, mediaStatistics, mediaEnvironment, mediaConnection, vqc);

    // Create the audio sink adapter.
    _captureSource = new CaptureSource();
    _captureSource.VideoCaptureDevice = null;
    if (_captureSource.AudioCaptureDevice == null)
    {
        _captureSource.AudioCaptureDevice = CaptureDeviceConfiguration.GetDefaultAudioCaptureDevice();
        if (_captureSource.AudioCaptureDevice == null)
        {
            throw new InvalidOperationException("No suitable audio capture device was found");
        }
    }
    MediaDeviceConfig.SelectBestAudioFormat(_captureSource.AudioCaptureDevice);
    _captureSource.AudioCaptureDevice.AudioFrameSize = AudioFormat.Default.MillisecondsPerFrame; // 20 milliseconds
    _audioSinkAdapter = sendLive
        ? new AudioSinkAdapter(_captureSource, _mediaController, config, mediaEnvironment, playedAudioFormat)
        : new FromFileAudioSinkAdapter(_captureSource, _mediaController, config, mediaEnvironment, playedAudioFormat, testFrames);

    var roomService = new RoomServiceAdapter();
    roomService.CreateClient();
    roomService.GetRoomId(Constants.DefaultCompanyTag, Constants.DefaultAuthenticationGroupTag, ownerUserTag, Constants.DefaultRoomName, (getRoomError, result) =>
    {
        if (getRoomError != null)
        {
            callback(getRoomError);
        }
        else
        {
            // Connect.
            _mediaController.Connect(result.RoomId.ToString(), connectError => Deployment.Current.Dispatcher.BeginInvoke(() =>
            {
                if (connectError == null)
                {
                    ClientLogger.Debug("MacTestViewModel connected to MediaController");
                    _captureSource.Start();
                }
                _mediaController.RegisterRemoteSession((ushort)(config.LocalSsrcId + 1));
                callback(connectError);
            }));
        }
    });
}
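// Illustrative sketch (not from the original source): a hypothetical caller for
// StartSendingAudioToRoom() above. The user tag and host are placeholders, the
// single-argument callback shape mirrors how callback(...) is invoked in that method,
// and passing sendLive: true streams live microphone audio instead of replaying test frames.
public void StartSendingLiveAudio()
{
    StartSendingAudioToRoom("ownerUserTag", "media.example.com", null, true, error =>
    {
        if (error != null)
        {
            ClientLogger.Debug("StartSendingAudioToRoom failed: " + error);
        }
        else
        {
            ClientLogger.Debug("Now sending live audio to the room.");
        }
    });
}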