/// <summary>
/// Wraps a low-level <c>MMDevice</c> endpoint for use by the device enumerator.
/// </summary>
/// <param name="device">Underlying endpoint; must not be null.</param>
/// <param name="enumerator">Owning enumerator passed through to the base class.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="device"/> is null.</exception>
internal SystemAudioDevice(MMDevice device, IDeviceEnumerator<SystemAudioDevice> enumerator) : base(enumerator)
{
    // nameof keeps the parameter reference refactor-safe (was the literal "device").
    if (device == null)
        throw new ArgumentNullException(nameof(device), "Device cannot be null. Something bad went wrong");
    Device = device;
}
/// <summary>
/// Initialises a new instance of the WASAPI capture class.
/// </summary>
/// <param name="captureDevice">Capture device to use.</param>
public WasapiLoopbackCapture(MMDevice captureDevice) : base(captureDevice)
{
    // All behaviour lives in the base class; this overload only forwards the device.
}
// Convenience overload: delegates to the main constructor, using the device's
// FriendlyName as the item's display name.
// NOTE(review): parameters are PascalCase contrary to C# convention; renaming
// them could break callers that use named arguments, so they are left as-is.
public NAudioItem(MMDevice Device, bool IsLoopback) : this(Device, Device.FriendlyName, IsLoopback)
{
}
/// <summary>
/// Captures an endpoint together with its position in the device list and a
/// snapshot of its friendly name.
/// </summary>
/// <param name="Index">Position of the device in the enumeration it came from.</param>
/// <param name="BaseDevice">Endpoint to wrap; must not be null.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="BaseDevice"/> is null.</exception>
public AudioDevice(int Index, MMDevice BaseDevice)
{
    // Fail fast with a clear error instead of an opaque NullReferenceException
    // when FriendlyName is read below.
    if (BaseDevice == null)
        throw new ArgumentNullException(nameof(BaseDevice));
    this.Index = Index;
    this.DeviceFriendlyname = BaseDevice.FriendlyName;
    this.Device = BaseDevice;
}
/// <summary>
/// Finds the position of <paramref name="defaultDevice"/> in the bound audio
/// item list by comparing display names; returns -1 when it is not present.
/// </summary>
public int GetDefaultDeviceIndex(MMDevice defaultDevice)
{
    string[] itemNames = View_AudioItems.Select(item => item.Name).ToArray();
    return Array.IndexOf(itemNames, defaultDevice.ToString());
}
/// <summary>
/// Selects the capture device identified by <paramref name="devId"/>: releases any
/// previously held device, resolves the new one, configures WASAPI share mode and
/// stream flags, initialises the audio/capture clients, and fires the
/// <c>VolumeChanged</c> and <c>DeviceSelected</c> events.
/// </summary>
private void selectDeviceImpl(string devId)
{
    // Already selected - nothing to do.
    if (_capDevice != null && _capDevice.Id == devId)
    {
        return;
    }
    releaseDeviceImpl();
    _capDevice = _devices.GetDevice(devId.Trim());
    int idx = _deviceInfos.FindIndex((di) => { return di.DeviceId == devId; });
    if (_capDevice == null)
    {
#warning TODO: raise a proper exception when the device cannot be resolved
        _audioClient = null;
        _capClient = null;
        return;
    }
    _capDeviceId = _capDevice.Id;

    // Share mode.
    AudioClientShareMode shareMode = AudioClientShareMode.Shared;

    // Pick stream flags appropriate for the device's data-flow direction
    // (render endpoints are captured via loopback).
    AudioClientStreamFlags streamFlags = AudioClientStreamFlags.NoPersist;
    switch (shareMode)
    {
        case AudioClientShareMode.Shared:
            switch (_capDevice.DataFlow)
            {
                case EDataFlow.eCapture:
                    streamFlags = 0;
                    break;
                case EDataFlow.eRender:
                    streamFlags = AudioClientStreamFlags.Loopback;
                    break;
            }
            break;
        case AudioClientShareMode.Exclusive:
            streamFlags = AudioClientStreamFlags.NoPersist;
            break;
    }

    // Format: drop any stale audio client before requesting a new one.
    if (_audioClient != null)
        _capDevice.ReleaseAudioClient();

    // Volume: reset and notify listeners of the new (zeroed) state.
    _masterVolume = 0;
    _channelVolumes = new double[_capDevice.AudioMeterInformation.PeakValues.Count];
    var h = VolumeChanged;
    if (h != null)
    {
        h(this, new VolumeChangedEventArgs(_capDeviceId, _masterVolume, _channelVolumes));
    }

    try
    {
        _audioClient = _capDevice.AudioClient;
        _capFormat = _audioClient.MixFormat;
        _capFormat.wFormatTag = WaveFormatTag.WAVE_FORMAT_EXTENSIBLE;
        _capFormat.nChannels = 1;
        _capFormat.nSamplesPerSec = 44100;
        _capFormat.wBitsPerSample = 16;
        _capFormat.SubFormat = CoreAudioApi.AudioMediaSubtypes.MEDIASUBTYPE_PCM;
        _capFormat.wValidBitsPerSample = _capFormat.wBitsPerSample;
        _capFormat.nBlockAlign = (ushort)(_capFormat.wBitsPerSample / 8.0 * _capFormat.nChannels);
        _capFormat.nAvgBytesPerSec = _capFormat.nSamplesPerSec * _capFormat.nBlockAlign;

        long tmp1;
        long tmp2;
        _audioClient.GetDevicePeriod(out tmp1, out tmp2);

        // Initialise.
        try
        {
            WAVEFORMATEXTENSIBLE tmpFmt = new WAVEFORMATEXTENSIBLE();
            if (!_audioClient.IsFormatSupported(shareMode, _capFormat, ref tmpFmt))
                _capFormat = tmpFmt;
            _audioClient.Initialize(shareMode, streamFlags, tmp2, tmp2, _capFormat, Guid.Empty);
        }
        catch (System.Runtime.InteropServices.COMException ex)
        {
            // 0x88890019 = AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED: retry with the
            // buffer duration derived from the device-reported buffer size.
            if ((uint)ex.ErrorCode == 0x88890019)
            {
                uint bufSize = _audioClient.BufferSize;
                tmp2 = (long)((10000.0 * 1000 / _capFormat.nSamplesPerSec * bufSize) + 0.5);
                _audioClient.Initialize(shareMode, streamFlags, tmp2, tmp2, _capFormat, Guid.Empty);
            }
            else
            {
                // BUGFIX: previously every other COM error was swallowed here and the
                // method carried on with a half-initialised client; propagate instead.
                throw;
            }
        }
        clearBuffer();
        _capClient = _audioClient.AudioCaptureClient;

        // Fire the selection event.
        var del = DeviceSelected;
        if (del != null)
        {
            del.Invoke(this, new DeviceSelectedEventArgs(_capDevice, idx));
        }
    }
    catch (System.Runtime.InteropServices.COMException)
    {
        _audioClient = null;
        _capClient = null;
        throw; // BUGFIX: was "throw ex;", which resets the stack trace
    }
}
/// <summary>
/// Event payload describing the device that was selected and its index in the device list.
/// </summary>
public DeviceSelectedEventArgs(MMDevice dev, int index)
{
    (Device, Index) = (dev, index);
}
/// <summary>
/// Starts the full audio pipeline: resolves the speaker (and optional mic-passthrough)
/// output devices, wires the mixed-audio chain into WASAPI output, creates the Opus
/// encoder and Speex preprocessor, then starts microphone capture and the UDP voice
/// sender thread. On any failure the process exits via Environment.Exit(1).
/// </summary>
/// <param name="guid">Client identifier forwarded to the UDP voice handler.</param>
/// <param name="inputManager">Input device manager forwarded to the UDP voice handler.</param>
/// <param name="ipAddress">Voice server address.</param>
/// <param name="port">Voice server port.</param>
public void StartEncoding(string guid, InputDeviceManager inputManager, IPAddress ipAddress, int port)
{
    // Resolve the speaker output: user selection, or the system default.
    MMDevice speakers = null;
    if (_audioOutputSingleton.SelectedAudioOutput.Value == null)
    {
        speakers = WasapiOut.GetDefaultAudioEndpoint();
    }
    else
    {
        speakers = (MMDevice)_audioOutputSingleton.SelectedAudioOutput.Value;
    }

    // Optional second output that monitors the microphone.
    MMDevice micOutput = null;
    if (_audioOutputSingleton.SelectedMicAudioOutput.Value != null)
    {
        micOutput = (MMDevice)_audioOutputSingleton.SelectedMicAudioOutput.Value;
    }

    try
    {
        _micInputQueue.Clear();
        InitMixers();
        InitAudioBuffers();
        //Audio manager should start / stop and cleanup based on connection successfull and disconnect
        //Should use listeners to synchronise all the state
        _waveOut = new WasapiOut(speakers, AudioClientShareMode.Shared, true, 40, windowsN);
        //add final volume boost to all mixed audio
        _volumeSampleProvider = new VolumeSampleProviderWithPeak(_clientAudioMixer,
            (peak => SpeakerMax = (float)VolumeConversionHelper.ConvertFloatToDB(peak)));
        _volumeSampleProvider.Volume = SpeakerBoost;

        // Match the provider's channel count to the device's mix format.
        if (speakers.AudioClient.MixFormat.Channels == 1)
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 2)
            {
                _waveOut.Init(_volumeSampleProvider.ToMono());
            }
            else
            {
                //already mono
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        else
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 1)
            {
                _waveOut.Init(_volumeSampleProvider.ToStereo());
            }
            else
            {
                //already stereo
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        _waveOut.Play();

        //opus
        _encoder = OpusEncoder.Create(MIC_SAMPLE_RATE, 1, Application.Voip);
        _encoder.ForwardErrorCorrection = false;

        //speex
        _speex = new Preprocessor(AudioManager.MIC_SEGMENT_FRAMES, AudioManager.MIC_SAMPLE_RATE);
    }
    catch (Exception ex)
    {
        // NOTE(review): hard exit on any output failure - intentional "quit" behaviour.
        Logger.Error(ex, "Error starting audio Output - Quitting! " + ex.Message);
        ShowOutputError("Problem Initialising Audio Output!");
        Environment.Exit(1);
    }

    if (micOutput != null) // && micOutput !=speakers
    {
        //TODO handle case when they're the same?
        try
        {
            _passThroughAudioProvider = new ClientAudioProvider(true);
            _micWaveOut = new WasapiOut(micOutput, AudioClientShareMode.Shared, true, 40, windowsN);
            _micWaveOutBuffer = new BufferedWaveProvider(new WaveFormat(AudioManager.OUTPUT_SAMPLE_RATE, 16, 1));
            _micWaveOutBuffer.ReadFully = true;
            _micWaveOutBuffer.DiscardOnBufferOverflow = true;
            var sampleProvider = _micWaveOutBuffer.ToSampleProvider();

            // Same channel-count matching as for the speaker path above.
            if (micOutput.AudioClient.MixFormat.Channels == 1)
            {
                if (sampleProvider.WaveFormat.Channels == 2)
                {
                    _micWaveOut.Init(sampleProvider.ToMono());
                }
                else
                {
                    //already mono
                    _micWaveOut.Init(sampleProvider);
                }
            }
            else
            {
                if (sampleProvider.WaveFormat.Channels == 1)
                {
                    _micWaveOut.Init(sampleProvider.ToStereo());
                }
                else
                {
                    //already stereo
                    _micWaveOut.Init(sampleProvider);
                }
            }
            _micWaveOut.Play();
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error starting mic audio Output - Quitting! " + ex.Message);
            ShowOutputError("Problem Initialising Mic Audio Output!");
            Environment.Exit(1);
        }
    }

    if (_audioInputSingleton.MicrophoneAvailable)
    {
        try
        {
            // Fall back to the default capture device when no explicit selection exists.
            var device = (MMDevice)_audioInputSingleton.SelectedAudioInput.Value;
            if (device == null)
            {
                device = WasapiCapture.GetDefaultCaptureDevice();
            }
            device.AudioEndpointVolume.Mute = false;
            _wasapiCapture = new WasapiCapture(device, true);
            _wasapiCapture.ShareMode = AudioClientShareMode.Shared;
            _wasapiCapture.DataAvailable += WasapiCaptureOnDataAvailable;
            _wasapiCapture.RecordingStopped += WasapiCaptureOnRecordingStopped;
            _udpVoiceHandler = new UdpVoiceHandler(guid, ipAddress, port, this, inputManager);
            var voiceSenderThread = new Thread(_udpVoiceHandler.Listen);
            voiceSenderThread.Start();
            _wasapiCapture.StartRecording();
            MessageHub.Instance.Subscribe<SRClient>(RemoveClientBuffer);
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error starting audio Input - Quitting! " + ex.Message);
            ShowInputError("Problem initialising Audio Input!");
            Environment.Exit(1);
        }
    }
    else
    {
        //no mic.... still start the voice handler so incoming audio is received
        _udpVoiceHandler = new UdpVoiceHandler(guid, ipAddress, port, this, inputManager);
        MessageHub.Instance.Subscribe<SRClient>(RemoveClientBuffer);
        var voiceSenderThread = new Thread(_udpVoiceHandler.Listen);
        voiceSenderThread.Start();
    }
}
/// <summary>
/// Binds to the current default render (playback) endpoint and records its mute state.
/// </summary>
public LdpVolumeController()
{
    devEnum = new MMDeviceEnumerator();
    device = devEnum.GetDefaultAudioEndpoint(EDataFlow.eRender, ERole.eMultimedia);
    // Snapshot the mute state at construction time - presumably so it can be
    // restored later; confirm against the callers of GetTempMuteState.
    GetTempMuteState = device.AudioEndpointVolume.Mute;
}
/// <summary>
/// Switches the mixer's audio render device. Picks the system default when
/// <paramref name="setToDefault"/> is set (or the name is one of the generic
/// DirectSound/WaveOut defaults), otherwise searches active render endpoints by
/// friendly name, falling back to the default when no match is found. Updates
/// volume state and the device endpoint afterwards. All exceptions are logged
/// and swallowed.
/// </summary>
/// <param name="deviceName">Friendly name of the desired device; may be null.</param>
/// <param name="setToDefault">Force selection of the system default endpoint.</param>
public void ChangeAudioDevice(string deviceName, bool setToDefault)
{
    try
    {
        // Reload filter collection
        if (_mMdevice != null)
        {
            Log.Debug($"Mixer: changed audio device init : {_mMdevice.FriendlyName}");
            FilterHelper.ReloadFilterCollection();
            Log.Debug($"Mixer: changed audio device done : {_mMdevice.FriendlyName}");

            // Lazily create the enumerator on first use.
            if (_mMdeviceEnumerator == null)
            {
                Log.Debug($"Mixer: changed audio device _mMdeviceEnumerator init : {_mMdevice.FriendlyName}");
                _mMdeviceEnumerator = new MMDeviceEnumerator();
                Log.Debug($"Mixer: changed audio device _mMdeviceEnumerator done : {_mMdevice.FriendlyName}");
            }

            Log.Debug($"Mixer: changed audio device EnumAudioEndpoints init : {_mMdevice.FriendlyName}");
            var mMdeviceList = _mMdeviceEnumerator.EnumAudioEndpoints(DataFlow.Render, DeviceState.Active);
            Log.Debug($"Mixer: changed audio device EnumAudioEndpoints done : {_mMdevice.FriendlyName}");

            if (mMdeviceList != null && mMdeviceList.Count > 0)
            {
                Log.Debug($"Mixer: changed audio device GetDefaultAudioEndpoint init : {_mMdevice.FriendlyName}");
                _mMdevice = _mMdeviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
                Log.Debug($"Mixer: changed audio device GetDefaultAudioEndpoint done : {_mMdevice.FriendlyName}");

                // Need to check for certain strings as well because NAudio doesn't detect these
                if (deviceName != null && (setToDefault || deviceName == "Default DirectSound Device" ||
                                           deviceName == "Default WaveOut Device"))
                {
                    // NOTE(review): this re-fetches the same default endpoint fetched
                    // just above - looks redundant; confirm before simplifying.
                    Log.Debug($"Mixer: changed audio device GetDefaultAudioEndpoint check init : {_mMdevice.FriendlyName}");
                    _mMdevice = _mMdeviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
                    Log.Debug($"Mixer: changed audio device GetDefaultAudioEndpoint check done : {_mMdevice.FriendlyName}");
                    if (_mMdevice != null)
                    {
                        Log.Info($"Mixer: changed audio device to default : {_mMdevice.FriendlyName}");
                        _isDefaultDevice = true;
                        GUIGraphicsContext.CurrentAudioRendererDevice = _mMdevice.FriendlyName;
                        // Count when Audio device is detected
                        GUIGraphicsContext.DeviceAudioConnected = mMdeviceList.Count;
                    }
                    Log.Debug($"Mixer: changed audio device return");
                    return;
                }

                Log.Debug($"Mixer: changed audio device EnumAudioEndpoints");
                // NOTE(review): the using block disposes deviceFound on exit, yet the
                // same instance is stored in _mMdevice and used afterwards - this
                // looks like a use-after-dispose; confirm MMDevice dispose semantics
                // before relying on _mMdevice past this point.
                using (
                    var deviceFound = _mMdeviceEnumerator.EnumAudioEndpoints(DataFlow.Render, DeviceState.Active)
                        .FirstOrDefault(
                            device =>
                            {
                                if (device == null)
                                {
                                    throw new ArgumentNullException(nameof(device));
                                }
                                // Case-insensitive friendly-name match.
                                return (deviceName != null &&
                                        device.FriendlyName.Trim().ToLowerInvariant() ==
                                        deviceName.Trim().ToLowerInvariant());
                            }))
                {
                    Log.Debug($"Mixer: changed audio device start");
                    if (deviceFound != null)
                    {
                        Log.Debug($"Mixer: changed audio device CurrentAudioRendererDevice");
                        _mMdevice = deviceFound;
                        _isDefaultDevice = false;
                        GUIGraphicsContext.CurrentAudioRendererDevice = deviceFound.FriendlyName;
                        Log.Info($"Mixer: changed audio device to : {deviceFound.FriendlyName}");
                    }
                    else
                    {
                        Log.Info(
                            $"Mixer: ChangeAudioDevice failed because device {deviceName} was not found, falling back to default");
                        _mMdevice = _mMdeviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
                        _isDefaultDevice = true;
                        GUIGraphicsContext.CurrentAudioRendererDevice = deviceName;
                        Log.Debug($"Mixer: changed audio device GetDefaultAudioEndpoint done");
                    }
                }

                if (_mMdevice != null)
                {
                    Log.Debug($"Mixer: changed audio device SetVolumeFromDevice init");
                    SetVolumeFromDevice(_mMdevice);
                    Log.Debug($"Mixer: changed audio device SetVolumeFromDevice done");
                }
                Log.Debug($"Mixer: changed audio device UpdateDeviceAudioEndpoint init");
                UpdateDeviceAudioEndpoint();
                Log.Debug($"Mixer: changed audio device UpdateDeviceAudioEndpoint done");
            }
            else
            {
                // No active render endpoints at all.
                GUIGraphicsContext.DeviceAudioConnected = 0;
            }
        }
    }
    catch (Exception ex)
    {
        Log.Error($"Mixer: error occured in ChangeAudioDevice: {ex}");
    }
}
/// <summary>
/// Wraps an endpoint and precomputes its display name from the device's
/// friendly name and state.
/// </summary>
public AudioDevice(MMDevice audioDevice)
{
    _audioDevice = audioDevice;
    _name = $"{audioDevice.FriendlyName} ({audioDevice.State})";
}
/// <summary>
/// Console demo: finds the first active audio session on the default render
/// device, prints its details, then shows a live VU meter with hotkeys to
/// mute (M), lower (,), raise (.) the session volume, or quit (Q).
/// Requires Windows 7+ (AudioSessionManager2).
/// </summary>
static void Main(string[] args)
{
    MMDeviceEnumerator DevEnum = new MMDeviceEnumerator();
    MMDevice device = DevEnum.GetDefaultAudioEndpoint(EDataFlow.eRender, ERole.eMultimedia);
    // Note the AudioSession manager did not have a method to enumerate all sessions in windows Vista
    // this will only work on Win7 and newer.
    foreach (var session in device.AudioSessionManager2.Sessions)
    {
        if (session.State == AudioSessionState.AudioSessionStateActive)
        {
            Console.WriteLine("DisplayName: {0}", session.DisplayName);
            Console.WriteLine("State: {0}", session.State);
            Console.WriteLine("IconPath: {0}", session.IconPath);
            Console.WriteLine("SessionIdentifier: {0}", session.GetSessionIdentifier);
            Console.WriteLine("SessionInstanceIdentifier: {0}", session.GetSessionInstanceIdentifier);
            Console.WriteLine("ProcessID: {0}", session.GetProcessID);
            Console.WriteLine("IsSystemIsSystemSoundsSession: {0}", session.IsSystemSoundsSession);
            Process p = Process.GetProcessById((int)session.GetProcessID);
            Console.WriteLine("ProcessName: {0}", p.ProcessName);
            Console.WriteLine("MainWindowTitle: {0}", p.MainWindowTitle);
            AudioMeterInformation mi = session.AudioMeterInformation;
            SimpleAudioVolume vol = session.SimpleAudioVolume;
            Console.WriteLine("---[Hotkeys]---");
            Console.WriteLine("M Toggle Mute");
            Console.WriteLine(", Lower volume");
            // BUGFIX: raise is bound to '.' (OemPeriod) below; the legend previously
            // listed ',' for both lower and raise.
            Console.WriteLine(". Raise volume");
            Console.WriteLine("Q Quit");
            Console.CursorVisible = false;
            int start = Console.CursorTop;
            while (true)
            {
                //Draw a VU meter (79 columns wide) from the session's peak value.
                int len = (int)(mi.MasterPeakValue * 79);
                Console.SetCursorPosition(0, start);
                for (int j = 0; j < len; j++)
                {
                    Console.Write("*");
                }
                for (int j = 0; j < 79 - len; j++)
                {
                    Console.Write(" ");
                }
                Console.SetCursorPosition(0, start + 1);
                Console.WriteLine("Mute : {0} ", vol.Mute);
                Console.WriteLine("Master : {0:0.00} ", vol.MasterVolume * 100);
                if (Console.KeyAvailable)
                {
                    ConsoleKeyInfo key = Console.ReadKey(true);
                    switch (key.Key)
                    {
                        case ConsoleKey.M:
                            vol.Mute = !vol.Mute;
                            break;
                        case ConsoleKey.Q:
                            Console.CursorVisible = true;
                            return;
                        case ConsoleKey.OemComma:
                            // Lower by 10%, clamped to 0.
                            float curvol = vol.MasterVolume - 0.1f;
                            if (curvol < 0)
                            {
                                curvol = 0;
                            }
                            vol.MasterVolume = curvol;
                            break;
                        case ConsoleKey.OemPeriod:
                            // Raise by 10%, clamped to 1.
                            float curvold = vol.MasterVolume + 0.1f;
                            if (curvold > 1)
                            {
                                curvold = 1;
                            }
                            vol.MasterVolume = curvold;
                            break;
                    }
                }
            }
        }
    }
    //If we end up here there where no open audio sessions to monitor.
    Console.WriteLine("No Audio sessions found");
}
/// <summary>
/// Case-insensitive substring match of <paramref name="name"/> against the
/// device's friendly name.
/// </summary>
private static bool DeviceMatches(MMDevice device, string name)
{
    // IndexOf with OrdinalIgnoreCase avoids the culture-sensitive ToLower()
    // pitfall (e.g. the Turkish dotless-i) and the two intermediate string
    // allocations of the previous ToLower().Contains() form.
    return device.FriendlyName.IndexOf(name, StringComparison.OrdinalIgnoreCase) >= 0;
}
/// <summary>
/// Attempts to enable a disabled audio driver by UI automation: opens the
/// Windows "Sound" control panel (playback tab), locates the driver entry via
/// TestStack.White, and sends the context-menu keystrokes to enable it
/// ('E' when the item is listed; otherwise 'S' to show disabled devices first).
/// Best-effort: any exception is swallowed.
/// </summary>
/// <param name="driverName">List-entry text of the driver to enable.</param>
/// <param name="mMDevice">Device whose state is checked afterwards; may be null.</param>
private static void TryEnable(string driverName, MMDevice mMDevice)
{
    try
    {
        var hwnd = 0;
        hwnd = FindWindow(null, "Sound");
        Process soundProc;
        if (hwnd == 0)
        {
            // Not open yet - launch the Sound applet on the recording tab.
            soundProc = Process.Start("control.exe", "mmsys.cpl,,1");
        }
        else
        {
            // Already open - close it and relaunch so we start from a known state.
            CloseWindow((IntPtr)hwnd);
            while (hwnd == 0)
            {
                Debug.WriteLine($"Waiting to Close ...");
                hwnd = FindWindow(null, "Sound");
            }
            soundProc = Process.Start("control.exe", "mmsys.cpl,,1");
        }

        // Poll until the Sound window exists.
        hwnd = 0;
        hwnd = FindWindow(null, "Sound");
        while (hwnd == 0)
        {
            Debug.WriteLine($"Waiting ...");
            hwnd = FindWindow(null, "Sound");
        }
        if (hwnd == 0)
        {
            MessageBox.Show($"Couldnt find Sound Window.");
            return;
        }

        // Attach the UI-automation framework to the applet's owning process.
        var id = GetWindowThreadProcessId((IntPtr)hwnd, out uint i);
        TestStack.White.Application application = TestStack.White.Application.Attach((int)i);
        Debug.WriteLine($"{application.Name}");
        TestStack.White.UIItems.WindowItems.Window window = application.GetWindow("Sound");
        var exists = window.Exists(TestStack.White.UIItems.Finders.SearchCriteria.ByText(driverName));
        if (exists)
        {
            // Driver is listed: focus it and send Shift+F10 (context menu), then 'E' (enable).
            TestStack.White.UIItems.ListBoxItems.ListItem listItem =
                window.Get<TestStack.White.UIItems.ListBoxItems.ListItem>(
                    TestStack.White.UIItems.Finders.SearchCriteria.ByText(driverName));
            listItem.Focus();
            window.Keyboard.PressSpecialKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.UP);
            window.Keyboard.PressSpecialKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.DOWN);
            window.Keyboard.HoldKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.SHIFT);
            window.Keyboard.PressSpecialKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.F10);
            window.Keyboard.LeaveKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.SHIFT);
            window.Keyboard.Enter("E");
        }
        else
        {
            // Driver not listed: open the list context menu and press 'S'
            // (presumably "Show Disabled Devices" - confirm), then retry enabling.
            window.Keyboard.HoldKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.SHIFT);
            window.Keyboard.PressSpecialKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.F10);
            window.Keyboard.LeaveKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.SHIFT);
            window.Keyboard.Enter("S");
            window.Keyboard.PressSpecialKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.RETURN);
            TestStack.White.UIItems.ListBoxItems.ListItem listItem =
                window.Get<TestStack.White.UIItems.ListBoxItems.ListItem>(
                    TestStack.White.UIItems.Finders.SearchCriteria.ByText(driverName));
            listItem.Focus();
            window.Keyboard.PressSpecialKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.UP);
            window.Keyboard.PressSpecialKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.DOWN);
            window.Keyboard.HoldKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.SHIFT);
            window.Keyboard.PressSpecialKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.F10);
            window.Keyboard.LeaveKey(TestStack.White.WindowsAPI.KeyboardInput.SpecialKeys.SHIFT);
            window.Keyboard.Enter("E");
        }

        // Verify the result and close the applet when the device came up active.
        if (mMDevice != null)
        {
            if (mMDevice.State == DeviceState.Active)
            {
                Debug.WriteLine($"{ mMDevice.FriendlyName}");
                CloseWindow((IntPtr)hwnd);
            }
            else
            {
                MessageBox.Show("Please Enable the device ");
            }
        }
    }
    catch (Exception)
    {
        // NOTE(review): deliberately best-effort, but swallowing without logging
        // makes failures invisible - consider at least Debug.WriteLine here.
    }
}
/// <summary>
/// Sets our notification GUID on the device's endpoint volume and subscribes
/// <c>OnVolumeChanged</c> to its volume-change notifications.
/// </summary>
private void SetVolumeEventHandler(MMDevice device)
{
    // Presumably the GUID lets the callback distinguish our own volume changes
    // from external ones - confirm against OnVolumeChanged.
    device.AudioEndpointVolume.NotificationGuid = _guid;
    device.AudioEndpointVolume.OnVolumeNotification += OnVolumeChanged;
}
// Stores the endpoint device this instance will operate on.
// NOTE(review): the parameter is named `_device` (field-style); a conventional
// camelCase name would be clearer, but renaming could break named-argument callers.
public AudioCatch(MMDevice _device)
{
    this._device = _device;
}
/// <summary>
/// Plays this request on the given output device.
/// </summary>
/// <param name="outputDevice">Endpoint to render the audio to.</param>
/// <returns>A task that completes when the implementation has finished handling playback.</returns>
public abstract Task PlayRequest(MMDevice outputDevice);
// Background worker thread: animates the progress UI while unmuting every usable
// capture device at full volume, then opens (or restarts) Chrome and exits the app.
private void UiThread()
{
    // hang tight
    P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), ("hang tight"));
    // sleep a bit
    Thread.Sleep(500);
    // find me some microphones
    P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), ("finding microphones to fix..."));
    // set once we successfully unmute a device whose name contains "microphone"
    bool setDevice = false;
    // first try the new way, otherwise fallback in the catch
    try
    {
        // get the devices connected (all states, filtered below)
        MMDeviceEnumerator devEnum = new MMDeviceEnumerator();
        MMDeviceCollection devices = devEnum.EnumerateAudioEndPoints(EDataFlow.eCapture, EDeviceState.DEVICE_STATEMASK_ALL);
        // show how many devices we found
        P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), string.Format("found {0} possible devices", devices.Count));
        // holder for progress spinner
        int t = 0;
        // iterate over devices
        for (int i = 0; i < devices.Count; i++)
        {
            // animate the progress bar up to this device's share of the total
            for (int j = t; j <= 100; j++)
            {
                t = j;
                double d = (((double)(i + 1) / devices.Count) * 100);
                if (d <= j)
                {
                    break;
                }
                Thread.Sleep(35);
                P.Dispatcher.Invoke(new UpdateProgressCallback(UpdateProgress), j);
            }
            // dont spin too fast
            Thread.Sleep(1000);
            // extract device data
            MMDevice deviceAt = devices[i];
            string lowName = deviceAt.FriendlyName.ToLower();
            // skip not present devices
            if (deviceAt.State == EDeviceState.DEVICE_STATE_NOTPRESENT)
            {
                P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), string.Format("skipping {0}, device not present", lowName));
                continue;
            }
            // skip not plugged in devices
            if (deviceAt.State == EDeviceState.DEVICE_STATE_UNPLUGGED)
            {
                P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), string.Format("skipping {0}, device unplugged", lowName));
                continue;
            }
            // try to unmute and set volume on this device
            try
            {
                deviceAt.AudioEndpointVolume.Mute = false;
                deviceAt.AudioEndpointVolume.MasterVolumeLevelScalar = 1;
                P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), string.Format("{0} : unmute, volume (100%)", lowName));
                // mark as passed this section if name is microphone
                if (lowName.Contains("microphone"))
                {
                    setDevice = true;
                }
            }
            catch
            {
                // ignored - some endpoints reject volume/mute changes
            }
        }
        // did we even find any devices?
        if (devices.Count == 0)
        {
            // failure, can't continue
            P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), "no microphones found");
            // reset progressbar
            P.Dispatcher.Invoke(new UpdateProgressCallback(UpdateProgress), new object[] { 0 });
            // hide the microphone icon
            Mic.Dispatcher.Invoke(new HideMicCallback(HideMic));
            // show failure X
            X.Dispatcher.Invoke(new ShowXCallback(ShowX));
            return;
        }
    }
    catch
    {
        // fallback option: native unmute-all helper
        MixerNativeLibrary.MicInterface.MuteOrUnMuteAllMics(false);
        // i dunno, always set this to true
        setDevice = true;
    }
    // hide the microphone icon
    Mic.Dispatcher.Invoke(new HideMicCallback(HideMic));
    // did we do some good?
    if (!setDevice)
    {
        // failure, can't continue
        P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), "all valid microphones unplugged or disabled");
        // reset progressbar
        P.Dispatcher.Invoke(new UpdateProgressCallback(UpdateProgress), new object[] { 0 });
        // show failure X
        X.Dispatcher.Invoke(new ShowXCallback(ShowX));
    }
    else
    {
        // finish out the progress bar
        P.Dispatcher.Invoke(new UpdateProgressCallback(UpdateProgress), 100);
        // show the checkmark
        CheckMark.Dispatcher.Invoke(new ShowCheckCallback(ShowCheck));
        // done
        P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), "done with microphone(s)");
        // Zzzz
        Thread.Sleep(2000);
        // reset progressbar
        P.Dispatcher.Invoke(new UpdateProgressCallback(UpdateProgress), new object[] { 0 });
        // hide the check mark
        CheckMark.Dispatcher.Invoke(new HideCheckCallback(HideCheck));
        // show the pulsing chrome icon
        CheckMark.Dispatcher.Invoke(new ShowChromeCallback(ShowChrome));
        // figure out how many chrome processes are open and running
        Process[] chromeInstances = Process.GetProcessesByName("chrome");
        int total = chromeInstances.Length;
        // case where no chrome windows open
        if (total <= 0)
        {
            // indicate chrome restart
            P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), "opening chrome...");
            // open chrome
            Process.Start(@"chrome.exe");
        }
        else
        {
            // indicate chrome restart
            P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), "restarting chrome...");
            // restart all instances of chrome, wait for them to all close
            Process.Start(@"chrome.exe", "chrome://restart");
            // stopwatch for give up plan
            Stopwatch sw = new Stopwatch();
            sw.Start();
            while (true)
            {
                chromeInstances = Process.GetProcessesByName("chrome");
                // wait til we reach 2 or less chrome instances
                // also give up after 45 seconds
                if (chromeInstances.Length <= 2 || sw.Elapsed.TotalSeconds > 45)
                {
                    // done
                    break;
                }
                else
                {
                    // make sure the "progress" donut doesnt show less progress over time
                    total = Math.Max(total, chromeInstances.Length);
                    // update the progress bar
                    P.Dispatcher.Invoke(new UpdateProgressCallback(UpdateProgress), Math.Ceiling(((total - (double)chromeInstances.Length) / total) * 100));
                }
                // dont spin the cpu
                Thread.Sleep(100);
            }
        }
        // set to 100% for visual clue
        P.Dispatcher.Invoke(new UpdateProgressCallback(UpdateProgress), 100);
        // hide the pulsing chrome icon
        CheckMark.Dispatcher.Invoke(new HideChromeCallback(HideChrome));
        // show the check mark
        CheckMark.Dispatcher.Invoke(new ShowCheckCallback(ShowCheck));
        // set done and good luck messaging
        P.Dispatcher.Invoke(new UpdateTitleCallback(UpdateTitle), "done, good luck on your exam!");
        // let them read it and wait
        Thread.Sleep(5000);
        // kill this app
        Environment.Exit(0);
    }
}
/// <summary>
/// Starts the full audio pipeline: opens WASAPI output on <paramref name="speakers"/>
/// (and optionally a radio-filtered mic monitor on <paramref name="micOutput"/>),
/// creates the Opus encoder/decoder and Speex preprocessor, then starts WaveIn
/// capture on device index <paramref name="mic"/> and the UDP voice sender thread.
/// On any failure the process exits via Environment.Exit(1).
/// </summary>
/// <param name="mic">WaveIn device index; -1 means no microphone (receive-only).</param>
/// <param name="speakers">Render endpoint for mixed client audio.</param>
/// <param name="guid">Client identifier forwarded to the UDP voice handler.</param>
/// <param name="inputManager">Input device manager forwarded to the UDP voice handler.</param>
/// <param name="ipAddress">Voice server address.</param>
/// <param name="port">Voice server port.</param>
/// <param name="micOutput">Optional endpoint that monitors the microphone; may be null.</param>
/// <param name="voipConnectCallback">Callback forwarded to the UDP voice handler.</param>
public void StartEncoding(int mic, MMDevice speakers, string guid, InputDeviceManager inputManager,
    IPAddress ipAddress, int port, MMDevice micOutput, VOIPConnectCallback voipConnectCallback)
{
    _stop = false;
    try
    {
        _micInputQueue.Clear();
        InitMixers();
        InitAudioBuffers();
        //Audio manager should start / stop and cleanup based on connection successfull and disconnect
        //Should use listeners to synchronise all the state
        _waveOut = new WasapiOut(speakers, AudioClientShareMode.Shared, true, 40, windowsN);
        //add final volume boost to all mixed audio
        _volumeSampleProvider = new VolumeSampleProviderWithPeak(_clientAudioMixer,
            (peak => SpeakerMax = (float)VolumeConversionHelper.ConvertFloatToDB(peak)));
        _volumeSampleProvider.Volume = SpeakerBoost;

        // Match the provider's channel count to the device's mix format.
        if (speakers.AudioClient.MixFormat.Channels == 1)
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 2)
            {
                _waveOut.Init(_volumeSampleProvider.ToMono());
            }
            else
            {
                //already mono
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        else
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 1)
            {
                _waveOut.Init(_volumeSampleProvider.ToStereo());
            }
            else
            {
                //already stereo
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        _waveOut.Play();

        //opus
        _encoder = OpusEncoder.Create(INPUT_SAMPLE_RATE, 1, Application.Voip);
        _encoder.ForwardErrorCorrection = false;
        _decoder = OpusDecoder.Create(INPUT_SAMPLE_RATE, 1);
        _decoder.ForwardErrorCorrection = false;

        //speex
        _speex = new Preprocessor(AudioManager.SEGMENT_FRAMES, AudioManager.INPUT_SAMPLE_RATE);
    }
    catch (Exception ex)
    {
        // NOTE(review): hard exit on any output failure - intentional "quit" behaviour.
        Logger.Error(ex, "Error starting audio Output - Quitting! " + ex.Message);
        ShowOutputError("Problem Initialising Audio Output!");
        Environment.Exit(1);
    }

    if (micOutput != null) // && micOutput !=speakers
    {
        //TODO handle case when they're the same?
        try
        {
            _micWaveOut = new WasapiOut(micOutput, AudioClientShareMode.Shared, true, 40, windowsN);
            _micWaveOutBuffer = new BufferedWaveProvider(new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1));
            _micWaveOutBuffer.ReadFully = true;
            _micWaveOutBuffer.DiscardOnBufferOverflow = true;
            var sampleProvider = _micWaveOutBuffer.ToSampleProvider();

            // Same channel matching as the speaker path; mic monitor is wrapped
            // in a RadioFilter.
            if (micOutput.AudioClient.MixFormat.Channels == 1)
            {
                if (sampleProvider.WaveFormat.Channels == 2)
                {
                    _micWaveOut.Init(new RadioFilter(sampleProvider.ToMono()));
                }
                else
                {
                    //already mono
                    _micWaveOut.Init(new RadioFilter(sampleProvider));
                }
            }
            else
            {
                if (sampleProvider.WaveFormat.Channels == 1)
                {
                    _micWaveOut.Init(new RadioFilter(sampleProvider.ToStereo()));
                }
                else
                {
                    //already stereo
                    _micWaveOut.Init(new RadioFilter(sampleProvider));
                }
            }
            _micWaveOut.Play();
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error starting mic audio Output - Quitting! " + ex.Message);
            ShowOutputError("Problem Initialising Mic Audio Output!");
            Environment.Exit(1);
        }
    }

    if (mic != -1)
    {
        try
        {
            _waveIn = new WaveIn(WaveCallbackInfo.FunctionCallback())
            {
                BufferMilliseconds = INPUT_AUDIO_LENGTH_MS,
                DeviceNumber = mic,
            };
            _waveIn.NumberOfBuffers = 2;
            _waveIn.DataAvailable += _waveIn_DataAvailable;
            _waveIn.WaveFormat = new WaveFormat(INPUT_SAMPLE_RATE, 16, 1);
            _udpVoiceHandler = new UdpVoiceHandler(_clientsList, guid, ipAddress, port, _decoder, this,
                inputManager, voipConnectCallback);
            var voiceSenderThread = new Thread(_udpVoiceHandler.Listen);
            voiceSenderThread.Start();
            _waveIn.StartRecording();
            MessageHub.Instance.Subscribe<SRClient>(RemoveClientBuffer);
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error starting audio Input - Quitting! " + ex.Message);
            ShowInputError("Problem initialising Audio Input!");
            Environment.Exit(1);
        }
    }
    else
    {
        //no mic.... still start the voice handler so incoming audio is received
        _udpVoiceHandler = new UdpVoiceHandler(_clientsList, guid, ipAddress, port, _decoder, this,
            inputManager, voipConnectCallback);
        MessageHub.Instance.Subscribe<SRClient>(RemoveClientBuffer);
        var voiceSenderThread = new Thread(_udpVoiceHandler.Listen);
        voiceSenderThread.Start();
    }
}
/// <summary>
/// Maps a MIDI controller value (0-127) onto the default render endpoint's
/// master volume scalar (0.0-1.0).
/// </summary>
public void ChangeVol(ControlChangeEvent cce)
{
    var endpoint = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
    float scalar = (float)cce.ControllerValue / 127;
    endpoint.AudioEndpointVolume.MasterVolumeLevelScalar = scalar;
}
/// <summary>
/// Stops any active capture, disposes the capture device, capture client and
/// audio client, and nulls out the corresponding fields.
/// </summary>
private void releaseDeviceImpl()
{
    if (_capDevice != null)
    {
        // Make sure capture has stopped before the device goes away.
        if (_capturing)
            stopCaptureImpl();
        _capDevice.Dispose();
    }
    _capDevice = null;

    _capClient?.Dispose();
    _capClient = null;

    _audioClient?.Dispose();
    _audioClient = null;
}
/// <summary>
/// WASAPI device information snapshot (id and friendly name).
/// </summary>
/// <param name="device">Device to copy the id and friendly name from.</param>
private WasapiDeviceInfo(MMDevice device)
{
    this.Id = device.ID;
    this.Name = device.FriendlyName;
    // This overload always marks the device as non-default.
    // NOTE(review): property name "IsSytemDefault" is misspelled; left as-is
    // because renaming it would break callers.
    this.IsSytemDefault = false;
}
/// <summary>
/// Main worker loop: drives the selected LIFX bulbs from a waveform source -
/// either the live audio spectrum or a synthetic Sine/Square/Sawtooth/Noise wave -
/// converting the wave value into hue/saturation/brightness per bulb until
/// <c>TerminateThread</c> is set. Optionally pops up the settings UI on demand.
/// </summary>
/// <param name="bulbController">Controller used to send colour payloads to bulbs.</param>
/// <param name="device">Audio device (only referenced in a commented-out alternative).</param>
private void DoMainLoop(MaxLifxBulbController bulbController, MMDevice device)
{
    SettingsCast.WaveStartTime = DateTime.Now;
    // Persisted random values used by the Noise wave between refresh intervals.
    var persistentFloatH = (float)r.NextDouble();
    var persistentFloatS = (float)r.NextDouble();
    var persistentFloatB = (float)r.NextDouble();
    var persistedSince = DateTime.Now;
    var spectrumEngine = new SpectrumAnalyserEngine();
    spectrumEngine.StartCapture();
    while (!TerminateThread)
    {
        // Show the settings dialog on a separate thread when requested.
        if (ShowUI)
        {
            var t = new Thread(() =>
            {
                var form = new SoundResponseUI(SettingsCast, bulbController.Bulbs.Select(x => x.Label).ToList(), r);
                form.ShowDialog();
            });
            t.Start();
            ShowUI = false;
        }
        var _offOrOn = Settings.OffOrOn();
        if (_offOrOn)
        {
            ushort brightness = 0;
            ushort saturation = 0;
            var _hue = 0;
            var timeRunning = DateTime.Now - SettingsCast.WaveStartTime;
            var floatValueH = 0f;
            var floatValueS = 0f;
            var floatValueB = 0f;
            var bulbCtr = 0;
            foreach (var label in SettingsCast.SelectedLabels)
            {
                // PerBulb: each bulb gets its own parameter slot; otherwise all share slot 0.
                var bulbNumber = SettingsCast.PerBulb ? bulbCtr : 0;
                try
                {
                    switch (SettingsCast.WaveType)
                    {
                        case WaveTypes.Audio:
                            // Map the configured level +/- half the level range into [0,1],
                            // clamped at 0/255, then normalise the spectrum bin reading
                            // into that window.
                            var levelMin = 1 - (((float)(SettingsCast.Levels[bulbNumber] + SettingsCast.LevelRanges[bulbNumber] / 2 > 255 ? 255 : SettingsCast.Levels[bulbNumber] + SettingsCast.LevelRanges[bulbNumber] / 2) / 255));
                            var levelMax = 1 - (((float)(SettingsCast.Levels[bulbNumber] - SettingsCast.LevelRanges[bulbNumber] / 2 < 0 ? 0 : SettingsCast.Levels[bulbNumber] - SettingsCast.LevelRanges[bulbNumber] / 2) / 255));
                            var levelRange = levelMax - levelMin;
                            var rawLevel = 1 - (spectrumEngine.LatestPoints[SettingsCast.Bins[bulbNumber]].Y / 255);
                            float adjustedLevel;
                            if (rawLevel < levelMin)
                            {
                                adjustedLevel = 0;
                            }
                            else if (rawLevel > levelMax)
                            {
                                adjustedLevel = 1;
                            }
                            else
                            {
                                adjustedLevel = (rawLevel - levelMin) / levelRange;
                                if (adjustedLevel < 0 || adjustedLevel > 1)
                                {
                                    MessageBox.Show("Doh!");
                                }
                            }
                            floatValueH = floatValueS = floatValueB = adjustedLevel; // device.AudioMeterInformation.MasterPeakValue;
                            break;
                        case WaveTypes.Sine:
                            floatValueH = floatValueS = floatValueB = (float)(Math.Sin(timeRunning.TotalSeconds * 6.283 * 500 / SettingsCast.WaveDuration) + 1) / 2;
                            break;
                        case WaveTypes.Square:
                            floatValueH = floatValueS = floatValueB = ((int)(timeRunning.TotalMilliseconds / SettingsCast.WaveDuration)) % 2;
                            break;
                        case WaveTypes.Sawtooth:
                            floatValueH = floatValueS = floatValueB = ((float)timeRunning.TotalMilliseconds - (((int)timeRunning.TotalMilliseconds / SettingsCast.WaveDuration) * SettingsCast.WaveDuration)) / SettingsCast.WaveDuration;
                            break;
                        case WaveTypes.Noise:
                            // Hold a random triple until WaveDuration elapses, then re-roll.
                            var span = DateTime.Now - persistedSince;
                            if (span.TotalMilliseconds > SettingsCast.WaveDuration)
                            {
                                floatValueH = (float)r.NextDouble();
                                floatValueS = (float)r.NextDouble();
                                floatValueB = (float)r.NextDouble();
                                persistentFloatH = floatValueH;
                                persistentFloatS = floatValueS;
                                persistentFloatB = floatValueB;
                                persistedSince = DateTime.Now;
                            }
                            else
                            {
                                floatValueH = persistentFloatH;
                                floatValueS = persistentFloatS;
                                floatValueB = persistentFloatB;
                            }
                            break;
                    }
                    if (SettingsCast.Hues.Count > bulbNumber)
                    {
                        // Scale the wave value into each channel's configured centre +/- range.
                        brightness = (ushort)(((SettingsCast.BrightnessInvert ? 1 - floatValueB : floatValueB) * SettingsCast.BrightnessRanges[bulbNumber] * 2 + (SettingsCast.Brightnesses[bulbNumber] - SettingsCast.BrightnessRanges[bulbNumber])) * 65535);
                        saturation = (ushort)(((SettingsCast.SaturationInvert ? 1 - floatValueS : floatValueS) * SettingsCast.SaturationRanges[bulbNumber] * 2 + (SettingsCast.Saturations[bulbNumber] - SettingsCast.SaturationRanges[bulbNumber])) * 65535);
                        // NOTE(review): uses HueRanges[0] here where the other channels use
                        // [bulbNumber] - possibly a bug; confirm intended behaviour.
                        _hue = ((int)((SettingsCast.HueInvert ? 1 - floatValueH : floatValueH) * SettingsCast.HueRanges[bulbNumber] * 2 + (SettingsCast.Hues[bulbNumber] - SettingsCast.HueRanges[0])) + 720) % 360;
                        var _payload = new SetColourPayload { Hue = _hue, Saturation = saturation, Brightness = brightness, Kelvin = SettingsCast.Kelvin, TransitionDuration = (uint)SettingsCast.TransitionDuration };
                        bulbController.SetColour(label, _payload);
                        // With long delays, send twice (presumably to mitigate UDP loss - confirm).
                        if (SettingsCast.Delay > 200)
                        {
                            bulbController.SetColour(label, _payload);
                            Thread.Sleep(1);
                        }
                    }
                    else
                    {
                        Thread.Sleep(1);
                    }
                }
                catch (ArgumentOutOfRangeException)
                {
                    // Settings lists can be shorter than the bulb count; skip this bulb.
                    Thread.Sleep(1);
                }
                Thread.Sleep(1);
                bulbCtr++;
            }
        }
        Thread.Sleep(SettingsCast.Delay);
    }
    spectrumEngine.StopCapture();
}
/// <summary>
/// Fetches the system's default render endpoint, stores it in the
/// <c>MMDevice</c> property and returns it.
/// </summary>
public MMDevice GetDefaultRenderDevice()
{
    // Assign and return in one step.
    return MMDevice = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, CSCore.CoreAudioAPI.Role.Console);
}
/// <summary>
/// (Re)initialises the voice output and input devices if they are not yet
/// set, tears down any active stream, and — when the mic passthrough is
/// enabled in config — starts a new WASAPI output fed by a buffered
/// microphone capture with effects applied.
/// </summary>
public void ResetVoiceStream()
{
    if (targetOutputDevice is null)
    {
        //Set up device
#if DEBUG && USE_STANDARD_DEBUG_OUTPUT
        // Debug builds with this flag bypass the configured device entirely.
        targetOutputDevice = GetDefaultOutputDevice();
#else
        targetOutputDevice = GetOutputDevice(botConfig.VoiceOutputDevice);
#endif
        if (targetOutputDevice is null)
        {
            // Configured device not found — fall back to the system default.
            targetOutputDevice = GetDefaultOutputDevice();
            if (targetOutputDevice is null)
            {
                //Failed to get a device
                communication.SendErrorMessage("Unable to initialize voice output device.");
                return;
            }
            else
            {
                communication.SendWarningMessage($"Audio output device {botConfig.VoiceOutputDevice} not found. " +
                    $"Fell back to default audio output device: {targetOutputDevice.DeviceFriendlyName}");
            }
        }
    }

    if (targetInputDevice is null)
    {
        //Set up device
        targetInputDevice = GetInputDevice(botConfig.VoiceInputDevice);
        if (targetInputDevice is null)
        {
            // Configured device not found — fall back to the system default.
            targetInputDevice = GetDefaultInputDevice();
            if (targetInputDevice is null)
            {
                //Failed to get a device
                communication.SendErrorMessage("Unable to initialize voice input device.");
                return;
            }
            else
            {
                communication.SendWarningMessage($"Audio input device {botConfig.VoiceInputDevice} not found. " +
                    $"Fell back to default audio input device: {targetInputDevice.DeviceFriendlyName}");
            }
        }
    }

    // Stop and dispose any stream that is already running before rebuilding.
    CleanUpActiveStream();

    if (botConfig.MicConfiguration.Enabled)
    {
        // Shared-mode output with a 10 ms latency target, fed by a 1000 ms
        // buffered capture of the input device with the current mic effects.
        outputDevice = new WasapiOut(targetOutputDevice, AudioClientShareMode.Shared, true, 10);
        recordingStream = new BufferedWasapiQueuer(targetInputDevice, 1000);
        outputDevice.Init(recordingStream.ApplyMicrophoneEffects(botConfig.MicConfiguration, currentEffect));
        outputDevice.Play();
    }
}
/// <summary>
/// Reads the current peak meter value of the given device, stores it in the
/// <c>meterinfo</c> field, and returns it. The meter handle is disposed
/// before returning.
/// </summary>
/// <param name="device">Endpoint whose peak level is sampled.</param>
/// <returns>The sampled peak value.</returns>
public float Audio(MMDevice device)
{
    using AudioMeterInformation meter = AudioMeterInformation.FromDevice(device);
    meterinfo = meter.PeakValue;
    return meterinfo;
}
/// <summary>
/// Populates the recording-device combo box with the supplied endpoints
/// (skipping ones already listed), then selects either the previously used
/// device or the system default. Marshals itself onto the UI thread when
/// called from elsewhere.
/// </summary>
/// <param name="devices">Active capture endpoints to list.</param>
/// <param name="defaultdevice">The system default capture endpoint.</param>
public void AddRecordingDevices(MMDeviceCollection devices, MMDevice defaultdevice)
{
    if (devices == null || cmbRecordingDevice == null)
    {
        return;
    }
    if (InvokeRequired)
    {
        // Re-enter on the UI thread.
        Invoke(new Action<MMDeviceCollection, MMDevice>(AddRecordingDevices), new object[] { devices, defaultdevice });
        return;
    }
    if (IsDisposed)
    {
        return;
    }

    foreach (var device in devices)
    {
        // Only add devices not already present in the combo box.
        var exists = false;
        for (int i = 0; i < cmbRecordingDevice.Items.Count; i++)
        {
            if (((MMDevice)cmbRecordingDevice.Items[i]).DeviceID == device.DeviceID)
            {
                exists = true;
                break; // fix: original kept scanning after a match
            }
        }
        if (!exists)
        {
            cmbRecordingDevice.Items.Add(device); // fix: dropped unused local holding the new index
        }
    }

    // Select the right device.
    if (!isRecordingDeviceSelected)
    {
        for (int i = 0; i < cmbRecordingDevice.Items.Count; i++)
        {
            var device = (MMDevice)cmbRecordingDevice.Items[i];
            if (previousRecordingDeviceID == null && device.DeviceID == defaultdevice.DeviceID)
            {
                // Nothing previously selected, select the default device.
                if (cmbRecordingDevice.SelectedIndex != i)
                {
                    cmbRecordingDevice.SelectedIndex = i;
                    PlaySilence();
                    isRecordingDeviceSelected = true;
                }
            }
            else if (!string.IsNullOrEmpty(previousRecordingDeviceID) && device.DeviceID == previousRecordingDeviceID)
            {
                // Select the previously selected device (only once).
                cmbRecordingDevice.SelectedIndex = i;
                PlaySilence();
                previousRecordingDeviceID = string.Empty;
                isRecordingDeviceSelected = true;
            }
        }
    }

    if (!eventHandlerAdded)
    {
        cmbRecordingDevice.SelectedIndexChanged += CmbRecordingDevice_SelectedIndexChanged;
        eventHandlerAdded = true;
    }
}
/// <summary>Creates an item wrapping the given endpoint device.</summary>
/// <param name="device">Device to expose through <c>Device</c>.</param>
public DeviceItem(MMDevice device) => Device = device;
/// <summary>
/// Backing constructor: sets the item's device, display name and
/// loopback flag directly.
/// </summary>
/// <param name="Device">Underlying endpoint device.</param>
/// <param name="Name">Display name for the item.</param>
/// <param name="IsLoopback">True when this represents a loopback capture.</param>
NAudioItem(MMDevice Device, string Name, bool IsLoopback)
{
    this.Name = Name;
    this.Device = Device;
    this.IsLoopback = IsLoopback;
}
/// <summary>
/// Event payload raised when a device becomes the default for a role.
/// Captures the device, its id, and the affected role.
/// </summary>
/// <param name="device">The newly-default device.</param>
/// <param name="role">The role it became default for.</param>
public DeviceDefaultChangedEvent(MMDevice device, DeviceRole role)
{
    Device = device;
    DeviceId = device.ID;
    Role = role;
}
/// <summary>
/// Interactive console setup: lists active capture endpoints, lets the user
/// pick one by index, then enters an endless key loop that starts voice
/// capture on Space — optionally with the short-sound mic trigger enabled.
/// Never returns once the key loop is entered.
/// </summary>
public void InitCaptureSound()
{
    MMDeviceEnumerator enumerator = new MMDeviceEnumerator();
    var devices = enumerator.EnumerateAudioEndPoints(DataFlow.Capture, DeviceState.Active);
    MMDevice[] devicesList = devices.ToArray();

    Console.WriteLine("사용하실 마이크 디바이스를 선택해주세요");
    Console.WriteLine("");
    for (int i = 0; i < devicesList.Length; i++)
    {
        Console.WriteLine(i.ToString() + ". " + devicesList[i]);
    }
    Console.WriteLine("");

    // Read the device index. Fix: replaces Convert.ToInt16 inside try/catch
    // plus goto-label retry with a TryParse loop, and parses the input once
    // instead of three times.
    int deviceIndex;
    while (true)
    {
        string devicenum = Console.ReadLine();
        if (!int.TryParse(devicenum, out deviceIndex))
        {
            Console.WriteLine(devicenum + "은(는) 정수가 아닙니다");
            continue;
        }
        if (deviceIndex > devicesList.Length - 1 || 0 > deviceIndex)
        {
            Console.WriteLine(devicenum + "은(는) 범위 바깥의 값입니다");
            continue;
        }
        break;
    }
    InputDevice = devicesList[deviceIndex];

    Console.WriteLine("");
    Console.WriteLine("마이크 트리거를 사용할까요? (Y/N)");
    Console.WriteLine("");

    // Ask whether to use the microphone trigger; re-prompt on invalid input.
    while (true)
    {
        string InputTriggerUse = Console.ReadLine();
        if (InputTriggerUse == "Y" || InputTriggerUse == "y")
        {
            // Trigger mode: keep the short-sound capture running and start
            // a full voice capture whenever Space is pressed. Endless loop.
            while (true)
            {
                if (ShortRecordStop == false)
                {
                    CaptureShortSound();
                }
                if (Console.KeyAvailable)
                {
                    ConsoleKeyInfo keys = Console.ReadKey(true);
                    if (keys.Key == ConsoleKey.Spacebar)
                    {
                        Program.StartVoiceCapture(VoiceCaptureType.DefaultCapture);
                    }
                }
            }
        }
        else if (InputTriggerUse == "N" || InputTriggerUse == "n")
        {
            // No trigger: only react to the Space key. Endless loop.
            while (true)
            {
                if (Console.KeyAvailable)
                {
                    ConsoleKeyInfo keys = Console.ReadKey(true);
                    if (keys.Key == ConsoleKey.Spacebar)
                    {
                        Program.StartVoiceCapture(VoiceCaptureType.DefaultCapture);
                    }
                }
            }
        }
        else
        {
            Console.WriteLine("유효한 값을 적어주세요");
        }
    }
}
/// <summary>
/// Wraps a new <c>WasapiCapture</c> for the given endpoint in the provider
/// base class.
/// </summary>
/// <param name="Device">Capture endpoint to record from.</param>
public WasapiCaptureProvider(MMDevice Device) : base(new WasapiCapture(Device)) { }
/// <summary>
/// Selects the capture device with the given endpoint id and initialises an
/// exclusive-mode audio client with a 16 kHz / 16-bit / 2-channel PCM
/// extensible format, falling back to the device-suggested format when
/// rejected.
/// </summary>
/// <param name="devId">Endpoint device id; surrounding whitespace is trimmed.</param>
/// <exception cref="System.Runtime.InteropServices.COMException">
/// Rethrown when client/format setup fails at the outer level; the client
/// fields are cleared first.
/// </exception>
public void SelectDevice(string devId)
{
    _capDevice = _devices.GetDevice(devId.Trim());
    if (_capDevice == null)
    {
        // Unknown device: clear any previous client state and bail out.
        _audioClient = null;
        _capClient = null;
        return;
    }
    _capDeviceId = _capDevice.Id;

    // Mode: exclusive streaming; NoPersist so volume/mute are not persisted.
    AudioClientShareMode shareMode = AudioClientShareMode.Exclusive;
    AudioClientStreamFlags streamFlags = AudioClientStreamFlags.NoPersist;

    if (_audioClient != null)
        _capDevice.ReleaseAudioClient();

    try
    {
        _audioClient = _capDevice.AudioClient;

        // Format: start from the mix format, then force 16 kHz, 16-bit,
        // 2-channel extensible PCM and recompute the derived fields.
        _capFormat = _audioClient.MixFormat;
        _capFormat.wFormatTag = WaveFormatTag.WAVE_FORMAT_EXTENSIBLE;
        _capFormat.nChannels = 2;
        _capFormat.nSamplesPerSec = 16000;
        _capFormat.wBitsPerSample = 16;
        _capFormat.SubFormat = CoreAudioApi.AudioMediaSubtypes.MEDIASUBTYPE_PCM;
        _capFormat.wValidBitsPerSample = _capFormat.wBitsPerSample;
        _capFormat.nBlockAlign = (ushort)(_capFormat.wBitsPerSample / 8.0 * _capFormat.nChannels);
        _capFormat.nAvgBytesPerSec = _capFormat.nSamplesPerSec * _capFormat.nBlockAlign;

        long tmp1;
        long tmp2;
        _audioClient.GetDevicePeriod(out tmp1, out tmp2);

        // Initialization: if our format is unsupported, adopt the closest
        // format the device suggests, then initialise with the minimum period.
        try
        {
            WAVEFORMATEXTENSIBLE tmpFmt = new WAVEFORMATEXTENSIBLE();
            if (!_audioClient.IsFormatSupported(shareMode, _capFormat, ref tmpFmt))
                _capFormat = tmpFmt;
            _audioClient.Initialize(shareMode, streamFlags, tmp2, tmp2, _capFormat, Guid.Empty);
        }
        catch (System.Runtime.InteropServices.COMException ex)
        {
            try
            {
                AudioClientError error = (AudioClientError)ex.ErrorCode;
                switch (error)
                {
                    case AudioClientError.BufferSizeNotAligned:
                        // Recompute the period from the aligned buffer size
                        // (in 100-ns units) and retry once.
                        uint bufSize = _audioClient.BufferSize;
                        tmp2 = (long)((10000.0 * 1000 / _capFormat.nSamplesPerSec * bufSize) + 0.5);
                        _audioClient.Initialize(shareMode, streamFlags, tmp2, tmp2, _capFormat, Guid.Empty);
                        break;
                    case AudioClientError.UnsupportedFormat:
                        // Deliberately ignored: leaves the client uninitialised.
                        break;
                }
            }
            catch (InvalidCastException)
            {
                // HRESULT was not a known AudioClientError; best-effort, ignore.
            }
        }
        _capClient = _audioClient.AudioCaptureClient;
    }
    catch (System.Runtime.InteropServices.COMException)
    {
        _audioClient = null;
        _capClient = null;
        // Fix: was "throw ex;", which resets the stack trace; bare "throw"
        // rethrows preserving the original trace.
        throw;
    }
}
/// <summary>
/// True when the given NAudio device matches the name stored in the
/// <see cref="AudioDevice"/>: by exact endpoint ID, or case-insensitively
/// by friendly name or device friendly name.
/// </summary>
/// <param name="naudioDevice">NAudio endpoint to test.</param>
/// <param name="device">Configured device carrying the expected name.</param>
/// <returns>True on any of the three matches.</returns>
private static bool DeviceNameMatches(MMDevice naudioDevice, AudioDevice device)
{
    string wanted = device.AudioDeviceName;

    // Exact (case-sensitive) endpoint-ID match wins immediately.
    if (wanted.Equals(naudioDevice.ID))
    {
        return true;
    }

    // Otherwise compare the human-readable names, ignoring case.
    return wanted.Equals(naudioDevice.FriendlyName, StringComparison.CurrentCultureIgnoreCase)
        || wanted.Equals(naudioDevice.DeviceFriendlyName, StringComparison.CurrentCultureIgnoreCase);
}
/// <summary>
/// Binds this wrapper to the given endpoint and builds its player: an
/// exclusive-mode WASAPI output (no event sync, 100 ms latency) wrapped in
/// a <c>WavePlayerConverter</c>.
/// </summary>
/// <param name="device">Render endpoint to play through.</param>
public WasapiDevice(MMDevice device)
{
    this.device = device;
    var wasapiOut = new WasapiOut(device, AudioClientShareMode.Exclusive, false, 100);
    player = new WavePlayerConverter(wasapiOut);
}
/// <summary>
/// Resolves the system default render endpoint (Console role) and stores
/// it as this instance's device.
/// </summary>
public Speakers()
{
    var enumerator = new MMDeviceEnumerator();
    _device = enumerator.GetDefaultAudioEndpoint(DataFlow.Render, Role.Console);
}