/// <summary>
/// Starts full-duplex audio for a live connection: initialises the mixed speaker output,
/// the Opus codec pair and Speex preprocessor, optionally starts a mic-passthrough
/// output, then begins mic capture and launches the TCP voice sender thread.
/// Any initialisation failure is treated as fatal: it is logged, surfaced to the user
/// and the process exits.
/// </summary>
/// <param name="mic">WaveIn device number for the microphone, or -1 when no mic input should be started.</param>
/// <param name="speakers">Output device for received/mixed client audio.</param>
/// <param name="guid">This client's GUID, passed to the voice handler.</param>
/// <param name="inputManager">Input (PTT) manager handed to the voice handler.</param>
/// <param name="ipAddress">Voice server address.</param>
/// <param name="port">Voice server port.</param>
/// <param name="micOutput">Optional device to echo the local mic on; null disables passthrough.</param>
/// <param name="voipConnectCallback">Callback invoked by the voice handler on connection state changes.</param>
public void StartEncoding(int mic, MMDevice speakers, string guid, InputDeviceManager inputManager,
    IPAddress ipAddress, int port, MMDevice micOutput, VOIPConnectCallback voipConnectCallback)
{
    _stop = false;

    try
    {
        _micInputQueue.Clear();

        InitMixers();
        InitAudioBuffers();

        //Audio manager should start / stop and cleanup based on connection successfull and disconnect
        //Should use listeners to synchronise all the state
        _waveOut = new WasapiOut(speakers, AudioClientShareMode.Shared, true, 40); // 40ms shared-mode latency

        //add final volume boost to all mixed audio; the peak callback feeds the speaker VU value
        _volumeSampleProvider = new VolumeSampleProviderWithPeak(_clientAudioMixer,
            (peak => SpeakerMax = (float)VolumeConversionHelper.ConvertFloatToDB(peak)));
        _volumeSampleProvider.Volume = SpeakerBoost;

        // Match the pipeline's channel count to the device's mix format before Init.
        if (speakers.AudioClient.MixFormat.Channels == 1)
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 2)
            {
                _waveOut.Init(_volumeSampleProvider.ToMono());
            }
            else
            {
                //already mono
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        else
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 1)
            {
                _waveOut.Init(_volumeSampleProvider.ToStereo());
            }
            else
            {
                //already stereo
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        _waveOut.Play();

        //opus - mono codec pair at the app's input sample rate
        _encoder = OpusEncoder.Create(INPUT_SAMPLE_RATE, 1, Application.Voip);
        _encoder.ForwardErrorCorrection = false;
        _decoder = OpusDecoder.Create(INPUT_SAMPLE_RATE, 1);
        _decoder.ForwardErrorCorrection = false;

        //speex preprocessor sized to exactly one Opus frame
        _speex = new Preprocessor(AudioManager.SEGMENT_FRAMES, AudioManager.INPUT_SAMPLE_RATE);
    }
    catch (Exception ex)
    {
        Logger.Error(ex, "Error starting audio Output - Quitting! " + ex.Message);
        ShowOutputError("Problem Initialising Audio Output!");
        Environment.Exit(1);
    }

    if (micOutput != null) // && micOutput !=speakers
    {
        //TODO handle case when they're the same?
        try
        {
            _micWaveOut = new WasapiOut(micOutput, AudioClientShareMode.Shared, true, 40);

            _micWaveOutBuffer = new BufferedWaveProvider(new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1));
            _micWaveOutBuffer.ReadFully = true;               // keep the output fed (silence) when buffer is empty
            _micWaveOutBuffer.DiscardOnBufferOverflow = true; // drop audio rather than stall if playback lags

            var sampleProvider = _micWaveOutBuffer.ToSampleProvider();

            // Same channel-count matching as the main output above.
            if (micOutput.AudioClient.MixFormat.Channels == 1)
            {
                if (sampleProvider.WaveFormat.Channels == 2)
                {
                    _micWaveOut.Init(sampleProvider.ToMono());
                }
                else
                {
                    //already mono
                    _micWaveOut.Init(sampleProvider);
                }
            }
            else
            {
                if (sampleProvider.WaveFormat.Channels == 1)
                {
                    _micWaveOut.Init(sampleProvider.ToStereo());
                }
                else
                {
                    //already stereo
                    _micWaveOut.Init(sampleProvider);
                }
            }
            _micWaveOut.Play();
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error starting mic audio Output - Quitting! " + ex.Message);
            ShowOutputError("Problem Initialising Mic Audio Output!");
            Environment.Exit(1);
        }
    }

    // mic == -1 means no input device selected; the capture side is skipped entirely.
    if (mic != -1)
    {
        try
        {
            _waveIn = new WaveIn(WaveCallbackInfo.FunctionCallback())
            {
                BufferMilliseconds = INPUT_AUDIO_LENGTH_MS,
                DeviceNumber = mic,
            };
            _waveIn.NumberOfBuffers = 2;
            _waveIn.DataAvailable += _waveIn_DataAvailable;
            _waveIn.WaveFormat = new WaveFormat(INPUT_SAMPLE_RATE, 16, 1);

            _tcpVoiceHandler = new TCPVoiceHandler(_clientsList, guid, ipAddress, port, _decoder, this,
                inputManager, voipConnectCallback);
            var voiceSenderThread = new Thread(_tcpVoiceHandler.Listen);
            voiceSenderThread.Start();

            _waveIn.StartRecording();

            MessageHub.Instance.Subscribe<SRClient>(RemoveClientBuffer);
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error starting audio Input - Quitting! " + ex.Message);
            ShowInputError("Problem initialising Audio Input!");
            Environment.Exit(1);
        }
    }
}
//Stopwatch _stopwatch = new Stopwatch();

/// <summary>
/// Mic capture callback (TCP path): frames recorded PCM into SEGMENT_FRAMES-sized
/// chunks, applies MicBoost and Speex preprocessing, updates the mic VU peak,
/// Opus-encodes each frame and sends it via the TCP voice handler, echoing the raw
/// PCM to the local mic-output buffer when the frame was actually sent.
/// FIX: removed an exact duplicate of the queue-refill block that was unreachable
/// (pcmShort is always non-null once the first refill runs), and corrected the
/// misleading error message in the not-sent branch.
/// </summary>
private void _waveIn_DataAvailable(object sender, WaveInEventArgs e)
{
    // if(_stopwatch.ElapsedMilliseconds > 22)
    //Console.WriteLine($"Time: {_stopwatch.ElapsedMilliseconds} - Bytes: {e.BytesRecorded}");
    // _stopwatch.Restart();

    short[] pcmShort = null;

    // Fast path: exactly one frame recorded and nothing queued - copy straight out.
    if ((e.BytesRecorded / 2 == SEGMENT_FRAMES) && (_micInputQueue.Count == 0))
    {
        //perfect!
        pcmShort = new short[SEGMENT_FRAMES];
        Buffer.BlockCopy(e.Buffer, 0, pcmShort, 0, e.BytesRecorded);
    }
    else
    {
        // NOTE(review): this enqueues individual BYTES that are later dequeued one per
        // short sample - the framing looks wrong on this fallback path; confirm the
        // element type of _micInputQueue and whether this path is ever hit in practice.
        for (var i = 0; i < e.BytesRecorded; i++)
        {
            _micInputQueue.Enqueue(e.Buffer[i]);
        }
    }

    //read out the queue one whole frame at a time
    while ((pcmShort != null) || (_micInputQueue.Count >= AudioManager.SEGMENT_FRAMES))
    {
        //null sound buffer so read from the queue
        if (pcmShort == null)
        {
            pcmShort = new short[AudioManager.SEGMENT_FRAMES];
            for (var i = 0; i < AudioManager.SEGMENT_FRAMES; i++)
            {
                pcmShort[i] = _micInputQueue.Dequeue();
            }
        }

        try
        {
            //volume boost pre
            for (var i = 0; i < pcmShort.Length; i++)
            {
                // n.b. no clipping test going on here
                pcmShort[i] = (short)(pcmShort[i] * MicBoost);
            }

            //process with Speex
            _speex.Process(new ArraySegment<short>(pcmShort));

            float max = 0;
            for (var i = 0; i < pcmShort.Length; i++)
            {
                //determine peak
                if (pcmShort[i] > max)
                {
                    max = pcmShort[i];
                }
            }

            //convert to dB for the mic VU meter
            MicMax = (float)VolumeConversionHelper.ConvertFloatToDB(max / 32768F);

            var pcmBytes = new byte[pcmShort.Length * 2];
            Buffer.BlockCopy(pcmShort, 0, pcmBytes, 0, pcmBytes.Length);

            //encode as opus bytes
            int len;
            var buff = _encoder.Encode(pcmBytes, pcmBytes.Length, out len);

            if ((_tcpVoiceHandler != null) && (buff != null) && (len > 0))
            {
                //create copy with small buffer
                var encoded = new byte[len];
                Buffer.BlockCopy(buff, 0, encoded, 0, len);

                // Console.WriteLine("Sending: " + e.BytesRecorded);
                if (_tcpVoiceHandler.Send(encoded, len))
                {
                    //sent over the network, so also play over the local mic output
                    _micWaveOutBuffer?.AddSamples(pcmBytes, 0, pcmBytes.Length);
                }
            }
            else
            {
                // FIX: old message claimed an invalid input length, but pcmShort is always
                // a full frame here - report the actual failure conditions instead.
                Logger.Error(
                    $"Opus encode failed or voice handler missing - encoded len {len}, handler ready: {_tcpVoiceHandler != null}");
            }

            _errorCount = 0; //reset the rate-limited error counter on success
        }
        catch (Exception ex)
        {
            _errorCount++;
            //rate-limit: log the first 9 errors, then one final message at 10, then silence
            if (_errorCount < 10)
            {
                Logger.Error(ex, "Error encoding Opus! " + ex.Message);
            }
            else if (_errorCount == 10)
            {
                Logger.Error(ex, "Final Log of Error encoding Opus! " + ex.Message);
            }
        }

        //force a refill from the queue (or loop exit) on the next iteration
        pcmShort = null;
    }
}
/// <summary>
/// Audio-preview mic capture callback: frames recorded PCM into SEGMENT_FRAMES-sized
/// chunks, runs the Speex preprocessor, updates the mic VU peak, then round-trips each
/// frame through the Opus encoder/decoder and queues the decoded audio for local
/// playback so the preview reflects what the codec actually produces.
/// </summary>
private void _waveIn_DataAvailable(object sender, WaveInEventArgs e)
{
    //fill sound buffer
    short[] pcmShort = null;

    // Fast path: exactly one frame recorded and nothing queued - copy straight out.
    if ((e.BytesRecorded / 2 == AudioManager.SEGMENT_FRAMES) && (_micInputQueue.Count == 0))
    {
        //perfect!
        pcmShort = new short[AudioManager.SEGMENT_FRAMES];
        Buffer.BlockCopy(e.Buffer, 0, pcmShort, 0, e.BytesRecorded);
    }
    else
    {
        // NOTE(review): this enqueues individual BYTES that are later dequeued one per
        // short sample - the framing looks wrong on this fallback path; confirm the
        // element type of _micInputQueue and whether this path is ever hit.
        for (var i = 0; i < e.BytesRecorded; i++)
        {
            _micInputQueue.Enqueue(e.Buffer[i]);
        }
    }

    //read out the queue one whole frame at a time
    while ((pcmShort != null) || (_micInputQueue.Count >= AudioManager.SEGMENT_FRAMES))
    {
        //null sound buffer so read from the queue
        if (pcmShort == null)
        {
            pcmShort = new short[AudioManager.SEGMENT_FRAMES];
            for (var i = 0; i < AudioManager.SEGMENT_FRAMES; i++)
            {
                pcmShort[i] = _micInputQueue.Dequeue();
            }
        }

        try
        {
            // Volume boost / clipping pre-processing is intentionally disabled in the
            // preview path (previously a MicBoost multiply with optional clipping,
            // clipping tests thanks to Coug4r).

            //process with Speex
            _speex.Process(new ArraySegment<short>(pcmShort));

            float max = 0;
            for (var i = 0; i < pcmShort.Length; i++)
            {
                //determine peak
                if (pcmShort[i] > max)
                {
                    max = pcmShort[i];
                }
            }

            //convert to dB for the mic VU meter
            MicMax = (float)VolumeConversionHelper.ConvertFloatToDB(max / 32768F);

            var pcmBytes = new byte[pcmShort.Length * 2];
            Buffer.BlockCopy(pcmShort, 0, pcmBytes, 0, pcmBytes.Length);

            //encode as opus bytes
            int len;
            //need to get framing right for opus -
            var buff = _encoder.Encode(pcmBytes, pcmBytes.Length, out len);

            if ((buff != null) && (len > 0))
            {
                //create copy with small buffer
                var encoded = new byte[len];
                Buffer.BlockCopy(buff, 0, encoded, 0, len);

                var decodedLength = 0;
                //now decode so the preview plays the post-codec audio
                var decodedBytes = _decoder.Decode(encoded, len, out decodedLength);

                _buffBufferedWaveProvider.AddSamples(decodedBytes, 0, decodedLength);
                //_waveFile.Write(decodedBytes, 0,decodedLength);
                // _waveFile.Flush();
            }
            else
            {
                Logger.Error(
                    $"Invalid Bytes for Encoding - {e.BytesRecorded} should be {AudioManager.SEGMENT_FRAMES} ");
            }
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error encoding Opus! " + ex.Message);
        }

        //force a refill from the queue (or loop exit) on the next iteration
        pcmShort = null;
    }
}
/// <summary>
/// Starts the audio preview pipeline: mic (WaveIn) -> Speex -> Opus encode/decode ->
/// RadioFilter -> speakers, so the user can hear roughly what they will sound like
/// over the radio. Initialisation failures are fatal: logged, surfaced, process exits.
/// </summary>
/// <param name="mic">WaveIn device number for the microphone.</param>
/// <param name="speakers">Output device for the previewed audio.</param>
public void StartPreview(int mic, MMDevice speakers)
{
    try
    {
        _settings = SettingsStore.Instance;

        _waveOut = new WasapiOut(speakers, AudioClientShareMode.Shared, true, 40); // 40ms shared-mode latency

        _buffBufferedWaveProvider = new BufferedWaveProvider(new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1));
        _buffBufferedWaveProvider.ReadFully = true;               // keep output fed (silence) when empty
        _buffBufferedWaveProvider.DiscardOnBufferOverflow = true; // drop rather than stall on overflow

        //radio effect applied over the decoded preview audio
        RadioFilter filter = new RadioFilter(_buffBufferedWaveProvider.ToSampleProvider());

        //add final volume boost to all mixed audio; peak callback feeds the speaker VU value
        _volumeSampleProvider = new VolumeSampleProviderWithPeak(filter,
            (peak => SpeakerMax = (float)VolumeConversionHelper.ConvertFloatToDB(peak)));
        _volumeSampleProvider.Volume = SpeakerBoost;

        // Match the pipeline's channel count to the device's mix format before Init.
        if (speakers.AudioClient.MixFormat.Channels == 1)
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 2)
            {
                _waveOut.Init(_volumeSampleProvider.ToMono());
            }
            else
            {
                //already mono
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        else
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 1)
            {
                _waveOut.Init(_volumeSampleProvider.ToStereo());
            }
            else
            {
                //already stereo
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        _waveOut.Play();
    }
    catch (Exception ex)
    {
        Logger.Error(ex, "Error starting audio Output - Quitting! " + ex.Message);
        ShowOutputError("Problem Initialising Audio Output!");
        Environment.Exit(1);
    }

    try
    {
        //speex preprocessor sized to exactly one Opus frame
        _speex = new Preprocessor(AudioManager.SEGMENT_FRAMES, AudioManager.INPUT_SAMPLE_RATE);

        //opus - mono codec pair at the app's input sample rate
        _encoder = OpusEncoder.Create(AudioManager.INPUT_SAMPLE_RATE, 1,
            FragLabs.Audio.Codecs.Opus.Application.Voip);
        _encoder.ForwardErrorCorrection = false;
        _decoder = OpusDecoder.Create(AudioManager.INPUT_SAMPLE_RATE, 1);
        _decoder.ForwardErrorCorrection = false;

        _waveIn = new WaveIn(WaveCallbackInfo.FunctionCallback())
        {
            BufferMilliseconds = AudioManager.INPUT_AUDIO_LENGTH_MS,
            DeviceNumber = mic
        };
        _waveIn.NumberOfBuffers = 2;
        _waveIn.DataAvailable += _waveIn_DataAvailable;
        _waveIn.WaveFormat = new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1);

        //debug wave file
        //_waveFile = new WaveFileWriter(@"C:\Temp\Test-Preview.wav", _waveIn.WaveFormat);

        _waveIn.StartRecording();
    }
    catch (Exception ex)
    {
        Logger.Error(ex, "Error starting audio Input - Quitting! " + ex.Message);
        ShowInputError();
        Environment.Exit(1);
    }
}
/// <summary>
/// Starts the audio preview using the currently selected (or default) devices:
/// mic (WASAPI capture) -> resample -> Speex -> Opus round trip -> RadioFilter ->
/// looping NATO tone overlay -> speakers.
/// Initialisation failures are fatal: logged, surfaced, process exits.
/// </summary>
/// <param name="windowsN">Flag forwarded to WasapiOut (and stored for the resampler);
/// presumably indicates a Windows N edition without the Media Feature Pack - confirm.</param>
public void StartPreview(bool windowsN)
{
    this.windowsN = windowsN;
    try
    {
        //fall back to the default endpoint when no output has been selected
        MMDevice speakers = null;
        if (_audioOutputSingleton.SelectedAudioOutput.Value == null)
        {
            speakers = WasapiOut.GetDefaultAudioEndpoint();
        }
        else
        {
            speakers = (MMDevice)_audioOutputSingleton.SelectedAudioOutput.Value;
        }

        _waveOut = new WasapiOut(speakers, AudioClientShareMode.Shared, true, 80, windowsN); // 80ms latency

        _buffBufferedWaveProvider = new BufferedWaveProvider(new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1));
        _buffBufferedWaveProvider.ReadFully = true;               // keep output fed (silence) when empty
        _buffBufferedWaveProvider.DiscardOnBufferOverflow = true; // drop rather than stall on overflow

        //radio effect applied over the decoded preview audio
        RadioFilter filter = new RadioFilter(_buffBufferedWaveProvider.ToSampleProvider());

        //NATO tone loops under the preview so the full radio effect can be judged
        CachedLoopingAudioProvider natoEffect = new CachedLoopingAudioProvider(filter.ToWaveProvider16(),
            new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1), CachedAudioEffect.AudioEffectTypes.NATO_TONE);

        //add final volume boost to all mixed audio; peak callback feeds the speaker VU value
        _volumeSampleProvider = new VolumeSampleProviderWithPeak(natoEffect.ToSampleProvider(),
            (peak => SpeakerMax = (float)VolumeConversionHelper.ConvertFloatToDB(peak)));
        _volumeSampleProvider.Volume = SpeakerBoost;

        // Match the pipeline's channel count to the device's mix format before Init.
        if (speakers.AudioClient.MixFormat.Channels == 1)
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 2)
            {
                _waveOut.Init(_volumeSampleProvider.ToMono());
            }
            else
            {
                //already mono
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        else
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 1)
            {
                _waveOut.Init(_volumeSampleProvider.ToStereo());
            }
            else
            {
                //already stereo
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        _waveOut.Play();
    }
    catch (Exception ex)
    {
        Logger.Error(ex, "Error starting audio Output - Quitting! " + ex.Message);
        ShowOutputError("Problem Initialising Audio Output!");
        Environment.Exit(1);
    }

    try
    {
        //speex preprocessor sized to exactly one Opus frame
        _speex = new Preprocessor(AudioManager.SEGMENT_FRAMES, AudioManager.INPUT_SAMPLE_RATE);

        //opus - mono codec pair at the app's input sample rate
        _encoder = OpusEncoder.Create(AudioManager.INPUT_SAMPLE_RATE, 1,
            FragLabs.Audio.Codecs.Opus.Application.Voip);
        _encoder.ForwardErrorCorrection = false;
        _decoder = OpusDecoder.Create(AudioManager.INPUT_SAMPLE_RATE, 1);
        _decoder.ForwardErrorCorrection = false;

        //fall back to the default capture device when no input has been selected
        var device = (MMDevice)_audioInputSingleton.SelectedAudioInput.Value;
        if (device == null)
        {
            device = WasapiCapture.GetDefaultCaptureDevice();
        }

        device.AudioEndpointVolume.Mute = false; //make sure the endpoint itself isn't muted

        _wasapiCapture = new WasapiCapture(device, true);
        _wasapiCapture.ShareMode = AudioClientShareMode.Shared;
        _wasapiCapture.DataAvailable += WasapiCaptureOnDataAvailable;
        _wasapiCapture.RecordingStopped += WasapiCaptureOnRecordingStopped;

        //debug wave file
        // _waveFile = new WaveFileWriter(@"C:\Temp\Test-Preview.wav", new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1));

        _wasapiCapture.StartRecording();
    }
    catch (Exception ex)
    {
        Logger.Error(ex, "Error starting audio Input - Quitting! " + ex.Message);
        ShowInputError();
        Environment.Exit(1);
    }
}
//Stopwatch _stopwatch = new Stopwatch();

/// <summary>
/// Preview WASAPI capture callback: resamples the captured audio to the app format,
/// frames it into SEGMENT_FRAMES-sized chunks, runs Speex, updates the mic VU peak,
/// then round-trips each frame through the Opus encoder/decoder into the preview buffer.
/// FIX: removed the vestigial "pcmShort != null" loop seeding - pcmShort always
/// started null here (there is no fast-fill path in this variant) and was reset each
/// iteration, so the loop is driven purely by the queue; this matches the structure of
/// the sibling sending variant. Behavior is unchanged.
/// </summary>
private void WasapiCaptureOnDataAvailable(object sender, WaveInEventArgs e)
{
    if (_resampler == null)
    {
        //create and use in the same thread or COM issues
        _resampler = new EventDrivenResampler(windowsN, _wasapiCapture.WaveFormat,
            new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1));
    }

    if (e.BytesRecorded > 0)
    {
        //Logger.Info($"Time: {_stopwatch.ElapsedMilliseconds} - Bytes: {e.BytesRecorded}");
        short[] resampledPCM16Bit = _resampler.Resample(e.Buffer, e.BytesRecorded);

        //fill sound buffer
        for (var i = 0; i < resampledPCM16Bit.Length; i++)
        {
            _micInputQueue.Enqueue(resampledPCM16Bit[i]);
        }

        //read out the queue one whole Opus frame at a time
        while (_micInputQueue.Count >= AudioManager.SEGMENT_FRAMES)
        {
            short[] pcmShort = new short[AudioManager.SEGMENT_FRAMES];
            for (var i = 0; i < AudioManager.SEGMENT_FRAMES; i++)
            {
                pcmShort[i] = _micInputQueue.Dequeue();
            }

            try
            {
                //process with Speex
                _speex.Process(new ArraySegment<short>(pcmShort));

                float max = 0;
                for (var i = 0; i < pcmShort.Length; i++)
                {
                    //determine peak
                    if (pcmShort[i] > max)
                    {
                        max = pcmShort[i];
                    }
                }

                //convert to dB for the mic VU meter
                MicMax = (float)VolumeConversionHelper.ConvertFloatToDB(max / 32768F);

                var pcmBytes = new byte[pcmShort.Length * 2];
                Buffer.BlockCopy(pcmShort, 0, pcmBytes, 0, pcmBytes.Length);

                //encode as opus bytes
                int len;
                //need to get framing right for opus -
                var buff = _encoder.Encode(pcmBytes, pcmBytes.Length, out len);

                if ((buff != null) && (len > 0))
                {
                    //create copy with small buffer
                    var encoded = new byte[len];
                    Buffer.BlockCopy(buff, 0, encoded, 0, len);

                    var decodedLength = 0;
                    //now decode so the preview plays the post-codec audio
                    var decodedBytes = _decoder.Decode(encoded, len, out decodedLength);

                    _buffBufferedWaveProvider.AddSamples(decodedBytes, 0, decodedLength);
                }
                else
                {
                    Logger.Error(
                        $"Invalid Bytes for Encoding - {e.BytesRecorded} should be {AudioManager.SEGMENT_FRAMES} ");
                }
            }
            catch (Exception ex)
            {
                Logger.Error(ex, "Error encoding Opus! " + ex.Message);
            }
        }
    }

    // _stopwatch.Restart();
}
/// <summary>
/// Live mic WASAPI capture callback: resamples the captured audio to the app format,
/// frames it into SEGMENT_FRAMES-sized chunks, applies MicBoost and Speex preprocessing,
/// Opus-encodes each frame and sends it via the UDP voice handler, echoing the raw PCM
/// to the local mic-output buffer when the frame was actually sent.
/// FIX: the failure-branch log message was tautological - pcmShort.Length always equals
/// SEGMENT_FRAMES at that point, and the branch actually fires when the encoder fails or
/// the UDP voice handler is null; the message now reports those conditions.
/// </summary>
private void WasapiCaptureOnDataAvailable(object sender, WaveInEventArgs e)
{
    if (_resampler == null)
    {
        //create and use in the same thread or COM issues
        _resampler = new EventDrivenResampler(windowsN, _wasapiCapture.WaveFormat,
            new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1));
    }

    if (e.BytesRecorded > 0)
    {
        //Logger.Info($"Time: {_stopwatch.ElapsedMilliseconds} - Bytes: {e.BytesRecorded}");
        short[] resampledPCM16Bit = _resampler.Resample(e.Buffer, e.BytesRecorded);

        //fill sound buffer
        for (var i = 0; i < resampledPCM16Bit.Length; i++)
        {
            _micInputQueue.Enqueue(resampledPCM16Bit[i]);
        }

        //read out the queue one whole Opus frame at a time
        while (_micInputQueue.Count >= AudioManager.SEGMENT_FRAMES)
        {
            short[] pcmShort = new short[AudioManager.SEGMENT_FRAMES];
            for (var i = 0; i < AudioManager.SEGMENT_FRAMES; i++)
            {
                pcmShort[i] = _micInputQueue.Dequeue();
            }

            try
            {
                //volume boost pre
                for (var i = 0; i < pcmShort.Length; i++)
                {
                    // n.b. no clipping test going on here
                    pcmShort[i] = (short)(pcmShort[i] * MicBoost);
                }

                //process with Speex
                _speex.Process(new ArraySegment<short>(pcmShort));

                float max = 0;
                for (var i = 0; i < pcmShort.Length; i++)
                {
                    //determine peak
                    if (pcmShort[i] > max)
                    {
                        max = pcmShort[i];
                    }
                }

                //convert to dB for the mic VU meter
                MicMax = (float)VolumeConversionHelper.ConvertFloatToDB(max / 32768F);

                var pcmBytes = new byte[pcmShort.Length * 2];
                Buffer.BlockCopy(pcmShort, 0, pcmBytes, 0, pcmBytes.Length);

                //encode as opus bytes
                int len;
                var buff = _encoder.Encode(pcmBytes, pcmBytes.Length, out len);

                if ((_udpVoiceHandler != null) && (buff != null) && (len > 0))
                {
                    //create copy with small buffer
                    var encoded = new byte[len];
                    Buffer.BlockCopy(buff, 0, encoded, 0, len);

                    // Console.WriteLine("Sending: " + e.BytesRecorded);
                    if (_udpVoiceHandler.Send(encoded, len))
                    {
                        //sent over the network, so also play over the local mic output
                        _micWaveOutBuffer?.AddSamples(pcmBytes, 0, pcmBytes.Length);
                    }
                }
                else
                {
                    // FIX: report the real failure instead of the always-false length claim.
                    Logger.Error(
                        $"Opus encode failed or voice handler missing - encoded len {len}, handler ready: {_udpVoiceHandler != null}");
                }

                _errorCount = 0; //reset the rate-limited error counter on success
            }
            catch (Exception ex)
            {
                _errorCount++;
                //rate-limit: log the first 9 errors, then one final message at 10, then silence
                if (_errorCount < 10)
                {
                    Logger.Error(ex, "Error encoding Opus! " + ex.Message);
                }
                else if (_errorCount == 10)
                {
                    Logger.Error(ex, "Final Log of Error encoding Opus! " + ex.Message);
                }
            }
        }
    }
}
/// <summary>
/// Starts audio for a live connection using the currently selected (or default)
/// devices: resolves the speaker and optional mic-output devices from the output
/// singleton, initialises mixers, buffers, the speaker output chain, the Opus encoder
/// and the Speex preprocessor, then delegates mic passthrough, mic capture and
/// text-to-speech setup to the Init* helpers.
/// Output initialisation failure is fatal: logged, surfaced, process exits.
/// </summary>
/// <param name="guid">This client's GUID, passed to the voice handler via InitMicCapture.</param>
/// <param name="inputManager">Input (PTT) manager for the voice handler.</param>
/// <param name="ipAddress">Voice server address.</param>
/// <param name="port">Voice server port.</param>
public void StartEncoding(string guid, InputDeviceManager inputManager, IPAddress ipAddress, int port)
{
    //fall back to the default endpoint when no output has been selected
    MMDevice speakers = null;
    if (_audioOutputSingleton.SelectedAudioOutput.Value == null)
    {
        speakers = WasapiOut.GetDefaultAudioEndpoint();
    }
    else
    {
        speakers = (MMDevice)_audioOutputSingleton.SelectedAudioOutput.Value;
    }

    //mic passthrough output is optional - null means disabled
    MMDevice micOutput = null;
    if (_audioOutputSingleton.SelectedMicAudioOutput.Value != null)
    {
        micOutput = (MMDevice)_audioOutputSingleton.SelectedMicAudioOutput.Value;
    }

    try
    {
        _micInputQueue.Clear();

        InitMixers();
        InitAudioBuffers();

        //Audio manager should start / stop and cleanup based on connection successfull and disconnect
        //Should use listeners to synchronise all the state
        _waveOut = new WasapiOut(speakers, AudioClientShareMode.Shared, true, 40, windowsN); // 40ms latency

        //add final volume boost to all mixed audio; peak callback feeds the speaker VU value
        _volumeSampleProvider = new VolumeSampleProviderWithPeak(_clientAudioMixer,
            (peak => SpeakerMax = (float)VolumeConversionHelper.ConvertFloatToDB(peak)));
        _volumeSampleProvider.Volume = SpeakerBoost;

        // Match the pipeline's channel count to the device's mix format before Init.
        if (speakers.AudioClient.MixFormat.Channels == 1)
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 2)
            {
                _waveOut.Init(_volumeSampleProvider.ToMono());
            }
            else
            {
                //already mono
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        else
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 1)
            {
                _waveOut.Init(_volumeSampleProvider.ToStereo());
            }
            else
            {
                //already stereo
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        _waveOut.Play();

        //opus - mono encoder at the app's input sample rate (decode happens per-client elsewhere)
        _encoder = OpusEncoder.Create(INPUT_SAMPLE_RATE, 1, Application.Voip);
        _encoder.ForwardErrorCorrection = false;

        //speex preprocessor sized to exactly one Opus frame
        _speex = new Preprocessor(AudioManager.SEGMENT_FRAMES, AudioManager.INPUT_SAMPLE_RATE);
    }
    catch (Exception ex)
    {
        Logger.Error(ex, "Error starting audio Output - Quitting! " + ex.Message);
        ShowOutputError("Problem Initialising Audio Output!");
        Environment.Exit(1);
    }

    InitMicPassthrough(micOutput);
    InitMicCapture(guid, ipAddress, port, inputManager);
    InitTextToSpeech();
}
/// <summary>
/// Starts full-duplex audio for a live connection using the currently selected (or
/// default) devices: speaker output for mixed client audio, an optional
/// RadioFilter-processed mic passthrough output, WASAPI mic capture feeding the Opus
/// encoder, and the UDP voice handler thread. When no microphone is available the
/// voice handler still starts so the client can listen. Device init failures are
/// fatal: logged, surfaced, process exits.
/// </summary>
/// <param name="guid">This client's GUID, passed to the voice handler.</param>
/// <param name="inputManager">Input (PTT) manager handed to the voice handler.</param>
/// <param name="ipAddress">Voice server address.</param>
/// <param name="port">Voice server port.</param>
public void StartEncoding(string guid, InputDeviceManager inputManager, IPAddress ipAddress, int port)
{
    //fall back to the default endpoint when no output has been selected
    MMDevice speakers = null;
    if (_audioOutputSingleton.SelectedAudioOutput.Value == null)
    {
        speakers = WasapiOut.GetDefaultAudioEndpoint();
    }
    else
    {
        speakers = (MMDevice)_audioOutputSingleton.SelectedAudioOutput.Value;
    }

    //mic passthrough output is optional - null means disabled
    MMDevice micOutput = null;
    if (_audioOutputSingleton.SelectedMicAudioOutput.Value != null)
    {
        micOutput = (MMDevice)_audioOutputSingleton.SelectedMicAudioOutput.Value;
    }

    try
    {
        _micInputQueue.Clear();

        InitMixers();
        InitAudioBuffers();

        //Audio manager should start / stop and cleanup based on connection successfull and disconnect
        //Should use listeners to synchronise all the state
        _waveOut = new WasapiOut(speakers, AudioClientShareMode.Shared, true, 40, windowsN); // 40ms latency

        //add final volume boost to all mixed audio; peak callback feeds the speaker VU value
        _volumeSampleProvider = new VolumeSampleProviderWithPeak(_clientAudioMixer,
            (peak => SpeakerMax = (float)VolumeConversionHelper.ConvertFloatToDB(peak)));
        _volumeSampleProvider.Volume = SpeakerBoost;

        // Match the pipeline's channel count to the device's mix format before Init.
        if (speakers.AudioClient.MixFormat.Channels == 1)
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 2)
            {
                _waveOut.Init(_volumeSampleProvider.ToMono());
            }
            else
            {
                //already mono
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        else
        {
            if (_volumeSampleProvider.WaveFormat.Channels == 1)
            {
                _waveOut.Init(_volumeSampleProvider.ToStereo());
            }
            else
            {
                //already stereo
                _waveOut.Init(_volumeSampleProvider);
            }
        }
        _waveOut.Play();

        //opus - mono encoder at the app's input sample rate (decode happens per-client elsewhere)
        _encoder = OpusEncoder.Create(INPUT_SAMPLE_RATE, 1, Application.Voip);
        _encoder.ForwardErrorCorrection = false;

        //speex preprocessor sized to exactly one Opus frame
        _speex = new Preprocessor(AudioManager.SEGMENT_FRAMES, AudioManager.INPUT_SAMPLE_RATE);
    }
    catch (Exception ex)
    {
        Logger.Error(ex, "Error starting audio Output - Quitting! " + ex.Message);
        ShowOutputError("Problem Initialising Audio Output!");
        Environment.Exit(1);
    }

    if (micOutput != null) // && micOutput !=speakers
    {
        //TODO handle case when they're the same?
        try
        {
            _micWaveOut = new WasapiOut(micOutput, AudioClientShareMode.Shared, true, 40, windowsN);

            _micWaveOutBuffer = new BufferedWaveProvider(new WaveFormat(AudioManager.INPUT_SAMPLE_RATE, 16, 1));
            _micWaveOutBuffer.ReadFully = true;               // keep output fed (silence) when empty
            _micWaveOutBuffer.DiscardOnBufferOverflow = true; // drop rather than stall on overflow

            var sampleProvider = _micWaveOutBuffer.ToSampleProvider();

            // Passthrough is wrapped in RadioFilter so the user hears the radio effect
            // on their own voice; channel count matched to the device's mix format.
            if (micOutput.AudioClient.MixFormat.Channels == 1)
            {
                if (sampleProvider.WaveFormat.Channels == 2)
                {
                    _micWaveOut.Init(new RadioFilter(sampleProvider.ToMono()));
                }
                else
                {
                    //already mono
                    _micWaveOut.Init(new RadioFilter(sampleProvider));
                }
            }
            else
            {
                if (sampleProvider.WaveFormat.Channels == 1)
                {
                    _micWaveOut.Init(new RadioFilter(sampleProvider.ToStereo()));
                }
                else
                {
                    //already stereo
                    _micWaveOut.Init(new RadioFilter(sampleProvider));
                }
            }
            _micWaveOut.Play();
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error starting mic audio Output - Quitting! " + ex.Message);
            ShowOutputError("Problem Initialising Mic Audio Output!");
            Environment.Exit(1);
        }
    }

    if (_audioInputSingleton.MicrophoneAvailable)
    {
        try
        {
            //fall back to the default capture device when no input has been selected
            var device = (MMDevice)_audioInputSingleton.SelectedAudioInput.Value;
            if (device == null)
            {
                device = WasapiCapture.GetDefaultCaptureDevice();
            }

            device.AudioEndpointVolume.Mute = false; //make sure the endpoint itself isn't muted

            _wasapiCapture = new WasapiCapture(device, true);
            _wasapiCapture.ShareMode = AudioClientShareMode.Shared;
            _wasapiCapture.DataAvailable += WasapiCaptureOnDataAvailable;
            _wasapiCapture.RecordingStopped += WasapiCaptureOnRecordingStopped;

            _udpVoiceHandler = new UdpVoiceHandler(guid, ipAddress, port, this, inputManager);
            var voiceSenderThread = new Thread(_udpVoiceHandler.Listen);
            voiceSenderThread.Start();

            _wasapiCapture.StartRecording();

            MessageHub.Instance.Subscribe<SRClient>(RemoveClientBuffer);
        }
        catch (Exception ex)
        {
            Logger.Error(ex, "Error starting audio Input - Quitting! " + ex.Message);
            ShowInputError("Problem initialising Audio Input!");
            Environment.Exit(1);
        }
    }
    else
    {
        //no mic.... still start the voice handler so incoming audio can be received
        _udpVoiceHandler = new UdpVoiceHandler(guid, ipAddress, port, this, inputManager);
        MessageHub.Instance.Subscribe<SRClient>(RemoveClientBuffer);
        var voiceSenderThread = new Thread(_udpVoiceHandler.Listen);
        voiceSenderThread.Start();
    }
}