void SendMicrophoneData() {
    if (_microphoneStream == null) {
        return;
    }

    // Store the client ID / stream ID so remote clients can find the corresponding AudioOutputStream
    _model.clientID = _microphoneStream.ClientID();
    _model.streamID = _microphoneStream.StreamID();

    // Check if AudioInputStream is valid.
    if (_model.clientID < 0 || _model.streamID < 0) {
        return;
    }

    // Send audio data in _microphoneFrameSize chunks until we're out of microphone data to send
    float[] audioData = new float[_microphoneFrameSize];
    bool didGetAudioData = false;
    while (GetMicrophoneAudioData(audioData)) {
        // If we have an _audioPreprocessor, preprocess the microphone data to remove noise and echo
        if (_audioPreprocessor != null) {
            _audioPreprocessor.ProcessRecordSamples(audioData);
        }

        // TODO: This is a lame hack. Ideally I'd like to stop sending audio data altogether.
        // Note that even when muted, audio still needs to run through the audio preprocessor to make sure echo cancellation works properly when mute is turned back off.
        if (_mute) {
            Array.Clear(audioData, 0, audioData.Length);
        }

        // Send out microphone data
        _microphoneStream.SendRawAudioData(audioData);
        didGetAudioData = true;
    }

    // If we got audio data, update the current microphone level.
    // Note: I moved this here so that we do our volume level calculations on microphone audio that has run through the AudioPreprocessor.
    if (didGetAudioData) {
        int firstFrame = audioData.Length - 256;
        if (firstFrame < 0) {
            firstFrame = 0;
        }
        int firstSample = firstFrame * _microphoneChannels;
        _microphoneDbLevel = StaticFunctions.CalculateAverageDbForAudioBuffer(audioData, firstSample);
    }
}
void SendMicrophoneData() {
    if (_microphoneStream == null) {
        return;
    }

    // Store the client ID / stream ID so remote clients can find the corresponding AudioOutputStream
    model.clientID = _microphoneStream.ClientID();
    model.streamID = _microphoneStream.StreamID();

    // Check if AudioInputStream is valid.
    if (model.clientID < 0 || model.streamID < 0) {
        return;
    }

    // Clear the previous microphone frame data
    Array.Clear(_microphoneFrameData, 0, _microphoneFrameData.Length);

    // Send audio data in _microphoneFrameSize chunks until we're out of microphone data to send
    bool didGetAudioData = false;
    while (GetMicrophoneAudioData(_microphoneFrameData)) {
        // If we have an _audioPreprocessor, preprocess the microphone data to remove noise and echo
        if (_audioPreprocessor != null) {
            _audioPreprocessor.ProcessRecordSamples(_microphoneFrameData);
        }

        // Note that even when muted, audio still needs to run through the audio preprocessor to make sure echo cancellation works properly when mute is turned back off.
        if (_mute) {
            Array.Clear(_microphoneFrameData, 0, _microphoneFrameData.Length);
        }

        // Send out microphone data
        _microphoneStream.SendRawAudioData(_microphoneFrameData);
        didGetAudioData = true;
    }

    // Normcore queues audio packets locally so they can go out in a single packet. This sends them off.
    if (didGetAudioData) {
        _microphoneStream.SendQueuedMessages();
    }

    // If we got audio data, update the current microphone level.
    // Note: I moved this here so that we do our volume level calculations on microphone audio that has run through the AudioPreprocessor.
    if (didGetAudioData) {
        int firstFrame = _microphoneFrameData.Length - 256;
        if (firstFrame < 0) {
            firstFrame = 0;
        }
        int firstSample = firstFrame * _microphoneChannels;
        _microphoneDbLevel = StaticFunctions.CalculateAverageDbForAudioBuffer(_microphoneFrameData, firstSample);
    }
}
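The second version swaps the per-call `new float[_microphoneFrameSize]` allocation for a reusable member buffer, `_microphoneFrameData`, whose creation isn't shown in the listing. A minimal sketch of that setup, assuming the buffer is allocated once during initialization and that `_microphoneFrameSize` already accounts for the channel count (the method name below is hypothetical, not from the original source):

    // Hypothetical initialization sketch: allocate the reusable frame buffer once
    // so SendMicrophoneData() clears and refills it instead of generating a new
    // float[] (and GC garbage) on every call, as the first version did.
    int     _microphoneFrameSize;
    int     _microphoneChannels;
    float[] _microphoneFrameData;

    void InitializeMicrophoneBuffer(int frameSize, int channels) {
        _microphoneFrameSize = frameSize;
        _microphoneChannels  = channels;

        // One buffer, sized to a single frame, reused for every send.
        _microphoneFrameData = new float[_microphoneFrameSize];
    }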