// Unity audio-thread callback: pulls samples from the native AudioOutputStream,
// copies them into Unity's interleaved output buffer `data`, and updates the dB meter.
// `data` holds `data.Length / channels` frames of `channels` interleaved channels.
// NOTE: this runs on the audio thread — keep it allocation- and lock-free where possible.
void OnAudioFilterRead(float[] data, int channels) {
    if (_audioOutputStream == null || _audioOutputStream.nativePointerIsNull) {
        // No stream available: output silence and reset the meter.
        for (int i = 0; i < data.Length; i++) {
            data[i] = 0.0f;
        }

        // Zero db level
        dbLevel = -42.0f;

        // Bail
        return;
    }

    // Configure the AudioOutputStream to resample to our desired sample rate
    _audioOutputStream.SetSampleRate(_systemSampleRate);

    int incomingNumberOfChannels = _audioOutputStream.Channels();
    if (channels <= 0 || incomingNumberOfChannels <= 0) {
        // Guard: a zero/negative channel count would divide by zero below, and a
        // non-positive incoming channel count would produce a negative sample index.
        for (int i = 0; i < data.Length; i++) {
            data[i] = 0.0f;
        }
        dbLevel = -42.0f;
        return;
    }

    int numberOfFramesNeeded          = data.Length / channels;
    int numberOfIncomingSamplesNeeded = numberOfFramesNeeded * incomingNumberOfChannels;

    // TODO(review): allocating here every callback creates GC pressure on the audio
    // thread; cache this buffer in a field and reuse it when the size is unchanged.
    float[] audioData = new float[numberOfIncomingSamplesNeeded];
    if (_audioOutputStream.GetAudioData(audioData)) {
        // Copy incoming audio into the output buffer, mapping channels: extra output
        // channels reuse the last incoming channel (e.g. mono source -> stereo out).
        for (int f = 0; f < numberOfFramesNeeded; f++) {
            for (int c = 0; c < channels; c++) {
                int cIn = c;
                if (cIn >= incomingNumberOfChannels) {
                    cIn = incomingNumberOfChannels - 1;
                }
                int sIn  = f * incomingNumberOfChannels + cIn;
                int sOut = f * channels + c;

                // TODO: If there's no spatializer, we need to do this, but if there is a spatializer, we can just copy the value.
                // TODO: Why is the input signal we're getting not 1.0 when spatialization is turned off??
                data[sOut] = !_mute ? audioData[sIn] : 0.0f;
            }
        }

        // Calculate db level using the last 256 frames. When muted the output is
        // silence, so report the meter floor instead of the discarded incoming level.
        int firstFrame = numberOfFramesNeeded - 256;
        if (firstFrame < 0) {
            firstFrame = 0;
        }
        int firstSample = firstFrame * incomingNumberOfChannels;
        dbLevel = !_mute ? StaticFunctions.CalculateAverageDbForAudioBuffer(audioData, firstSample) : -42.0f;
    } else {
        // Failed to retrieve audio samples. zero the data back out.
        // TODO: Maybe we should fade in/out here? Maybe the native interface can do that for us?
        for (int i = 0; i < data.Length; i++) {
            data[i] = 0.0f;
        }

        // Zero db level
        dbLevel = -42.0f;
    }
}
// Unity audio-thread callback: pulls samples from the native AudioOutputStream into a
// cached buffer, scales Unity's pre-filled output buffer `data` by those samples, and
// updates the dB meter. `data` holds `data.Length / channels` interleaved frames.
// NOTE: this runs on the audio thread — keep it allocation- and lock-free where possible.
void OnAudioFilterRead(float[] data, int channels) {
    if (_audioOutputStream == null || _audioOutputStream.nativePointerIsNull) {
        // No stream available: output silence and reset the meter.
        for (int i = 0; i < data.Length; i++) {
            data[i] = 0.0f;
        }

        // Zero db level
        _dbLevel = -42.0f;

        // Bail
        return;
    }

    // Configure the AudioOutputStream to resample to our desired sample rate
    _audioOutputStream.SetSampleRate(_systemSampleRate);

    int incomingNumberOfChannels = _audioOutputStream.Channels();
    if (channels <= 0 || incomingNumberOfChannels <= 0) {
        // Guard: a zero/negative channel count would divide by zero below, and a
        // non-positive incoming channel count would produce a negative sample index.
        for (int i = 0; i < data.Length; i++) {
            data[i] = 0.0f;
        }
        _dbLevel = -42.0f;
        return;
    }

    int numberOfFramesNeeded          = data.Length / channels;
    int numberOfIncomingSamplesNeeded = numberOfFramesNeeded * incomingNumberOfChannels;

    // Reallocate the cached buffer whenever the required size CHANGES, not only when it
    // grows: an oversized stale buffer would make GetAudioData() fill (and the dB meter
    // below read) more samples than this callback needs.
    // NOTE(review): assumes GetAudioData fills the entire buffer — confirm native semantics.
    if (_audioData == null || _audioData.Length != numberOfIncomingSamplesNeeded) {
        _audioData = new float[numberOfIncomingSamplesNeeded];
    }

    if (_audioOutputStream.GetAudioData(_audioData)) {
        // Scale the output buffer by the incoming stream. `data` arrives pre-filled by
        // Unity (presumably the spatializer/AudioSource gain — verify against caller),
        // so each output sample is multiplied by the matching incoming sample. Extra
        // output channels reuse the last incoming channel (e.g. mono -> stereo).
        for (int f = 0; f < numberOfFramesNeeded; f++) {
            for (int c = 0; c < channels; c++) {
                int cIn = c;
                if (cIn >= incomingNumberOfChannels) {
                    cIn = incomingNumberOfChannels - 1;
                }
                int sIn  = f * incomingNumberOfChannels + cIn;
                int sOut = f * channels + c;
                data[sOut] = !_mute ? data[sOut] * _audioData[sIn] : 0.0f;
            }
        }

        // Calculate db level using the last 256 frames. When muted the output is
        // silence, so report the meter floor instead of the discarded incoming level.
        int firstFrame = numberOfFramesNeeded - 256;
        if (firstFrame < 0) {
            firstFrame = 0;
        }
        int firstSample = firstFrame * incomingNumberOfChannels;
        _dbLevel = !_mute ? StaticFunctions.CalculateAverageDbForAudioBuffer(_audioData, firstSample) : -42.0f;
    } else {
        // Failed to retrieve audio samples. zero the data back out.
        // TODO: Maybe we should fade in/out here? Maybe the native interface can do that for us?
        for (int i = 0; i < data.Length; i++) {
            data[i] = 0.0f;
        }

        // Zero db level
        _dbLevel = -42.0f;
    }
}