Example #1
0
        /// <summary>
        /// Pulls all available microphone audio, optionally runs it through the audio
        /// preprocessor (noise/echo removal), and sends it out over _microphoneStream in
        /// _microphoneFrameSize chunks. Updates _microphoneDbLevel from the last chunk sent.
        /// </summary>
        void SendMicrophoneData()
        {
            if (_microphoneStream == null)
            {
                return;
            }

            // Store the client ID / stream ID so remote clients can find the corresponding AudioOutputStream
            _model.clientID = _microphoneStream.ClientID();
            _model.streamID = _microphoneStream.StreamID();

            // Check if AudioInputStream is valid.
            if (_model.clientID < 0 || _model.streamID < 0)
            {
                return;
            }

            // Send audio data in _microphoneFrameSize chunks until we're out of microphone data to send
            // TODO: This allocates a fresh buffer on every call; consider caching it in a field to avoid GC pressure.
            float[] audioData       = new float[_microphoneFrameSize];
            bool    didGetAudioData = false;

            while (GetMicrophoneAudioData(audioData))
            {
                // If we have an _audioPreprocessor, preprocess the microphone data to remove noise and echo
                if (_audioPreprocessor != null)
                {
                    _audioPreprocessor.ProcessRecordSamples(audioData);
                }

                // TODO: This is a lame hack. Ideally I'd like to stop sending audio data all together.
                //       Note that even when muted audio still needs to run through the audio processor to make sure echo cancellation works properly when mute is turned back off.
                if (_mute)
                {
                    Array.Clear(audioData, 0, audioData.Length);
                }

                // Send out microphone data
                _microphoneStream.SendRawAudioData(audioData);

                didGetAudioData = true;
            }

            // If we got audio data, update the current microphone level.
            // Note: I moved this here so that we do our volume level calculations on microphone audio that has run through the AudioPreprocessor.
            if (didGetAudioData)
            {
                // Measure the last 256 frames. The buffer length is in samples (frames * channels),
                // so convert to frames before subtracting — otherwise firstSample can index past the
                // end of the buffer when _microphoneChannels > 1. (Assumes interleaved samples —
                // matches the frames-vs-samples math used by the OnAudioFilterRead db calculation.)
                int totalFrames = audioData.Length / _microphoneChannels;
                int firstFrame  = totalFrames - 256;
                if (firstFrame < 0)
                {
                    firstFrame = 0;
                }
                int firstSample = firstFrame * _microphoneChannels;
                _microphoneDbLevel = StaticFunctions.CalculateAverageDbForAudioBuffer(audioData, firstSample);
            }
        }
Example #2
0
        /// <summary>
        /// Unity audio filter callback. Fills <paramref name="data"/> with audio pulled from
        /// _audioOutputStream (resampled to the system sample rate) and updates dbLevel.
        /// Outputs silence when the stream is missing or has no samples available.
        /// </summary>
        /// <param name="data">Interleaved output sample buffer provided by Unity.</param>
        /// <param name="channels">Number of interleaved channels in <paramref name="data"/>.</param>
        void OnAudioFilterRead(float[] data, int channels)
        {
            // Write silence to the output buffer and reset the db level. Shared by the
            // "no stream" and "no samples available" paths below.
            void OutputSilence()
            {
                Array.Clear(data, 0, data.Length);
                dbLevel = -42.0f;
            }

            if (_audioOutputStream == null || _audioOutputStream.nativePointerIsNull)
            {
                OutputSilence();
                return;
            }

            // Configure the AudioOutputStream to resample to our desired sample rate
            _audioOutputStream.SetSampleRate(_systemSampleRate);

            int incomingNumberOfChannels = _audioOutputStream.Channels();
            int numberOfFramesNeeded     = data.Length / channels;

            int numberOfIncomingSamplesNeeded = numberOfFramesNeeded * incomingNumberOfChannels;

            // TODO: This allocates on every audio callback; consider caching the buffer in a field.
            float[] audioData = new float[numberOfIncomingSamplesNeeded];
            if (_audioOutputStream.GetAudioData(audioData))
            {
                // Mix incoming audio data into the output buffer. Output channels beyond the
                // incoming channel count repeat the last incoming channel.
                for (int f = 0; f < numberOfFramesNeeded; f++)
                {
                    for (int c = 0; c < channels; c++)
                    {
                        int cIn = c;
                        if (cIn >= incomingNumberOfChannels)
                        {
                            cIn = incomingNumberOfChannels - 1;
                        }
                        int sIn  = f * incomingNumberOfChannels + cIn;
                        int sOut = f * channels + c;

                        // TODO: If there's no spatializer, we need to do this, but if there is a spatializer, we can just copy the value.
                        // TODO: Why is the input signal we're getting not 1.0 when spatialization is turned off??
                        data[sOut] = !_mute ? audioData[sIn] : 0.0f;
                    }
                }

                // Calculate db level using the last 256 frames
                int firstFrame = numberOfFramesNeeded - 256;
                if (firstFrame < 0)
                {
                    firstFrame = 0;
                }
                int firstSample = firstFrame * incomingNumberOfChannels;
                dbLevel = StaticFunctions.CalculateAverageDbForAudioBuffer(audioData, firstSample);
            }
            else
            {
                // Failed to retrieve audio samples. Zero the data back out.
                // TODO: Maybe we should fade in/out here? Maybe the native interface can do that for us?
                OutputSilence();
            }
        }
Example #3
0
        /// <summary>
        /// Unity audio filter callback. Pulls audio from _audioOutputStream (resampled to the
        /// system sample rate) into a cached buffer and multiplies it into the signal Unity
        /// hands us, then updates _dbLevel. Outputs silence when the stream is missing or has
        /// no samples available, or when muted.
        /// </summary>
        /// <param name="data">Interleaved output sample buffer provided by Unity.</param>
        /// <param name="channels">Number of interleaved channels in <paramref name="data"/>.</param>
        void OnAudioFilterRead(float[] data, int channels)
        {
            // Write silence to the output buffer and reset the db level. Shared by the
            // "no stream" and "no samples available" paths below.
            void OutputSilence()
            {
                Array.Clear(data, 0, data.Length);
                _dbLevel = -42.0f;
            }

            if (_audioOutputStream == null || _audioOutputStream.nativePointerIsNull)
            {
                OutputSilence();
                return;
            }

            // Configure the AudioOutputStream to resample to our desired sample rate
            _audioOutputStream.SetSampleRate(_systemSampleRate);

            int incomingNumberOfChannels = _audioOutputStream.Channels();
            int numberOfFramesNeeded     = data.Length / channels;

            int numberOfIncomingSamplesNeeded = numberOfFramesNeeded * incomingNumberOfChannels;

            // Reuse the cached buffer; only grow it when the callback needs more samples than
            // it can hold (avoids allocating on the audio thread every callback).
            if (_audioData == null || numberOfIncomingSamplesNeeded > _audioData.Length)
            {
                _audioData = new float[numberOfIncomingSamplesNeeded];
            }

            if (_audioOutputStream.GetAudioData(_audioData))
            {
                // Mix incoming audio data into the output buffer. Output channels beyond the
                // incoming channel count repeat the last incoming channel.
                for (int f = 0; f < numberOfFramesNeeded; f++)
                {
                    for (int c = 0; c < channels; c++)
                    {
                        int cIn = c;
                        if (cIn >= incomingNumberOfChannels)
                        {
                            cIn = incomingNumberOfChannels - 1;
                        }
                        int sIn  = f * incomingNumberOfChannels + cIn;
                        int sOut = f * channels + c;

                        // Multiply the incoming stream sample into the signal Unity gives us
                        // (presumably so spatialization applied upstream is preserved — confirm).
                        data[sOut] = !_mute ? data[sOut] * _audioData[sIn] : 0.0f;
                    }
                }

                // Calculate db level using the last 256 frames; report silence while muted.
                int firstFrame = numberOfFramesNeeded - 256;
                if (firstFrame < 0)
                {
                    firstFrame = 0;
                }
                int firstSample = firstFrame * incomingNumberOfChannels;
                _dbLevel = !_mute ? StaticFunctions.CalculateAverageDbForAudioBuffer(_audioData, firstSample) : -42.0f;
            }
            else
            {
                // Failed to retrieve audio samples. Zero the data back out.
                // TODO: Maybe we should fade in/out here? Maybe the native interface can do that for us?
                OutputSilence();
            }
        }
        /// <summary>
        /// Pulls all available microphone audio, optionally runs it through the audio
        /// preprocessor (noise/echo removal), sends it out over _microphoneStream in
        /// _microphoneFrameData-sized chunks, and flushes the queued packets. Updates
        /// _microphoneDbLevel from the last chunk sent.
        /// </summary>
        void SendMicrophoneData()
        {
            if (_microphoneStream == null)
            {
                return;
            }

            // Store the client ID / stream ID so remote clients can find the corresponding AudioOutputStream
            model.clientID = _microphoneStream.ClientID();
            model.streamID = _microphoneStream.StreamID();

            // Check if AudioInputStream is valid.
            if (model.clientID < 0 || model.streamID < 0)
            {
                return;
            }


            // Clear the previous microphone frame data
            Array.Clear(_microphoneFrameData, 0, _microphoneFrameData.Length);

            // Send audio data in _microphoneFrameSize chunks until we're out of microphone data to send
            bool didGetAudioData = false;

            while (GetMicrophoneAudioData(_microphoneFrameData))
            {
                // If we have an _audioPreprocessor, preprocess the microphone data to remove noise and echo
                if (_audioPreprocessor != null)
                {
                    _audioPreprocessor.ProcessRecordSamples(_microphoneFrameData);
                }

                // Note that even when muted audio still needs to run through the audio processor to make sure echo cancellation works properly when mute is turned back off.
                if (_mute)
                {
                    Array.Clear(_microphoneFrameData, 0, _microphoneFrameData.Length);
                }

                // Send out microphone data
                _microphoneStream.SendRawAudioData(_microphoneFrameData);

                didGetAudioData = true;
            }

            if (didGetAudioData)
            {
                // Normcore queues audio packets locally so they can go out in a single packet. This sends them off.
                _microphoneStream.SendQueuedMessages();

                // Update the current microphone level.
                // Note: I moved this here so that we do our volume level calculations on microphone audio that has run through the AudioPreprocessor.
                // Measure the last 256 frames. The buffer length is in samples (frames * channels),
                // so convert to frames before subtracting — otherwise firstSample can index past the
                // end of the buffer when _microphoneChannels > 1. (Assumes interleaved samples —
                // matches the frames-vs-samples math used by the OnAudioFilterRead db calculation.)
                int totalFrames = _microphoneFrameData.Length / _microphoneChannels;
                int firstFrame  = totalFrames - 256;
                if (firstFrame < 0)
                {
                    firstFrame = 0;
                }
                int firstSample = firstFrame * _microphoneChannels;
                _microphoneDbLevel = StaticFunctions.CalculateAverageDbForAudioBuffer(_microphoneFrameData, firstSample);
            }
        }