internal static bool Filter(SpeechSession session, [NotNull] float[] output, int channels, [NotNull] float[] temp, [CanBeNull] AudioFileWriter diagnosticOutput, out float arv)
        {
            //One channel's worth of samples is all we need from the session;
            //each mono sample is fanned out across every channel of the interleaved output.
            var monoSamples = output.Length / channels;
            var finished    = session.Read(new ArraySegment<float>(temp, 0, monoSamples));

            //Dump the samples we are about to play to disk (diagnostics only)
            if (diagnosticOutput != null)
                diagnosticOutput.WriteSamples(new ArraySegment<float>(temp, 0, monoSamples));

            //Walk the interleaved output one frame (i.e. `channels` floats) at a time,
            //multiplying every channel in the frame by the single mono sample.
            float sum  = 0;
            var   read = 0;

            for (var offset = 0; offset < output.Length; offset += channels)
            {
                var sample = temp[read++];

                //Running total of the rectified signal (for the ARV below)
                sum += Mathf.Abs(sample);

                for (var c = 0; c < channels; c++)
                    output[offset + c] *= sample;
            }

            //Average rectified value of the source audio consumed this call
            arv = sum / output.Length;

            return finished;
        }
Example #2
0
        protected void SendSamplesToSubscribers([NotNull] float[] buffer)
        {
            //ncrunch: no coverage start (Justification: don't want to have to write to disk in a test)
            //Record the processed audio to disk when diagnostics are enabled,
            //lazily creating the recorder on first use.
            if (DebugSettings.Instance.EnableRecordingDiagnostics && DebugSettings.Instance.RecordPreprocessorOutput)
            {
                if (_diagnosticOutputRecorder == null)
                {
                    var filename = string.Format("Dissonance_Diagnostics/PreprocessorOutputAudio_{0}", DateTime.UtcNow.ToFileTime());
                    _diagnosticOutputRecorder = new AudioFileWriter(filename, OutputFormat);
                }

                _diagnosticOutputRecorder.WriteSamples(new ArraySegment<float>(buffer));
            }
            //ncrunch: no coverage end

            //Hand the buffer to every subscriber; a misbehaving subscriber must not
            //prevent the remaining subscribers from receiving audio.
            using (var unlocker = _micSubscriptions.Lock())
            {
                var subscribers = unlocker.Value;
                for (var index = 0; index < subscribers.Count; index++)
                {
                    try
                    {
                        subscribers[index].ReceiveMicrophoneData(new ArraySegment<float>(buffer), OutputFormat);
                    }
                    catch (Exception ex)
                    {
                        Log.Error("Microphone subscriber '{0}' threw: {1}", subscribers[index].GetType().Name, ex);
                    }
                }
            }
        }
Example #3
0
        public bool Read(ArraySegment<float> frame)
        {
            //Pull the next encoded frame from the buffer (null => no packet available)
            EncodedAudio? encoded;
            var lastFrame = _buffer.Read(out encoded);

            int decodedCount;
            if (encoded == null)
            {
                //No packet arrived in time: ask the decoder for packet loss concealment
                Log.Trace("Running decoder PLC");
                decodedCount = _decoder.Decode(null, frame);
            }
            else
            {
                Log.Trace("Decoding frame {0}", encoded.Value.Sequence);
                decodedCount = _decoder.Decode(encoded.Value.Data, frame);
                _recycleFrame(encoded.Value);
            }

            //The decoder must always produce exactly one frame of samples
            if (decodedCount != _frameSize)
                throw new InvalidOperationException(string.Format("Decoding a frame of audio got {0} samples, but should have decoded {1} samples", decodedCount, _frameSize));

            //Optionally record the decoded audio to disk
            if (_diagnosticOutput != null)
                _diagnosticOutput.WriteSamples(frame);

            return lastFrame;
        }
Example #4
0
        public void Handle(ArraySegment<float> inputSamples, WaveFormat format)
        {
            //A reset was requested: clear all intermediate pipeline state before
            //accepting any new samples.
            if (_resetRequired)
            {
                Log.Trace("Resetting encoder pipeline");

                _resampler.Reset();
                _input.Reset();
                _output.Reset();

                _resetRequired = false;
            }

            //The pipeline is configured for exactly one input format
            if (!format.Equals(_inputFormat))
                throw new ArgumentException(string.Format("Samples expected in format {0}, but supplied with format {1}", _inputFormat, format), "format");

            //Optionally record the raw input samples to disk
            if (_microphoneDiagnosticOutput != null)
                _microphoneDiagnosticOutput.WriteSamples(inputSamples);

            //Push samples into the pipeline. The input buffer may not accept everything
            //in one call, so keep writing (draining via EncodeFrames between writes)
            //until every sample has been consumed.
            var written = 0;
            while (written != inputSamples.Count)
            {
                written += _input.Write(inputSamples.Array, inputSamples.Offset + written, inputSamples.Count - written);

                //Encode and send off whatever complete frames are now available
                EncodeFrames();
            }
        }
Example #5
0
        public bool Read(ArraySegment<float> frame)
        {
            //Pull the next packet from the buffer (null => no packet available)
            VoicePacket? encoded;
            var lastFrame = _buffer.Read(out encoded);

            int decodedCount;
            if (!encoded.HasValue)
            {
                //Nothing arrived in time: run packet loss concealment instead
                Log.Trace("Running decoder PLC");
                decodedCount = _decoder.Decode(null, frame);
            }
            else
            {
                var packet = encoded.Value;

                Log.Trace("Decoding frame {0}", packet.SequenceNumber);
                decodedCount = _decoder.Decode(packet.EncodedAudioFrame, frame);

                //Publish the playback options carried by this packet
                using (var l = _options.Lock())
                    l.Value = packet.PlaybackOptions;

                //Publish the channel list. A null list means "unchanged", so the
                //previously published channels are kept in that case.
                if (packet.Channels != null)
                {
                    using (var l = _channels.Lock())
                    {
                        _approxChannelCount = packet.Channels.Count;
                        l.Value.Clear();
                        l.Value.AddRange(packet.Channels);
                    }
                }

                _recycleFrame(packet);
            }

            //The decoder must always produce exactly one frame of samples
            if (decodedCount != _frameSize)
                throw new InvalidOperationException(string.Format("Decoding a frame of audio got {0} samples, but should have decoded {1} samples", decodedCount, _frameSize));

            //Optionally record the decoded audio to disk
            if (_diagnosticOutput != null)
                _diagnosticOutput.WriteSamples(frame);

            return lastFrame;
        }
Example #6
0
        internal static bool Filter(SpeechSession session, float[] data, int channels, float[] temp, [CanBeNull] AudioFileWriter diagnosticOutput, out float arv, out int samplesRead, bool multiply)
        {
            //One channel's worth of samples is all we need; each mono sample is
            //fanned out across every channel of the interleaved buffer.
            var monoSamples = data.Length / channels;
            var finished    = session.Read(new ArraySegment<float>(temp, 0, monoSamples));

            //Record what is about to be played (diagnostics only)
            if (diagnosticOutput != null)
                diagnosticOutput.WriteSamples(new ArraySegment<float>(temp, 0, monoSamples));

            float sum  = 0;
            var   next = 0;

            //Walk the interleaved buffer one frame (`channels` floats) at a time,
            //mixing (when `multiply` is set) or overwriting every channel with the
            //single mono sample.
            for (var offset = 0; offset < data.Length; offset += channels)
            {
                var sample = temp[next++];

                //Running total of the rectified signal (for the ARV below)
                sum += Mathf.Abs(sample);

                for (var c = 0; c < channels; c++)
                {
                    if (multiply)
                        data[offset + c] *= sample;
                    else
                        data[offset + c] = sample;
                }
            }

            //Average rectified value, and how many source samples were consumed
            arv         = sum / data.Length;
            samplesRead = monoSamples;

            return finished;
        }
Example #7
0
        private void EncodeFrames()
        {
            //A reusable view over the scratch buffer, sized to exactly one encoder frame
            var encoderInput = new ArraySegment<float>(_plainSamples, 0, _encoder.FrameSize);

            //Drain every complete frame of resampled audio; keeping this buffer
            //as empty as possible keeps latency low.
            while (_output.Read(encoderInput))
            {
                //Optionally record the audio just before it is encoded
                if (_preEncodeDiagnosticOutput != null)
                    _preEncodeDiagnosticOutput.WriteSamples(encoderInput);

                //Compress the frame...
                var encoded = _encoder.Encode(encoderInput, new ArraySegment<byte>(_encodedBytes));

                //...and hand it to the network layer
                _net.SendVoice(encoded);
            }
        }
Example #8
0
        /// <summary>
        /// Read as many frames as possible from the mic sample buffer and pass them to the encoding thread
        /// </summary>
        private void SendFrame()
        {
            //Keep draining while more than one frame of samples is buffered
            while (_rawMicSamples.Count > _rawMicFrames.FrameSize)
            {
                var segment = new ArraySegment<float>(_frame);

                //Stop as soon as a complete frame cannot be read
                if (!_rawMicFrames.Read(segment))
                    break;

                //Lazily create (or tear down) the diagnostic recorder so it tracks the setting
                if (DebugSettings.Instance.EnableRecordingDiagnostics && DebugSettings.Instance.RecordMicrophoneRawAudio)
                {
                    if (_microphoneDiagnosticOutput == null)
                    {
                        var filename = string.Format("Dissonance_Diagnostics/MicrophoneRawAudio_{0}", DateTime.UtcNow.ToFileTime());
                        _microphoneDiagnosticOutput = new AudioFileWriter(filename, _format);
                    }
                }
                else if (_microphoneDiagnosticOutput != null)
                {
                    _microphoneDiagnosticOutput.Dispose();
                    _microphoneDiagnosticOutput = null;
                }

                //Record the raw frame to disk (diagnostics only)
                if (_microphoneDiagnosticOutput != null)
                {
                    _microphoneDiagnosticOutput.WriteSamples(segment);
                    _microphoneDiagnosticOutput.Flush();
                }

                //Fan the frame out to all subscribers
                for (var index = 0; index < _subscribers.Count; index++)
                    _subscribers[index].ReceiveMicrophoneData(segment, _format);
            }
        }
        public bool Read(ArraySegment<float> frame)
        {
            //Pull the next packet from the buffer (null => no packet available)
            VoicePacket? encoded;
            var lastFrame = _buffer.Read(out encoded);

            int decodedCount;
            if (!encoded.HasValue)
            {
                //Nothing arrived in time: run packet loss concealment instead
                Log.Trace("Running decoder PLC");
                decodedCount = _decoder.Decode(null, frame);
            }
            else
            {
                var packet = encoded.Value;

                Log.Trace("Decoding frame {0}", packet.SequenceNumber);
                decodedCount = _decoder.Decode(packet.EncodedAudioFrame, frame);

                //Publish the playback options carried by this packet
                using (var l = _options.Lock())
                    l.Value = packet.PlaybackOptions;

                //Copy the channel metadata out into a separate list
                ExtractChannels(packet);

                _recycleFrame(packet);
            }

            //The decoder must always produce exactly one frame of samples
            if (decodedCount != _frameSize)
                throw new InvalidOperationException(string.Format("Decoding a frame of audio got {0} samples, but should have decoded {1} samples", decodedCount, _frameSize));

            //Optionally record the decoded audio to disk
            if (_diagnosticOutput != null)
                _diagnosticOutput.WriteSamples(frame);

            return lastFrame;
        }
Example #10
0
        public bool Read(ArraySegment<float> frame)
        {
            //Pull the next packet from the buffer. `peekLostPacket` indicates the
            //buffer knows a packet was dropped rather than merely not yet arrived.
            VoicePacket? encoded;
            bool         peekLostPacket;
            var lastFrame = _buffer.Read(out encoded, out peekLostPacket);

            //Package the (possibly missing) payload for the decoder, flagging loss
            var buffer = new EncodedBuffer(encoded.HasValue ? encoded.Value.EncodedAudioFrame : (ArraySegment<byte>?)null, peekLostPacket || !encoded.HasValue);

            //Decode the frame (the decoder conceals the loss if the packet is missing)
            var decodedCount = _decoder.Decode(buffer, frame);

            //Metadata only exists for packets which actually arrived
            if (!buffer.PacketLost && encoded.HasValue)
            {
                var packet = encoded.Value;

                //Publish the playback options carried by this packet
                using (var l = _options.Lock())
                    l.Value = packet.PlaybackOptions;

                //Copy the channel metadata out into a separate list
                ExtractChannels(packet);

                //Return the frame for re-use with a future packet. Only done with
                //frames which were not peek ahead frames.
                _recycleFrame(packet);
            }

            //The decoder must always produce exactly one frame of samples
            if (decodedCount != _frameSize)
                throw new InvalidOperationException(string.Format("Decoding a frame of audio got {0} samples, but should have decoded {1} samples", decodedCount, _frameSize));

            //Optionally record the decoded audio to disk
            if (_diagnosticOutput != null)
                _diagnosticOutput.WriteSamples(frame);

            return lastFrame;
        }
Example #11
0
        /// <summary>
        /// Read as many frames as possible from the mic sample buffer and pass them to the encoding thread
        /// </summary>
        private void SendFrame()
        {
            //Keep draining while more than one preprocessor frame is buffered
            while (_rawMicSamples.Count > _preprocessing.InputFrameSize)
            {
                //Borrow an empty buffer from the pool (buffers are returned to the
                //pool by the audio processing thread once it is done with them)
                var frameBuffer = _preprocessing.GetFrameBuffer();

                //Fill it with one complete frame of raw mic samples.
                //NOTE(review): the success result of Read is ignored here, unlike the
                //sibling SendFrame which breaks on failure — presumably the Count
                //check above guarantees a full frame is available; confirm.
                _rawMicFrames.Read(new ArraySegment<float>(frameBuffer));

                //Lazily create (or tear down) the diagnostic recorder so it tracks the setting
                if (DebugSettings.Instance.EnableRecordingDiagnostics && DebugSettings.Instance.RecordMicrophoneRawAudio)
                {
                    if (_microphoneDiagnosticOutput == null)
                    {
                        var filename = string.Format("Dissonance_Diagnostics/MicrophoneRawAudio_{0}", DateTime.UtcNow.ToFileTime());
                        _microphoneDiagnosticOutput = new AudioFileWriter(filename, _rawMicSamples.WaveFormat);
                    }
                }
                else if (_microphoneDiagnosticOutput != null)
                {
                    _microphoneDiagnosticOutput.Dispose();
                    _microphoneDiagnosticOutput = null;
                }

                //Record the raw frame to disk (diagnostics only)
                if (_microphoneDiagnosticOutput != null)
                {
                    _microphoneDiagnosticOutput.WriteSamples(new ArraySegment<float>(frameBuffer));
                    _microphoneDiagnosticOutput.Flush();
                }

                //Hand the whole buffer to the audio thread for processing
                //(ownership transfer by reference, no copying)
                _preprocessing.Send(frameBuffer);
            }
        }