Example #1
        unsafe public void ProcessFrame(ProcessAudioFrameContext context)
        {
            AudioFrame frame = context.InputFrame;

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.ReadWrite))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out byte *dataInBytes, out uint capacity);
                    float *dataInFloat       = (float *)dataInBytes;
                    int    dataInFloatLength = (int)buffer.Length / sizeof(float);

                    // read parameters once
                    float currentWet      = this.Mix;
                    float currentDry      = 1.0f - currentWet;
                    float currentFeedback = this.Feedback;

                    // Process audio data
                    float sample, echoSample, outSample;
                    for (int i = 0; i < dataInFloatLength; i++)
                    {
                        // read values
                        sample     = dataInFloat[i];
                        echoSample = _echoBuffer.Dequeue();

                        // compute output sample
                        outSample      = (currentDry * sample) + (currentWet * echoSample);
                        dataInFloat[i] = outSample;

                        // compute delay sample
                        echoSample = sample + (currentFeedback * echoSample);
                        _echoBuffer.Enqueue(echoSample);
                    }
                }
        }
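Every example on this page reaches the raw sample memory by casting an IMemoryBufferReference to IMemoryBufferByteAccess. That COM interop interface is not declared in any of these snippets; each project has to define it itself. The declaration below is the standard one from the Windows Runtime documentation (the samples also require compiling with /unsafe):

        using System;
        using System.Runtime.InteropServices;

        [ComImport]
        [Guid("5B0D3235-4DBA-4D44-865E-8F1D0E4FD04D")]
        [InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
        unsafe interface IMemoryBufferByteAccess
        {
            // Returns a pointer to the buffer's underlying memory and its capacity in bytes.
            void GetBuffer(out byte* buffer, out uint capacity);
        }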
Example #2
        unsafe void Graph_QuantumStarted(AudioGraph sender, object args)
        {
            // we'll only broadcast if we're actively monitoring audio packets
            if (!Active)
            {
                return;
            }

            try
            {
                // get an audio frame from the output node
                AudioFrame frame = outputNode.GetFrame();

                if (frame.Duration?.Milliseconds == 0)                 // discard any empty frames
                {
                    return;
                }

                using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
                    using (IMemoryBufferReference reference = buffer.CreateReference())
                    {
                        // Get the buffer from the AudioFrame
                        ((IMemoryBufferByteAccess)reference).GetBuffer(out byte *dataInBytes, out uint capacityInBytes);

                        // convert the bytes into float
                        float *dataInFloat = (float *)dataInBytes;

                        if (audioBytes == null)
                        {
                            audioBytes = new byte [buffer.Length * broadcastSize / 2];                     // buffer length * # of frames we want to accrue / 2 (because we're transforming float audio to Int 16)
                        }

                        for (int i = 0; i < capacityInBytes / sizeof(float); i++)
                        {
                            // convert the float into a double byte for 16 bit PCM
                            var     shortVal   = AudioFunctions.FloatToInt16(dataInFloat [i]);
                            byte [] chunkBytes = BitConverter.GetBytes(shortVal);

                            audioBytes [bufferPosition++] = chunkBytes [0];
                            audioBytes [bufferPosition++] = chunkBytes [1];
                        }

                        // we want to wait until we accrue <broadcastSize> # of frames and then broadcast them
                        //	in practice, this will take us from 20ms chunks to 100ms chunks and result in more accurate audio level calculations
                        //	we could maybe use the audiograph latency settings to achieve similar results but this seems to work well
                        if (bufferPosition == audioBytes.Length || !Active)
                        {
                            // broadcast the audio data to any listeners
                            OnBroadcast?.Invoke(this, audioBytes);

                            audioBytes     = null;
                            bufferPosition = 0;
                        }
                    }
            }
            catch (Exception ex)
            {
                OnException?.Invoke(this, new Exception($"AudioStream.Graph_QuantumStarted() :: Error: {ex.Message}"));
            }
        }
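AudioFunctions.FloatToInt16 above is project code that is not shown here. A minimal sketch, assuming it performs the usual clamp-and-scale conversion from a [-1.0, 1.0] float sample to 16-bit PCM (the same transform Example #26 performs inline):

        public static short FloatToInt16(float value)
        {
            // Clamp first so out-of-range samples saturate instead of wrapping around.
            if (value > 1.0f) value = 1.0f;
            else if (value < -1.0f) value = -1.0f;

            return (short)(value * short.MaxValue);
        }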
Example #3
        private unsafe AudioFrame GenerateAudioData(uint samples)
        {
            // Buffer size is (number of samples) * (size of each sample)
            // We choose to generate single channel (mono) audio. For multi-channel, multiply by number of channels
            var bufferSize = samples * sizeof(float);
            var frame      = new AudioFrame(bufferSize);

            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (var reference = buffer.CreateReference())
                {
                    byte *dataInBytes;
                    uint  capacityInBytes;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    // Cast to float since the data we are generating is float
                    var dataInFloat = (float *)dataInBytes;

                    const float amplitude       = 0.3f;
                    var         sampleRate      = (int)_audioGraph.EncodingProperties.SampleRate;
                    var         sampleIncrement = _freq * (Math.PI * 2) / sampleRate;

                    // Generate the sine wave and populate the values in the memory buffer
                    for (var i = 0; i < samples; i++)
                    {
                        var sinValue = amplitude * Math.Sin(_theta);
                        dataInFloat[i] = (float)sinValue;
                        _theta        += sampleIncrement;
                    }
                }

            return(frame);
        }
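A generator like this is normally driven from an AudioFrameInputNode whose QuantumStarted event requests data on demand. A sketch of the wiring, assuming the node was created from the same graph:

        private void FrameInputNode_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            // Only produce a frame when the graph actually needs samples this quantum.
            if (args.RequiredSamples > 0)
            {
                sender.AddFrame(GenerateAudioData((uint)args.RequiredSamples));
            }
        }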
Example #4
        /// <summary>
        /// Creates a new AudioFrame from the specified subset of input bytes.
        /// </summary>
        /// <param name="frameDataBuffer"> The bytes to use as the data source for the new AudioFrame. </param>
        /// <param name="length"> The number of bytes, from the beginning of the array, to use. </param>
        /// <returns> A new AudioFrame with the specified data. The caller is responsible for Disposing. </returns>
        public static unsafe AudioFrame CreateFrameFromBytes(byte[] frameDataBuffer, int length)
        {
            Contract.Requires(frameDataBuffer != null);

            if (length > frameDataBuffer.Length || length <= 0)
            {
                throw new ArgumentException($"Cannot create an AudioFrame of size {length}. Valid: 1 to {frameDataBuffer.Length}");
            }

            var resultFrame = new AudioFrame((uint)length);

            using (var audioBuffer = resultFrame.LockBuffer(AudioBufferAccessMode.Write))
                using (var bufferReference = audioBuffer.CreateReference())
                {
                    var bufferAccess = (IMemoryBufferByteAccess)bufferReference;
                    bufferAccess.GetBuffer(out byte *unsafeBuffer, out uint unsafeBufferCapacity);

                    for (uint i = 0; i < unsafeBufferCapacity; i++)
                    {
                        unsafeBuffer[i] = frameDataBuffer[i];
                    }
                }

            return(resultFrame);
        }
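A typical call site might look like this (the frameInputNode field and the PCM byte source are hypothetical), with the caller honoring the Dispose contract noted in the doc comment:

        byte[] pcm = GetFloatPcmBytes(); // hypothetical source of 32-bit float PCM bytes
        using (AudioFrame frame = CreateFrameFromBytes(pcm, pcm.Length))
        {
            frameInputNode.AddFrame(frame);
        }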
Example #5
        /// <summary>
        /// When audioFrameUpdateMinimum is reached by audioFrameUpdateCount, this method gets the current audio frame, obtains the data from it
        /// and calculates the raw audio level from -100 to 0.
        /// </summary>
        private static unsafe void Graph_QuantumStarted(AudioGraph sender, object args)
        {
            audioFrameUpdateCount++;
            if (audioFrameUpdateCount >= audioFrameUpdateMinimum)
            {
                AudioFrame audioFrame = frameOutputNode.GetFrame();
                float[]    floatData;
                using (AudioBuffer audioBuffer = audioFrame.LockBuffer(AudioBufferAccessMode.Write))
                    using (IMemoryBufferReference reference = audioBuffer.CreateReference())
                    {
                        ((IMemoryBufferByteAccess)reference).GetBuffer(out byte *dataInBytes, out uint capacity);

                        float *unsafeFloatData = (float *)dataInBytes;
                        floatData = new float[capacity / sizeof(float)];

                        for (int i = 0; i < capacity / sizeof(float); i++)
                        {
                            floatData[i] = unsafeFloatData[i];
                        }
                    }

                double soundLevel = 0f;
                foreach (float sample in floatData)
                {
                    soundLevel += Math.Abs(sample);
                }
                soundLevel = Math.Log10(soundLevel / floatData.Length) * 20;

                NewRawSoundLevel(soundLevel);

                audioFrameUpdateCount = 0;
            }
        }
Example #6
        private unsafe void ProcessFrameOutput(AudioFrame frame)
        {
            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
                using (var reference = buffer.CreateReference())
                {
                    byte *dataInBytes;
                    uint  capacityInBytes;

                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    var capacityInFloats = capacityInBytes / 4;
                    if (capacityInFloats != _frameSize) // Only send frames with the correct size.
                    {
                        return;
                    }

                    var dataInFloats = (float *)dataInBytes;
                    var floats       = new float[capacityInFloats];
                    Marshal.Copy((IntPtr)dataInFloats, floats, 0, (int)capacityInFloats);

                    var shorts = ConvertFloatsToShorts(floats);

                    ToxAvModel.Instance.SendAudioFrame(_friendNumber, new ToxAvAudioFrame(shorts, _samplingRate, 1));
                }
        }
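ConvertFloatsToShorts is not included in the source; a plausible implementation, assuming the standard float-to-PCM16 scaling:

        private static short[] ConvertFloatsToShorts(float[] floats)
        {
            var shorts = new short[floats.Length];

            for (var i = 0; i < floats.Length; i++)
            {
                // Clamp to [-1.0, 1.0], then scale to the Int16 range.
                var sample = Math.Max(-1.0f, Math.Min(1.0f, floats[i]));
                shorts[i] = (short)(sample * short.MaxValue);
            }

            return shorts;
        }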
Example #7
        unsafe private static void ProcessFrameOutput(AudioFrame frame)
        {
            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    byte * dataInBytes;
                    uint   capacityInBytes;
                    float *dataInFloat;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    dataInFloat = (float *)dataInBytes;
                    float[] dataInFloats = new float[capacityInBytes / sizeof(float)];

                    for (int i = 0; i < capacityInBytes / sizeof(float); i++)
                    {
                        dataInFloats[i] = dataInFloat[i];
                    }

                    InputRecieved?.Invoke(null, dataInFloats);
                }
        }
Example #8
        // TODO: Fix frame receiving!
        private unsafe AudioFrame GenerateAudioData(uint samples, short[] shorts)
        {
            // Buffer size is (number of samples) * (size of each sample)
            // We choose to generate single channel (mono) audio. For multi-channel, multiply by number of channels
            var bufferSize = samples * sizeof(float);
            var frame      = new AudioFrame(bufferSize);

            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (var reference = buffer.CreateReference())
                {
                    byte *dataInBytes;
                    uint  capacityInBytes;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    // Cast to float since the data we are generating is float
                    var dataInFloats = (float *)dataInBytes;

                    var floats           = ConvertShortsToFloats(shorts);
                    var capacityInFloats = capacityInBytes / 4;

                    Marshal.Copy(floats, 0, (IntPtr)dataInFloats, (int)capacityInFloats);
                }

            return(frame);
        }
Example #9
        public unsafe void ProcessFrame(ProcessAudioFrameContext context)
        {
            AudioFrame inputFrame  = context.InputFrame;
            AudioFrame outputFrame = context.OutputFrame;

            using (AudioBuffer inputBuffer = inputFrame.LockBuffer(AudioBufferAccessMode.Read),
                   outputBuffer = outputFrame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference inputReference = inputBuffer.CreateReference(),
                       outputReference = outputBuffer.CreateReference())
                {
                    ((IMemoryBufferByteAccess)inputReference).GetBuffer(out var inputDataInBytes, out _);
                    ((IMemoryBufferByteAccess)outputReference).GetBuffer(out var outputDataInBytes, out _);

                    float *inputDataInFloat  = (float *)inputDataInBytes;
                    float *outputDataInFloat = (float *)outputDataInBytes;

                    // Process audio data
                    int dataInFloatLength = (int)inputBuffer.Length / sizeof(float);

                    for (int i = 0; i < dataInFloatLength; i++)
                    {
                        // var inputData = inputDataInFloat[i] * (1.0f - Mix);
                        var inputData = inputDataInFloat[i];
                        outputDataInFloat[i] = ProcessFilterSample(inputData);
                    }
                }
        }
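ProcessFilterSample is the per-sample filter this effect applies; the source does not show it. A one-pole low-pass is the simplest possible stand-in (entirely an assumption). Note that because the loop above walks every interleaved sample, a real implementation would keep separate state per channel, as Example #13 does:

        private float _filterState; // hypothetical filter memory

        private float ProcessFilterSample(float input)
        {
            const float alpha = 0.2f; // smoothing coefficient, chosen arbitrarily for illustration

            _filterState += alpha * (input - _filterState);
            return _filterState;
        }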
Example #10
        unsafe private void ProcessFrameOutput(AudioFrame frame)
        {
            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    // get hold of the buffer pointer
                    byte *dataInBytes;
                    uint  capacityInBytes;
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes,
                                                                   out capacityInBytes);

                    var dataInFloat = (float *)dataInBytes;

                    // examine
                    float max = 0;
                    for (int n = 0; n < graph.SamplesPerQuantum; n++)
                    {
                        max = Math.Max(Math.Abs(dataInFloat[n]), max);
                    }
                    currentPeak = max;

                    float x = currentPeak * 1000;

                    double Bri = Math.Pow(x, 3);                // Sensitivity slider value

                    byte Brightness = (byte)Math.Round(Bri, 0); // Calculating to a 0 - 255 value to control the light brightness

                    Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        OutputText.Text = Brightness.ToString();
                    });
                }
        }
Example #11
        unsafe private void ProcessAudioFrame(AudioMediaFrame audioMediaFrame)
        {
            using (AudioFrame audioFrame = audioMediaFrame.GetAudioFrame())
                using (AudioBuffer buffer = audioFrame.LockBuffer(AudioBufferAccessMode.Read))
                    using (IMemoryBufferReference reference = buffer.CreateReference())
                    {
                        byte * dataInBytes;
                        uint   capacityInBytes;
                        float *dataInFloat;


                        ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                        // The requested format was float
                        dataInFloat = (float *)dataInBytes;

                        // Get the number of samples by multiplying the duration by sampling rate:
                        // duration [s] x sampling rate [samples/s] = # samples

                        // Duration can be gotten off the frame reference OR the audioFrame
                        TimeSpan duration = audioMediaFrame.FrameReference.Duration;

                        // frameDurMs is in milliseconds, while SampleRate is given per second.
                        uint frameDurMs  = (uint)duration.TotalMilliseconds;
                        uint sampleRate  = audioMediaFrame.AudioEncodingProperties.SampleRate;
                        uint sampleCount = (frameDurMs * sampleRate) / 1000;
                    }
        }
Example #12
 unsafe private void ProcessInputFrame(AudioFrame frame)
 {
     using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
     using (IMemoryBufferReference reference = buffer.CreateReference())
     {
         // We get data from current buffer
         ((IMemoryBufferByteAccess)reference).GetBuffer(
             out byte* dataInBytes,
             out uint capacityInBytes
             );
         // We discard first frame; it's full of zeros because of latency
         if (audioGraph.CompletedQuantumCount == 1) return;
         float* dataInFloat = (float*)dataInBytes;
         uint capacityInFloat = capacityInBytes / sizeof(float);
         // Number of channels defines step between samples in buffer
         uint step = fileInputNode.EncodingProperties.ChannelCount;
         // We transfer audio samples from buffer into audioData
         for (uint i = 0; i < capacityInFloat; i += step)
         {
             if (audioDataCurrentPosition < audioData.Length)
             {
                 audioData[audioDataCurrentPosition] = dataInFloat[i];
                 audioDataCurrentPosition++;
             }
         }
     }
 }
Example #13
        public void ProcessFrame(ProcessAudioFrameContext context)
        {
            unsafe
            {
                AudioFrame inputFrame = context.InputFrame;

                using (AudioBuffer inputBuffer = inputFrame.LockBuffer(AudioBufferAccessMode.ReadWrite))
                    using (IMemoryBufferReference inputReference = inputBuffer.CreateReference())
                    {
                        ((IMemoryBufferByteAccess)inputReference).GetBuffer(out byte *inputDataInBytes, out uint inputCapacity);

                        float *inputDataInFloat  = (float *)inputDataInBytes;
                        int    dataInFloatLength = (int)inputBuffer.Length / sizeof(float);

                        // Process audio data
                        for (int n = 0; n < dataInFloatLength; n++)
                        {
                            int ch = n % channels;

                            // cascaded filter to perform eq
                            for (int band = 0; band < bandCount; band++)
                            {
                                inputDataInFloat[n] = filters[ch, band].Transform(inputDataInFloat[n]);
                            }
                        }
                    }
            }
        }
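filters[ch, band].Transform(float) matches the shape of NAudio's BiQuadFilter. If that is the type in use, the cascade could be set up as below; the band count, center frequencies, Q, and gains are illustrative assumptions, not values from the source:

        // Requires the NAudio.Dsp namespace; assumes bandCount == 3 for illustration.
        private void InitializeFilters(int sampleRate)
        {
            float[] bandFrequencies = { 100f, 1000f, 10000f }; // illustrative band centers
            float[] bandGainsDb     = { 0f, 0f, 0f };          // flat EQ to start

            filters = new BiQuadFilter[channels, bandCount];
            for (int ch = 0; ch < channels; ch++)
            {
                for (int band = 0; band < bandCount; band++)
                {
                    filters[ch, band] = BiQuadFilter.PeakingEQ(
                        sampleRate, bandFrequencies[band], 0.8f, bandGainsDb[band]);
                }
            }
        }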
Example #14
        private unsafe AudioFrame GenerateAudioData(uint samples)
        {
            var bufferSize = samples * sizeof(float) * 2;
            var frame      = new AudioFrame(bufferSize);

            _buffer = _buffer?.Length != samples * 2 ? new short[samples * 2] : _buffer;
            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    float *dataInFloat;
                    byte * dataInBytes;
                    uint   capacityInBytes;
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);
                    dataInFloat = (float *)dataInBytes;
                    _player.GetBuffer(_buffer);

                    for (var i = 0; i < _buffer.Length; i++)
                    {
                        dataInFloat[i] = _buffer[i] * 0.00003f;                  // multiplication is said to be faster than division
                    }

                    //foreach (float f in _buffer.Select(a => a * 0.00003f))
                    //	*dataInFloat++ = f;
                }

            return(frame);
        }
Example #15
        /// <summary>
        /// Generates a 17 kHz sine wave for the necessary quantity of samples
        /// </summary>
        /// <param name="samples">Sample count</param>
        /// <returns>AudioFrame containing the requested number of samples</returns>
        public static unsafe AudioFrame GenerateAudioData(uint samples)
        {
            // Buffer size is (number of samples) * (size of each sample) * (number of channels)
            uint       bufferSize = samples * sizeof(float) * 2;
            AudioFrame frame      = new AudioFrame(bufferSize);

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out byte *dataInBytes, out uint _);

                    // Cast to float since the data we are generating is float
                    float *dataInFloat = (float *)dataInBytes;

                    float  freq            = 17000; // choosing to generate frequency of 17kHz
                    float  amplitude       = 0.3f;
                    int    sampleRate      = (int)outgraph.EncodingProperties.SampleRate;
                    double sampleIncrement = (freq * (Math.PI * 2)) / sampleRate;

                    // Generate a 17kHz sine wave and populate the values in the memory buffer
                    for (int i = 0; i < samples; i++)
                    {
                        double sinValue = amplitude * Math.Sin(theta);
                        dataInFloat[i] = (float)sinValue;
                        theta         += sampleIncrement;
                    }
                }

            return(frame);
        }
Example #16
        unsafe public void ProcessFrame(ProcessAudioFrameContext context)
        {
            AudioFrame inputFrame = context.InputFrame;

            using (AudioBuffer inputBuffer = inputFrame.LockBuffer(AudioBufferAccessMode.Read))
                using (IMemoryBufferReference inputReference = inputBuffer.CreateReference())
                {
                    byte *inputDataInBytes;
                    uint  inputCapacity;

                    ((IMemoryBufferByteAccess)inputReference).GetBuffer(out inputDataInBytes, out inputCapacity);

                    float *inputDataInFloat = (float *)inputDataInBytes;

                    float inputDataL;
                    float inputDataR;

                    // Process audio data
                    int dataInFloatLength = (int)inputBuffer.Length / sizeof(float);

                    if (_chart == null)
                    {
                        _chart = new float[dataInFloatLength];
                        propertySet["chart"] = _chart;
                    }
                    for (int i = 0; i < dataInFloatLength; i += 2)
                    {
                        inputDataL    = inputDataInFloat[i];
                        inputDataR    = inputDataInFloat[i + 1];
                        _chart[i]     = inputDataL;
                        _chart[i + 1] = inputDataR;
                    }
                }
        }
Example #17
        //unsafe private void M_AudioGraph_QuantumProcessed(AudioGraph sender, object args)
        //{
        //}

        unsafe private void M_AudioGraph_QuantumStarted(AudioGraph sender, object args)
        {
            // draw every n frames
            //if (fctr++ % 5 == 0)
            //{
            using (AudioFrame audioFrame = this.m_AudioFrameOutputNode.GetFrame())
                using (AudioBuffer audioBuffer = audioFrame.LockBuffer(AudioBufferAccessMode.Read))
                    using (IMemoryBufferReference memBufferRef = audioBuffer.CreateReference())
                    {
                        IMemoryBufferByteAccess byteAccess = memBufferRef as IMemoryBufferByteAccess;

                        byte *byteBuffer;
                        uint  capacity;

                        byteAccess.GetBuffer(out byteBuffer, out capacity);

                        float *floatBuffer = (float *)byteBuffer;

                        for (int i = 0; i < this.m_AudioGraph.SamplesPerQuantum * this.m_AudioGraph.EncodingProperties.ChannelCount; i++)
                        {
                            this.m_QuantumSamples[i] = floatBuffer[i];
                        }

                        this.m_Capacity = capacity;
                        this.m_abCap    = audioBuffer.Capacity;
                        this.m_abLen    = audioBuffer.Length;
                    }
            AudioCanvas.Invalidate();
            //}
        }
Example #18
        //Look into the data bit by bit
        public unsafe IList<float> ProcessFrameOutput(AudioFrame frame)
        {
            IList<float> points = new List<float>();

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                byte* dataInBytes;
                uint capacityInBytes;
                float* dataInFloat;

                // Get the buffer from the AudioFrame
                ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                dataInFloat = (float*)dataInBytes;

                int dataInFloatLength = (int)buffer.Length / sizeof(float);

                for (int i = 0; i < dataInFloatLength; i++)
                {
                    points.Add(dataInFloat[i]);
                }

                return points;
            }
        }
Example #19
        private unsafe AudioFrame ProcessOutputFrame(int requiredSamples)
        {
            var bufferSize = (uint)requiredSamples * sizeof(float) *
                             _fileOutputNode.EncodingProperties.ChannelCount;

            var frame = new AudioFrame(bufferSize);

            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (var reference = buffer.CreateReference())
                {
                    // Get the buffer from the AudioFrame
                    (reference as IMemoryBufferByteAccess).GetBuffer(
                        out var dataInBytes,
                        out var capacityInBytes);

                    // Cast to float since the data we are generating is float
                    var dataInFloat     = (float *)dataInBytes;
                    var capacityInFloat = capacityInBytes / sizeof(float);

                    // Number of channels defines step between samples in buffer
                    var channelCount = _fileOutputNode.EncodingProperties.ChannelCount;

                    for (uint index = 0; index < capacityInFloat; index += channelCount)
                    {
                        if (_audioDataCurrentPosition < _audioData.LengthSamples())
                        {
                            GetAudioData().SetCurrentChannelType(ChannelType.Left);
                            dataInFloat[index] = _audioData.GetOutputSample(
                                _audioDataCurrentPosition);
                        }

                        // if it's stereo
                        if (channelCount == 2)
                        {
                            // if the processed audio is stereo
                            if (_audioData.IsStereo)
                            {
                                GetAudioData().SetCurrentChannelType(ChannelType.Right);
                                dataInFloat[index + 1] = _audioData.GetOutputSample(
                                    _audioDataCurrentPosition);
                            }
                            else
                            {
                                // mute channel
                                dataInFloat[index + 1] = 0;
                            }
                        }

                        _audioDataCurrentPosition++;
                        if (_audioDataCurrentPosition >= _audioData.LengthSamples())
                        {
                            // the last frame may not be full
                            _finished = true;
                            return(frame);
                        }
                    }
                }

            return(frame);
        }
Example #20
        unsafe internal static AudioFrame GetAudioFrame(DataReader reader)
        {
            var numBytes = reader.UnconsumedBufferLength;

            var headerSize = 44;
            var bytes      = new byte[headerSize];

            reader.ReadBytes(bytes);

            // Skip the 44-byte WAV header; the remainder is the raw sample data.
            // Note this is a byte count, which is what the AudioFrame constructor expects.
            var        numSamples = (uint)(numBytes - headerSize);
            AudioFrame frame      = new AudioFrame(numSamples);

            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    byte *dataInBytes;
                    uint  capacityInBytes;

                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    Int16 *dataInInt16 = (Int16 *)dataInBytes;

                    for (int i = 0; i < capacityInBytes / sizeof(Int16); i++)
                    {
                        dataInInt16[i] = reader.ReadInt16();
                    }
                }

            return(frame);
        }
Example #21
        /// <summary>
        /// Handle frame of mic input
        /// </summary>
        /// <param name="frame"></param>
        private static unsafe void ProcessFrameOutput(AudioFrame frame)
        {
            float[] dataInFloats;
            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out byte *dataInBytes, out uint capacityInBytes);

                    float *dataInFloat = (float *)dataInBytes;
                    dataInFloats = new float[capacityInBytes / sizeof(float)];

                    for (int i = 0; i < capacityInBytes / sizeof(float); i++)
                    {
                        dataInFloats[i] = dataInFloat[i];
                    }
                }

            // Don't bother if muted
            if (LocalState.VoiceState.SelfMute || LocalState.VoiceState.ServerMute)
            {
                AudioInSpec1   = 0;
                AudioInSpec2   = 0;
                AudioInSpec3   = 0;
                AudioInSpec4   = 0;
                AudioInSpec5   = 0;
                AudioInSpec6   = 0;
                AudioInSpec7   = 0;
                AudioInSpec8   = 0;
                AudioInSpec9   = 0;
                AudioInAverage = 0;
            }
            else
            {
                // Determine FFT data
                List <float[]> amplitudeData = FFT.Processing.HelperMethods.ProcessFrameOutput(frame);
                List <float[]> channelData   = FFT.Processing.HelperMethods.GetFftData(FFT.Processing.HelperMethods.ConvertTo512(amplitudeData, ingraph), ingraph);

                float[] leftChannel = channelData[1];

                // Assign each FFT data out channel
                AudioInSpec1   = HelperMethods.Max(leftChannel, 0, 1);
                AudioInSpec2   = HelperMethods.Max(leftChannel, 2, 3);
                AudioInSpec3   = HelperMethods.Max(leftChannel, 3, 4);
                AudioInSpec4   = HelperMethods.Max(leftChannel, 4, 5);
                AudioInSpec5   = HelperMethods.Max(leftChannel, 5, 6);
                AudioInSpec6   = HelperMethods.Max(leftChannel, 7, 8);
                AudioInSpec7   = HelperMethods.Max(leftChannel, 9, 10);
                AudioInSpec8   = HelperMethods.Max(leftChannel, 10, 12);
                AudioInSpec9   = HelperMethods.Max(leftChannel, 14, 26);
                AudioInAverage = (AudioInSpec1 + AudioInSpec2 + AudioInSpec3 + AudioInSpec4 + AudioInSpec5 + AudioInSpec6 + AudioInSpec7 + AudioInSpec8 + AudioInSpec9) / 9;
            }

            InputRecieved?.Invoke(null, dataInFloats);
        }
Example #22
        unsafe public void ProcessFrame(ProcessAudioFrameContext context)
        {
            //foreach (var item in context.InputFrame.ExtendedProperties.Keys)
            //{
            //    Debug.WriteLine(item);
            //}


            const int videoFrameRate = 60; // TODO: we should probably measure this

            //Debug.WriteLine(sw.ElapsedMilliseconds.ToString());
            AudioFrame inputFrame = context.InputFrame;

            using (AudioBuffer inputBuffer = inputFrame.LockBuffer(AudioBufferAccessMode.Read))
                using (IMemoryBufferReference inputReference = inputBuffer.CreateReference())
                {
                    byte * inputInBytes;
                    uint   inputCapacity;
                    float *inputInFloats;

                    ((IMemoryBufferByteAccess)inputReference).GetBuffer(out inputInBytes, out inputCapacity);

                    inputInFloats = (float *)inputInBytes;
                    int inputLengthSamples = (int)inputBuffer.Length / sizeof(float);

                    int samplesPervBlank = (int)((float)currentEncodingProperties.SampleRate / (float)videoFrameRate);

                    int numVBlanksForCurrentAudioBuffer = (int)Math.Ceiling(((float)context.InputFrame.Duration.Value.Milliseconds / ((1.0f / (float)videoFrameRate) * 1000)));

                    var volumeSetLeft  = new double[numVBlanksForCurrentAudioBuffer];
                    var volumeSetRight = new double[numVBlanksForCurrentAudioBuffer];

                    //Left Channel
                    CalcAudioVolumedBPerVBlank(inputInFloats, inputLengthSamples, samplesPervBlank, volumeSetLeft, 0, (int)currentEncodingProperties.ChannelCount);

                    if (currentEncodingProperties.ChannelCount == 2)
                    {
                        //Right Channel
                        CalcAudioVolumedBPerVBlank(inputInFloats, inputLengthSamples, samplesPervBlank, volumeSetRight, 1, (int)currentEncodingProperties.ChannelCount);
                    }

                    lock (PassthroughEffect.GetBadLock())
                    {
                        for (var i = 0; i < numVBlanksForCurrentAudioBuffer; i++)
                        {
                            ((Queue <Tuple <double, double> >) this.propertySet["dataQueue"]).Enqueue(new Tuple <double, double>(volumeSetLeft[i], volumeSetRight[i]));
                        }
                        //((Queue<Double[]>)this.propertySet["AudioVolumeLeftQueue"]).Enqueue(volumeSetLeft);
                        //((Queue<Double[]>)this.propertySet["AudioVolumeRightQueue"]).Enqueue(volumeSetRight);
                        //this.propertySet["VolumeLeft"] = volumeSetLeft;
                        //this.propertySet["VolumeRight"] = volumeSetRight;
                    }
                }
        }
Example #23
        public void Render_AddFrame()
        {
            var frame = new AudioFrame(_sut.BufferSize * 4);

            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
            {
                Assert.AreEqual(_sut.FramesNeeded, _sut.BufferSize);
                _sut.AddFrames(buffer);
                Assert.AreEqual(_sut.FramesNeeded, 0u);
            }
        }
Example #24
        internal unsafe VisualizationData(AudioFrame frame)
        {
            m_Buffer          = frame.LockBuffer(AudioBufferAccessMode.Read);
            m_BufferReference = m_Buffer.CreateReference();
            byte *pData;
            uint  capacity;

            ((IMemoryBufferByteAccess)m_BufferReference).GetBuffer(out pData, out capacity);
            m_pData        = (float *)pData;
            m_DataCapacity = m_Buffer.Length / sizeof(float);

            m_DataStep = (uint)frame.ExtendedProperties["{3F692E37-FC20-48DD-93D2-2234E1B1AA23}"];
        }
Example #25
 private unsafe void ProcessFrameOutput(AudioFrame frame)
 {
     using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
         using (var reference = buffer.CreateReference())
         {
             // Get hold of the buffer pointer.
             ((IMemoryBufferByteAccess)reference).GetBuffer(out var dataInBytes, out var capacityInBytes);
             var dataInFloat = (float *)dataInBytes;
             for (var n = 0; n < _audioGraph.SamplesPerQuantum; n += 2)
             {
                 SpectrumProvider.Add(dataInFloat[n], dataInFloat[n + 1]);
             }
         }
 }
Example #26
        private byte[] GetAudioDataFromAudioFrame(AudioFrame frame, out double outSoundLevel)
        {
            double sum = 0.0;

            using (var audioBuffer = frame.LockBuffer(AudioBufferAccessMode.Read))
            {
                var buffer = Windows.Storage.Streams.Buffer.CreateCopyFromMemoryBuffer(audioBuffer);
                buffer.Length = audioBuffer.Length;

                using (var dataReader = DataReader.FromBuffer(buffer))
                {
                    dataReader.ByteOrder = ByteOrder.LittleEndian;

                    byte[] byteData = new byte[buffer.Length];
                    int    pos      = 0;

                    while (dataReader.UnconsumedBufferLength > 0)
                    {
                        /* Reading Float -> Int 16 */

                        var singleTmp = dataReader.ReadSingle();
                        var int16Tmp  = (Int16)(singleTmp * Int16.MaxValue);

                        sum += Math.Abs(singleTmp);

                        byte[] chunkBytes = BitConverter.GetBytes(int16Tmp);
                        byteData[pos++] = chunkBytes[0];
                        byteData[pos++] = chunkBytes[1];

                        // Note: when the mic input is captured as a single (mono) channel,
                        // write the sample a second time so the data is sent as stereo
                        byteData[pos++] = chunkBytes[0];
                        byteData[pos++] = chunkBytes[1];
                    }

                    // Each sample occupies 4 bytes, so divide by 4 to get the sample count
                    var sampleCount = buffer.Length / 4;

                    // The factor of 1000 just makes the value easier to work with:
                    // outSoundLevel is roughly 5.0-8.0 in silence and 20.0 or more during speech
                    outSoundLevel = (sum / sampleCount) * 1000;

#if DEBUG
                    Debug.WriteLine($"{sum} | {buffer.Length} | {sum / sampleCount * 1000}");
#endif

                    return(byteData);
                }
            }
        }
Example #27
        private unsafe void FrameInputNode_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            var        bufferSize = args.RequiredSamples * sizeof(float) * 2;
            AudioFrame audioFrame = new AudioFrame((uint)bufferSize);

            if (fileStream == null)
            {
                return;
            }
            using (var audioBuffer = audioFrame.LockBuffer(AudioBufferAccessMode.Write))
            {
                using (var bufferReference = audioBuffer.CreateReference())
                {
                    byte * dataInBytes;
                    uint   capacityInBytes;
                    float *dataInFloat;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)bufferReference).GetBuffer(out dataInBytes, out capacityInBytes);
                    dataInFloat = (float *)dataInBytes;

                    var managedBuffer = new byte[capacityInBytes];

                    var lastLength = fileStream.Length - fileStream.Position;
                    int readLength = (int)(lastLength < capacityInBytes ? lastLength : capacityInBytes);
                    if (readLength <= 0)
                    {
                        fileStream.Close();
                        fileStream = null;
                        return;
                    }
                    fileStream.Read(managedBuffer, 0, readLength);

                    for (int i = 0; i < readLength; i += 8)
                    {
                        dataInBytes[i + 4] = managedBuffer[i + 0];
                        dataInBytes[i + 5] = managedBuffer[i + 1];
                        dataInBytes[i + 6] = managedBuffer[i + 2];
                        dataInBytes[i + 7] = managedBuffer[i + 3];
                        dataInBytes[i + 0] = managedBuffer[i + 4];
                        dataInBytes[i + 1] = managedBuffer[i + 5];
                        dataInBytes[i + 2] = managedBuffer[i + 6];
                        dataInBytes[i + 3] = managedBuffer[i + 7];
                    }
                }
            }

            audioFrameInputNode.AddFrame(audioFrame);
        }
Example #28
        unsafe private void ProcessFrameOutput(AudioFrame frame)
        {
            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    byte * dataInBytes;
                    uint   capacityInBytes;
                    float *dataInFloat;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    dataInFloat = (float *)dataInBytes;
                }
        }
Example #29
        internal static void SendAudioFrame(AudioFrame frame, DataWriter writer)
        {
            var audioBuffer = frame.LockBuffer(AudioBufferAccessMode.Read);
            var buffer      = Windows.Storage.Streams.Buffer.CreateCopyFromMemoryBuffer(audioBuffer);

            buffer.Length = audioBuffer.Length;
            using (var dataReader = DataReader.FromBuffer(buffer))
            {
                dataReader.ByteOrder = ByteOrder.LittleEndian;
                while (dataReader.UnconsumedBufferLength > 0)
                {
                    writer.WriteInt16(FloatToInt16(dataReader.ReadSingle()));
                }
            }
        }
Example #30
        unsafe public static void AddFrame(float[] framedata, uint samples)
        {
            if (!ready)
            {
                return;
            }
            //if (!started)
            //{
            //    //graph.Start();
            //    //started = true;
            //}
            AudioFrame frame = new AudioFrame(samples * 2 * sizeof(float));

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    byte *dataInBytes;
                    uint  capacityInBytes;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);
                    // Cast to float since the data we are generating is float
                    float *dataInFloat = (float *)dataInBytes;
                    fixed(float *frames = framedata)
                    {
                        for (int i = 0; i < samples * 2; i++)
                        {
                            dataInFloat[i] = frames[i];
                        }
                    }
                }
            //List<float[]> amplitudeData = FFT.Processing.HelperMethods.ProcessFrameOutput(frame);
            //List<float[]> channelData = FFT.Processing.HelperMethods.GetFftData(FFT.Processing.HelperMethods.ConvertTo512(amplitudeData, outgraph), outgraph);

            //float[] leftChannel = channelData[1];

            //AudioSpec1 = HelperMethods.Max(leftChannel, 0, 1);
            //AudioSpec2 = HelperMethods.Max(leftChannel, 2, 3);
            //AudioSpec3 = HelperMethods.Max(leftChannel, 3, 4);
            //AudioSpec4 = HelperMethods.Max(leftChannel, 4, 5);
            //AudioSpec5 = HelperMethods.Max(leftChannel, 5, 6);
            //AudioSpec6 = HelperMethods.Max(leftChannel, 7, 8);
            //AudioSpec7 = HelperMethods.Max(leftChannel, 9, 10);
            //AudioSpec8 = HelperMethods.Max(leftChannel, 10, 12);
            //AudioSpec9 = HelperMethods.Max(leftChannel, 14, 26);
            frameInputNode.AddFrame(frame);
        }
Example #31
        /// <summary>
        ///     Transfers samples from a frame to AudioData
        /// </summary>
        private unsafe void ProcessInputFrame(AudioFrame frame)
        {
            using (var buffer =
                       frame.LockBuffer(AudioBufferAccessMode.Read))
                using (var reference =
                           buffer.CreateReference())
                {
                    // Get data from current buffer
                    (reference as IMemoryBufferByteAccess).GetBuffer(
                        out var dataInBytes,
                        out var capacityInBytes
                        );
                    // Discard first frame; it's full of zeros because of latency
                    if (_audioGraph.CompletedQuantumCount == 1)
                    {
                        return;
                    }

                    var dataInFloat     = (float *)dataInBytes;
                    var capacityInFloat = capacityInBytes / sizeof(float);
                    // Number of channels defines step between samples in buffer
                    var channelCount = _fileInputNode.EncodingProperties.ChannelCount;
                    // Transfer audio samples from buffer into audioData
                    for (uint index = 0; index < capacityInFloat; index += channelCount)
                    {
                        if (_audioDataCurrentPosition < GetAudioData().LengthSamples())
                        {
                            GetAudioData().SetCurrentChannelType(ChannelType.Left);
                            GetAudioData().SetInputSample(
                                _audioDataCurrentPosition,
                                dataInFloat[index]
                                );
                            // if it's stereo
                            if (channelCount == 2)
                            {
                                GetAudioData().SetCurrentChannelType(ChannelType.Right);
                                GetAudioData().SetInputSample(
                                    _audioDataCurrentPosition,
                                    dataInFloat[index + 1]
                                    );
                            }

                            _audioDataCurrentPosition++;
                        }
                    }
                }
        }
Example #32
        private AudioFrame GenerateAudioFrame(int samplesNumber)
        {
            AudioFrame frame = new AudioFrame((uint)samplesNumber * _channelsNumber * sizeof(float));

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            {
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    _waveSource.GenerateWave(reference, samplesNumber);
                }
            }

            return frame;
        }