Example #1
        public void InitSender(string Name)
        {
            bool first = false;

            if (senderNameList.Count == 0)
            {
                first = true;
            }
            int exists = senderNameList.IndexOf(Name);

            if (exists == -1)
            {
                // We are going to create a 1920x1080 16:9 frame at 25.00Hz, progressive (default).
                // We are also going to create an audio frame
                // 48khz, stereo in the example.
                videoFrame               = new VideoFrame(videoSizeX, videoSizeY, videoAR, fps * 1000, 1000);
            videoFrame.TimeCode      = 0; // default is NDIlib_send_timecode_synthesize
                audioFrame               = new AudioFrame(audioNumSamples, audioSamplesPerSec, 2);
                audioFrame.NumSamples    = audioNumSamples;
                audioFrame.ChannelStride = audioFrame.NumSamples * sizeof(float);
                Sender sendInstanceX = new Sender(Name, false, false); // Video/Audio clocks implicit; the first sender works best as the reference, the other unsynced streams then get better timing relative to it
                senderList.Add(sendInstanceX);
                senderNameList.Add(Name);
                senderFrameSentList.Add(0);
                senderVideoFrameList.Add(videoFrame);
                videoFrame = new VideoFrame(videoSizeX, videoSizeY, videoAR, fps * 1000, 1000);
                senderVideoFrameList.Add(videoFrame); // two frames added per sender, at list positions 2*i and 2*i+1
                textFormat.Alignment     = StringAlignment.Center;
                textFormat.LineAlignment = StringAlignment.Center;
            }
        }
Example #2
        static void FillAudioBuffer(AudioFrame audioFrame, bool doTone)
        {
            // should never happen
            if (audioFrame.AudioBuffer == IntPtr.Zero)
            {
                return;
            }
            // temp space for floats
            float[] floatBuffer = new float[audioFrame.NumSamples];
            // make the tone or silence
            double cycleLength  = (double)audioFrame.SampleRate / 1000.0;
            int    sampleNumber = 0;

            for (int i = 0; i < audioFrame.NumSamples; i++)
            {
                double time = sampleNumber++ / cycleLength;
                floatBuffer[i] = doTone ? (float)(Math.Sin(2.0f * Math.PI * time) * 0.1) : 0.0f;
            }
            // fill each channel with our floats...
            for (int ch = 0; ch < audioFrame.NumChannels; ch++)
            {
                // scary pointer math ahead...
                // where does this channel start in the unmanaged buffer?
                IntPtr destStart = new IntPtr(audioFrame.AudioBuffer.ToInt64() + (ch * audioFrame.ChannelStride));
                // copy the float array into the channel
                Marshal.Copy(floatBuffer, 0, destStart, audioFrame.NumSamples);
            }
        }
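A minimal usage sketch, assuming the frame fields set up in Example #1 are reused (the call site itself is hypothetical): fill the audio frame with the test tone before it is handed to the sender.

        // Hypothetical call site, reusing the frame configured in Example #1:
        //   audioFrame = new AudioFrame(audioNumSamples, audioSamplesPerSec, 2);
        //   audioFrame.ChannelStride = audioFrame.NumSamples * sizeof(float);
        FillAudioBuffer(audioFrame, doTone: true); // writes a 1 kHz tone at 0.1 amplitude into every channel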
Example #3
        public EncodeAudioByMat(string output)
        {
            using (MediaWriter writer = new MediaWriter(output))
            {
                writer.AddStream(MediaEncoder.CreateAudioEncode(writer.Format, 2, 44100));
                writer.Initialize();

                AudioFrame      dstFrame  = AudioFrame.CreateFrameByCodec(writer[0].Codec);
                SampleConverter converter = new SampleConverter(dstFrame);

                using (Mat mat = CreateMat(writer[0].Codec.AVCodecContext.channels))
                {
                    long pts = 0;
                    for (int i = 0; i < 1000; i++)
                    {
                        foreach (var item in converter.Convert(mat.ToAudioFrame(dstSampleRate: writer[0].Codec.AVCodecContext.sample_rate)))
                        {
                            pts     += item.NbSamples;
                            item.Pts = pts;
                            foreach (var packet in writer[0].WriteFrame(item))
                            {
                                writer.WritePacket(packet);
                            }
                        }
                    }
                }
                writer.FlushMuxer();
            }
        }
        // TODO: Fix frame receiving!
        private unsafe AudioFrame GenerateAudioData(uint samples, short[] shorts)
        {
            // Buffer size is (number of samples) * (size of each sample)
            // We choose to generate single channel (mono) audio. For multi-channel, multiply by number of channels
            var bufferSize = samples * sizeof(float);
            var frame      = new AudioFrame(bufferSize);

            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (var reference = buffer.CreateReference())
                {
                    byte *dataInBytes;
                    uint  capacityInBytes;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    // Cast to float since the data we are generating is float
                    var dataInFloats = (float *)dataInBytes;

                    var floats           = ConvertShortsToFloats(shorts);
                    var capacityInFloats = capacityInBytes / 4;

                    Marshal.Copy(floats, 0, (IntPtr)dataInFloats, (int)capacityInFloats);
                }

            return(frame);
        }
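Most of the WinRT examples in this listing cast an IMemoryBufferReference to IMemoryBufferByteAccess before touching the raw bytes. That COM interop interface is not shown in any of the snippets; the standard declaration (it requires System.Runtime.InteropServices) is:

        [ComImport]
        [Guid("5B0D3235-4DBA-4D44-865E-8F1D0E4FD04D")]
        [InterfaceType(InterfaceTypeAttribute.InterfaceIsIUnknown)]
        unsafe interface IMemoryBufferByteAccess
        {
            // Returns the start pointer and capacity of the underlying buffer.
            void GetBuffer(out byte* buffer, out uint capacity);
        }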
Example #5
        /// <summary>
        /// Creates a new AudioFrame from the specified subset of input bytes.
        /// </summary>
        /// <param name="frameDataBuffer"> The bytes to use as the data source for the new AudioFrame. </param>
        /// <param name="length"> The number of bytes, from the beginning of the array, to use. </param>
        /// <returns> A new AudioFrame with the specified data. The caller is responsible for Disposing. </returns>
        public static unsafe AudioFrame CreateFrameFromBytes(byte[] frameDataBuffer, int length)
        {
            Contract.Requires(frameDataBuffer != null);

            if (length > frameDataBuffer.Length || length <= 0)
            {
                throw new ArgumentException($"Cannot create an AudioFrame of size {length}. Valid: 1 to {frameDataBuffer.Length}");
            }

            var resultFrame = new AudioFrame((uint)length);

            using (var audioBuffer = resultFrame.LockBuffer(AudioBufferAccessMode.Write))
                using (var bufferReference = audioBuffer.CreateReference())
                {
                    var bufferAccess = (IMemoryBufferByteAccess)bufferReference;
                    bufferAccess.GetBuffer(out byte *unsafeBuffer, out uint unsafeBufferCapacity);

                    for (uint i = 0; i < unsafeBufferCapacity; i++)
                    {
                        unsafeBuffer[i] = frameDataBuffer[i];
                    }
                }

            return(resultFrame);
        }
Example #6
        private string ProcessAudioLog(string log, AudioFrame frame, AudioEncoding encoding, ConnectionInfo remoteConnectionInfo)
        {
            var pcmBuffer        = GetPcmAudioBuffer(frame);
            var compressedBuffer = GetCompressedAudioBuffer(frame);

            return(log
                   .Replace("{footprint}", compressedBuffer.Footprint.ToString())
                   .Replace("{duration}", frame.Duration.ToString())
                   .Replace("{clockRate}", pcmBuffer.Format.ClockRate.ToString())
                   .Replace("{channelCount}", pcmBuffer.Format.ChannelCount.ToString())
                   .Replace("{mediaStreamId}", frame.Mid)
                   .Replace("{rtpStreamId}", frame.RtpStreamId)
                   .Replace("{sequenceNumber}", frame.SequenceNumber.ToString())
                   .Replace("{synchronizationSource}", frame.SynchronizationSource.ToString())
                   .Replace("{systemTimestamp}", frame.SystemTimestamp.ToString())
                   .Replace("{timestamp}", frame.Timestamp.ToString())
                   .Replace("{encoding}", encoding.ToString())
                   .Replace("{applicationId}", Options.ApplicationId)
                   .Replace("{channelId}", Options.ChannelId)
                   .Replace("{userId}", remoteConnectionInfo.UserId)
                   .Replace("{userAlias}", remoteConnectionInfo.UserAlias)
                   .Replace("{deviceId}", remoteConnectionInfo.DeviceId)
                   .Replace("{deviceAlias}", remoteConnectionInfo.DeviceAlias)
                   .Replace("{clientId}", remoteConnectionInfo.ClientId)
                   .Replace("{clientTag}", remoteConnectionInfo.ClientTag)
                   .Replace("{connectionId}", remoteConnectionInfo.Id)
                   .Replace("{connectionTag}", remoteConnectionInfo.Tag)
                   .Replace("{mediaId}", remoteConnectionInfo.MediaId));
        }
Example #7
        unsafe private void ProcessAudioFrame(AudioMediaFrame audioMediaFrame)
        {
            using (AudioFrame audioFrame = audioMediaFrame.GetAudioFrame())
                using (AudioBuffer buffer = audioFrame.LockBuffer(AudioBufferAccessMode.Read))
                    using (IMemoryBufferReference reference = buffer.CreateReference())
                    {
                        byte * dataInBytes;
                        uint   capacityInBytes;
                        float *dataInFloat;


                        ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                        // The requested format was float
                        dataInFloat = (float *)dataInBytes;

                        // Get the number of samples by multiplying the duration by sampling rate:
                        // duration [s] x sampling rate [samples/s] = # samples

                        // Duration can be obtained from the frame reference OR the audioFrame
                        TimeSpan duration = audioMediaFrame.FrameReference.Duration;

                        // frameDurMs is in milliseconds, while SampleRate is given per second.
                        uint frameDurMs  = (uint)duration.TotalMilliseconds;
                        uint sampleRate  = audioMediaFrame.AudioEncodingProperties.SampleRate;
                        uint sampleCount = (frameDurMs * sampleRate) / 1000;
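                        // e.g. a 10 ms frame at 48000 Hz gives (10 * 48000) / 1000 = 480 samples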
                    }
        }
        //Look into the data bit by bit
        public unsafe IList<float> ProcessFrameOutput(AudioFrame frame)
        {
            IList<float> points = new List<float>();

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                byte* dataInBytes;
                uint capacityInBytes;
                float* dataInFloat;

                // Get the buffer from the AudioFrame
                ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                dataInFloat = (float*)dataInBytes;

                int dataInFloatLength = (int)buffer.Length / sizeof(float);

                for (int i = 0; i < dataInFloatLength; i++)
                {
                    points.Add(dataInFloat[i]);
                }

                return points;
            }
        }
Example #9
        public void AudioAnalyzer_Buffer_SequenceReadIsCorrect()
        {
            // Generate an audio signal with level steps every 600 audio frames, packaged into buffers of 800 frames each.
            // The output frames should line up exactly with those steps.
            var sut = new AudioAnalyzer(2400, 2, 48000, 600, 300, 2048, false);

            RegisterOutputHandler(sut);
            AudioFrame[] frames = new AudioFrame[3]
            {
                new AudioFrame(4 * 2 * 800), new AudioFrame(4 * 2 * 800), new AudioFrame(4 * 2 * 800)
            };
            frames[0].Generate(2, 0, (frameIndex, channelIndex) => { return(frameIndex >= 600 ? 1.0f : 0.0f); });
            frames[1].Generate(2, 800, (frameIndex, channelIndex) => { return(frameIndex >= 1200 ? 2.0f : 1.0f); });
            frames[2].Generate(2, 1600, (frameIndex, channelIndex) => { return(frameIndex >= 1800 ? 3.0f : 2.0f); });

            foreach (var frame in frames)
            {
                sut.ProcessInput(frame);
            }

            for (int outputFrameIndex = 0; outputFrameIndex < 4; outputFrameIndex++)
            {
                float expectedValue = (float)outputFrameIndex;
                Assert.AreEqual(expectedValue, outputFrames[outputFrameIndex].Peak[0], "Channel 0");
                Assert.AreEqual(expectedValue, outputFrames[outputFrameIndex].Peak[1], "Channel 1");
            }
        }
Example #10
        /// <summary>
        /// Audio output: supplies audio frames to the frame input node each quantum.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="args"></param>
        private async void FrameInputNode_QuantumStarted(AudioFrameInputNode sender, FrameInputNodeQuantumStartedEventArgs args)
        {
            if (AudioInStream == null)
            {
                return;
                //throw new Exception("not connected to discord audio channel.");
            }

            if (AudioInStream.AvailableFrames == 0)
            {
                return;
            }

            uint numSamplesNeeded = (uint)args.RequiredSamples;

            if (numSamplesNeeded == 0)
            {
                return;
            }

            // The size of audioData should really match the buffer size of the frames inside AudioInStream
            var sampleNeededBytes = numSamplesNeeded * OpusConvertConstants.SampleBytes * OpusConvertConstants.Channels;

            // Note: should this be kept in a static field?
            var audioData = new byte[sampleNeededBytes];

            var result = await AudioInStream.ReadAsync(audioData, 0, (int)sampleNeededBytes);



            AudioFrame audioFrame = GenerateAudioData(audioData, (uint)result);

            sender.AddFrame(audioFrame);
        }
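GenerateAudioData(audioData, (uint)result) is called above, but that byte-array overload is not part of this listing. A plausible sketch, assuming the bytes read from AudioInStream are already in the frame input node's encoding and are copied verbatim (mirroring Example #5), is:

        // Hypothetical sketch of the missing overload: wrap the first `length` bytes
        // of `audioData` in a new AudioFrame. Names and behaviour are assumptions.
        private unsafe AudioFrame GenerateAudioData(byte[] audioData, uint length)
        {
            var frame = new AudioFrame(length);

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            using (IMemoryBufferReference reference = buffer.CreateReference())
            {
                ((IMemoryBufferByteAccess)reference).GetBuffer(out byte* dataInBytes, out uint capacityInBytes);

                // Never copy more than the unmanaged buffer can hold.
                uint count = Math.Min(length, capacityInBytes);
                Marshal.Copy(audioData, 0, (IntPtr)dataInBytes, (int)count);
            }

            return frame;
        }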
Example #11
        private unsafe AudioFrame GenerateAudioData(uint samples)
        {
            var bufferSize = samples * sizeof(float) * 2;
            var frame      = new AudioFrame(bufferSize);

            _buffer = _buffer?.Length != samples * 2 ? new short[samples * 2] : _buffer;
            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    float *dataInFloat;
                    byte * dataInBytes;
                    uint   capacityInBytes;
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);
                    dataInFloat = (float *)dataInBytes;
                    _player.GetBuffer(_buffer);

                    for (var i = 0; i < _buffer.Length; i++)
                    {
                        dataInFloat[i] = _buffer[i] * 0.00003f;                  // multiplication is supposedly faster than division
                    }

                    //foreach (float f in _buffer.Select(a => a * 0.00003f))
                    //	*dataInFloat++ = f;
                }

            return(frame);
        }
        unsafe private void ProcessFrameOutput(AudioFrame frame)
        {
            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    // get hold of the buffer pointer
                    byte *dataInBytes;
                    uint  capacityInBytes;
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes,
                                                                   out capacityInBytes);

                    var dataInFloat = (float *)dataInBytes;

                    // examine
                    float max = 0;
                    for (int n = 0; n < graph.SamplesPerQuantum; n++)
                    {
                        max = Math.Max(Math.Abs(dataInFloat[n]), max);
                    }
                    currentPeak = max;

                    float x = currentPeak * 1000;

                    double Bri = Math.Pow(x, 3);                // Sensitivity slider value

                    byte Brightness = (byte)Math.Round(Bri, 0); // Calculating to a 0 - 255 value to control the light brightness

                    Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        OutputText.Text = Brightness.ToString();
                    });
                }
        }
Example #13
        unsafe public void ProcessFrame(ProcessAudioFrameContext context)
        {
            AudioFrame inputFrame = context.InputFrame;

            using (AudioBuffer inputBuffer = inputFrame.LockBuffer(AudioBufferAccessMode.Read))
                using (IMemoryBufferReference inputReference = inputBuffer.CreateReference())
                {
                    byte *inputDataInBytes;
                    uint  inputCapacity;

                    ((IMemoryBufferByteAccess)inputReference).GetBuffer(out inputDataInBytes, out inputCapacity);

                    float *inputDataInFloat = (float *)inputDataInBytes;

                    float inputDataL;
                    float inputDataR;

                    // Process audio data
                    int dataInFloatLength = (int)inputBuffer.Length / sizeof(float);

                    if (_chart == null)
                    {
                        _chart = new float[dataInFloatLength];
                        propertySet["chart"] = _chart;
                    }
                    for (int i = 0; i < dataInFloatLength; i += 2)
                    {
                        inputDataL    = inputDataInFloat[i];
                        inputDataR    = inputDataInFloat[i + 1];
                        _chart[i]     = inputDataL;
                        _chart[i + 1] = inputDataR;
                    }
                }
        }
        private void Peer_RemoteAudioFrameReady(AudioFrame frame)
        {
            lock (_isRemoteAudioPlayingLock)
            {
                uint channelCount = frame.channelCount;
                uint sampleRate   = frame.sampleRate;

                bool changed = false;
                if (!_isRemoteAudioPlaying)
                {
                    _isRemoteAudioPlaying = true;
                    changed = true;
                }
                else if ((_remoteAudioChannelCount != channelCount) || (_remoteAudioSampleRate != sampleRate))
                {
                    changed = true;
                }

                if (changed)
                {
                    _remoteAudioChannelCount = channelCount;
                    _remoteAudioSampleRate   = sampleRate;
                    RunOnMainThread(() => UpdateRemoteAudioStats(channelCount, sampleRate));
                }
            }
        }
 public SoundEditor(Control parent)
 {
     this.Parent = parent;
     InitializeComponent();
     wavePanel1.Init(parent);
     // pixels per second
     ToolButton100MS.Tag  = 1000;
     ToolButton1S.Tag     = 100;
     ToolButton10S.Tag    = 10;
     ListenToolButton.Tag = WavePanelState.Listen;
     EditToolButton.Tag   = WavePanelState.Edit;
     initPanels();
     _intervalLabels           = new List <Label>();
     ResultPicture.MouseEnter += new EventHandler(ResultPicture_MouseEnter);
     ResultPicture.Location    = new Point(3, 3);
     ResultPicture.BackColor   = Color.White;
     ResultPicture.Image       = null;
     SrcPic.Location           = new Point(3, 3);//Microsoft Sans Serif
     _colors      = new Color[] { Color.Violet, Color.LimeGreen, Color.Blue, Color.Orange, Color.DarkTurquoise, Color.Red };
     _brushChoose = new HatchBrush(HatchStyle.Percent10, Color.Orange, Color.Transparent);
     _storedParts = new List <Label>();
     _audioFrame  = new AudioFrame(false);
     eb1.Parent   = this;
     eb1.BringToFront();
     eb1.SetEditMode();
     // eb1.Dra
 }
 private void Peer_LocalAudioFrameReady(AudioFrame frame)
 {
     // The current underlying WebRTC implementation does not support
     // local audio frame callbacks, so this will never be called until
     // that implementation is changed.
     throw new NotImplementedException();
 }
Example #17
        /// <summary>
        /// transcode audio
        /// </summary>
        /// <param name="input">input audio file</param>
        /// <param name="output">output audio file</param>
        /// <param name="outChannels">output audio file channels</param>
        /// <param name="outSampleRate">output audio file sample rate</param>
        public AudioTranscode(string input, string output, int outChannels = 2, int outSampleRate = 44100)
        {
            using (MediaWriter writer = new MediaWriter(output))
                using (MediaReader reader = new MediaReader(input))
                {
                    int audioIndex = reader.First(_ => _.Codec.Type == AVMediaType.AVMEDIA_TYPE_AUDIO).Index;

                    writer.AddStream(MediaEncoder.CreateAudioEncode(writer.Format, outChannels, outSampleRate));
                    writer.Initialize();

                    AudioFrame      dst       = AudioFrame.CreateFrameByCodec(writer[0].Codec);
                    SampleConverter converter = new SampleConverter(dst);
                    long            pts       = 0;
                    foreach (var packet in reader.ReadPacket())
                    {
                        foreach (var srcframe in reader[audioIndex].ReadFrame(packet))
                        {
                            foreach (var dstframe in converter.Convert(srcframe))
                            {
                                pts         += dstframe.AVFrame.nb_samples;
                                dstframe.Pts = pts; // audio's pts is total samples, pts can only increase.
                                foreach (var outpacket in writer[0].WriteFrame(dstframe))
                                {
                                    writer.WritePacket(outpacket);
                                }
                            }
                        }
                    }
                    writer.FlushMuxer();
                }
        }
Example #18
        public void Convert(AudioFrame source, AudioFrame target)
        {
            int targetSamples = GetOutputSampleCount(source.SampleCount);

            target.Resize(targetSamples);

            var sampleCount = ffmpeg.swr_convert(
                s: pointer,
                @out: target.Pointer->extended_data,
                out_count: targetSamples,
                @in: source.Pointer->extended_data,        // pointer to the planes / data
                in_count: source.SampleCount
                );

            if (sampleCount < 0)
            {
                throw new FFmpegException(sampleCount);
            }

            target.SampleCount   = sampleCount;
            target.Dts           = source.Dts;
            target.Pts           = source.Pts;
            target.ChannelCount  = targetFormat.ChannelCount;
            target.SampleRate    = targetFormat.SampleRate;
            target.ChannelLayout = targetFormat.ChannelLayout;
            target.SampleFormat  = targetFormat.SampleFormat;

            // TODO: Set the presentation time & duration

            // Console.WriteLine($"resampled {source.SampleFormat} {source.SampleCount} -> {targetFormat.SampleFormat} {target.SampleCount} | {sampleCount} | {target.ChannelCount} | {target.Memory.Length}");
        }
Example #19
        unsafe private static void ProcessFrameOutput(AudioFrame frame)
        {
            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    byte * dataInBytes;
                    uint   capacityInBytes;
                    float *dataInFloat;

                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    dataInFloat = (float *)dataInBytes;
                    float[] dataInFloats = new float[capacityInBytes / sizeof(float)];

                    for (int i = 0; i < capacityInBytes / sizeof(float); i++)
                    {
                        dataInFloats[i] = dataInFloat[i];
                    }



                    InputRecieved?.Invoke(null, dataInFloats);
                }
        }
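A hedged subscriber sketch for the InputRecieved event raised above (the event is assumed to be an EventHandler<float[]> declared elsewhere in the same class):

        // Hypothetical subscriber: compute the peak of each block of samples delivered by ProcessFrameOutput.
        InputRecieved += (sender, samples) =>
        {
            float peak = 0f;
            foreach (float sample in samples)
            {
                peak = Math.Max(peak, Math.Abs(sample));
            }
            System.Diagnostics.Debug.WriteLine($"peak: {peak}");
        };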
Example #20
        public void AudioAnalyzer_Sync_Performance()
        {
            var sut = new AudioAnalyzer(48000, 2, 48000, 800, 400, 2048, false);

            System.Diagnostics.Stopwatch sw = new System.Diagnostics.Stopwatch();

            List <TimeSpan> outTimes = new List <TimeSpan>();

            sut.Output += new Windows.Foundation.TypedEventHandler <AudioAnalyzer, VisualizationDataFrame>(
                (a, data) =>
            {
                outTimes.Add(sw.Elapsed);
            }
                );
            AudioFrame frame = new AudioFrame(24000 * 4 * 2);   // 0.5 sec worth of audio data

            sw.Start();
            sut.ProcessInput(frame);

            List <TimeSpan> durations = new List <TimeSpan>();

            for (int i = 0; i < outTimes.Count(); i++)
            {
                durations.Add(outTimes[i].Subtract(i != 0 ? outTimes[i - 1] : TimeSpan.Zero));
            }

            double avg = durations.Average((time) => { return(time.TotalMilliseconds); });

            Logger.LogMessage($"Analyzer performance {avg}ms per run");
            Assert.IsTrue(avg < 5);
        }
        private unsafe AudioFrame ProcessOutputFrame(int requiredSamples)
        {
            var bufferSize = (uint)requiredSamples * sizeof(float) *
                             _fileOutputNode.EncodingProperties.ChannelCount;

            var frame = new AudioFrame(bufferSize);

            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (var reference = buffer.CreateReference())
                {
                    // Get the buffer from the AudioFrame
                    (reference as IMemoryBufferByteAccess).GetBuffer(
                        out var dataInBytes,
                        out var capacityInBytes);

                    // Cast to float since the data we are generating is float
                    var dataInFloat     = (float *)dataInBytes;
                    var capacityInFloat = capacityInBytes / sizeof(float);

                    // Number of channels defines step between samples in buffer
                    var channelCount = _fileOutputNode.EncodingProperties.ChannelCount;

                    for (uint index = 0; index < capacityInFloat; index += channelCount)
                    {
                        if (_audioDataCurrentPosition < _audioData.LengthSamples())
                        {
                            GetAudioData().SetCurrentChannelType(ChannelType.Left);
                            dataInFloat[index] = _audioData.GetOutputSample(
                                _audioDataCurrentPosition);
                        }

                        // if it's stereo
                        if (channelCount == 2)
                        {
                            // if the processed audio is stereo
                            if (_audioData.IsStereo)
                            {
                                GetAudioData().SetCurrentChannelType(ChannelType.Right);
                                dataInFloat[index + 1] = _audioData.GetOutputSample(
                                    _audioDataCurrentPosition);
                            }
                            else
                            {
                                // mute channel
                                dataInFloat[index + 1] = 0;
                            }
                        }

                        _audioDataCurrentPosition++;
                        if (_audioDataCurrentPosition >= _audioData.LengthSamples())
                        {
                            // the last frame may not be full
                            _finished = true;
                            return(frame);
                        }
                    }
                }

            return(frame);
        }
Example #22
 public WavePanel()
 {
     InitializeComponent();
     _timer          = new System.Windows.Forms.Timer();
     _timer.Interval = 10;
     _audioFrame     = new AudioFrame(false);
 }
        public override bool ProcessFrame(AudioFrame frame)
        {
            var dataBuffer = frame.LastBuffer.DataBuffer;

            var samples = new List <int>();

            for (var i = 0; i < dataBuffer.Length; i += sizeof(short))
            {
                samples.Add(dataBuffer.Read16Signed(i));
            }

            var max = samples.Max();
            var min = samples.Min();

            if (min < -1 && max > 1) // not silent
            {
                var soundDetected = SoundDetected;
                if (soundDetected != null && !soundDetected.Task.IsCompleted)
                {
                    soundDetected.TrySetResult(true);
                }
            }

            return(base.ProcessFrame(frame));
        }
Example #24
        /// <summary>
        /// When audioFrameUpdateCount reaches audioFrameUpdateMinimum, this method gets the current audio frame, reads its data
        /// and calculates the raw audio level from -100 to 0.
        /// </summary>
        private static unsafe void Graph_QuantumStarted(AudioGraph sender, object args)
        {
            audioFrameUpdateCount++;
            if (audioFrameUpdateCount >= audioFrameUpdateMinimum)
            {
                AudioFrame audioFrame = frameOutputNode.GetFrame();
                float[]    floatData;
                using (AudioBuffer audioBuffer = audioFrame.LockBuffer(AudioBufferAccessMode.Write))
                    using (IMemoryBufferReference reference = audioBuffer.CreateReference())
                    {
                        ((IMemoryBufferByteAccess)reference).GetBuffer(out byte *dataInBytes, out uint capacity);

                        float *unsafeFloatData = (float *)dataInBytes;
                        floatData = new float[capacity / sizeof(float)];

                        for (int i = 0; i < capacity / sizeof(float); i++)
                        {
                            floatData[i] = unsafeFloatData[i];
                        }
                    }

                double soundLevel = 0f;
                foreach (float sample in floatData)
                {
                    soundLevel += Math.Abs(sample);
                }
                soundLevel = Math.Log10(soundLevel / floatData.Length) * 20;
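                // e.g. a mean absolute sample value of 0.1 maps to 20 * log10(0.1) = -20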

                NewRawSoundLevel(soundLevel);

                audioFrameUpdateCount = 0;
            }
        }
Example #25
        /// <summary>
        /// Generates audio data (a 17 kHz sine wave) for the necessary quantity of samples
        /// </summary>
        /// <param name="samples">Sample count</param>
        /// <returns>AudioFrame of sample count</returns>
        public static unsafe AudioFrame GenerateAudioData(uint samples)
        {
            // Buffer size is (number of samples) * (size of each sample) * (number of channels)
            uint       bufferSize = samples * sizeof(float) * 2;
            AudioFrame frame      = new AudioFrame(bufferSize);

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out byte *dataInBytes, out uint _);

                    // Cast to float since the data we are generating is float
                    float *dataInFloat = (float *)dataInBytes;

                    float  freq            = 17000; // choosing to generate frequency of 17kHz
                    float  amplitude       = 0.3f;
                    int    sampleRate      = (int)outgraph.EncodingProperties.SampleRate;
                    double sampleIncrement = (freq * (Math.PI * 2)) / sampleRate;

                    // Generate a 17kHz sine wave and populate the values in the memory buffer
                    for (int i = 0; i < samples; i++)
                    {
                        double sinValue = amplitude * Math.Sin(theta);
                        dataInFloat[i] = (float)sinValue;
                        theta         += sampleIncrement;
                    }
                }

            return(frame);
        }
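GenerateAudioData above also depends on two members that sit outside the listing; the assumed declarations (names taken from the sample, initialization hypothetical) would be along these lines:

        // Assumed supporting fields for the sine generator above:
        private static AudioGraph outgraph; // the output AudioGraph, created elsewhere via AudioGraph.CreateAsync(...)
        private static double theta;        // running phase, persisted across frames so the sine wave stays continuous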
Example #26
        public void ProcessFrame(ProcessAudioFrameContext context)
        {
            unsafe
            {
                AudioFrame inputFrame = context.InputFrame;

                using (AudioBuffer inputBuffer = inputFrame.LockBuffer(AudioBufferAccessMode.ReadWrite))
                    using (IMemoryBufferReference inputReference = inputBuffer.CreateReference())
                    {
                        ((IMemoryBufferByteAccess)inputReference).GetBuffer(out byte *inputDataInBytes, out uint inputCapacity);

                        float *inputDataInFloat  = (float *)inputDataInBytes;
                        int    dataInFloatLength = (int)inputBuffer.Length / sizeof(float);

                        // Process audio data
                        for (int n = 0; n < dataInFloatLength; n++)
                        {
                            int ch = n % channels;

                            // cascaded filter to perform eq
                            for (int band = 0; band < bandCount; band++)
                            {
                                inputDataInFloat[n] = filters[ch, band].Transform(inputDataInFloat[n]);
                            }
                        }
                    }
            }
        }
Example #27
        unsafe internal static AudioFrame GetAudioFrame(DataReader reader)
        {
            var numBytes = reader.UnconsumedBufferLength;

            var headerSize = 44;
            var bytes      = new byte[headerSize];

            reader.ReadBytes(bytes);

            var        numSamples = (uint)(numBytes - headerSize);
            AudioFrame frame      = new AudioFrame(numSamples);

            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    byte *dataInBytes;
                    uint  capacityInBytes;

                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    Int16 *dataInInt16 = (Int16 *)dataInBytes;

                    for (int i = 0; i < capacityInBytes / sizeof(Int16); i++)
                    {
                        dataInInt16[i] = reader.ReadInt16();
                    }
                }

            return(frame);
        }
Example #28
        public void WriteS16(byte[] data)
        {
            AudioFrame audioFrame = new AudioFrame(Channels, data.Length / Channels / 2, AVSampleFormat.AV_SAMPLE_FMT_S16, SampleRate);

            Marshal.Copy(data, 0, audioFrame.Data[0], data.Length);
            WriteAudioFrame(audioFrame);
        }
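A short usage sketch for WriteS16, assuming the surrounding class is configured for 16-bit interleaved stereo PCM at 48 kHz: data.Length / Channels / 2 is the per-channel sample count, so a 20 ms block is 960 samples * 2 channels * 2 bytes = 3840 bytes.

        // Hypothetical call: write one 20 ms block of interleaved 16-bit stereo PCM.
        byte[] pcm = new byte[3840]; // 960 samples per channel * 2 channels * 2 bytes
        // ... fill pcm from a capture device or decoder ...
        WriteS16(pcm);               // wrapped into an AudioFrame of 960 samples per channel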
Example #29
 unsafe private void ProcessInputFrame(AudioFrame frame)
 {
     using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
     using (IMemoryBufferReference reference = buffer.CreateReference())
     {
         // We get data from current buffer
         ((IMemoryBufferByteAccess)reference).GetBuffer(
             out byte* dataInBytes,
             out uint capacityInBytes
             );
         // We discard first frame; it's full of zeros because of latency
         if (audioGraph.CompletedQuantumCount == 1) return;
         float* dataInFloat = (float*)dataInBytes;
         uint capacityInFloat = capacityInBytes / sizeof(float);
         // Number of channels defines step between samples in buffer
         uint step = fileInputNode.EncodingProperties.ChannelCount;
         // We transfer audio samples from buffer into audioData
         for (uint i = 0; i < capacityInFloat; i += step)
         {
             if (audioDataCurrentPosition < audioData.Length)
             {
                 audioData[audioDataCurrentPosition] = dataInFloat[i];
                 audioDataCurrentPosition++;
             }
         }
     }
 }
        /// <summary>
        /// Writes the specified audio data to the stream as the next frame.
        /// </summary>
        /// <param name="data">The audio data to write.</param>
        /// <param name="customPtsValue">(optional) custom PTS value for the frame.</param>
        public void AddFrame(AudioData data, long customPtsValue)
        {
            if (customPtsValue <= lastFramePts)
            {
                throw new Exception("Cannot add a frame that occurs chronologically before the most recently written frame!");
            }

            frame.UpdateFromAudioData(data);

            var converted = AudioFrame.Create(
                frame.SampleRate,
                frame.NumChannels,
                frame.NumSamples,
                frame.ChannelLayout,
                Configuration.SampleFormat);

            converted.PresentationTimestamp = customPtsValue;

            ffmpeg.swr_convert_frame(swrContext, converted.Pointer, frame.Pointer);

            stream.Push(converted);
            converted.Dispose();

            lastFramePts = customPtsValue;
        }
Example #31
        static void ReadWriteAudio(string input, string output)
        {
            var audio = new AudioReader(input);

            audio.LoadMetadataAsync().Wait();
            audio.Load();

            using (var writer = new AudioWriter(output, audio.Metadata.Channels, audio.Metadata.SampleRate))
            {
                writer.OpenWrite(true);

                var frame = new AudioFrame(1);
                while (true)
                {
                    // read the next frame of samples
                    var f = audio.NextFrame(frame);
                    if (f == null)
                    {
                        break;
                    }

                    writer.WriteFrame(frame);
                }
            }
        }
		void adjustAudioLength(AudioFrame frame) {

			videoDebug.AudioFrameLengthAdjust = 0;

			if(syncMode == SyncMode.AUDIO_SYNCS_TO_VIDEO) {

				int n = videoDecoder.NrChannels * videoDecoder.BytesPerSample;

				double diff = audioPlayer.getAudioClock() - getVideoClock();

				if(Math.Abs(diff) < AV_NOSYNC_THRESHOLD) {

					// accumulate the diffs
					audioDiffCum = diff + audioDiffAvgCoef * audioDiffCum;

					if(audioDiffAvgCount < AUDIO_DIFF_AVG_NB) {

						audioDiffAvgCount++;

					} else {

						double avgDiff = audioDiffCum * (1.0 - audioDiffAvgCoef);

						// Shrinking/expanding buffer code....
						if(Math.Abs(avgDiff) >= audioDiffThreshold) {

							int wantedSize = (int)(frame.Length + diff * videoDecoder.SamplesPerSecond * n);
								
							// get a correction percent from 10 to 60 based on the avgDiff
							// in order to converge a little faster
							double correctionPercent = Misc.clamp(10 + (Math.Abs(avgDiff) - audioDiffThreshold) * 15, 10, 60);

							//Util.DebugOut(correctionPercent);

							//AUDIO_SAMPLE_CORRECTION_PERCENT_MAX

							int minSize = (int)(frame.Length * ((100 - correctionPercent)
								/ 100));

							int maxSize = (int)(frame.Length * ((100 + correctionPercent) 
								/ 100));

							if(wantedSize < minSize) {

								wantedSize = minSize;

							} else if(wantedSize > maxSize) {

								wantedSize = maxSize;
							}

							// make sure the samples stay aligned after resizing the buffer
							wantedSize = (wantedSize / n) * n;

							if(wantedSize < frame.Length) {

								// remove samples 
								videoDebug.AudioFrameLengthAdjust = wantedSize - frame.Length;
								frame.Length = wantedSize;

							} else if(wantedSize > frame.Length) {
														
								// add samples by copying final samples
								int nrExtraSamples = wantedSize - frame.Length;
								videoDebug.AudioFrameLengthAdjust = nrExtraSamples;
						
								byte[] lastSample = new byte[n];

								for(int i = 0; i < n; i++) {

									lastSample[i] = frame.Data[frame.Length - n + i];
								}

								frame.Stream.Position = frame.Length;

								while(nrExtraSamples > 0) {
									
									frame.Stream.Write(lastSample, 0, n);
									nrExtraSamples -= n;
								}

								frame.Stream.Position = 0;
								frame.Length = wantedSize;
							}

						}

					}

				} else {

					// difference is TOO big; reset diff stuff 
					audioDiffAvgCount = 0;
					audioDiffCum = 0;
				}
			}
			
		}
		void adjustAudioSamplesPerSecond(AudioFrame frame) {

			videoDebug.AudioFrameLengthAdjust = 0;

			if(syncMode == SyncMode.AUDIO_SYNCS_TO_VIDEO) {

				int n = videoDecoder.NrChannels * videoDecoder.BytesPerSample;

				double diff = audioPlayer.getAudioClock() - getVideoClock();

				if(Math.Abs(diff) < AV_NOSYNC_THRESHOLD) {

					// accumulate the diffs
					audioDiffCum = diff + audioDiffAvgCoef * audioDiffCum;

					if(audioDiffAvgCount < AUDIO_DIFF_AVG_NB) {

						audioDiffAvgCount++;

					} else {

						double avgDiff = audioDiffCum * (1.0 - audioDiffAvgCoef);

						// Shrinking/expanding buffer code....
						if(Math.Abs(avgDiff) >= audioDiffThreshold) {

							int wantedSize = (int)(frame.Length + diff * videoDecoder.SamplesPerSecond * n);
								
							// get a correction percent from 10 to 60 based on the avgDiff
							// in order to converge a little faster
							double correctionPercent = Misc.clamp(10 + (Math.Abs(avgDiff) - audioDiffThreshold) * 15, 10, 60);

							//Util.DebugOut(correctionPercent);

							//AUDIO_SAMPLE_CORRECTION_PERCENT_MAX

							int minSize = (int)(frame.Length * ((100 - correctionPercent)
								/ 100));

							int maxSize = (int)(frame.Length * ((100 + correctionPercent) 
								/ 100));

							if(wantedSize < minSize) {

								wantedSize = minSize;

							} else if(wantedSize > maxSize) {

								wantedSize = maxSize;
							}

							// adjust samples per second to speed up or slow down the audio
							Int64 length = frame.Length;
							Int64 sps = videoDecoder.SamplesPerSecond;
							int samplesPerSecond = (int)((length * sps) / wantedSize);
							videoDebug.AudioFrameLengthAdjust = samplesPerSecond;
							audioPlayer.SamplesPerSecond = samplesPerSecond;
							
						} else {

							audioPlayer.SamplesPerSecond = videoDecoder.SamplesPerSecond;
						}

					}

				} else {

					// difference is TOO big; reset diff stuff 
					audioDiffAvgCount = 0;
					audioDiffCum = 0;
				}
			}
			
		}
        // TODO: Fix frame receiving!
        private unsafe AudioFrame GenerateAudioData(uint samples, short[] shorts)
        {
            // Buffer size is (number of samples) * (size of each sample)
            // We choose to generate single channel (mono) audio. For multi-channel, multiply by number of channels
            var bufferSize = samples*sizeof (float);
            var frame = new AudioFrame(bufferSize);

            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            using (var reference = buffer.CreateReference())
            {
                byte* dataInBytes;
                uint capacityInBytes;

                // Get the buffer from the AudioFrame
                ((IMemoryBufferByteAccess) reference).GetBuffer(out dataInBytes, out capacityInBytes);

                // Cast to float since the data we are generating is float
                var dataInFloats = (float*) dataInBytes;

                var floats = ConvertShortsToFloats(shorts);
                var capacityInFloats = capacityInBytes/4;

                Marshal.Copy(floats, 0, (IntPtr) dataInFloats, (int) capacityInFloats);
            }

            return frame;
        }
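ConvertShortsToFloats is used by both copies of GenerateAudioData but never shown. A plausible sketch, assuming 16-bit PCM normalized into the [-1, 1] float range:

        // Hypothetical helper: scale 16-bit samples into normalized floats.
        private static float[] ConvertShortsToFloats(short[] shorts)
        {
            var floats = new float[shorts.Length];
            for (int i = 0; i < shorts.Length; i++)
            {
                floats[i] = shorts[i] / 32768f;
            }
            return floats;
        }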
Example #35
        private AudioFrame GenerateAudioFrame(int samplesNumber)
        {
            AudioFrame frame = new AudioFrame((uint)samplesNumber * _channelsNumber * sizeof(float));

            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
            {
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    _waveSource.GenerateWave(reference, samplesNumber);
                }
            }

            return frame;
        }
        private unsafe void ProcessFrameOutput(AudioFrame frame)
        {
            using (var buffer = frame.LockBuffer(AudioBufferAccessMode.Read))
            using (var reference = buffer.CreateReference())
            {
                byte* dataInBytes;
                uint capacityInBytes;

                ((IMemoryBufferByteAccess) reference).GetBuffer(out dataInBytes, out capacityInBytes);

                var capacityInFloats = capacityInBytes/4;
                if (capacityInFloats != _frameSize) // Only send frames with the correct size.
                    return;

                var dataInFloats = (float*) dataInBytes;
                var floats = new float[capacityInFloats];
                Marshal.Copy((IntPtr) dataInFloats, floats, 0, (int) capacityInFloats);

                var shorts = ConvertFloatsToShorts(floats);

                ToxAvModel.Instance.SendAudioFrame(_friendNumber, new ToxAvAudioFrame(shorts, _samplingRate, 1));
            }
        }
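ConvertFloatsToShorts in the last example is the mirror of that helper and is likewise not shown; a hedged sketch assuming the same normalization plus simple clipping:

        // Hypothetical helper: clip and scale normalized floats back to 16-bit samples.
        private static short[] ConvertFloatsToShorts(float[] floats)
        {
            var shorts = new short[floats.Length];
            for (int i = 0; i < floats.Length; i++)
            {
                float clipped = Math.Max(-1f, Math.Min(1f, floats[i]));
                shorts[i] = (short)(clipped * short.MaxValue);
            }
            return shorts;
        }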