        /// <summary>
        /// Writes the specified audio data to the stream as the next frame.
        /// </summary>
        /// <param name="data">The audio data to write.</param>
        /// <param name="customPtsValue">(optional) custom PTS value for the frame.</param>
        public void AddFrame(AudioData data, long customPtsValue)
        {
            if (customPtsValue <= lastFramePts)
            {
                throw new ArgumentOutOfRangeException(nameof(customPtsValue), "Cannot add a frame that occurs chronologically before the most recently written frame!");
            }

            frame.UpdateFromAudioData(data);

            var converted = AudioFrame.Create(
                frame.SampleRate,
                frame.NumChannels,
                frame.NumSamples,
                frame.ChannelLayout,
                Configuration.SampleFormat);

            converted.PresentationTimestamp = customPtsValue;

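            // Convert the buffered planar-float frame into the encoder's configured sample format.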
            ffmpeg.swr_convert_frame(swrContext, converted.Pointer, frame.Pointer);

            stream.Push(converted);
            converted.Dispose();

            lastFramePts = customPtsValue;
        }
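
A minimal usage sketch for AddFrame (hedged: the output object, GenerateChunk helper, and samplesPerChunk value below are illustrative assumptions, not part of the API shown above). The one contract the method enforces is that custom PTS values strictly increase:

long pts = 0;
for (int i = 0; i < chunkCount; i++)
{
    AudioData chunk = GenerateChunk(i);   // obtain the next block of samples (hypothetical helper)
    output.Audio.AddFrame(chunk, pts);    // PTS must be strictly greater than the previous call's
    pts += samplesPerChunk;               // advance by the frame duration in PTS units
}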
Example #2
        /// <summary>
        /// Reads the next frame from the audio stream.
        /// </summary>
        /// <returns>The decoded audio data.</returns>
        public new AudioData GetNextFrame()
        {
            var frame = base.GetNextFrame() as AudioFrame;

            var converted = AudioFrame.Create(
                frame.SampleRate,
                frame.NumChannels,
                frame.NumSamples,
                frame.ChannelLayout,
                SampleFormat.SingleP);

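            // Convert the decoded frame to planar single-precision floats (SampleFormat.SingleP).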
            ffmpeg.swr_convert_frame(swrContext, converted.Pointer, frame.Pointer);

            return new AudioData(converted);
        }
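
A hedged read-loop sketch; the file object and the commented-out AudioData accessor are assumptions based on the surrounding API, not definitions from this snippet:

// Read frames sequentially; each call returns data already converted
// to planar single-precision floats (SampleFormat.SingleP).
AudioData data = file.Audio.GetNextFrame();
// float[] left = data.GetChannelData(0);   // hypothetical accessor for one channel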
Example #3
        /// <summary>
        /// Reads the audio frame found at the specified timestamp.
        /// </summary>
        /// <param name="time">The frame timestamp.</param>
        /// <returns>The decoded audio data.</returns>
        public new AudioData GetFrame(TimeSpan time)
        {
            var frame = base.GetFrame(time) as AudioFrame;

            var converted = AudioFrame.Create(
                frame.SampleRate,
                frame.NumChannels,
                frame.NumSamples,
                frame.ChannelLayout,
                SampleFormat.SingleP,
                frame.DecodingTimestamp,
                frame.PresentationTimestamp);

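            // Convert the decoded frame to planar single-precision floats, preserving its timestamps.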
            ffmpeg.swr_convert_frame(swrContext, converted.Pointer, frame.Pointer);

            return new AudioData(converted);
        }
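
A one-line seek sketch (the file object is again an assumption). Unlike GetNextFrame, this overload copies the source frame's decoding and presentation timestamps into the converted frame:

// Decode the audio frame found at the 10-second mark (hypothetical caller).
AudioData data = file.Audio.GetFrame(TimeSpan.FromSeconds(10));
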
        /// <summary>
        /// Initializes a new instance of the <see cref="AudioOutputStream"/> class.
        /// </summary>
        /// <param name="stream">The audio stream.</param>
        /// <param name="config">The stream setting.</param>
        internal AudioOutputStream(OutputStream <AudioFrame> stream, AudioEncoderSettings config)
        {
            this.stream = stream;

            long channelLayout = ffmpeg.av_get_default_channel_layout(config.Channels);

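            // swr_alloc_set_opts takes the output (encoder) parameters first, then the input
            // parameters; here only the sample format changes: planar floats in, the encoder's
            // configured format out, with rate and channel layout unchanged.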
            swrContext = ffmpeg.swr_alloc_set_opts(
                null,
                channelLayout,
                (AVSampleFormat)config.SampleFormat,
                config.SampleRate,
                channelLayout,
                (AVSampleFormat)SampleFormat.SingleP,
                config.SampleRate,
                0,
                null);

            ffmpeg.swr_init(swrContext);

            Configuration = config;
            frame         = AudioFrame.Create(config.SampleRate, config.Channels, config.SamplesPerFrame, channelLayout, SampleFormat.SingleP);
        }
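
Not shown in this excerpt is the matching cleanup. A minimal disposal sketch, assuming swrContext is an unmanaged SwrContext* field in an unsafe class:

// Sketch only: release the resampler allocated in the constructor.
var ctx = swrContext;
ffmpeg.swr_free(&ctx);   // frees the context; FFmpeg nulls the pointer it is given
swrContext = null;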