/// <summary>
/// Allocates the underlying <c>AVAudioFifo</c> for the given sample format and channel count.
/// </summary>
/// <param name="format">Sample format stored in the FIFO.</param>
/// <param name="channels">Number of audio channels.</param>
/// <param name="nbSamples">Initial capacity in samples; values &lt;= 0 are clamped to 1.</param>
public AudioFifo(AVSampleFormat format, int channels, int nbSamples = 1)
{
    unsafe
    {
        // av_audio_fifo_alloc needs a positive initial size, so clamp non-positive requests.
        int initialCapacity = nbSamples <= 0 ? 1 : nbSamples;
        pAudioFifo = ffmpeg.av_audio_fifo_alloc(format, channels, initialCapacity);
    }
}
/// <summary>P/Invoke binding for FFmpeg's <c>av_opt_set_sample_fmt</c>: sets a sample-format option
/// (by name) on an AVOptions-enabled object; returns 0 on success, negative AVERROR on failure.</summary>
public static extern System.Int32 av_opt_set_sample_fmt( IntPtr /* void* */ obj, [MarshalAs(UnmanagedType.LPStr)] string name, AVSampleFormat fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 search_flags);
/// <summary>
/// Allocates an <see cref="AVFrame"/> configured for audio and, when <paramref name="nbSamples"/>
/// is non-zero, allocates its data buffer as well.
/// </summary>
/// <param name="sampleFmt">Sample format to record on the frame.</param>
/// <param name="channelLayout">Channel layout mask to record on the frame.</param>
/// <param name="sampleRate">Sample rate in Hz.</param>
/// <param name="nbSamples">Number of samples; 0 skips buffer allocation.</param>
/// <returns>The newly allocated frame.</returns>
/// <exception cref="FFmpegException">Frame or buffer allocation failed.</exception>
private AVFrame *AllocAudioFrame(AVSampleFormat sampleFmt, ulong channelLayout, int sampleRate, int nbSamples)
{
    AVFrame *frame = av_frame_alloc();
    if (frame == null)
    {
        throw new FFmpegException("Could not allocate audio frame");
    }

    frame->format = (int)sampleFmt;
    frame->channel_layout = channelLayout;
    frame->sample_rate = sampleRate;
    frame->nb_samples = nbSamples;

    // Only attach a data buffer when the frame actually carries samples.
    if (nbSamples != 0)
    {
        int ret = av_frame_get_buffer(frame, 0);
        if (ret < 0)
        {
            throw new FFmpegException("Could not allocate audio buffer", ret);
        }
    }

    return frame;
}
/// <summary>
/// Maps an FFmpeg <see cref="AVSampleFormat"/> to the corresponding <c>SampleFormat</c> value.
/// </summary>
/// <param name="format">The FFmpeg sample format to translate.</param>
/// <returns>The matching <c>SampleFormat</c>.</returns>
/// <exception cref="ArgumentOutOfRangeException">The format has no mapping.</exception>
public static SampleFormat ToFormat(this AVSampleFormat format)
{
    switch (format)
    {
        case AV_SAMPLE_FMT_NONE: return(SampleFormat.Unknown);
        // Packed (interleaved) formats
        case AV_SAMPLE_FMT_U8: return(SampleFormat.U8);
        case AV_SAMPLE_FMT_S16: return(SampleFormat.I16);
        case AV_SAMPLE_FMT_S32: return(SampleFormat.I32);
        case AV_SAMPLE_FMT_FLT: return(SampleFormat.F32);
        case AV_SAMPLE_FMT_DBL: return(SampleFormat.F64);
        case AV_SAMPLE_FMT_S64: return(SampleFormat.I64);
        // Planar Formats
        case AV_SAMPLE_FMT_U8P: return(SampleFormat.U8p);
        case AV_SAMPLE_FMT_S16P: return(SampleFormat.I16p);
        case AV_SAMPLE_FMT_S32P: return(SampleFormat.I32p);
        case AV_SAMPLE_FMT_FLTP: return(SampleFormat.F32p);
        case AV_SAMPLE_FMT_DBLP: return(SampleFormat.F64p);
        case AV_SAMPLE_FMT_S64P: return(SampleFormat.I64p);
    }
    // was: `throw new Exception(...)` — a bare Exception is not catchable with any precision;
    // ArgumentOutOfRangeException is the idiomatic type for an unmapped enum value and is still
    // caught by any existing `catch (Exception)` handlers.
    throw new ArgumentOutOfRangeException(nameof(format), format, "Invalid AVSampleFormat:" + format);
}
// NOTE(review): for AV_SAMPLE_FMT_U8/U8P the unsigned "silence" byte is 0x80, but `fill[i] &= 0x80`
// turns the default fill {0x00} into 0x00 (it masks rather than sets the high bit) — presumably
// `|= 0x80` was intended; confirm against ffmpeg.av_samples_set_silence behavior.
// The method fills each plane (planar: one plane per channel; packed: a single interleaved plane)
// from `offset` (converted from samples to bytes via block_align) with the repeating `fill` pattern.
/// <summary> /// refance <see cref="ffmpeg.av_samples_set_silence(byte**, int, int, int, AVSampleFormat)"/> /// </summary> /// <param name="offset">sample offset</param> /// <param name="fill"> /// default is new byte[] { 0x00 } /// <para> /// if fill is {0x01, 0x02}, loop fill data by {0x01, 0x02, 0x01, 0x02 ...}, all channels are the same. /// </para> /// </param> public void SetSilence(int offset = 0, params byte[] fill) { fill = (fill == null || fill.Length < 1) ? new byte[] { 0x00 } : fill; AVSampleFormat sample_fmt = (AVSampleFormat)pFrame->format; int planar = ffmpeg.av_sample_fmt_is_planar(sample_fmt); int planes = planar != 0 ? pFrame->channels : 1; int block_align = ffmpeg.av_get_bytes_per_sample(sample_fmt) * (planar != 0 ? 1 : pFrame->channels); int data_size = pFrame->nb_samples * block_align; if ((sample_fmt == AVSampleFormat.AV_SAMPLE_FMT_U8 || sample_fmt == AVSampleFormat.AV_SAMPLE_FMT_U8P)) { for (int i = 0; i < fill.Length; i++) { fill[i] &= 0x80; } } offset *= block_align; // convert to byte offset int fill_size = data_size - offset; // number of bytes to fill per plane List <byte> fill_data = new List <byte>(); // data to fill per plane while (fill_data.Count < fill_size) { fill_data.AddRange(fill); } for (int i = 0; i < planes; i++) { Marshal.Copy(fill_data.ToArray(), 0, (IntPtr)pFrame->extended_data[(uint)i] + offset, fill_size); } }
// Opens `url`, locates the best video and audio streams, and fully initializes a decoder
// context for each. Video path: optional hardware device context, FPS derived from
// avg_frame_rate (falls back to 25 when the denominator is 0), codec name/frame size/pixel
// format captured, and a packet + frame allocated for the decode loop.
// NOTE(review): every ffmpeg call is checked via ThrowExceptionIfError(), so a missing
// stream throws rather than returning; order matters (parameters_to_context before open2).
public AudioStreamDecoder(string url, AVHWDeviceType HWDeviceType = AVHWDeviceType.AV_HWDEVICE_TYPE_NONE) { _pFormatContext = ffmpeg.avformat_alloc_context(); _receivedFrame = ffmpeg.av_frame_alloc(); var pFormatContext = _pFormatContext; ffmpeg.avformat_open_input(&pFormatContext, url, null, null).ThrowExceptionIfError(); ffmpeg.avformat_find_stream_info(_pFormatContext, null).ThrowExceptionIfError(); AVCodec *videoCodec = null; _streamVideoIndex = ffmpeg.av_find_best_stream(_pFormatContext, AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, &videoCodec, 0).ThrowExceptionIfError(); _pVideoCodecContext = ffmpeg.avcodec_alloc_context3(videoCodec); if (HWDeviceType != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE) { ffmpeg.av_hwdevice_ctx_create(&_pVideoCodecContext->hw_device_ctx, HWDeviceType, null, null, 0).ThrowExceptionIfError(); } ffmpeg.avcodec_parameters_to_context(_pVideoCodecContext, _pFormatContext->streams[_streamVideoIndex]->codecpar).ThrowExceptionIfError(); if (_pFormatContext->streams[_streamVideoIndex]->avg_frame_rate.den != 0) { Fps = _pFormatContext->streams[_streamVideoIndex]->avg_frame_rate.num / _pFormatContext->streams[_streamVideoIndex]->avg_frame_rate.den; Console.WriteLine("计算得到FPS"); } else { Console.WriteLine("默认FPS"); Fps = 25; } ffmpeg.avcodec_open2(_pVideoCodecContext, videoCodec, null).ThrowExceptionIfError(); CodecName = ffmpeg.avcodec_get_name(videoCodec->id); FrameSize = new Size(_pVideoCodecContext->width, _pVideoCodecContext->height); PixelFormat = _pVideoCodecContext->pix_fmt; _pPacket = ffmpeg.av_packet_alloc(); _pFrame = ffmpeg.av_frame_alloc(); AVCodec *audioCodec = null; _streamAudioIndex = ffmpeg.av_find_best_stream(_pFormatContext, AVMediaType.AVMEDIA_TYPE_AUDIO, -1, -1, &audioCodec, 0).ThrowExceptionIfError(); _pAudioCodecContext = ffmpeg.avcodec_alloc_context3(audioCodec); ffmpeg.avcodec_parameters_to_context(_pAudioCodecContext, _pFormatContext->streams[_streamAudioIndex]->codecpar).ThrowExceptionIfError(); 
// Audio path: open the decoder, then cache input format/rate/layout/channels/start_time for
// later resampling. NOTE(review): the `_streamAudioIndex > 0` guard skips this logging/caching
// when the audio stream is index 0 — presumably `>= 0` was intended; confirm with callers.
ffmpeg.avcodec_open2(_pAudioCodecContext, audioCodec, null).ThrowExceptionIfError(); if (_streamAudioIndex > 0) { AVStream *avs = _pFormatContext->streams[_streamAudioIndex]; Console.WriteLine($"codec_id:{avs->codecpar->codec_id}"); Console.WriteLine($"format:{avs->codecpar->format}"); Console.WriteLine($"sample_rate:{avs->codecpar->sample_rate}"); Console.WriteLine($"channels:{avs->codecpar->channels}"); Console.WriteLine($"frame_size:{avs->codecpar->frame_size}"); in_sample_fmt = _pAudioCodecContext->sample_fmt; in_sample_rate = _pAudioCodecContext->sample_rate; //输入的采样率 in_ch_layout = _pAudioCodecContext->channel_layout; //输入的声道布局 in_channels = _pAudioCodecContext->channels; in_start_time = avs->start_time; } }
/// <summary>
/// Describes an audio format: rate, layout, sample format and derived bit widths.
/// </summary>
/// <param name="sampleRate">Samples per second.</param>
/// <param name="channelLayout">Channel layout, see <see cref="AVChannelLayout"/>.</param>
/// <param name="sampleFormat">Per-sample storage format.</param>
public AudioFormat(int sampleRate, AVChannelLayout channelLayout, AVSampleFormat sampleFormat)
{
    SampleRate = sampleRate;
    ChannelLayout = channelLayout;
    SampleFormat = sampleFormat;

    // Bit width follows from the format's byte-per-sample size.
    int bytesPerSample = GetBytePerSample(sampleFormat);
    BitsPerSample = bytesPerSample * 8;

    // 32-bit integer samples are treated here as carrying 24 valid bits.
    ValidBitsPerSample = SampleFormat.EqualsType(AVSampleFormat.Int32) ? 24 : BitsPerSample;
}
/// <summary>
/// Allocates an FFmpeg audio FIFO; disposes any partially acquired state and throws on failure.
/// </summary>
/// <param name="sampleFormat">Sample format stored in the FIFO.</param>
/// <param name="numChannels">Channel count.</param>
/// <param name="initialSize">Initial capacity in samples.</param>
/// <exception cref="FFmpegException">The FIFO could not be allocated.</exception>
public AudioFifoBuffer(AVSampleFormat sampleFormat, int numChannels, int initialSize = 1)
{
    audioFifo = ffmpeg.av_audio_fifo_alloc(sampleFormat, numChannels, initialSize);
    if (audioFifo == null)
    {
        // Clean up before surfacing the out-of-memory condition.
        Dispose();
        throw new FFmpegException(ffmpeg.AVERROR(ffmpeg.ENOMEM), "Failed to allocate fifo buffer.");
    }
}
/// <summary>
/// Creates an audio frame sized by channel count, deriving a default channel layout.
/// </summary>
/// <param name="channels">Number of channels.</param>
/// <param name="nbSamples">Samples per channel.</param>
/// <param name="format">Sample format.</param>
/// <param name="sampleRate">Sample rate in Hz (0 leaves it unset).</param>
/// <param name="align">Buffer alignment; 0 picks an automatic value.</param>
public AudioFrame(int channels, int nbSamples, AVSampleFormat format, int sampleRate = 0, int align = 0)
    : base()
{
    unsafe
    {
        AllocBuffer(channels, nbSamples, format, sampleRate, align);
        // Record the default layout implied by the channel count.
        pFrame->channel_layout = (ulong)ffmpeg.av_get_default_channel_layout(channels);
    }
}
/// <summary>
/// Creates an audio frame from an explicit channel layout.
/// </summary>
/// <param name="channelLayout">see <see cref="AVChannelLayout"/></param>
/// <param name="nbSamples">recommended use <see cref="AVCodecContext.frame_size"/></param>
/// <param name="format"><see cref="AVCodecContext.sample_fmt"/></param>
/// <param name="sampleRate"></param>
/// <param name="align">
/// Required buffer size alignment. If equal to 0, alignment will be chosen automatically for
/// the current CPU. It is highly recommended to pass 0 here unless you know what you are doing.
/// </param>
public AudioFrame(AVChannelLayout channelLayout, int nbSamples, AVSampleFormat format, int sampleRate = 0, int align = 0)
    : this(ffmpeg.av_get_channel_layout_nb_channels((ulong)channelLayout), nbSamples, format, sampleRate, align)
{
    unsafe
    {
        // The delegated constructor assigned a default layout; overwrite it with the exact one requested.
        pFrame->channel_layout = (ulong)channelLayout;
    }
}
/// <summary>
/// Returns true when <paramref name="sampleFormat"/> stores each channel in its own plane.
/// </summary>
/// <param name="sampleFormat">Format to classify.</param>
/// <returns><c>true</c> for planar formats, <c>false</c> for packed/unknown formats.</returns>
private bool IsPlanar(AVSampleFormat sampleFormat)
{
    switch (sampleFormat)
    {
        case AVSampleFormat.AV_SAMPLE_FMT_U8P:
        case AVSampleFormat.AV_SAMPLE_FMT_S16P:
        case AVSampleFormat.AV_SAMPLE_FMT_S32P:
        case AVSampleFormat.AV_SAMPLE_FMT_FLTP:
        case AVSampleFormat.AV_SAMPLE_FMT_DBLP:
        case AVSampleFormat.AV_SAMPLE_FMT_S64P: // was missing — S64P is planar too
            return true;
        default:
            return false;
    }
}
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_set_silence</c>: fills an audio buffer
/// array with the silence value for the given sample format, starting at <c>offset</c> samples.</summary>
public static extern System.Int32 av_samples_set_silence( IntPtr /* IntPtr* */ audio_data, [MarshalAs(UnmanagedType.I4)] System.Int32 offset, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, AVSampleFormat sample_fmt);
/// <summary>
/// Re-initializes the frame: clears existing state, allocates a buffer for the given layout,
/// and records the layout on the frame.
/// </summary>
/// <param name="channelLayout">Channel layout, see <see cref="AVChannelLayout"/>.</param>
/// <param name="nbSamples">Samples per channel.</param>
/// <param name="format">Sample format.</param>
/// <param name="sampleRate">Sample rate in Hz (0 leaves it unset).</param>
/// <param name="align">Buffer alignment; 0 picks an automatic value.</param>
public void Init(AVChannelLayout channelLayout, int nbSamples, AVSampleFormat format, int sampleRate = 0, int align = 0)
{
    // Drop whatever the frame previously held before re-allocating.
    Clear();
    int channelCount = ffmpeg.av_get_channel_layout_nb_channels((ulong)channelLayout);
    AllocBuffer(channelCount, nbSamples, format, sampleRate, align);
    unsafe
    {
        pFrame->channel_layout = (ulong)channelLayout;
    }
}
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_get_buffer_size</c>: returns the required
/// buffer size (and optionally per-plane linesize) for the given sample parameters.</summary>
public static extern System.Int32 av_samples_get_buffer_size( IntPtr /* System.Int32* */ linesize, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, AVSampleFormat sample_fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 align);
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_alloc_array_and_samples</c>: allocates both
/// the data-pointer array and the sample buffers; caller must free them via FFmpeg.</summary>
public static extern System.Int32 av_samples_alloc_array_and_samples( out IntPtr /* IntPtr* */ audio_data, out int /* System.Int32* */ linesize, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, AVSampleFormat sample_fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 align);
/// <summary>
/// create audio converter by dst output parames
/// </summary>
/// <param name="dstFormat"></param>
/// <param name="dstChannels"></param>
/// <param name="dstNbSamples"></param>
/// <param name="dstSampleRate"></param>
public SampleConverter(AVSampleFormat dstFormat, int dstChannels, int dstNbSamples, int dstSampleRate)
{
    DstFormat = dstFormat;
    DstChannels = dstChannels;
    DstNbSamples = dstNbSamples;
    DstSampleRate = dstSampleRate;
    // Derive the default layout for the requested channel count.
    DstChannelLayout = FFmpegHelper.GetChannelLayout(dstChannels);
    dstFrame = new AudioFrame(DstChannels, DstNbSamples, DstFormat, DstSampleRate);
    AudioFifo = new AudioFifo(DstFormat, DstChannels);
}
/// <summary>
/// create audio converter by dst output parames
/// </summary>
/// <param name="dstFormat"></param>
/// <param name="dstChannelLayout">see <see cref="AVChannelLayout"/></param>
/// <param name="dstNbSamples"></param>
/// <param name="dstSampleRate"></param>
public SampleConverter(AVSampleFormat dstFormat, ulong dstChannelLayout, int dstNbSamples, int dstSampleRate)
{
    DstFormat = dstFormat;
    DstChannelLayout = dstChannelLayout;
    DstNbSamples = dstNbSamples;
    DstSampleRate = dstSampleRate;
    // Channel count is derived from the layout mask.
    DstChannels = ffmpeg.av_get_channel_layout_nb_channels(dstChannelLayout);
    dstFrame = new AudioFrame(DstChannels, DstNbSamples, DstFormat, DstSampleRate);
    AudioFifo = new AudioFifo(DstFormat, ffmpeg.av_get_channel_layout_nb_channels(DstChannelLayout), 1);
}
/// <summary>
/// Returns the byte width of one sample in <paramref name="sampleFormat"/>.
/// </summary>
/// <param name="sampleFormat">Format to query.</param>
/// <returns>Bytes per sample (always positive).</returns>
/// <exception cref="FfmpegException">FFmpeg reported a non-positive size (unknown format).</exception>
internal static int AvGetBytesPerSample(AVSampleFormat sampleFormat)
{
    int bytesPerSample = ffmpeg.av_get_bytes_per_sample(sampleFormat);
    if (bytesPerSample <= 0)
    {
        throw new FfmpegException("Could not calculate data size.");
    }
    return bytesPerSample;
}
// Allocates the native sample buffer via Util.InitSampleBuffer. The `when (this.DisposeOnException())`
// exception filter runs Dispose as a side effect while the filter is evaluated; if the filter
// returns true the exception is swallowed by the empty catch — presumably DisposeOnException
// controls whether to suppress or propagate; confirm its return-value contract.
public AudioSampleBuffer(AVSampleFormat sampleFormat, int numChannels, int numSamples) { try { wasAllocated = true; Util.InitSampleBuffer(ref sampleBuffer, sampleFormat, numChannels, numSamples); } catch (Exception) when(this.DisposeOnException()) { } }
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_fill_arrays</c>: points the data-pointer
/// array and linesize at an existing buffer <c>buf</c> without allocating.</summary>
public static extern System.Int32 av_samples_fill_arrays( IntPtr /* IntPtr* */ audio_data, IntPtr /* System.Int32* */ linesize, IntPtr /* System.Byte* */ buf, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, AVSampleFormat sample_fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 align);
// Configures the frame fields and allocates its data buffer. Skips allocation entirely when the
// frame is already writable (av_frame_is_writable != 0 means it owns a writable buffer).
// NOTE(review): the return value of av_frame_get_buffer is ignored — an allocation failure is
// silent here, unlike the throwing pattern used elsewhere in this codebase; consider checking it.
private void AllocBuffer(int channels, int nbSamples, AVSampleFormat format, int sampleRate = 0, int align = 0) { if (ffmpeg.av_frame_is_writable(pFrame) != 0) { return; } pFrame->format = (int)format; pFrame->channels = channels; pFrame->nb_samples = nbSamples; pFrame->sample_rate = sampleRate; ffmpeg.av_frame_get_buffer(pFrame, align); }
/// <summary>
/// Creates an audio frame with given dimensions and allocates a buffer for it.
/// </summary>
/// <param name="num_samples">The number of samples in the audio frame.</param>
/// <param name="num_channels">The number of channels in the audio frame.</param>
/// <param name="sampleFormat">The audio sample format.</param>
/// <returns>The new audio frame.</returns>
/// <exception cref="InvalidOperationException">Frame or buffer allocation failed.</exception>
public static AudioFrame Create(int num_samples, int num_channels, AVSampleFormat sampleFormat)
{
    var frame = ffmpeg.av_frame_alloc();
    if (frame == null)
    {
        // was: unchecked — a failed allocation would dereference null on the field writes below.
        throw new InvalidOperationException("Could not allocate audio frame.");
    }
    frame->nb_samples = num_samples;
    frame->channels = num_channels;
    frame->format = (int)sampleFormat;
    // was: return value ignored — a failed buffer allocation leaked the frame and returned an
    // AudioFrame with no data planes.
    int ret = ffmpeg.av_frame_get_buffer(frame, 32);
    if (ret < 0)
    {
        ffmpeg.av_frame_free(&frame);
        throw new InvalidOperationException("Could not allocate audio buffer, error " + ret + ".");
    }
    return new AudioFrame(frame);
}
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_copy</c>: copies <c>nb_samples</c> samples
/// between data-pointer arrays with per-array sample offsets.</summary>
public static extern System.Int32 av_samples_copy( IntPtr /* IntPtr* */ dst, IntPtr /* IntPtr* */ src, [MarshalAs(UnmanagedType.I4)] System.Int32 dst_offset, [MarshalAs(UnmanagedType.I4)] System.Int32 src_offset, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, AVSampleFormat sample_fmt);
// Builds and initializes a SwrContext to resample the decoder's output (codecContext's format,
// rate, layout, channel count) to the requested format/rate/channels. No-ops when the target
// already matches the current settings. Quality presets tune filter_size/phase_shift/cutoff.
// NOTE(review): `(int)ffmpeg.av_get_default_channel_layout(channels)` and
// `(int)codecContext->channel_layout` truncate 64-bit layout masks to 32 bits — layouts using
// high bits would be corrupted; confirm whether wider-than-stereo layouts reach this path.
// NOTE(review): a previously allocated swrContext is overwritten without swr_free — presumably
// leaked if ConvertToFormat is called more than once with different settings.
public void ConvertToFormat(AVSampleFormat sampleFormat, int sampleRate, int channels, ResampleQuality resampleQuality = ResampleQuality.High) { if (format == (int)sampleFormat && this.sampleRate == sampleRate && this.channels == channels) { return; } format = (int)sampleFormat; this.sampleRate = sampleRate; this.channels = channels; int channelLayout = (int)ffmpeg.av_get_default_channel_layout(channels); swrContext = ffmpeg.swr_alloc(); ffmpeg.av_opt_set_int(swrContext, "in_channel_layout", (int)codecContext->channel_layout, 0); ffmpeg.av_opt_set_int(swrContext, "out_channel_layout", channelLayout, 0); ffmpeg.av_opt_set_int(swrContext, "in_channel_count", codecContext->channels, 0); ffmpeg.av_opt_set_int(swrContext, "out_channel_count", channels, 0); ffmpeg.av_opt_set_int(swrContext, "in_sample_rate", codecContext->sample_rate, 0); ffmpeg.av_opt_set_int(swrContext, "out_sample_rate", sampleRate, 0); ffmpeg.av_opt_set_sample_fmt(swrContext, "in_sample_fmt", codecContext->sample_fmt, 0); ffmpeg.av_opt_set_sample_fmt(swrContext, "out_sample_fmt", sampleFormat, 0); switch (resampleQuality) { case ResampleQuality.Low: ffmpeg.av_opt_set_int(swrContext, "filter_size", 0, 0); ffmpeg.av_opt_set_int(swrContext, "phase_shift", 0, 0); break; case ResampleQuality.Medium: // default ffmpeg settings break; case ResampleQuality.High: ffmpeg.av_opt_set_int(swrContext, "filter_size", 128, 0); ffmpeg.av_opt_set_double(swrContext, "cutoff", 1.0, 0); break; case ResampleQuality.Highest: ffmpeg.av_opt_set_int(swrContext, "filter_size", 256, 0); ffmpeg.av_opt_set_double(swrContext, "cutoff", 1.0, 0); break; } if (ffmpeg.swr_init(swrContext) != 0) { throw new ApplicationException("Failed init SwrContext: " + FFmpegHelper.logLastLine); } }
/// <summary>
/// Copies a Mat's sample data into a newly allocated <see cref="AudioFrame"/>.
/// Multi-channel mats are treated as interleaved (one packed plane); single-channel mats
/// use one row per channel.
/// </summary>
/// <param name="mat">Source matrix holding the samples.</param>
/// <param name="srctFormat">Sample format of the data in <paramref name="mat"/>.</param>
/// <param name="sampleRate">Sample rate to record on the frame.</param>
/// <returns>The populated audio frame.</returns>
private static AudioFrame MatToAudioFrame(Mat mat, AVSampleFormat srctFormat, int sampleRate)
{
    int channelCount = mat.Channels() > 1 ? mat.Channels() : mat.Height;
    AudioFrame frame = new AudioFrame(srctFormat, channelCount, mat.Width, sampleRate);
    bool planar = ffmpeg.av_sample_fmt_is_planar(srctFormat) > 0;
    int rowStride = (int)mat.Step();
    // Planar formats get one copy per channel plane; packed formats copy a single plane.
    int planeCount = planar ? channelCount : 1;
    for (int plane = 0; plane < planeCount; plane++)
    {
        FFmpegHelper.CopyMemory(frame.Data[plane], mat.Data + plane * rowStride, (uint)rowStride);
    }
    return frame;
}
/// <summary>
/// Overwrites the target Audio sampleformat.
/// </summary>
/// <param name="targetFormat">Target format.</param>
/// <exception cref="InvalidOperationException">The conversion context was already created.</exception>
void OverwriteTargetSampleformat(AVSampleFormat targetFormat)
{
    if (ContextCreated)
    {
        throw new InvalidOperationException("Cannot overwrite target format if context already initialized");
    }

    avTargetSampleFormat = targetFormat;

    // A target differing from the source means resampling is required later;
    // an already-set flag is never cleared here.
    doResample |= avTargetSampleFormat != avSourceSampleFormat;
}
/// <summary>
/// Copies a Mat's sample data into a newly allocated <see cref="AudioFrame"/>.
/// Multi-channel mats are treated as interleaved (one packed plane); single-channel mats
/// use one row per channel.
/// </summary>
/// <param name="mat">Source matrix holding the samples.</param>
/// <param name="srctFormat">Sample format of the data in <paramref name="mat"/>.</param>
/// <param name="sampleRate">Sample rate to record on the frame.</param>
/// <returns>The populated audio frame.</returns>
private static AudioFrame MatToAudioFrame(Mat mat, AVSampleFormat srctFormat, int sampleRate)
{
    int channels = mat.NumberOfChannels > 1 ? mat.NumberOfChannels : mat.Height;
    AudioFrame frame = new AudioFrame(channels, mat.Width, srctFormat, sampleRate);
    bool isPlanar = ffmpeg.av_sample_fmt_is_planar(srctFormat) > 0;
    int stride = mat.Step;
    for (int i = 0; i < (isPlanar ? channels : 1); i++)
    {
        // was: CopyMemory(mat.DataPointer + i * stride, frame.Data[i], stride) — the arguments
        // were swapped versus the (dst, src, count) convention used by the sibling implementation,
        // copying the freshly allocated (empty) frame over the mat instead of mat -> frame.
        FFmpegHelper.CopyMemory(frame.Data[i], mat.DataPointer + i * stride, stride);
    }
    return frame;
}
/// <summary>
/// Describes an audio format and precomputes derived layout properties
/// (channel count, planarity, per-line block size, bit widths).
/// </summary>
/// <param name="sampleRate">Samples per second.</param>
/// <param name="channelLayout">Channel layout, see <see cref="AVChannelLayout"/>.</param>
/// <param name="sampleFormat">Per-sample storage format.</param>
public AudioFormat(int sampleRate, AVChannelLayout channelLayout, AVSampleFormat sampleFormat)
{
    SampleRate = sampleRate;
    ChannelLayout = channelLayout;
    SampleFormat = sampleFormat;

    Channels = GetChannels(channelLayout);
    BitsPerSample = GetBytePerSample(sampleFormat) * 8;
    SampleType = GetSampleType(sampleFormat);
    IsPlanarFormat = sampleFormat.IsPlanar();

    // Planar data: one line per channel, each line holding single-channel samples.
    // Packed data: a single interleaved line carrying all channels.
    int bytesPerSample = BitsPerSample >> 3;
    LineCount = IsPlanarFormat ? Channels : 1;
    LineBlock = IsPlanarFormat ? bytesPerSample : bytesPerSample * Channels;

    // 32-bit integer samples are treated here as carrying 24 valid bits.
    ValidBitsPerSample = SampleFormat.EqualsType(AVSampleFormat.Int32) ? 24 : BitsPerSample;
}
/// <summary>
/// Convert <see cref="Mat"/> to an audio frame, optionally converting to <paramref name="dstFotmat"/>.
/// <para><see cref="DepthType"/> to <see cref="AVSampleFormat"/> mapping
/// (packed format when <see cref="Mat.NumberOfChannels"/> &gt; 1, otherwise planar):</para>
/// <list type="table">
/// <item><term><see cref="DepthType.Cv8U"/></term><description><see cref="AVSampleFormat.AV_SAMPLE_FMT_U8"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_U8P"/></description></item>
/// <item><term><see cref="DepthType.Cv16S"/></term><description><see cref="AVSampleFormat.AV_SAMPLE_FMT_S16"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S16P"/></description></item>
/// <item><term><see cref="DepthType.Cv32S"/></term><description><see cref="AVSampleFormat.AV_SAMPLE_FMT_S32"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S32P"/></description></item>
/// <item><term><see cref="DepthType.Cv32F"/></term><description><see cref="AVSampleFormat.AV_SAMPLE_FMT_FLT"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_FLTP"/></description></item>
/// <item><term><see cref="DepthType.Cv64F"/></term><description><see cref="AVSampleFormat.AV_SAMPLE_FMT_DBL"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_DBLP"/></description></item>
/// <item>NOTE: Emgucv does not support int64; Cv64F is mapped to double. Populate the Mat with
/// int64 data yourself if <paramref name="dstFotmat"/> is <see cref="AVSampleFormat.AV_SAMPLE_FMT_S64"/>
/// or <see cref="AVSampleFormat.AV_SAMPLE_FMT_S64P"/>.</item>
/// </list>
/// </summary>
/// <param name="mat">Source matrix holding the samples.</param>
/// <param name="dstFotmat">Default is auto format by <see cref="Mat.Depth"/> and <see cref="Mat.NumberOfChannels"/> use mapping table</param>
/// <param name="dstSampleRate">Mat not have sample rate, set value here or later</param>
/// <returns>The audio frame (converted when a different destination format was requested).</returns>
public static AudioFrame ToAudioFrame(this Mat mat, AVSampleFormat dstFotmat = AVSampleFormat.AV_SAMPLE_FMT_NONE, int dstSampleRate = 0)
{
    AVSampleFormat srcformat;
    switch (mat.Depth)
    {
        case DepthType.Default:
        case DepthType.Cv8U:
        case DepthType.Cv8S:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_U8 : AVSampleFormat.AV_SAMPLE_FMT_U8P;
            break;
        case DepthType.Cv16U:
        case DepthType.Cv16S:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_S16 : AVSampleFormat.AV_SAMPLE_FMT_S16P;
            break;
        case DepthType.Cv32S:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_S32 : AVSampleFormat.AV_SAMPLE_FMT_S32P;
            break;
        case DepthType.Cv32F:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_FLT : AVSampleFormat.AV_SAMPLE_FMT_FLTP;
            break;
        case DepthType.Cv64F:
            srcformat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_DBL : AVSampleFormat.AV_SAMPLE_FMT_DBLP;
            break;
        default:
            throw new FFmpegException(FFmpegException.NotSupportFormat);
    }
    if (dstFotmat != AVSampleFormat.AV_SAMPLE_FMT_NONE && dstFotmat != srcformat)
    {
        // The converter requires a sample rate of at least 1.
        // was: Math.Min(1, dstSampleRate), which clamped every real rate DOWN to 1.
        int converterSampleRate = Math.Max(1, dstSampleRate);
        using (SampleConverter converter = new SampleConverter(dstFotmat, mat.NumberOfChannels > 1 ? mat.NumberOfChannels : mat.Height, mat.Width, converterSampleRate))
        {
            AudioFrame frame = converter.ConvertFrame(MatToAudioFrame(mat, srcformat, converterSampleRate), out int a, out int b);
            unsafe
            {
                // set real sample rate after convert
                ((AVFrame *)frame)->sample_rate = dstSampleRate;
            }
            // was: the converted frame was discarded and the unconverted frame returned below,
            // so a requested dstFotmat never took effect.
            return frame;
        }
    }
    return MatToAudioFrame(mat, srcformat, dstSampleRate);
}
/// <summary>
/// create audio converter by dst frame
/// </summary>
/// <param name="dstFrame">Frame whose format/layout/rate define the conversion target.</param>
public SampleConverter(AudioFrame dstFrame)
{
    ffmpeg.av_frame_make_writable(dstFrame).ThrowIfError();

    var av = dstFrame.AVFrame;
    DstFormat = (AVSampleFormat)av.format;
    DstChannels = av.channels;
    // Fall back to the default layout when the frame does not carry one.
    DstChannelLayout = av.channel_layout != 0
        ? av.channel_layout
        : FFmpegHelper.GetChannelLayout(DstChannels);
    DstNbSamples = av.nb_samples;
    DstSampleRate = av.sample_rate;

    base.dstFrame = dstFrame;
    AudioFifo = new AudioFifo(DstFormat, DstChannels);
}
/// <summary>P/Invoke binding for libswresample's <c>swr_alloc_set_opts</c>: allocates (or reuses
/// <c>s</c>) and configures a resampler context from the in/out layout, format and rate.</summary>
public static extern IntPtr/* SwrContext* */ swr_alloc_set_opts( IntPtr/* SwrContext* */ s, [MarshalAs(UnmanagedType.I8)] System.Int64 out_ch_layout, AVSampleFormat out_sample_fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 out_sample_rate, [MarshalAs(UnmanagedType.I8)] System.Int64 in_ch_layout, AVSampleFormat in_sample_fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 in_sample_rate, [MarshalAs(UnmanagedType.I4)] System.Int32 log_offset, IntPtr/* void* */ log_ctx);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_alt_sample_fmt</c>: returns the planar
/// variant when <c>planar</c> is non-zero, otherwise the packed variant, of the given format.</summary>
public static extern AVSampleFormat av_get_alt_sample_fmt( AVSampleFormat sample_fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 planar);
/// <summary>P/Invoke binding (unsafe pointer variant) for FFmpeg's <c>av_samples_copy</c>:
/// copies samples between data-pointer arrays with per-array sample offsets.</summary>
public static extern int av_samples_copy(byte** dst, byte** src, int dst_offset, int src_offset, int nb_samples, int nb_channels, AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding (unsafe pointer variant) for libswresample's <c>swr_alloc_set_opts</c>:
/// allocates (or reuses <c>s</c>) and configures a resampler context.</summary>
public static extern SwrContext* swr_alloc_set_opts(SwrContext* s, long out_ch_layout, AVSampleFormat out_sample_fmt, int out_sample_rate, long in_ch_layout, AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void* log_ctx);
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_fill_arrays</c>: points the data-pointer
/// array and linesize at an existing buffer <c>buf</c> without allocating.</summary>
public static extern System.Int32 av_samples_fill_arrays( IntPtr/* IntPtr* */ audio_data, IntPtr/* System.Int32* */ linesize, IntPtr/* System.Byte* */ buf, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, AVSampleFormat sample_fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 align);
/// <summary>P/Invoke binding (unsafe pointer variant) for FFmpeg's <c>av_opt_set_sample_fmt</c>:
/// sets a sample-format option by name on an AVOptions-enabled object.</summary>
public static extern int av_opt_set_sample_fmt(void* obj, String name, AVSampleFormat fmt, int search_flags);
/// <summary>P/Invoke binding for the (deprecated) libavfilter
/// <c>avfilter_get_audio_buffer_ref_from_arrays_channels</c>: wraps existing sample arrays
/// in an AVFilterBufferRef.</summary>
public static extern AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays_channels(byte** data, int linesize, int perms, int nb_samples, AVSampleFormat sample_fmt, int channels, ulong channel_layout);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_planar_sample_fmt</c>: returns the planar
/// counterpart of the given sample format.</summary>
public static extern AVSampleFormat av_get_planar_sample_fmt(AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>avcodec_fill_audio_frame</c>: fills an AVFrame's
/// data pointers from a caller-supplied flat buffer.</summary>
public static extern int avcodec_fill_audio_frame(AVFrame* frame, int nb_channels, AVSampleFormat sample_fmt, byte* buf, int buf_size, int align);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_alt_sample_fmt</c>: returns the planar
/// variant when <c>planar</c> is non-zero, otherwise the packed variant, of the given format.</summary>
public static extern AVSampleFormat av_get_alt_sample_fmt(AVSampleFormat sample_fmt, int planar);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_packed_sample_fmt</c>: returns the packed
/// (interleaved) counterpart of the given sample format.</summary>
public static extern AVSampleFormat av_get_packed_sample_fmt(AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_sample_fmt_name</c>: returns the format's name.
/// NOTE(review): marshaling the return as <c>String</c> makes the CLR attempt to free the native
/// string, which FFmpeg owns — returning <c>IntPtr</c> + <c>Marshal.PtrToStringAnsi</c> is the
/// safe pattern; confirm against how callers use this binding.</summary>
public static extern String av_get_sample_fmt_name(AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_copy</c>: copies <c>nb_samples</c> samples
/// between data-pointer arrays with per-array sample offsets.</summary>
public static extern System.Int32 av_samples_copy( IntPtr/* IntPtr* */ dst, IntPtr/* IntPtr* */ src, [MarshalAs(UnmanagedType.I4)] System.Int32 dst_offset, [MarshalAs(UnmanagedType.I4)] System.Int32 src_offset, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_set_silence</c>: fills an audio buffer
/// array with the silence value for the given sample format, starting at <c>offset</c> samples.</summary>
public static extern System.Int32 av_samples_set_silence( IntPtr/* IntPtr* */ audio_data, [MarshalAs(UnmanagedType.I4)] System.Int32 offset, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding (unsafe pointer variant) for FFmpeg's <c>av_samples_set_silence</c>:
/// fills the sample buffers with the format's silence value from <c>offset</c> samples onward.</summary>
public static extern int av_samples_set_silence(byte** audio_data, int offset, int nb_samples, int nb_channels, AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_sample_fmt_string</c>: writes a description of
/// the format into <c>buf</c> and returns it. NOTE(review): marshaling the return as <c>String</c>
/// lets the CLR try to free native memory it does not own — returning <c>IntPtr</c> is safer;
/// confirm caller usage.</summary>
public static extern String av_get_sample_fmt_string(sbyte* /*String*/ buf, int buf_size, AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for the legacy (removed in modern FFmpeg) <c>av_audio_resample_init</c>:
/// creates a ReSampleContext for rate/channel/format conversion with filter tuning parameters.</summary>
public static extern ReSampleContext* av_audio_resample_init(int output_channels, int input_channels, int output_rate, int input_rate, AVSampleFormat sample_fmt_out, AVSampleFormat sample_fmt_in, int filter_length, int log2_phase_count, int linear, double cutoff);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_bytes_per_sample</c>: returns the byte width
/// of one sample of the given format (0 for unknown formats).</summary>
public static extern int av_get_bytes_per_sample(AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_pcm_codec</c>: returns the PCM codec ID that
/// matches the sample format and endianness (<c>be</c> non-zero selects big-endian).</summary>
public static extern AVCodecID av_get_pcm_codec(AVSampleFormat fmt, int be);
/// <summary>P/Invoke binding for FFmpeg's <c>av_sample_fmt_is_planar</c>: returns non-zero when
/// the format stores each channel in a separate plane.</summary>
public static extern int av_sample_fmt_is_planar(AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>av_opt_get_sample_fmt</c>: reads a sample-format
/// option by name from an AVOptions-enabled object into <c>out_fmt</c>.</summary>
public static extern int av_opt_get_sample_fmt(void* obj, String name, int search_flags, AVSampleFormat* out_fmt);
/// <summary>P/Invoke binding (unsafe pointer variant) for FFmpeg's <c>av_samples_get_buffer_size</c>:
/// returns the required buffer size (and optional linesize) for the given sample parameters.</summary>
public static extern int av_samples_get_buffer_size(int* linesize, int nb_channels, int nb_samples, AVSampleFormat sample_fmt, int align);
/// <summary>P/Invoke binding for FFmpeg's <c>av_audio_fifo_alloc</c>: allocates an audio FIFO with
/// the given format, channel count and initial capacity; returns null on failure.</summary>
public static extern AVAudioFifo* av_audio_fifo_alloc(AVSampleFormat sample_fmt, int channels, int nb_samples);
/// <summary>P/Invoke binding (unsafe pointer variant) for FFmpeg's <c>av_samples_fill_arrays</c>:
/// points the data-pointer array and linesize at an existing buffer without allocating.</summary>
public static extern int av_samples_fill_arrays(byte** audio_data, int* linesize, byte* buf, int nb_channels, int nb_samples, AVSampleFormat sample_fmt, int align);
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_alloc_array_and_samples</c>: allocates both
/// the data-pointer array and the sample buffers; caller must free them via FFmpeg.</summary>
public static extern System.Int32 av_samples_alloc_array_and_samples( out IntPtr/* IntPtr* */ audio_data, out int/* System.Int32* */ linesize, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, AVSampleFormat sample_fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 align);
/// <summary>P/Invoke binding (unsafe pointer variant) for FFmpeg's
/// <c>av_samples_alloc_array_and_samples</c>: allocates the pointer array and sample buffers.</summary>
public static extern int av_samples_alloc_array_and_samples(byte*** audio_data, int* linesize, int nb_channels, int nb_samples, AVSampleFormat sample_fmt, int align);
/// <summary>P/Invoke binding for FFmpeg's <c>av_sample_fmt_is_planar</c>: returns non-zero when
/// the format stores each channel in a separate plane.</summary>
public static extern System.Int32 av_sample_fmt_is_planar( AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_bytes_per_sample</c>: returns the byte width
/// of one sample of the given format (0 for unknown formats).</summary>
public static extern System.Int32 av_get_bytes_per_sample( AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>av_get_sample_fmt_string</c>.
/// NOTE(review): <c>buf</c> is an output buffer in the native API, but it is marshaled here as an
/// input <c>LPStr string</c>, so the written description can never reach managed code — a
/// <c>StringBuilder</c> (or IntPtr) parameter would be needed; confirm whether any caller relies
/// on the returned string instead.</summary>
public static extern string av_get_sample_fmt_string( [MarshalAs(UnmanagedType.LPStr)] string buf, [MarshalAs(UnmanagedType.I4)] System.Int32 buf_size, AVSampleFormat sample_fmt);
/// <summary>P/Invoke binding for FFmpeg's <c>av_samples_get_buffer_size</c>: returns the required
/// buffer size (and optionally per-plane linesize) for the given sample parameters.</summary>
public static extern System.Int32 av_samples_get_buffer_size( IntPtr/* System.Int32* */ linesize, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_channels, [MarshalAs(UnmanagedType.I4)] System.Int32 nb_samples, AVSampleFormat sample_fmt, [MarshalAs(UnmanagedType.I4)] System.Int32 align);