/// <summary>
/// Convert the <see cref="Mat"/> to an <see cref="AudioFrame"/> in <paramref name="dstFotmat"/>.
/// <para><see cref="DepthType"/> to <see cref="AVSampleFormat"/> mapping table.
/// If <see cref="Mat.NumberOfChannels"/> is greater than 1 the packed format is used, otherwise the planar one.</para>
/// <list type="table">
/// <item>
/// <term><see cref="DepthType.Cv8U"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_U8"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_U8P"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv16S"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_S16"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S16P"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv32S"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_S32"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S32P"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv32F"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_FLT"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_FLTP"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv64F"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_DBL"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_DBLP"/></description>
/// </item>
/// <item>
/// <term><see cref="DepthType.Cv64F"/></term>
/// <description><see cref="AVSampleFormat.AV_SAMPLE_FMT_S64"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S64P"/></description>
/// </item>
/// <item>NOTE: Emgu CV has no int64 depth, so Cv64F doubles as the int64 mapping:
/// fill the Mat with int64 data when <paramref name="dstFotmat"/> is <see cref="AVSampleFormat.AV_SAMPLE_FMT_S64"/> or <see cref="AVSampleFormat.AV_SAMPLE_FMT_S64P"/>.
/// </item>
/// </list>
/// </summary>
/// <param name="mat">source of the audio samples</param>
/// <param name="dstFotmat">Target sample format. The default picks the format automatically from <see cref="Mat.Depth"/> and <see cref="Mat.NumberOfChannels"/> using the mapping table.</param>
/// <param name="dstSampleRate">A Mat carries no sample rate; supply the value here or assign it later.</param>
/// <returns>the audio frame, converted to <paramref name="dstFotmat"/> when one was requested</returns>
/// <exception cref="FFmpegException">thrown when <see cref="Mat.Depth"/> has no sample-format mapping</exception>
public static AudioFrame ToAudioFrame(this Mat mat, AVSampleFormat dstFotmat = AVSampleFormat.AV_SAMPLE_FMT_NONE, int dstSampleRate = 0)
{
    // Map the Mat element depth to a source sample format (packed when the Mat
    // is interleaved across channels, planar when one channel per row).
    AVSampleFormat srcFormat;
    switch (mat.Depth)
    {
        case DepthType.Default:
        case DepthType.Cv8U:
        case DepthType.Cv8S:
            srcFormat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_U8 : AVSampleFormat.AV_SAMPLE_FMT_U8P;
            break;
        case DepthType.Cv16U:
        case DepthType.Cv16S:
            srcFormat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_S16 : AVSampleFormat.AV_SAMPLE_FMT_S16P;
            break;
        case DepthType.Cv32S:
            srcFormat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_S32 : AVSampleFormat.AV_SAMPLE_FMT_S32P;
            break;
        case DepthType.Cv32F:
            srcFormat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_FLT : AVSampleFormat.AV_SAMPLE_FMT_FLTP;
            break;
        case DepthType.Cv64F:
            srcFormat = mat.NumberOfChannels > 1 ? AVSampleFormat.AV_SAMPLE_FMT_DBL : AVSampleFormat.AV_SAMPLE_FMT_DBLP;
            break;
        default:
            throw new FFmpegException(FFmpegException.NotSupportFormat);
    }
    if (dstFotmat != AVSampleFormat.AV_SAMPLE_FMT_NONE && dstFotmat != srcFormat)
    {
        // The converter requires a positive sample rate, so substitute 1 when the
        // caller supplied none. BUG FIX: this was Math.Min(1, dstSampleRate), which
        // clamped every real sample rate down to 1 instead of providing a floor.
        using (SampleConverter converter = new SampleConverter(dstFotmat, mat.NumberOfChannels > 1 ? mat.NumberOfChannels : mat.Height, mat.Width, Math.Max(1, dstSampleRate)))
        {
            AudioFrame frame = converter.ConvertFrame(MatToAudioFrame(mat, srcFormat, Math.Max(1, dstSampleRate)), out int _, out int __);
            unsafe
            {
                // Write the caller-requested sample rate back after the conversion,
                // since the converter only saw the placeholder value.
                ((AVFrame *)frame)->sample_rate = dstSampleRate;
            }
            // BUG FIX: the converted frame was previously discarded and the method
            // fell through to return the unconverted source frame.
            return frame;
        }
    }
    return MatToAudioFrame(mat, srcFormat, dstSampleRate);
}
/// <summary>
/// Convert the current frame to a packed (interleaved) frame.
/// If the current frame is planar, a new packed frame is returned;
/// otherwise the current frame itself is returned unchanged.
/// </summary>
/// <returns>a packed <see cref="AudioFrame"/>, or this frame when it is already packed</returns>
/// <exception cref="FFmpegException">thrown when the frame's sample format has no packed counterpart</exception>
public AudioFrame ToPacket()
{
    unsafe
    {
        AVSampleFormat srcFormat = (AVSampleFormat)pFrame->format;
        // Already packed (av_sample_fmt_is_planar returns 1 only for planar
        // formats): nothing to convert.
        if (ffmpeg.av_sample_fmt_is_planar(srcFormat) <= 0)
        {
            return(this);
        }
        // av_get_packed_sample_fmt maps each planar format to its packed
        // counterpart (e.g. FLTP -> FLT) and returns AV_SAMPLE_FMT_NONE for
        // unknown formats; this replaces the hand-rolled 6-case switch.
        AVSampleFormat outFormat = ffmpeg.av_get_packed_sample_fmt(srcFormat);
        if (outFormat == AVSampleFormat.AV_SAMPLE_FMT_NONE || outFormat == AVSampleFormat.AV_SAMPLE_FMT_NB)
        {
            throw new FFmpegException(FFmpegException.NotSupportFormat);
        }
        // Build the destination frame with identical layout parameters, then
        // let SampleConverter rewrite the sample arrangement.
        AudioFrame outFrame = new AudioFrame(pFrame->channels, pFrame->nb_samples, outFormat, pFrame->sample_rate);
        outFrame.pFrame->channel_layout = pFrame->channel_layout;
        using (SampleConverter converter = new SampleConverter(outFrame))
        {
            return(converter.ConvertFrame(this, out int _, out int __));
        }
    }
}