Code example #1
        // Converts a packed (interleaved) frame to its planar counterpart in place.
        // packedCache is assumed to be a class-level scratch buffer reused across calls.
        public static void ToPlanar(AudioFrame inOutFrame)
        {
            AudioResampler resampler = GetPlanarResampler(inOutFrame.format);

            // Already planar: nothing to do.
            if (resampler == null)
            {
                return;
            }

            var samples = inOutFrame.SampleCount;
            int bytes   = resampler.GetOutBytes(samples);

            packedCache.Resize(bytes);
            var data = packedCache.data;

            // Copy the interleaved samples into the scratch buffer so the frame's own
            // buffers can be reallocated for the planar layout.
            Buffer.MemoryCopy((void *)inOutFrame.datas[0], (void *)data, bytes, bytes);
            int lineBytes = resampler.Destination.GetLineBytes(samples);

            inOutFrame.format = resampler.Destination;
            inOutFrame.Resize(samples);
            fixed(IntPtr *input = inOutFrame.datas)
            {
                // Resample from the packed scratch buffer into the frame's planar planes.
                resampler.Resample((IntPtr)(&data), samples, (IntPtr)input, samples);
            }

            inOutFrame.sampleCount = samples;
        }
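
For reference, the conversion performed here is a packed-to-planar layout change: interleaved samples (L R L R ...) in one buffer become one contiguous buffer per channel. Below is a minimal, self-contained sketch of that layout change on plain managed arrays; it is illustrative only and does not use the library's AudioResampler API.

        // Sketch only: packed (interleaved) -> planar on plain arrays.
        static double[][] Deinterleave(double[] packed, int channels)
        {
            int samples = packed.Length / channels;
            var planar = new double[channels][];
            for (int ch = 0; ch < channels; ch++)
            {
                planar[ch] = new double[samples];
                for (int i = 0; i < samples; i++)
                {
                    planar[ch][i] = packed[i * channels + ch];
                }
            }
            return planar;
        }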
Code example #2
        // Convolves the input planes with the configured kernels and writes the result to outFrame.
        // Requires either a single kernel or exactly one kernel per input channel.
        public override void Convert(AudioFrame inFrame, AudioFrame outFrame)
        {
            if (kernelCount > 1 && kernelCount != inFrame.format.Channels)
            {
                throw new ArgumentException($"The number of data planes in {nameof(inFrame)} does not match the number of convolution kernels", nameof(inFrame));
            }

            var inFormat   = inFrame.format;
            var inSamples  = inFrame.sampleCount;
            var outSamples = convs[0].GetOutLength(inSamples);
            var outFormat  = inFormat;

            // The convolvers work on planar double data; lazily create a resampler the
            // first time an input arrives in a different sample format.
            if (resampler == null && inFormat.SampleFormat != AVSampleFormat.DoublePlanar)
            {
                outFormat = new AudioFormat(inFormat.SampleRate, inFormat.ChannelLayout, inFormat.SampleFormat.ToPlanar());
                resampler = new AudioResampler(inFormat, outFormat);
            }
            if (resampler != null)
            {
                resampler.Resample(inFrame, tempInput);
                inFrame = tempInput;
            }

            outFrame.Resize(outFormat, outSamples);

            // Convolve plane i with kernel i (kernel 0 when only one kernel is configured).
            for (int i = 0; i < kernelCount; i++)
            {
                convs[kernelCount == 1 ? 0 : i].Convolve((double *)inFrame.datas[i], inSamples, (double *)outFrame.datas[i], outSamples);
            }
        }
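
The Convolve implementation is not shown here; assuming it performs a full 1-D convolution, the length returned by GetOutLength would be inputLength + kernelLength - 1. The following is a self-contained sketch of that per-channel operation under that assumption (an illustration, not the library's implementation, which may well be FFT-based).

        // Sketch only: direct full 1-D convolution of one channel with one kernel.
        // Output length is signal.Length + kernel.Length - 1.
        static double[] ConvolveFull(double[] signal, double[] kernel)
        {
            var output = new double[signal.Length + kernel.Length - 1];
            for (int i = 0; i < signal.Length; i++)
            {
                for (int j = 0; j < kernel.Length; j++)
                {
                    output[i + j] += signal[i] * kernel[j];
                }
            }
            return output;
        }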
Code example #3
        // Converts a planar frame to its packed (interleaved) counterpart in place.
        // packedCache is assumed to be a class-level scratch buffer reused across calls.
        public static void ToPacked(AudioFrame inOutFrame)
        {
            AudioResampler resampler = GetPackedResampler(inOutFrame.format);

            // Already packed: nothing to do.
            if (resampler == null)
            {
                return;
            }

            var samples = inOutFrame.SampleCount;
            int bytes   = resampler.GetOutBytes(samples);

            packedCache.Resize(bytes);
            var data = packedCache.data;

            fixed(IntPtr *input = inOutFrame.datas)
            {
                // Resample from the frame's planar planes into the packed scratch buffer.
                resampler.Resample((IntPtr)input, samples, (IntPtr)(&data), samples);
            }

            inOutFrame.format = resampler.Destination;
            inOutFrame.Update(samples, packedCache.data);
        }
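
This is the inverse of code example #1: planar channel buffers are interleaved back into a single packed buffer. A minimal sketch of the layout change on plain arrays (illustrative only, not the AudioResampler API):

        // Sketch only: planar -> packed (interleaved) on plain arrays.
        static double[] Interleave(double[][] planar)
        {
            int channels = planar.Length;
            int samples  = planar[0].Length;
            var packed   = new double[channels * samples];
            for (int ch = 0; ch < channels; ch++)
            {
                for (int i = 0; i < samples; i++)
                {
                    packed[i * channels + ch] = planar[ch][i];
                }
            }
            return packed;
        }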
Code example #4
        // Encodes one audio frame into outPacket; a null frame flushes the encoder.
        public override bool Encode(Frame frame, Packet outPacket)
        {
            // Validate that the caller passed an AudioFrame in the encoder's declared input format.
            if (frame != null)
            {
                if (!(frame is AudioFrame))
                {
                    throw new ArgumentException($"{nameof(frame)} must be of type {nameof(AudioFrame)}", nameof(frame));
                }
                if (!(frame as AudioFrame).format.Equals(InFormat))
                {
                    throw new ArgumentException("The input frame's format differs from the encoder's input format");
                }
            }

            // If a resampler is configured, convert the frame to the codec's own format;
            // a null frame flushes samples still buffered inside the resampler.
            if (resampler != null)
            {
                if (frame != null)
                {
                    resampler.Resample(frame as AudioFrame, tempFrame);
                    frame = tempFrame;
                }
                else
                {
                    resampler.ResampleFinal(tempFrame);
                    if (tempFrame.SampleCount > 0)
                    {
                        frame = tempFrame;
                    }
                }
            }

            encodeFrames = 0;
            outPacket.ReleaseNativeBuffer();
            int gotPicture = 0;

            if (frame != null)
            {
                try {
                    frame.SetupToNative();
                    // The PTS counts samples: express it in a 1/sample_rate time base,
                    // then rescale it to the codec's own time base before encoding.
                    frame.presentTimestamp = new Timestamp(inputFrames, new AVRational(1, OutFormat.SampleRate));
                    frame.presentTimestamp.Transform(codecContext->TimeBase);
                    frame.frame->Pts = frame.presentTimestamp.Value;
                    FF.avcodec_encode_audio2(codecContext, outPacket.packet, frame.frame, &gotPicture).CheckFFmpegCode("An error occurred during audio encoding");
                } finally {
                    frame.ReleaseSetup();
                }
                encodeFrames = frame.frame->NbSamples;
                inputFrames += encodeFrames;
            }
            else
            {
                // A null frame drains the encoder's internal buffer (end-of-stream flush).
                FF.avcodec_encode_audio2(codecContext, outPacket.packet, null, &gotPicture).CheckFFmpegCode("An error occurred during audio encoding");
            }

            if (gotPicture != 0)
            {
                ConfigPakcet(outPacket);
                return(true);
            }
            return(false);
        }
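
The timestamp handling above expresses the PTS as a running sample count in a 1/SampleRate time base and rescales it to the codec's time base. Assuming Timestamp.Transform behaves like FFmpeg's av_rescale_q, the arithmetic is roughly the following simplified sketch (it ignores the overflow protection and rounding modes of the real function):

        // Sketch only: rescale a tick count from one rational time base to another,
        // i.e. value * (srcNum/srcDen) / (dstNum/dstDen), kept in integer arithmetic.
        static long RescaleQ(long value, int srcNum, int srcDen, int dstNum, int dstDen)
        {
            return value * srcNum * dstDen / ((long)srcDen * dstNum);
        }

        // Example: 44100 samples in a 1/44100 time base map to 90000 ticks in 1/90000,
        // i.e. RescaleQ(44100, 1, 44100, 1, 90000) == 90000.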