public AudioFifo(AVSampleFormat format, int channels, int nbSamples = 1)
{
    unsafe
    {
        pAudioFifo = ffmpeg.av_audio_fifo_alloc(format, channels, nbSamples <= 0 ? 1 : nbSamples);
    }
}
public AudioFifoBuffer(AVSampleFormat sampleFormat, int numChannels, int initialSize = 1)
{
    if ((audioFifo = ffmpeg.av_audio_fifo_alloc(sampleFormat, numChannels, initialSize)) == null)
    {
        Dispose();
        throw new FFmpegException(ffmpeg.AVERROR(ffmpeg.ENOMEM), "Failed to allocate fifo buffer.");
    }
}
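// For reference, a self-contained round trip through AVAudioFifo with raw
// FFmpeg.AutoGen calls (no wrapper): write 1024 samples of silence in, read
// them back out. A minimal sketch, assuming ffmpeg.RootPath has already been
// set; the method name FifoRoundTrip is illustrative only.
static unsafe void FifoRoundTrip()
{
    const int channels = 2;
    const int nbSamples = 1024;
    AVAudioFifo *fifo = ffmpeg.av_audio_fifo_alloc(AVSampleFormat.AV_SAMPLE_FMT_S16, channels, nbSamples);
    int bytes = nbSamples * channels * sizeof(short);      // interleaved S16 stereo
    byte *plane = (byte *)ffmpeg.av_mallocz((ulong)bytes); // zeroed buffer = silence
    byte **planes = stackalloc byte *[1];                  // packed formats use a single plane
    planes[0] = plane;
    int written = ffmpeg.av_audio_fifo_write(fifo, (void **)planes, nbSamples);
    int read = ffmpeg.av_audio_fifo_read(fifo, (void **)planes, nbSamples);
    Console.WriteLine($"wrote {written}, read {read}, left {ffmpeg.av_audio_fifo_size(fifo)}");
    ffmpeg.av_free(plane);
    ffmpeg.av_audio_fifo_free(fifo);
}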
public unsafe static void readFile(Openfile openFile, AVAudioFifo *aVAudioFifo1) // decode
{
    AVFrame *pFrame = ffmpeg.av_frame_alloc(); // holds the decoded data
    AVPacket packet;
    ffmpeg.av_init_packet(&packet);
    // while (_state == CaptureState.RUNNING)
    while (true)
    {
        packet.data = null;
        packet.size = 0;
        if (ffmpeg.av_read_frame(openFile._pFormatContext, &packet) < 0)
        {
            Console.WriteLine("finished reading input");
            count++;
            ffmpeg.av_packet_unref(&packet);
            break;
        }
        // if (packet.stream_index == openFile._streamIndex)
        // {
        Console.WriteLine(packet.size);
        int ret = ffmpeg.avcodec_send_packet(openFile._pCodecContext, &packet); // feed the decoder
        if (ret < 0)
        {
            Console.WriteLine("avcodec_send_packet failed");
            break;
        }
        ret = ffmpeg.avcodec_receive_frame(openFile._pCodecContext, pFrame); // fetch the decoded frame
        if (ret < 0)
        {
            // AVERROR(EAGAIN): the decoder needs more input before it can emit a frame.
            ffmpeg.av_packet_unref(&packet);
            continue;
        }
        int fifo_mic_space = ffmpeg.av_audio_fifo_space(aVAudioFifo1);
        // nb_samples is the size of one decoded frame; spin until a consumer
        // thread has drained enough of the FIFO to make room.
        while (fifo_mic_space < pFrame->nb_samples && _state == CaptureState.RUNNING)
        {
            Console.WriteLine("_fifo_ full !\n");
            fifo_mic_space = ffmpeg.av_audio_fifo_space(aVAudioFifo1);
        }
        if (fifo_mic_space >= pFrame->nb_samples)
        {
            // extended_data is already a byte**; the Marshal.StructureToPtr
            // round-trip used here before was unnecessary and leaked memory.
            int nWritten = ffmpeg.av_audio_fifo_write(aVAudioFifo1, (void **)pFrame->extended_data, pFrame->nb_samples);
        }
        ffmpeg.av_packet_unref(&packet);
        // }
    }
    ffmpeg.av_frame_free(&pFrame);
}
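// readFile above pulls at most one frame per packet. With the send/receive
// API a single packet can yield zero or several frames, so a more robust
// drain loop looks like this sketch (DecodePacket is a hypothetical helper;
// the FIFO write matches the one shown above):
static unsafe void DecodePacket(AVCodecContext *dec, AVPacket *pkt, AVFrame *frame, AVAudioFifo *fifo)
{
    if (ffmpeg.avcodec_send_packet(dec, pkt) < 0)
        return;
    // Keep receiving until the decoder asks for more input (EAGAIN) or ends (EOF).
    while (ffmpeg.avcodec_receive_frame(dec, frame) == 0)
    {
        if (ffmpeg.av_audio_fifo_space(fifo) >= frame->nb_samples)
            ffmpeg.av_audio_fifo_write(fifo, (void **)frame->extended_data, frame->nb_samples);
        ffmpeg.av_frame_unref(frame);
    }
}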
/**
 * Load one audio frame from the FIFO buffer, encode and write it to the
 * output file.
 * @param fifo                  Buffer used for temporary storage
 * @param output_format_context Format context of the output file
 * @param output_codec_context  Codec context of the output file
 * @return Error code (0 if successful)
 */
int load_encode_and_write(AVAudioFifo *fifo, AVFormatContext *output_format_context,
                          AVCodecContext *output_codec_context)
{
    /* Temporary storage of the output samples of the frame written to the file. */
    AVFrame *output_frame;
    /* Use the maximum number of possible samples per frame.
     * If there is less than the maximum possible frame size in the FIFO
     * buffer, use that number. Otherwise, use the maximum possible frame size. */
    int frame_size = Math.Min(av_audio_fifo_size(fifo), output_codec_context->frame_size);
    int data_written;

    /* Initialize temporary storage for one output frame. */
    if (init_output_frame(&output_frame, output_codec_context, frame_size) < 0)
    {
        return AVERROR_EXIT;
    }

    /* Read as many samples from the FIFO buffer as required to fill the frame.
     * The samples are stored in the frame temporarily. extended_data is
     * already a byte**, so no managed copy of the plane pointers is needed. */
    if (av_audio_fifo_read(fifo, (void **)output_frame->extended_data, frame_size) < frame_size)
    {
        Console.WriteLine("error: Could not read data from FIFO");
        av_frame_free(&output_frame);
        return AVERROR_EXIT;
    }

    /* Encode one frame worth of audio samples. */
    if (encode_audio_frame(output_frame, output_format_context,
                           output_codec_context, &data_written) < 0)
    {
        av_frame_free(&output_frame);
        return AVERROR_EXIT;
    }
    av_frame_free(&output_frame);
    return 0;
}
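// init_output_frame is referenced above but not shown; a sketch of what it
// has to do, following the FFmpeg transcode_aac example these functions are
// ported from:
int init_output_frame(AVFrame **frame, AVCodecContext *output_codec_context, int frame_size)
{
    if ((*frame = av_frame_alloc()) == null)
    {
        Console.WriteLine("error: Could not allocate output frame");
        return AVERROR_EXIT;
    }
    /* Set the frame's parameters, especially its size and format.
     * av_frame_get_buffer needs this to allocate memory for the
     * audio samples of the frame. */
    (*frame)->nb_samples = frame_size;
    (*frame)->channel_layout = output_codec_context->channel_layout;
    (*frame)->format = (int)output_codec_context->sample_fmt;
    (*frame)->sample_rate = output_codec_context->sample_rate;
    /* Allocate the sample buffers of the created frame. */
    int error = av_frame_get_buffer(*frame, 0);
    if (error < 0)
    {
        Console.WriteLine("error: Could not allocate output frame samples");
        av_frame_free(frame);
        return error;
    }
    return 0;
}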
protected virtual void Dispose(bool disposing)
{
    if (disposed)
    {
        return;
    }
    if (disposing)
    {
        // Dispose managed resources
    }
    // Dispose unmanaged resources
    if (audioFifo != null)
    {
        ffmpeg.av_audio_fifo_free(audioFifo);
        audioFifo = null;
    }
    disposed = true;
}
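// The standard companion members for the pattern above, as a minimal sketch
// (assuming the owning class is the AudioFifoBuffer shown earlier), so the
// unmanaged FIFO is freed both deterministically and by the GC as a fallback:
public void Dispose()
{
    Dispose(true);
    GC.SuppressFinalize(this);
}

~AudioFifoBuffer()
{
    Dispose(false);
}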
/**
 * Add converted input audio samples to the FIFO buffer for later processing.
 * @param fifo                    Buffer to add the samples to
 * @param converted_input_samples Samples to be added. The dimensions are
 *                                channel (for multi-channel audio), sample.
 * @param frame_size              Number of samples to be converted
 * @return Error code (0 if successful)
 */
int add_samples_to_fifo(AVAudioFifo *fifo, byte **converted_input_samples, int frame_size)
{
    int error;

    /* Make the FIFO as large as it needs to be to hold both
     * the old and the new samples. */
    if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0)
    {
        Console.WriteLine("error: Could not reallocate FIFO");
        return error;
    }

    /* Store the new samples in the FIFO buffer. */
    if (av_audio_fifo_write(fifo, (void **)converted_input_samples, frame_size) < frame_size)
    {
        Console.WriteLine("error: Could not write data to FIFO");
        return AVERROR_EXIT;
    }
    return 0;
}
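// init_fifo is called from main below but not shown; a minimal sketch in the
// style of the FFmpeg transcode_aac example this code follows (assuming
// using static FFmpeg.AutoGen.ffmpeg, as the unprefixed calls above imply):
int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
{
    /* Create the FIFO buffer based on the codec context's audio parameters.
     * A capacity of one sample is enough: av_audio_fifo_realloc in
     * add_samples_to_fifo grows it on demand. */
    if ((*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
                                     output_codec_context->channels, 1)) == null)
    {
        Console.WriteLine("error: Could not allocate FIFO");
        return AVERROR(ENOMEM);
    }
    return 0;
}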
public bool main(string inputFile, string outputFile)
{
    AVFormatContext *input_format_context = null;
    AVFormatContext *output_format_context = null;
    AVCodecContext *input_codec_context = null;
    AVCodecContext *output_codec_context = null;
    SwrContext *resample_context = null;
    AVAudioFifo *fifo = null;
    bool ret = false;

    /* Open the input file for reading. */
    if (open_input_file(inputFile, &input_format_context, &input_codec_context) < 0)
    {
        goto cleanup;
    }
    /* Open the output file for writing. */
    if (open_output_file(outputFile, input_codec_context, &output_format_context, &output_codec_context) < 0)
    {
        goto cleanup;
    }
    /* Initialize the resampler to be able to convert audio sample formats. */
    if (init_resampler(input_codec_context, output_codec_context, &resample_context) < 0)
    {
        goto cleanup;
    }
    /* Initialize the FIFO buffer to store audio samples to be encoded. */
    if (init_fifo(&fifo, output_codec_context) < 0)
    {
        goto cleanup;
    }
    /* Write the header of the output file container. */
    if (write_output_file_header(output_format_context) < 0)
    {
        goto cleanup;
    }

    /* Loop as long as we have input samples to read or output samples
     * to write; abort as soon as we have neither. */
    while (true)
    {
        /* Use the encoder's desired frame size for processing. */
        int output_frame_size = output_codec_context->frame_size;
        int finished = 0;

        /* Make sure that there is one frame worth of samples in the FIFO
         * buffer so that the encoder can do its work.
         * Since the decoder's and the encoder's frame size may differ, the
         * FIFO buffer has to store as many frames' worth of input samples
         * as it takes to make up at least one frame's worth of output samples. */
        while (av_audio_fifo_size(fifo) < output_frame_size)
        {
            /* Decode one frame worth of audio samples, convert it to the
             * output sample format and put it into the FIFO buffer. */
            if (read_decode_convert_and_store(fifo, input_format_context, input_codec_context,
                                              output_codec_context, resample_context, &finished) < 0)
            {
                goto cleanup;
            }
            /* If we are at the end of the input file, we continue
             * encoding the remaining audio samples to the output file. */
            if (finished != 0)
            {
                break;
            }
        }

        /* If we have enough samples for the encoder, we encode them.
         * At the end of the file, we pass the remaining samples to
         * the encoder. */
        while (av_audio_fifo_size(fifo) >= output_frame_size ||
               (finished != 0 && av_audio_fifo_size(fifo) > 0))
        {
            /* Take one frame worth of audio samples from the FIFO buffer,
             * encode it and write it to the output file. */
            if (load_encode_and_write(fifo, output_format_context, output_codec_context) < 0)
            {
                goto cleanup;
            }
        }

        /* If we are at the end of the input file and have encoded
         * all remaining samples, we can exit this loop and finish. */
        if (finished != 0)
        {
            int data_written;
            /* Flush the encoder as it may have delayed frames. */
            do
            {
                data_written = 0;
                if (encode_audio_frame(null, output_format_context, output_codec_context, &data_written) < 0)
                {
                    goto cleanup;
                }
            } while (data_written != 0);
            break;
        }
    }

    /* Write the trailer of the output file container. */
    if (write_output_file_trailer(output_format_context) < 0)
    {
        goto cleanup;
    }
    ret = true;

cleanup:
    if (fifo != null)
    {
        av_audio_fifo_free(fifo);
    }
    swr_free(&resample_context);
    if (output_codec_context != null)
    {
        avcodec_free_context(&output_codec_context);
    }
    if (output_format_context != null)
    {
        avio_closep(&output_format_context->pb);
        avformat_free_context(output_format_context);
    }
    if (input_codec_context != null)
    {
        avcodec_free_context(&input_codec_context);
    }
    if (input_format_context != null)
    {
        avformat_close_input(&input_format_context);
    }
    return ret;
}
/**
 * Read one audio frame from the input file, decode, convert and store
 * it in the FIFO buffer.
 * @param fifo                 Buffer used for temporary storage
 * @param input_format_context Format context of the input file
 * @param input_codec_context  Codec context of the input file
 * @param output_codec_context Codec context of the output file
 * @param resampler_context    Resample context for the conversion
 * @param[out] finished        Indicates whether the end of file has been
 *                             reached and all data has been decoded. If this
 *                             flag is false, there is more data to be
 *                             decoded, i.e., this function has to be called
 *                             again.
 * @return Error code (0 if successful)
 */
int read_decode_convert_and_store(AVAudioFifo *fifo, AVFormatContext *input_format_context,
                                  AVCodecContext *input_codec_context,
                                  AVCodecContext *output_codec_context,
                                  SwrContext *resampler_context, int *finished)
{
    /* Temporary storage of the input samples of the frame read from the file. */
    AVFrame *input_frame = null;
    /* Temporary storage for the converted input samples. */
    byte **converted_input_samples = null;
    int data_present = 0;
    int ret = AVERROR_EXIT;

    /* Initialize temporary storage for one input frame. */
    if (init_input_frame(&input_frame) < 0)
    {
        goto cleanup;
    }
    /* Decode one frame worth of audio samples. */
    if (decode_audio_frame(input_frame, input_format_context, input_codec_context,
                           &data_present, finished) < 0)
    {
        goto cleanup;
    }
    /* If we are at the end of the file and there are no more samples
     * in the decoder which are delayed, we are actually finished.
     * This must not be treated as an error. */
    if (*finished != 0)
    {
        ret = 0;
        goto cleanup;
    }
    /* If there is decoded data, convert and store it. */
    if (data_present != 0)
    {
        /* Initialize the temporary storage for the converted input samples. */
        if (init_converted_samples(&converted_input_samples, output_codec_context,
                                   input_frame->nb_samples) < 0)
        {
            goto cleanup;
        }
        /* Convert the input samples to the desired output sample format.
         * This requires a temporary storage provided by converted_input_samples. */
        if (convert_samples(input_frame->extended_data, converted_input_samples,
                            input_frame->nb_samples, resampler_context) < 0)
        {
            goto cleanup;
        }
        /* Add the converted input samples to the FIFO buffer for later processing. */
        if (add_samples_to_fifo(fifo, converted_input_samples, input_frame->nb_samples) < 0)
        {
            goto cleanup;
        }
    }
    ret = 0;

cleanup:
    if (converted_input_samples != null)
    {
        av_freep(&converted_input_samples[0]);
        Marshal.FreeHGlobal((IntPtr)converted_input_samples);
    }
    av_frame_free(&input_frame);
    return ret;
}
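// init_converted_samples is referenced above but not shown. The cleanup path
// frees the pointer array with Marshal.FreeHGlobal and the sample planes with
// av_freep, so the allocation has to mirror that; a sketch under that
// assumption:
int init_converted_samples(byte ***converted_input_samples,
                           AVCodecContext *output_codec_context, int frame_size)
{
    /* Allocate one pointer per channel on the unmanaged heap, matching the
     * Marshal.FreeHGlobal call in the cleanup code above. */
    *converted_input_samples = (byte **)Marshal.AllocHGlobal(
        output_codec_context->channels * sizeof(byte *));
    /* Allocate the sample planes themselves in one av_samples_alloc call. */
    int error = av_samples_alloc(*converted_input_samples, null,
                                 output_codec_context->channels, frame_size,
                                 output_codec_context->sample_fmt, 0);
    if (error < 0)
    {
        Console.WriteLine("error: Could not allocate converted input samples");
        Marshal.FreeHGlobal((IntPtr)(*converted_input_samples));
        *converted_input_samples = null;
    }
    return error;
}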
public My2(AVFrame *pFrame, AVAudioFifo *aVAudioFifo)
{
    this.pFrame = pFrame;
    this.aVAudioFifo = aVAudioFifo;
}
public My(Openfile openfile, AVAudioFifo *aVAudioFifo)
{
    this.openfile = openfile;
    this.aVAudioFifo = aVAudioFifo;
}
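// How My is meant to be driven, per the commented-out Thread lines in Main
// below: one decode thread per input, each filling its own FIFO while the
// mixer drains them. A sketch, assuming using System.Threading and that My.C
// simply calls readFile(openfile, aVAudioFifo):
static unsafe void StartDecodeThreads(Openfile openfile1, Openfile openfile2,
                                      AVAudioFifo *fifo1, AVAudioFifo *fifo2)
{
    Thread thread1 = new Thread(new ThreadStart(new My(openfile1, fifo1).C));
    Thread thread2 = new Thread(new ThreadStart(new My(openfile2, fifo2).C));
    thread1.Start();
    thread2.Start();
    thread1.Join(); // wait until both inputs are fully decoded
    thread2.Join();
}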
unsafe static void Main(string[] args)
{
    ffmpeg.RootPath = @"D:\cshapdemo\ConsoleApp1\ffmpeg";
    var file1 = @"D:\cshapdemo\ConsoleApp1\会不会.mp3";
    var file2 = @"D:\cshapdemo\ConsoleApp1\无人之岛-任然.mp3";
    Openfile openfile1 = new Openfile();
    Openfile openfile2 = new Openfile();
    openfile1.open(file1);
    openfile2.open(file2);
    Openfile outfile = new Openfile();
    outfile.OpenFileOutput(@"D:\cshapdemo\ConsoleApp1\3.mp3");

    // Allocate one FIFO per input, sized for 30 frames of decoded samples.
    AVAudioFifo *aVAudioFifo1 = ffmpeg.av_audio_fifo_alloc(
        openfile1._pCodecContext->sample_fmt,
        openfile1._pCodecContext->channels,
        30 * openfile1._pCodecContext->frame_size);
    AVAudioFifo *aVAudioFifo2 = ffmpeg.av_audio_fifo_alloc(
        openfile2._pCodecContext->sample_fmt,
        openfile2._pCodecContext->channels,
        30 * openfile2._pCodecContext->frame_size);

    // Configure the amix filter graph: two inputs, one mixed output.
    AVFilterGraph *_filter_graph = null;
    AVFilterContext *_filter_ctx_src_spk = null;
    AVFilterContext *_filter_ctx_src_mic = null;
    AVFilterContext *_filter_ctx_sink = null;
    string filter_desc = "[in0][in1]amix=inputs=2[out]";
    MyFilter myFilter = new MyFilter();
    myFilter.InitFilter(_filter_graph, filter_desc, _filter_ctx_src_spk, _filter_ctx_src_mic,
                        _filter_ctx_sink, openfile1, openfile2, outfile);

    // Decode both inputs completely into their FIFOs (the threaded variant is below).
    // Thread thread1 = new Thread(start: new ThreadStart(new My(openfile1, aVAudioFifo1).C));
    // Thread thread2 = new Thread(start: new ThreadStart(new My(openfile2, aVAudioFifo2).C));
    readFile(openfile1, aVAudioFifo1);
    readFile(openfile2, aVAudioFifo2);
    if (count == 2)
    {
        Console.WriteLine("both inputs fully decoded");
    }

    int tmpFifoFailed = 0;
    int frame_count = 0;
    while (true)
    {
        AVFrame *pFrame_spk = ffmpeg.av_frame_alloc();
        AVFrame *pFrame_mic = ffmpeg.av_frame_alloc();
        AVPacket packet_out;
        int got_packet_ptr = 0;

        int fifo_spk_size = ffmpeg.av_audio_fifo_size(aVAudioFifo1);
        int fifo_mic_size = ffmpeg.av_audio_fifo_size(aVAudioFifo2);
        int frame_spk_min_size = openfile1._pFormatContext->streams[openfile1._streamIndex]->codecpar->frame_size;
        int frame_mic_min_size = openfile2._pFormatContext->streams[openfile2._streamIndex]->codecpar->frame_size;
        Console.WriteLine("fifo_spk_size:==" + fifo_spk_size);

        if (fifo_spk_size >= frame_spk_min_size && fifo_mic_size >= frame_mic_min_size)
        {
            tmpFifoFailed = 0;

            // Describe one frame of input 1 and allocate its sample buffers.
            pFrame_spk->nb_samples = frame_spk_min_size;
            pFrame_spk->channel_layout = openfile1._pFormatContext->streams[openfile1._streamIndex]->codecpar->channel_layout;
            pFrame_spk->format = openfile1._pFormatContext->streams[openfile1._streamIndex]->codecpar->format;
            pFrame_spk->sample_rate = openfile1._pFormatContext->streams[openfile1._streamIndex]->codecpar->sample_rate;
            int ret = ffmpeg.av_frame_get_buffer(pFrame_spk, 0);
            if (ret < 0)
            {
                Console.WriteLine("av_frame_get_buffer pFrame_spk failed");
            }

            // Same for input 2.
            pFrame_mic->nb_samples = frame_mic_min_size;
            pFrame_mic->channel_layout = openfile2._pFormatContext->streams[openfile2._streamIndex]->codecpar->channel_layout;
            pFrame_mic->format = openfile2._pFormatContext->streams[openfile2._streamIndex]->codecpar->format;
            pFrame_mic->sample_rate = openfile2._pFormatContext->streams[openfile2._streamIndex]->codecpar->sample_rate;
            ret = ffmpeg.av_frame_get_buffer(pFrame_mic, 0);
            if (ret < 0)
            {
                Console.WriteLine("av_frame_get_buffer pFrame_mic failed");
            }

            // Pull one frame worth of samples out of each FIFO, straight into
            // the frames' plane pointers; extended_data is already a byte**,
            // so the leaky Marshal.StructureToPtr round-trip is unnecessary.
            ret = ffmpeg.av_audio_fifo_read(aVAudioFifo1, (void **)pFrame_spk->extended_data, frame_spk_min_size);
            ret = ffmpeg.av_audio_fifo_read(aVAudioFifo2, (void **)pFrame_mic->extended_data, frame_mic_min_size);

            _filter_ctx_src_spk = myFilter._filter_ctx_src_spk;
            _filter_ctx_src_mic = myFilter._filter_ctx_src_mic;
            _filter_ctx_sink = myFilter._filter_ctx_sink;

            // Hand both frames to the filter graph.
            ret = ffmpeg.av_buffersrc_add_frame(_filter_ctx_src_spk, pFrame_spk);
            if (ret < 0)
            {
                Console.WriteLine("Mixer: failed to call av_buffersrc_add_frame (speaker)\n");
                break;
            }
            ret = ffmpeg.av_buffersrc_add_frame(_filter_ctx_src_mic, pFrame_mic);
            if (ret < 0)
            {
                Console.WriteLine("Mixer: failed to call av_buffersrc_add_frame (mic)\n");
                break;
            }

            // Drain the mixed frames from the sink, encode them and write them out.
            while (true)
            {
                AVFrame *pFrame_out = ffmpeg.av_frame_alloc();
                ret = ffmpeg.av_buffersink_get_frame_flags(_filter_ctx_sink, pFrame_out, 0);
                if (ret < 0)
                {
                    var bufferSize = 1024;
                    var buffer = stackalloc byte[bufferSize];
                    ffmpeg.av_strerror(ret, buffer, (ulong)bufferSize);
                    var message = Marshal.PtrToStringAnsi((IntPtr)buffer);
                    Console.WriteLine("Mixer: failed to call av_buffersink_get_frame_flags\n" + message);
                    ffmpeg.av_frame_free(&pFrame_out);
                    break;
                }
                if (pFrame_out->data[0] != null)
                {
                    ffmpeg.av_init_packet(&packet_out);
                    packet_out.data = null;
                    packet_out.size = 0;
                    ret = ffmpeg.avcodec_encode_audio2(outfile._pCodecContext, &packet_out, pFrame_out, &got_packet_ptr);
                    if (ret < 0)
                    {
                        Console.WriteLine("Mixer: failed to call avcodec_encode_audio2\n");
                        ffmpeg.av_frame_free(&pFrame_out);
                        break;
                    }
                    if (got_packet_ptr > 0)
                    {
                        packet_out.stream_index = outfile._streamIndex;
                        packet_out.pts = frame_count * outfile._pCodecContext->frame_size;
                        packet_out.dts = packet_out.pts;
                        packet_out.duration = outfile._pCodecContext->frame_size;
                        // Rescale from the encoder time base to the output
                        // stream time base (rescaling to the same time base,
                        // as before, was a no-op).
                        AVRational enc_tb = outfile._pCodecContext->time_base;
                        AVRational st_tb = outfile._pFormatContext->streams[outfile._streamIndex]->time_base;
                        packet_out.pts = ffmpeg.av_rescale_q_rnd(packet_out.pts, enc_tb, st_tb,
                            AVRounding.AV_ROUND_INF | AVRounding.AV_ROUND_PASS_MINMAX);
                        packet_out.dts = packet_out.pts;
                        packet_out.duration = ffmpeg.av_rescale_q_rnd(packet_out.duration, enc_tb, st_tb,
                            AVRounding.AV_ROUND_INF | AVRounding.AV_ROUND_PASS_MINMAX);
                        frame_count++;
                        ret = ffmpeg.av_interleaved_write_frame(outfile._pFormatContext, &packet_out);
                        if (ret < 0)
                        {
                            Console.WriteLine("Mixer: failed to call av_interleaved_write_frame\n");
                        }
                        Console.WriteLine("Mixer: write frame to file\n");
                    }
                    ffmpeg.av_packet_unref(&packet_out);
                }
                ffmpeg.av_frame_free(&pFrame_out);
            }
            ffmpeg.av_frame_free(&pFrame_spk);
            ffmpeg.av_frame_free(&pFrame_mic);
            Console.WriteLine("tmpFifoFailed:" + tmpFifoFailed);
        }
        else
        {
            Console.WriteLine("else tmpFifoFailed:" + tmpFifoFailed);
            tmpFifoFailed++;
            if (tmpFifoFailed > 300)
            {
                break;
            }
            ffmpeg.av_frame_free(&pFrame_spk);
            ffmpeg.av_frame_free(&pFrame_mic);
        }
    }
    ffmpeg.av_write_trailer(outfile._pFormatContext);
}
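// avcodec_encode_audio2, used in Main above, is deprecated; current FFmpeg
// replaces it with the avcodec_send_frame / avcodec_receive_packet pair. A
// minimal sketch (EncodeAndWrite is an illustrative name; the stream index
// and timestamp rescaling would be set exactly as in Main, elided here):
static unsafe void EncodeAndWrite(AVCodecContext *enc, AVFrame *frame, AVFormatContext *ofmt)
{
    // Passing frame == null flushes the encoder at end of stream.
    if (ffmpeg.avcodec_send_frame(enc, frame) < 0)
    {
        Console.WriteLine("Mixer: failed to call avcodec_send_frame\n");
        return;
    }
    AVPacket *pkt = ffmpeg.av_packet_alloc();
    // One sent frame may yield zero or more packets.
    while (ffmpeg.avcodec_receive_packet(enc, pkt) == 0)
    {
        // ... set pkt->stream_index and rescale pts/dts/duration as in Main ...
        ffmpeg.av_interleaved_write_frame(ofmt, pkt);
        ffmpeg.av_packet_unref(pkt);
    }
    ffmpeg.av_packet_free(&pkt);
}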