/// <summary>
/// Filters the hardware decoders offered to the base implementation, disabling
/// MediaCodec for container formats it is known to mishandle.
/// </summary>
/// <param name="inputFormat">Native input (demuxer) format of the media being opened.</param>
/// <param name="codecId">Codec of the stream to decode.</param>
/// <param name="targetHwDecoders">Hardware decoders the caller is willing to use.</param>
/// <returns>The decoder candidates from the base implementation, after filtering.</returns>
protected override IEnumerable<(FFmpegCodec codec, AVHWDeviceType hwDeviceType)> GetAvailableDecoders(AVInputFormat* inputFormat, AVCodecID codecId, HardwareVideoDecoder targetHwDecoders)
{
    if (targetHwDecoders.HasFlagFast(HardwareVideoDecoder.MediaCodec))
    {
        string formatName = Marshal.PtrToStringAnsi((IntPtr)inputFormat->name);

        switch (formatName)
        {
            // MediaCodec doesn't return correct timestamps when playing back AVI files
            // which results in the video running at ~30% less FPS than it's supposed to.
            case "avi":
            {
                // Fix: the message previously used "${formatName}" which logged a
                // literal '$' instead of plain interpolation.
                Logger.Log($"Disabling HW decoding for this video because of unsupported input format: {formatName}");
                targetHwDecoders &= ~HardwareVideoDecoder.MediaCodec;
                break;
            }
        }
    }

    return base.GetAvailableDecoders(inputFormat, codecId, targetHwDecoders);
}
/// <summary>
/// Initialises the demuxer by its short name.
/// </summary>
/// <param name="name">Demuxer short name, e.g. "mov", "mp4" ...</param>
/// <exception cref="FFmpegException">Thrown with AVERROR_DEMUXER_NOT_FOUND when no demuxer matches.</exception>
public InFormat(string name)
{
    unsafe
    {
        if (!string.IsNullOrEmpty(name))
        {
            void* ifmtOpaque = null;
            AVInputFormat* iformat;

            while ((iformat = ffmpeg.av_demuxer_iterate(&ifmtOpaque)) != null)
            {
                InFormat format = new InFormat(iformat);

                // A demuxer may register several comma-separated aliases,
                // e.g. format.Name == "mov,mp4,m4a,3gp,3g2,mj2"
                string[] names = format.Name.Split(new char[] { ',' }, StringSplitOptions.RemoveEmptyEntries);

                foreach (var item in names)
                {
                    // Fix: ordinal case-insensitive comparison instead of
                    // culture-sensitive name.ToLower() (Turkish-I pitfall) called
                    // once per iteration.
                    if (string.Equals(item, name, StringComparison.OrdinalIgnoreCase))
                    {
                        pInputFormat = iformat;
                        return;
                    }
                }
            }
        }

        throw new FFmpegException(ffmpeg.AVERROR_DEMUXER_NOT_FOUND);
    }
}
/// <summary>
/// Wraps an existing native <see cref="AVInputFormat"/> pointer.
/// </summary>
/// <param name="iformat">Native demuxer pointer; must not be null.</param>
internal InFormat(AVInputFormat* iformat)
{
    pInputFormat = iformat == null
        ? throw new FFmpegException(FFmpegException.NullReference)
        : iformat;
}
/// <summary>
/// Creates an audio decoder for the given source.
/// </summary>
/// <param name="url">Path or URL of the media to decode.</param>
/// <param name="inputFormat">Optional forced input format; null lets FFmpeg probe.</param>
/// <param name="repeat">Whether decoding restarts when the end of the stream is reached.</param>
/// <param name="isMicrophone">Whether the source is a live microphone device.</param>
public unsafe FFmpegAudioDecoder(string url, AVInputFormat* inputFormat = null, bool repeat = false, bool isMicrophone = false)
{
    // Plain field capture; the decoder is initialised lazily elsewhere.
    _isMicrophone = isMicrophone;
    _repeat = repeat;
    _inputFormat = inputFormat;
    _sourceUrl = url;
}
/// <summary>
/// Creates an audio-only stream bound to a microphone source.
/// </summary>
/// <param name="source">Microphone configuration object supplying timeout and FFmpeg options.</param>
public MediaStream(objectsMicrophone source) : base(null)
{
    IsAudio = true;
    _audiosource = source;
    _inputFormat = null;

    // settings.timeout * 1000 with a floor of 5,000,000 — presumably ms -> µs
    // conversion; confirm against the settings documentation.
    _timeoutMicroSeconds = Math.Max(5000000, source.settings.timeout * 1000);
    _options = source.settings.ffmpeg;
}
/// <summary>
/// Wraps a non-null native <see cref="AVInputFormat"/> pointer.
/// </summary>
/// <param name="ptr">Native pointer; must not be null.</param>
private InputFormat(AVInputFormat* ptr)
{
    _ptr = ptr != null
        ? ptr
        : throw new ArgumentNullException(nameof(ptr));
}
/// <summary>
/// <see cref="av_probe_input_format3(AVProbeData*, int, int*)"/>
/// </summary>
/// <param name="data">Probe buffer describing the data to identify.</param>
/// <param name="isOpened">Whether the file is already opened (limits probing to AVFMT_NOFILE formats when false).</param>
/// <param name="score">Receives the probe confidence score.</param>
/// <returns>The detected input format, or null when nothing matched.</returns>
public static InputFormat? ProbeInputFormat3(AVProbeData* data, bool isOpened, out int score)
{
    int probeScore;
    AVInputFormat* found = av_probe_input_format3(data, isOpened ? 1 : 0, &probeScore);
    score = probeScore;

    if (found == null)
    {
        return null;
    }

    return new InputFormat?(FromNative(found));
}
/// <summary>
/// <see cref="AVInputFormat"/> adapter.
/// </summary>
/// <param name="pAVInputFormat">Native pointer; must not be <see cref="IntPtr.Zero"/>.</param>
/// <exception cref="FFmpegException">Thrown when the pointer is zero.</exception>
public InFormat(IntPtr pAVInputFormat)
{
    // Guard clause: reject a null native handle up front.
    if (IntPtr.Zero == pAVInputFormat)
        throw new FFmpegException(FFmpegException.NullReference);

    pInputFormat = (AVInputFormat*)pAVInputFormat;
}
/// <summary>
/// Opens a video source (DirectShow capture device or RTP/RTSP URL) and prepares
/// a decoder for its best video stream, optionally with hardware acceleration.
/// </summary>
/// <param name="url">Device name (CAM_DEVICE) or stream URL (RTP_RTSP).</param>
/// <param name="inputType">Kind of source to open.</param>
/// <param name="HWDeviceType">Hardware device type; NONE disables HW acceleration.</param>
public VideoStreamDecoder(string url, VideoInputType inputType, AVHWDeviceType HWDeviceType = AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
{
    try
    {
        ffmpeg.avdevice_register_all();
        iFormatContext = ffmpeg.avformat_alloc_context();
        receivedFrame = ffmpeg.av_frame_alloc();
        var _iFormatContext = iFormatContext;

        // Fix: the dictionary pointer must start out null — av_dict_set treats a
        // non-null *pm as an existing dictionary, so an indeterminate pointer is
        // undefined behaviour (and an unassigned local cannot have its address taken).
        AVDictionary* avDict = null;
        ffmpeg.av_dict_set(&avDict, "reorder_queue_size", "1", 0);

        switch (inputType)
        {
            case VideoInputType.CAM_DEVICE:
                // Force the DirectShow input format so 'url' is treated as a device name.
                AVInputFormat* iformat = ffmpeg.av_find_input_format("dshow");
                ffmpeg.avformat_open_input(&_iFormatContext, url, iformat, null).ThrowExceptionIfError();
                break;
            case VideoInputType.RTP_RTSP:
                ffmpeg.avformat_open_input(&_iFormatContext, url, null, &avDict).ThrowExceptionIfError();
                break;
            default:
                break;
        }

        ffmpeg.avformat_find_stream_info(iFormatContext, null).ThrowExceptionIfError();

        AVCodec* codec;
        dec_stream_index = ffmpeg.av_find_best_stream(iFormatContext, AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0).ThrowExceptionIfError();
        iCodecContext = ffmpeg.avcodec_alloc_context3(codec);

        if (HWDeviceType != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
        {
            ffmpeg.av_hwdevice_ctx_create(&iCodecContext->hw_device_ctx, HWDeviceType, null, null, 0).ThrowExceptionIfError();
        }

        // Copy stream parameters into the codec context before opening the codec.
        ffmpeg.avcodec_parameters_to_context(iCodecContext, iFormatContext->streams[dec_stream_index]->codecpar).ThrowExceptionIfError();
        ffmpeg.avcodec_open2(iCodecContext, codec, null).ThrowExceptionIfError();

        CodecName = ffmpeg.avcodec_get_name(codec->id);
        FrameSize = new Size(iCodecContext->width, iCodecContext->height);
        PixelFormat = iCodecContext->pix_fmt;

        rawPacket = ffmpeg.av_packet_alloc();
        decodedFrame = ffmpeg.av_frame_alloc();
    }
    catch (AccessViolationException ex)
    {
        throw new AccessViolationException("Access Violation Exception", ex);
    }
}
/// <summary>
/// Creates a camera stream that forces a specific FFmpeg input format.
/// </summary>
/// <param name="format">FFmpeg input format short name (e.g. "dshow").</param>
/// <param name="source">Camera configuration object.</param>
/// <exception cref="Exception">Thrown when FFmpeg does not know the format.</exception>
public MediaStream(string format, objectsCamera source) : base(source)
{
    _source = source;
    _inputFormat = ffmpeg.av_find_input_format(format);

    // av_find_input_format returns null for unknown format names.
    if (_inputFormat == null)
        throw new Exception("Can not find input format " + format);
}
/// <summary>
/// Creates a stream for a URL with a forced FFmpeg input format.
/// </summary>
/// <param name="format">FFmpeg input format short name.</param>
/// <param name="url">Media URL or device path.</param>
/// <exception cref="Exception">Thrown when FFmpeg does not know the format.</exception>
public MediaStream(string format, string url)
{
    URL = url;
    _inputFormat = ffmpeg.av_find_input_format(format);

    // av_find_input_format returns null for unknown format names.
    if (_inputFormat == null)
        throw new Exception("Can not find input format " + format);
}
/// <summary>
/// Creates an audio-only stream bound to a microphone source.
/// </summary>
/// <param name="source">Microphone configuration object supplying timing and FFmpeg options.</param>
public MediaStream(objectsMicrophone source) : base(null)
{
    _modeAudio = true;
    _audiosource = source;
    _inputFormat = null;

    _timeout = source.settings.timeout;
    _analyzeDuration = source.settings.analyzeduration;
    _options = source.settings.ffmpeg;
}
/// <summary>
/// Opens a DirectShow capture device (e.g. a webcam) and prepares a decoder
/// for its first video stream.
/// </summary>
/// <param name="device">DirectShow device identifier passed to avformat_open_input.</param>
/// <exception cref="InvalidOperationException">No video stream found, or codec unsupported.</exception>
public VideoStreamDecoder(string device)
{
    _pFormatContext = ffmpeg.avformat_alloc_context();
    var pFormatContext = _pFormatContext;

    //ffmpeg.av_register_all();
    ffmpeg.avdevice_register_all();

    // webcam: force the "dshow" input format so 'device' is interpreted by DirectShow.
    AVInputFormat* iformat = ffmpeg.av_find_input_format("dshow");
    ffmpeg.avformat_open_input(&pFormatContext, device, iformat, null).ThrowExceptionIfError();

    // To open a media file instead, a URL or file name would be needed:
    //ffmpeg.avformat_open_input(&pFormatContext, url, null, null).ThrowExceptionIfError();

    //// Fetches media/stream information; this is a blocking call and may stall
    //// when reading over a network protocol.
    ffmpeg.avformat_find_stream_info(_pFormatContext, null).ThrowExceptionIfError();

    // find the first video stream
    AVStream* pStream = null;

    for (var i = 0; i < _pFormatContext->nb_streams; i++)
    {
        if (_pFormatContext->streams[i]->codec->codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO)
        {
            pStream = _pFormatContext->streams[i];
            break;
        }
    }

    if (pStream == null)
    {
        throw new InvalidOperationException("Could not found video stream.");
    }

    _streamIndex = pStream->index;
    _pCodecContext = pStream->codec;

    var codecId = _pCodecContext->codec_id;
    var pCodec = ffmpeg.avcodec_find_decoder(codecId); //H264

    if (pCodec == null)
    {
        throw new InvalidOperationException("Unsupported codec.");
    }

    //open codec
    ffmpeg.avcodec_open2(_pCodecContext, pCodec, null).ThrowExceptionIfError();

    CodecName = ffmpeg.avcodec_get_name(codecId);
    FrameSize = new System.Windows.Size(_pCodecContext->width, _pCodecContext->height); // e.g. 640x480
    PixelFormat = _pCodecContext->pix_fmt;

    _pPacket = ffmpeg.av_packet_alloc();
    _pFrame = ffmpeg.av_frame_alloc();
}
/// <summary>
/// <see cref="av_find_input_format(string)"/>
/// </summary>
/// <param name="shortName">FFmpeg demuxer short name to resolve.</param>
/// <exception cref="ArgumentOutOfRangeException">Thrown when no demuxer with that name exists.</exception>
public InputFormat(string shortName)
{
    AVInputFormat* found = av_find_input_format(shortName);

    _ptr = found != null
        ? found
        : throw new ArgumentOutOfRangeException(nameof(shortName), $"Cannot find InputFormat: {shortName}");
}
/// <summary>
/// Creates the video decoder for the given path and wires up its events.
/// The decoder disposes itself once the end of the file is reached.
/// </summary>
/// <param name="path">Media path or device identifier.</param>
/// <param name="avInputFormat">Optional forced input format; null lets FFmpeg probe.</param>
/// <param name="repeat">Whether playback loops at end of stream.</param>
/// <param name="isCamera">Whether the source is a live camera device.</param>
public unsafe void CreateVideoDecoder(String path, AVInputFormat* avInputFormat, bool repeat = false, bool isCamera = false)
{
    _videoDecoder = new FFmpegVideoDecoder(path, avInputFormat, repeat, isCamera);

    _videoDecoder.OnEndOfFile += () =>
    {
        logger.LogDebug($"File source decode complete for {path}.");
        OnEndOfFile?.Invoke();
        _videoDecoder.Dispose();
    };
    _videoDecoder.OnVideoFrame += VideoDecoder_OnVideoFrame;
}
/// <summary>
/// Creates the audio decoder for the given path and wires up its events.
/// The decoder disposes itself once the end of the file is reached.
/// </summary>
/// <param name="path">Media path or device identifier.</param>
/// <param name="avInputFormat">Optional forced input format; null lets FFmpeg probe.</param>
/// <param name="repeat">Whether playback loops at end of stream.</param>
/// <param name="isMicrophone">Whether the source is a live microphone device.</param>
public unsafe void CreateAudioDecoder(String path, AVInputFormat* avInputFormat, bool repeat = false, bool isMicrophone = false)
{
    _audioDecoder = new FFmpegAudioDecoder(path, avInputFormat, repeat, isMicrophone);

    _audioDecoder.OnEndOfFile += () =>
    {
        logger.LogDebug($"File source decode complete for {path}.");
        OnEndOfFile?.Invoke();
        _audioDecoder.Dispose();
    };
    _audioDecoder.OnAudioFrame += AudioDecoder_OnAudioFrame;
}
/// <summary>
/// Creates a video stream bound to a camera source, copying its connection settings.
/// </summary>
/// <param name="source">Camera configuration object.</param>
public MediaStream(objectsCamera source) : base(source)
{
    _modeAudio = false;
    _source = source;
    _inputFormat = null;

    // Connection/probing settings copied straight from the camera configuration.
    _cookies = source.settings.cookies;
    _analyzeDuration = source.settings.analyseduration;
    _timeout = source.settings.timeout;
    _userAgent = source.settings.useragent;
    _headers = source.settings.headers;
    RTSPmode = Helper.RTSPMode(source.settings.rtspmode);
}
/// <summary>
/// Creates a microphone capture source using the platform's native FFmpeg
/// capture backend (dshow / alsa / avfoundation).
/// </summary>
/// <param name="path">Device path/name understood by the selected backend.</param>
/// <param name="audioEncoder">Encoder used for the captured audio.</param>
/// <exception cref="NotSupportedException">Thrown on platforms without a known capture backend.</exception>
public unsafe FFmpegMicrophoneSource(string path, IAudioEncoder audioEncoder) : base(audioEncoder)
{
    // Pick the capture input format matching the current OS.
    string inputFormat;
    if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
    {
        inputFormat = "dshow";
    }
    else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
    {
        inputFormat = "alsa";
    }
    else if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
    {
        inputFormat = "avfoundation";
    }
    else
    {
        throw new NotSupportedException($"Cannot find adequate input format - OSArchitecture:[{RuntimeInformation.OSArchitecture}] - OSDescription:[{RuntimeInformation.OSDescription}]");
    }

    AVInputFormat* aVInputFormat = ffmpeg.av_find_input_format(inputFormat);
    CreateAudioDecoder(path, aVInputFormat, false, true);
    InitialiseDecoder();
}
/// <summary>
/// Creates a video stream bound to a camera window, copying its connection settings.
/// </summary>
/// <param name="source">Camera window whose Camobject supplies the configuration.</param>
public MediaStream(CameraWindow source) : base(source)
{
    IsAudio = false;
    _source = source.Camobject;
    _inputFormat = null;

    // Connection settings copied from the camera configuration; timeout is
    // clamped to at least 3000.
    _cookies = _source.settings.cookies;
    _timeout = Math.Max(3000, _source.settings.timeout);
    _userAgent = _source.settings.useragent;
    _headers = _source.settings.headers;
    _modeRTSP = Helper.RTSPMode(_source.settings.rtspmode);
}
/// <summary>
/// Creates a video stream bound to a camera window, copying its connection settings.
/// Also records this instance in the static <c>_instance</c> slot.
/// </summary>
/// <param name="source">Camera window whose Camobject supplies the configuration.</param>
public MediaStream(CameraWindow source) : base(source)
{
    _instance = this;
    _modeAudio = false;
    _source = source.Camobject;
    _inputFormat = null;

    // Connection/probing settings copied from the camera configuration.
    _cookies = _source.settings.cookies;
    _analyzeDuration = _source.settings.analyseduration;
    _timeout = _source.settings.timeout;
    _userAgent = _source.settings.useragent;
    _headers = _source.settings.headers;
    _rtsPmode = Helper.RTSPMode(_source.settings.rtspmode);
}
/// <summary>
/// Creates a video stream bound to a camera window, copying its connection,
/// GPU and audio settings.
/// </summary>
/// <param name="source">Camera window whose Camobject supplies the configuration.</param>
public MediaStream(CameraWindow source) : base(source)
{
    IsAudio = false;
    _source = source.Camobject;
    _inputFormat = null;

    // settings.timeout * 1000 with a floor of 5,000,000 — presumably ms -> µs
    // conversion; confirm against the settings documentation.
    _cookies = _source.settings.cookies;
    _timeoutMicroSeconds = Math.Max(5000000, _source.settings.timeout * 1000);
    _userAgent = _source.settings.useragent;
    _headers = _source.settings.headers;
    _modeRTSP = Helper.RTSPMode(_source.settings.rtspmode);
    _useGPU = _source.settings.useGPU;
    _ignoreAudio = _source.settings.ignoreaudio;
}
/// <summary>
/// Creates a screen-capture source for a region of the display, using the
/// platform's native FFmpeg grabber (gdigrab / avfoundation / x11grab).
/// </summary>
/// <param name="path">Capture target passed to the decoder (e.g. display identifier).</param>
/// <param name="rect">Region of the screen to capture.</param>
/// <param name="frameRate">Capture frame rate; defaults to 20 fps.</param>
/// <exception cref="NotSupportedException">Thrown on platforms without a known grabber.</exception>
public unsafe FFmpegScreenSource(string path, Rectangle rect, int frameRate = 20)
{
    string inputFormat;
    Dictionary<String, String>? options = null;

    if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
    {
        // gdigrab selects the region via offset_x/offset_y + video_size.
        inputFormat = "gdigrab";
        options = new Dictionary<string, string>()
        {
            ["offset_x"] = rect.X.ToString(),
            ["offset_y"] = rect.Y.ToString(),
            ["video_size"] = $"{rect.Width}X{rect.Height}",
            ["framerate"] = frameRate.ToString()
        };
    }
    else if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
    {
        // avfoundation captures the whole screen; crop to the region via a filter.
        inputFormat = "avfoundation";
        options = new Dictionary<string, string>()
        {
            ["vf"] = $"crop={rect.Width}:{rect.Height}:{rect.X}:{rect.Y}",
            ["framerate"] = frameRate.ToString()
        };
    }
    else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
    {
        // x11grab selects the region via grab_x/grab_y + video_size.
        //https://superuser.com/questions/1562228/how-to-specify-the-size-to-record-the-screen-with-ffmpeg
        inputFormat = "x11grab";
        options = new Dictionary<string, string>()
        {
            ["video_size"] = $"{rect.Width}X{rect.Height}",
            ["grab_x"] = rect.X.ToString(),
            ["grab_y"] = rect.Y.ToString(),
            ["framerate"] = frameRate.ToString()
        };
    }
    else
    {
        throw new NotSupportedException($"Cannot find adequate input format - OSArchitecture:[{RuntimeInformation.OSArchitecture}] - OSDescription:[{RuntimeInformation.OSDescription}]");
    }

    AVInputFormat* aVInputFormat = ffmpeg.av_find_input_format(inputFormat);
    CreateVideoDecoder(path, aVInputFormat, false, true);
    InitialiseDecoder(options);
}
/// <summary>
/// Triggers avfoundation's "list_devices" mode so FFmpeg logs the available
/// capture devices, and returns the captured log output for parsing.
/// </summary>
/// <returns>The log text captured while opening the avfoundation input.</returns>
private static unsafe String GetAvFoundationLogsAboutDevicesList()
{
    String inputFormat = "avfoundation";
    AVInputFormat* avInputFormat = ffmpeg.av_find_input_format(inputFormat);
    AVFormatContext* pFormatCtx = ffmpeg.avformat_alloc_context();
    AVDictionary* options = null;
    ffmpeg.av_dict_set(&options, "list_devices", "true", 0);

    // We use temporarily a specific callback to log FFmpeg entries
    FFmpegInit.UseSpecificLogCallback();

    // Here the return code is < 0 ... But we have anyway an output from av_log
    // which can be parsed ...
    ffmpeg.avformat_open_input(&pFormatCtx, null, avInputFormat, &options);
    ffmpeg.avformat_close_input(&pFormatCtx);

    // Fix: free the options dictionary. avformat_open_input leaves unconsumed
    // entries ("list_devices") in it, and the native memory was previously leaked.
    ffmpeg.av_dict_free(&options);

    // We no more need to use temporarily a specific callback to log FFmpeg entries
    FFmpegInit.UseDefaultLogCallback();

    // returns logs
    return FFmpegInit.GetStoredLogs();
}
/// <summary>
/// Experimental constructor: enumerates DirectShow input sources via FFmpeg's
/// device listing and prints the result to the console. Most of the decoding
/// pipeline below is commented-out scratch code kept for reference.
/// NOTE(review): 'device_list' from avdevice_list_input_sources is never freed
/// (avdevice_free_list_devices) and 'options' is never av_dict_free'd — confirm
/// and clean up before production use.
/// </summary>
/// <param name="url">Unused by the active code path.</param>
/// <param name="inputType">Unused by the active code path.</param>
/// <param name="HWDeviceType">Unused by the active code path.</param>
public VideoStreamDecoder(string url, VIDEO_INPUT_TYPE inputType = VIDEO_INPUT_TYPE.RTP_RTSP, AVHWDeviceType HWDeviceType = AVHWDEVICE_TYPE_NONE_PLACEHOLDER)
{
    ffmpeg.avdevice_register_all();
    AVFormatContext* pFormatCtx = ffmpeg.avformat_alloc_context();
    AVDictionary* options = null;
    ffmpeg.av_dict_set(&options, "list_devices", "true", 0);
    AVInputFormat* iformat = ffmpeg.av_find_input_format("dshow");
    Console.WriteLine("========Device Info=============\n");
    // Opening with list_devices=true makes FFmpeg log the devices; the call
    // itself returns an error code by design.
    ffmpeg.avformat_open_input(&pFormatCtx, null, iformat, &options);
    Console.WriteLine("===============================\n");
    AVDeviceInfoList* device_list = null;
    int result = ffmpeg.avdevice_list_input_sources(iformat, null, options, &device_list);
    Console.WriteLine(result);

    //iFormatContext = ffmpeg.avform at_alloc_context();
    //receivedFrame = ffmpeg.av_frame_alloc();
    //var _iFormatContext = iFormatContext;
    //int i;
    //AVDictionary* avDict;
    //ffmpeg.av_dict_set(&avDict, "reorder_queue_size", "1", 0);
    //switch (inputType)
    //{
    //    case VIDEO_INPUT_TYPE.CAM_DEVICE:
    //        AVInputFormat* iformat = ffmpeg.av_find_input_format("dshow");
    //        AVDeviceInfoList* listdevice = null;
    //        ffmpeg.avdevice_list_devices(_iFormatContext, (AVDeviceInfoList**)listdevice);
    //        Console.WriteLine(listdevice->devices[0]->ToString());
    //        //ffmpeg.avformat_open_input(&_iFormatContext, url, iformat, null).ThrowExceptionIfError();
    //        break;
    //    case VIDEO_INPUT_TYPE.RTP_RTSP:
    //        ffmpeg.avformat_open_input(&_iFormatContext, @"C:\Users\admin\Desktop\result1.avi", null, null);
    //        break;
    //    default:
    //        break;
    //}

    Console.ReadLine();

    //_iFormatContext->streams[0]->time_base = new AVRational { num = 1, den = 30 };
    //_iFormatContext->streams[0]->avg_frame_rate = new AVRational { num = 30, den = 1 };
    //AVCodec* videoCodec = null;
    //AVCodec* audioCodec = null;
    //for (i = 0; i < _iFormatContext->nb_streams; i++)
    //{
    //    if (_iFormatContext->streams[i]->codec->codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO)
    //    {
    //        videoIndex = i;
    //        videoCodecContext = _iFormatContext->streams[i]->codec;
    //        videoCodec = ffmpeg.avcodec_find_decoder(videoCodecContext->codec_id);
    //    }
    //    else if (_iFormatContext->streams[i]->codec->codec_type == AVMediaType.AVMEDIA_TYPE_AUDIO)
    //    {
    //        audioCodeContext = _iFormatContext->streams[i]->codec;
    //        audioCodec = ffmpeg.avcodec_find_decoder(audioCodeContext->codec_id);
    //        audioIndex = i;
    //    }
    //}
    //ffmpeg.avformat_find_stream_info(_iFormatContext, null).ThrowExceptionIfError(); // To access the streams, data must first be read from the media.
    //videoStreamIndex = ffmpeg.av_find_best_stream(_iFormatContext, AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, &videoCodec, 0).ThrowExceptionIfError();
    //audioStreamIndex = ffmpeg.av_find_best_stream(_iFormatContext, AVMediaType.AVMEDIA_TYPE_AUDIO, -1, -1, &audioCodec, 0);
    //Console.WriteLine($"VideoStreamIndex : {videoIndex} AudioStreamIndex : {audioIndex}");
    //Console.WriteLine($"VideoCodec : {videoCodec->id} AudioCodec : {audioCodec->id}");
    //videoCodecContext = ffmpeg.avcodec_alloc_context3(videoCodec);
    //audioCodeContext = ffmpeg.avcodec_alloc_context3(audioCodec);
    //if (HWDeviceType != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
    //{
    //    ffmpeg.av_hwdevice_ctx_create(&videoCodecContext->hw_device_ctx, HWDeviceType, null, null, 0).ThrowExceptionIfError();
    //}
    //ffmpeg.avcodec_parameters_to_context(videoCodecContext, _iFormatContext->streams[videoStreamIndex]->codecpar).ThrowExceptionIfError(); // Copies the file's stream parameters into the context; fields the file lacks keep the codec's defaults. Simple codecs need no extra options, but advanced codecs have mandatory parameters — without this copy the codec may fail and some files will not open. The function below then opens the codec and initialises the context to match.
    //ffmpeg.avcodec_parameters_to_context(audioCodeContext, _iFormatContext->streams[audioStreamIndex]->codecpar).ThrowExceptionIfError();
    //ffmpeg.avcodec_open2(videoCodecContext, videoCodec, null).ThrowExceptionIfError(); // The third argument is an options dictionary for the codec; pass NULL if unneeded. After this, codec and context are ready to decompress packets into frames. When finished, free the context and related memory.
    //ffmpeg.avcodec_open2(audioCodeContext, audioCodec, null).ThrowExceptionIfError(); // The third argument is an options dictionary for the codec; pass NULL if unneeded. After this, codec and context are ready to decompress packets into frames. When finished, free the context and related memory.
    //CodecName = ffmpeg.avcodec_get_name(videoCodec->id);
    //AudioCodecName = ffmpeg.avcodec_get_name(audioCodec->id);
    //swrCtx = ffmpeg.swr_alloc();
    //FrameSize = new Size(videoCodecContext->width, videoCodecContext->height);
    //PixelFormat = videoCodecContext->pix_fmt;
    ////Console.WriteLine(audioCodecName);
    //swrCtx_Audio = ffmpeg.swr_alloc();
    //AVSampleFormat in_sample_fmt = audioCodeContext->sample_fmt;
    //int in_sample_rate = audioCodeContext->sample_rate;
    //long in_ch_layout = (long)audioCodeContext->channel_layout;
    //out_sample_fmt = AVSampleFormat.AV_SAMPLE_FMT_FLTP;
    //int out_sample_rate = 44100;
    //int out_ch_layout = ffmpeg.AV_CH_LAYOUT_MONO;
    //ffmpeg.swr_alloc_set_opts(swrCtx_Audio, out_ch_layout, out_sample_fmt, out_sample_rate, in_ch_layout, in_sample_fmt, in_sample_rate, 0, null);
    //ffmpeg.swr_init(swrCtx_Audio);
    ////Resampling setting options-------------------------------------------- ---------------end
    ////Get the number of output channels
    //out_channel_nb = ffmpeg.av_get_channel_layout_nb_channels((ulong)out_ch_layout);
    ////Store pcm data
    //out_buffer_audio = (byte*)ffmpeg.av_malloc(2 * 8000);
    //rawPacket = ffmpeg.av_packet_alloc();
    //decodedFrame = ffmpeg.av_frame_alloc();
}
/// <summary>
/// Native binding for FFmpeg's avdevice_list_input_sources(): enumerates the
/// input sources (devices) available for the given input format.
/// Per the FFmpeg documentation, returns the number of autodetected sources on
/// success and a negative AVERROR on failure; *device_list must be freed with
/// avdevice_free_list_devices().
/// </summary>
public static extern int avdevice_list_input_sources(AVInputFormat * @device, [MarshalAs(UnmanagedType.LPStr)] string @device_name, AVDictionary * @device_options, AVDeviceInfoList ** @device_list);
/// <summary>
/// Native binding for FFmpeg's avformat_open_input(): opens an input stream and
/// reads its header. The URL is marshalled as UTF-8; 'fmt' forces an input
/// format (null = autodetect); unconsumed 'options' entries remain in the
/// dictionary after the call. Returns 0 on success, a negative AVERROR on failure.
/// </summary>
private static extern int avformat_open_input(AVFormatContext **ps, [MarshalAs(UnmanagedType.LPUTF8Str)] string url, AVInputFormat *fmt, AVDictionary **options);
/// <summary>
/// Builds an in-memory H.264 elementary stream from the given NAL units (each
/// prefixed with the Annex-B start sequence), probes its container format,
/// opens it with FFmpeg and locates the video stream.
/// NOTE(review): the HGlobal buffer allocated for probe_data.filename is never
/// freed, so each constructed decoder leaks it — confirm and free with
/// Marshal.FreeHGlobal. av_register_all() is also deprecated in FFmpeg 4+.
/// </summary>
/// <param name="nalUnits">Raw NAL units; start codes are added here, so inputs are presumably without them — confirm with callers.</param>
public unsafe H264Decoder(List<byte[]> nalUnits)
{
    // One-time global FFmpeg registration, guarded by a static flag.
    if (!initialized)
    {
        ffmpeg.av_register_all();
        initialized = true;
    }

    // Each decoder instance gets a unique synthetic stream name suffix.
    int localStreamSuffix = streamSuffix;
    streamSuffix++;

    // Total size: every NAL unit plus one start sequence per unit.
    int dataSize = 0;
    foreach (byte[] nalUnit in nalUnits)
    {
        dataSize += nalUnit.Length + startSequence.Length;
    }

    byte* dat = (byte*)ffmpeg.av_malloc((ulong)dataSize);

    // Concatenate: [startSequence][nalUnit] for every unit, then rewind 'dat'
    // back to the start of the buffer.
    fixed (byte* start = startSequence)
    {
        foreach (byte[] nalUnit in nalUnits)
        {
            fixed (byte* dataPtr = nalUnit)
            {
                UnmanagedMemory.CopyMemory(dat, start, (uint)startSequence.Length);
                dat += startSequence.Length;
                UnmanagedMemory.CopyMemory(dat, dataPtr, (uint)nalUnit.Length);
                dat += nalUnit.Length;
            }
        }
        dat -= dataSize;
    }

    AVFormatContext* icLocal = ffmpeg.avformat_alloc_context();
    ic = icLocal;

    // All custom I/O callbacks are null: the AVIO context reads directly from
    // the pre-filled buffer.
    avio_alloc_context_write_packet_func writeCallback;
    writeCallback.Pointer = IntPtr.Zero;
    avio_alloc_context_seek_func seekCallback;
    seekCallback.Pointer = IntPtr.Zero;
    avio_alloc_context_read_packet_func readCallback;
    readCallback.Pointer = IntPtr.Zero;

    icLocal->pb = ffmpeg.avio_alloc_context(dat, bufferSize, 0, null, readCallback, writeCallback, seekCallback);
    if (icLocal->pb == null)
    {
        throw new Exception("Failed to allocate ffmpeg context.");
    }

    // Need to probe buffer for input format unless you already know it
    AVProbeData probe_data;
    probe_data.buf_size = dataSize;
    probe_data.filename = (byte*)Marshal.StringToHGlobalAnsi($"stream_{localStreamSuffix}");
    probe_data.buf = (byte*)UnmanagedMemory.Alloc(probe_data.buf_size);
    UnmanagedMemory.CopyMemory(probe_data.buf, dat, (uint)probe_data.buf_size);

    // First try requiring an opened stream; fall back to the relaxed probe.
    AVInputFormat* pAVInputFormat = ffmpeg.av_probe_input_format(&probe_data, 1);
    if (pAVInputFormat == null)
    {
        pAVInputFormat = ffmpeg.av_probe_input_format(&probe_data, 0);
    }

    // cleanup
    UnmanagedMemory.DeAlloc((IntPtr)probe_data.buf, probe_data.buf_size);
    probe_data.buf = null;

    // The data comes from memory, not a file.
    pAVInputFormat->flags |= ffmpeg.AVFMT_NOFILE;

    ffmpeg.avformat_open_input(&icLocal, $"stream_{localStreamSuffix}", pAVInputFormat, null);

    // Find and open the first video stream; decode errors are deliberately
    // ignored here (best effort).
    for (int i = 0; i < icLocal->nb_streams; i++)
    {
        AVCodecContext* enc = icLocal->streams[i]->codec;

        if (AVMediaType.AVMEDIA_TYPE_VIDEO == enc->codec_type)
        {
            AVCodec* codec = ffmpeg.avcodec_find_decoder(enc->codec_id);

            if (codec == null || ffmpeg.avcodec_open2(enc, codec, null) < 0)
            {
                //Console.WriteLine("Cannot find codec");
            }
            video_st = icLocal->streams[i];
        }
    }

    //Init picture
    yuv_image = ffmpeg.av_frame_alloc();
    yuv_image->format = -1; //We do not know the format of the raw decoded image
}
/// <summary>
/// Scratch/test routine: opens a DirectShow capture device, creates an AVI
/// output file with an MPEG-4 encoder context, then remuxes packets from the
/// capture into the output until EOF.
/// NOTE(review): outCodecContext->codec is assigned before the context is
/// (re)allocated, avformat_write_header runs inside the per-stream loop, and
/// return codes are only logged — this looks like exploratory code, not
/// production-ready.
/// </summary>
public void testSet()
{
    FFmpegBinariesHelper.RegisterFFmpegBinaries();
    ffmpeg.avdevice_register_all();
    var fmt_ctx = _fmt_ctx;
    fmt_ctx = ffmpeg.avformat_alloc_context();
    AVInputFormat* iformat = ffmpeg.av_find_input_format("dshow");
    string device = "video=USB3. 0 capture:audio=디지털 오디오 인터페이스(5- USB3. 0 capture)";
    var a = ffmpeg.avformat_open_input(&fmt_ctx, device, iformat, null); // negative means it could not open — no such device
    var b = ffmpeg.avformat_find_stream_info(fmt_ctx, null); // negative means no stream info could be found
    int videoIndex = -1;
    int audioIndex = -1;
    _fmt_ctx = fmt_ctx;

    AVFormatContext* outputFmtCtx;
    AudioIndex = -1;
    VideoIndex = -1;
    string filename = @"C:\Users\admin\Desktop\output223423423.avi";
    AVFormatContext* inputFmtCtx = _fmt_ctx;
    if (ffmpeg.avformat_alloc_output_context2(&outputFmtCtx, null, null, filename) < 0) // a negative return value means an error
    {
        Console.WriteLine("파일 생성 못해!!!");
    }
    var oCodec = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_MPEG4);

    // Create one output stream per input stream and configure the encoder context.
    for (int index = 0; index < inputFmtCtx->nb_streams; index++)
    {
        AVStream* in_stream = inputFmtCtx->streams[index];
        AVCodecContext* in_codec_ctx = in_stream->codec;
        in_codec_ctx = ffmpeg.avcodec_alloc_context3(inputFmtCtx->data_codec);
        AVStream* out_stream = ffmpeg.avformat_new_stream(outputFmtCtx, null);
        if (out_stream == null)
        {
            Console.WriteLine("OUTPUT 스트림 NULL");
        }
        // AVCodecContext *outCodecContext = out_stream->codec;
        outCodecContext->codec = oCodec;
        outCodecContext = ffmpeg.avcodec_alloc_context3(oCodec);
        outCodecContext->height = 500;
        outCodecContext->width = 600;
        // outCodecContext->sample_aspect_ratio = videoInfo.Sample_aspect_ratio;
        outCodecContext->pix_fmt = AVPixelFormat.AV_PIX_FMT_YUV420P;
        outCodecContext->time_base = new AVRational { num = 1, den = 15 };
        // outCodecContext->framerate = ffmpeg.av_inv_q(videoInfo.Framerate);
        // The context must be configured before anything can be written.
        if (ffmpeg.avcodec_parameters_from_context(out_stream->codecpar, outCodecContext) < 0)
        {
            Console.WriteLine("copy 못해에!!!");
        }
        out_stream->time_base = in_stream->time_base;
        outCodecContext->codec_tag = 0;
        if ((outputFmtCtx->oformat->flags & ffmpeg.AVFMT_GLOBALHEADER) == 0)
        {
            outCodecContext->flags |= ffmpeg.AV_CODEC_FLAG_GLOBAL_HEADER;
        }
        // ffmpeg.avcodec_open2(outCodecContext, oCodec, null).ThrowExceptionIfError();
        VideoIndex = 0;
        AudioIndex = 1;
        ffmpeg.av_dump_format(outputFmtCtx, 0, filename, 1);
        if ((outputFmtCtx->oformat->flags & ffmpeg.AVFMT_NOFILE) == 0)
        {
            // This actually open the file
            if (ffmpeg.avio_open(&outputFmtCtx->pb, filename, ffmpeg.AVIO_FLAG_WRITE) < 0)
            {
                Console.WriteLine("못만들오...");
            }
        }
        if (ffmpeg.avformat_write_header(outputFmtCtx, null) < 0)
        {
            Console.WriteLine("헤더를 못써...\n");
        }
    }
    //ffmpeg.av_write_trailer(outputFmtCtx);
    //ffmpeg.avio_closep(&outputFmtCtx->pb);
    //ffmpeg.avformat_free_context(outputFmtCtx);

    // nb_streams: how many stream entries the opened input contains.
    for (int index = 0; index < fmt_ctx->nb_streams; index++)
    {
        var avCodecContext = fmt_ctx->streams[index]->codec;
        if (avCodecContext->codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO)
        {
            videoIndex = index;
        }
        else if (avCodecContext->codec_type == AVMediaType.AVMEDIA_TYPE_AUDIO)
        {
            audioIndex = index;
            Console.WriteLine(audioIndex + "***");
        }
        if (avCodecContext->codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO)
        {
            videoIndex = index;
            Console.WriteLine($"====================={avCodecContext->codec_type}======================");
            //Console.WriteLine(avCodecContext->bit_rate); //W * H *FPS
            //Console.WriteLine(avCodecContext->codec_id);
            //Console.WriteLine(avCodecContext->width);
            //Console.WriteLine(avCodecContext->coded_width);
            //Console.WriteLine(avCodecContext->height);
            //Console.WriteLine(avCodecContext->coded_height);
            //Console.WriteLine(avCodecContext->pts_correction_num_faulty_pts);
            //Console.WriteLine(avCodecContext->pts_correction_last_dts);
            //Console.WriteLine(avCodecContext->pts_correction_last_pts);
            Console.WriteLine();
        }
        else if (avCodecContext->codec_type == AVMediaType.AVMEDIA_TYPE_AUDIO)
        {
            audioIndex = index;
            Console.WriteLine($"====================={avCodecContext->codec_type}======================");
            //Console.WriteLine(avCodecContext->bit_rate); //W * H *FPS
            //Console.WriteLine(avCodecContext->codec_id);
            //Console.WriteLine($"Channels : {avCodecContext->channels}");
            //Console.WriteLine(avCodecContext->width);
            //Console.WriteLine(avCodecContext->coded_width);
            //Console.WriteLine(avCodecContext->height);
            //Console.WriteLine(avCodecContext->coded_height);
            //Console.WriteLine(avCodecContext->pts_correction_num_faulty_pts);
            //Console.WriteLine(avCodecContext->pts_correction_last_dts);
            //Console.WriteLine(avCodecContext->pts_correction_last_pts);
        }
    }

    // Remux loop: read packets from the input, rescale timestamps to the output
    // stream's time base and write them interleaved until EOF.
    int ret;
    AVPacket pkt;
    int out_stream_index;
    while (true)
    {
        ret = ffmpeg.av_read_frame(fmt_ctx, &pkt); // ret == 0 means a packet was read
        if (ret == ffmpeg.AVERROR_EOF)
        {
            Console.WriteLine("frame end");
            break;
        }

        if (pkt.stream_index == videoIndex)
        {
            Console.WriteLine("Video Packet");
        }
        else if (pkt.stream_index == audioIndex)
        {
            Console.WriteLine("Audio Packet");
        }

        AVStream* in_stream = fmt_ctx->streams[pkt.stream_index];
        out_stream_index = (pkt.stream_index == videoIndex) ? videoIndex : audioIndex;
        AVStream* out_stream = outputFmtCtx->streams[out_stream_index];
        ffmpeg.av_packet_rescale_ts(&pkt, in_stream->time_base, out_stream->time_base);
        pkt.stream_index = out_stream_index;

        if (ffmpeg.av_interleaved_write_frame(outputFmtCtx, &pkt) < 0)
        {
            Console.WriteLine("!!!!!!!!@#####!@#!@#!");
            break;
        }
        ffmpeg.av_packet_unref(&pkt); // formerly av_free_packet()
    }
    ffmpeg.av_write_trailer(outputFmtCtx);
}
/// <summary>
/// Native binding for FFmpeg's avformat_open_input() using an out IntPtr for
/// the format context. Returns 0 (AVError success) or a negative AVERROR.
/// NOTE(review): 'filename' has no explicit MarshalAs, so the platform default
/// (ANSI on Windows) applies — non-ASCII paths may be mangled; confirm intended.
/// </summary>
public static extern AVError avformat_open_input(out IntPtr ps, string filename, AVInputFormat *fmt, AVDictionary *options);
/// <summary>
/// Native binding for FFmpeg's av_input_video_device_next(): iterates the
/// registered video input devices. Pass null to get the first device; per the
/// FFmpeg documentation, returns null after the last one.
/// </summary>
public static extern AVInputFormat *av_input_video_device_next(AVInputFormat * @d);