/// <summary>
/// Begins capturing the web-camera texture into an FFmpeg session.
/// </summary>
/// <param name="path">Output path (or session name) handed to FFmpegSession.Create.</param>
/// <param name="length">Capture duration in seconds. Values &lt;= 0 mean "record until
/// StopCapture() is called manually". Positive values are clamped to a 1-second minimum.</param>
public void StartCapture(string path, float length)
{
    Debug.Log("StartCapture!!");

    // Create the session BEFORE scheduling the auto-stop timer so that
    // (a) the timer measures from the actual capture start, and
    // (b) a failed Create doesn't leave a dangling coroutine calling
    //     StopCapture() on a session that never came up.
    // NOTE(review): an already-running _session is silently overwritten here —
    // confirm callers never invoke StartCapture twice without StopCapture.
    _session = FFmpegSession.Create(
        path,
        _webCamTex.width,
        _webCamTex.height,
        _frameRate,
        preset
    );

    _startTime = Time.time;
    _frameCount = 0;
    _frameDropCount = 0;

    if (length > 0)
    {
        // Enforce a minimum recording length of one second.
        length = Mathf.Max(1, length);
        StartCoroutine(WaitForSeconds(length, () => StopCapture()));
    }
    // else: the user ends the capture manually via the stop interface.
}
// Per-frame driver: lazily sets up the render target and FFmpeg session,
// then pushes frames at a pace matching the configured frame rate.
void Update()
{
    var cam = GetComponent<Camera>();

    // Lazy initialization on the first frame after (re)start.
    if (_session == null)
    {
        // When the camera renders straight to the screen, redirect it into a
        // temporary render texture and attach a blitter so frames still get
        // presented on screen.
        if (cam.targetTexture == null)
        {
            _tempRT = new RenderTexture(_width, _height, 24, GetTargetFormat(cam));
            _tempRT.antiAliasing = GetAntiAliasingLevel(cam);
            cam.targetTexture = _tempRT;
            _blitter = Blitter.CreateInstance(cam);
        }

        // Launch the encoder against whatever the camera renders into.
        _session = FFmpegSession.Create(
            gameObject.name,
            cam.targetTexture.width,
            cam.targetTexture.height,
            _frameRate,
            preset
        );

        _startTime = Time.time;
        _frameCount = 0;
        _frameDropCount = 0;
    }

    // How far real time has run ahead of the frames pushed so far.
    var lag = Time.time - FrameTime;
    var frameInterval = 1 / _frameRate;

    if (lag < 0)
    {
        // Ahead of schedule: tick the session without frame data.
        _session.PushFrame(null);
        return;
    }

    if (lag < frameInterval)
    {
        // On schedule (less than one frame behind): push the frame once.
        _session.PushFrame(cam.targetTexture);
        _frameCount++;
        return;
    }

    if (lag < frameInterval * 2)
    {
        // One extra frame behind: duplicate the current frame to catch up.
        // (Proper frame duplication would be more efficient. #fixme)
        _session.PushFrame(cam.targetTexture);
        _session.PushFrame(cam.targetTexture);
        _frameCount += 2;
        return;
    }

    // Severely behind: warn, push one frame, and jump the counter forward to
    // compensate for the lost interval.
    WarnFrameDrop();
    _session.PushFrame(cam.targetTexture);
    _frameCount += Mathf.FloorToInt(lag * _frameRate);
}
// Per-frame driver for the streaming/recording variant: lazily builds either
// an RTMP streaming session or a file-recording session, then pushes frames
// paced to the configured frame rate.
void Update()
{
    var camera = GetComponent<Camera>();

    // Lazy initialization
    if (_session == null)
    {
        // Give a newly created temporary render texture to the camera
        // if it's set to render to a screen. Also create a blitter
        // object to keep frames presented on the screen.
        if (camera.targetTexture == null)
        {
            _tempRT = new RenderTexture(_width, _height, 24, GetTargetFormat(camera));
            _tempRT.antiAliasing = GetAntiAliasingLevel(camera);
            camera.targetTexture = _tempRT;
            _blitter = Blitter.CreateInstance(camera);
        }

        // Start an FFmpeg session.
        var targetTexture = camera.targetTexture;
        if (_isStream)
        {
            // Hand-built FFmpeg command line: raw RGBA frames are piped in on
            // stdin ("-i -") and pushed out as FLV to the RTMP endpoint.
            // NOTE(review): "-re" reads input at its native frame rate —
            // presumably intended to pace the live push; confirm it is wanted
            // for a real-time pipe source.
            // NOTE(review): _bitrate appears to be in kbps (the "k" suffix) —
            // TODO confirm against the field's declaration/inspector label.
            _session = FFmpegSession.CreateWithArguments(
                "-re"
                + " -y -f rawvideo -vcodec rawvideo -pixel_format rgba"
                + " -colorspace bt709"
                + " -video_size " + targetTexture.width + "x" + targetTexture.height
                + " -framerate " + _frameRate
                + " -loglevel warning -i - "
                + preset.GetOptions()
                + " -preset " + _compressionSpeed.ToString().ToLower() // compression preset
                + $" -b:v {_bitrate}k -maxrate {_bitrate}k -bufsize {_bitrate * 2}k" // Video bitrates
                + $" -f flv \"{_rtmpUrl}\""
            );
        }
        else
        {
            // File-recording path: let FFmpegSession build the arguments.
            _session = FFmpegSession.Create(
                gameObject.name,
                targetTexture.width,
                targetTexture.height,
                _frameRate,
                preset
            );
        }

        _startTime = Time.time;
        _frameCount = 0;
        _frameDropCount = 0;
    }

    // gap = how far wall-clock time has run ahead of the frames pushed so far;
    // delta = nominal seconds per frame.
    var gap = Time.time - FrameTime;
    var delta = 1 / _frameRate;

    if (gap < 0)
    {
        // Update without frame data.
        _session.PushFrame(null);
    }
    else if (gap < delta)
    {
        // Single-frame behind from the current time:
        // Push the current frame to FFmpeg.
        _session.PushFrame(camera.targetTexture);
        _frameCount++;
    }
    else if (gap < delta * 2)
    {
        // Two-frame behind from the current time:
        // Push the current frame twice to FFmpeg. Actually this is not
        // an efficient way to catch up. We should think about
        // implementing frame duplication in a more proper way. #fixme
        _session.PushFrame(camera.targetTexture);
        _session.PushFrame(camera.targetTexture);
        _frameCount += 2;
    }
    else
    {
        // Show a warning message about the situation.
        WarnFrameDrop();
        // Push the current frame to FFmpeg.
        _session.PushFrame(camera.targetTexture);
        // Compensate the time delay.
        _frameCount += Mathf.FloorToInt(gap * _frameRate);
    }
}
/// <summary>
/// Feeds <paramref name="texture"/> into an FFmpeg session that streams to
/// <paramref name="url"/>, lazily creating the session once the RTSP server
/// reports ready. Frames are paced to the configured frame rate.
/// </summary>
/// <param name="texture">Source texture pushed to the encoder each frame.</param>
/// <param name="url">Stream endpoint handed to FFmpegSession.Create.</param>
/// <param name="width">Encoded video width in pixels.</param>
/// <param name="height">Encoded video height in pixels.</param>
protected void PushToPipe(Texture texture, string url, int width, int height)
{
    var serverLoader = RTSPServerLoader.GetInstance();

    // Kick off the server-readiness watcher exactly once.
    if (!serverLoader.CoroutineStarted)
    {
        StartCoroutine(serverLoader.WaitForServerToStart());
    }

    // Lazy initialization: open the session once the RTSP server is up.
    if (_session == null && serverLoader.RTSPServerloaded)
    {
        Debug.Log("Creating Session: " + url);

        if (texture == null)
        {
            Debug.LogError("Texture is null");
        }

        // Start an FFmpeg session.
        _session = FFmpegSession.Create(
            url,
            width,
            height,
            _frameRate,
            preset,
            _crfValue,
            _maxBitrate
        );

        _startTime = Time.time;
        _frameCount = 0;
        _frameDropCount = 0;
    }

    // Nothing to push into until the session exists.
    if (_session == null)
    {
        return;
    }

    // lag = how far wall-clock time has run ahead of the frames pushed so far;
    // interval = nominal seconds per frame.
    var lag = Time.time - FrameTime;
    var interval = 1 / _frameRate;

    if (lag < 0)
    {
        // Ahead of schedule: tick the session without frame data.
        _session.PushFrame(null);
        return;
    }

    if (lag < interval)
    {
        // Less than one frame behind: push the frame once.
        _session.PushFrame(texture);
        _frameCount++;
        return;
    }

    if (lag < interval * 2)
    {
        // Two frames behind: duplicate the frame to catch up.
        // (A proper frame-duplication mechanism would be better. #fixme)
        _session.PushFrame(texture);
        _session.PushFrame(texture);
        _frameCount += 2;
        return;
    }

    // Severely behind: warn, push once, and advance the counter to
    // compensate for the lost time.
    WarnFrameDrop();
    _session.PushFrame(texture);
    _frameCount += Mathf.FloorToInt(lag * _frameRate);
}