/// <summary>
/// Begins a capture session: records the capture parameters, generates the
/// Hammersley-distributed headbox position samples, and sets up the color and
/// depth cameras.
/// </summary>
/// <param name="headbox">Headbox component defining the sampling volume.</param>
/// <param name="capture_dir">Directory that receives the captured images.</param>
/// <param name="max_frames">Number of animation frames to capture.</param>
/// <param name="status_interface">Callback interface for progress reporting
/// and cancellation checks.</param>
public void BeginCapture(CaptureHeadbox headbox, string capture_dir, int max_frames, CaptureStatus status_interface) {
  start_time_ = Time.realtimeSinceStartup;
  headbox_ = headbox;
  dynamic_range_ = headbox_.dynamic_range_;
  samples_per_face_ = (int)headbox_.samples_per_face_;
  capture_dir_ = capture_dir;
  capture_frame_ = 0;
  status_interface_ = status_interface;
  max_frames_ = max_frames;
  status_interface_.SendProgress("Capturing Images...", 0.0f);

  List<Vector3> samples = new List<Vector3>();

  // Use Hammersley point set to distribute samples.
  // Guard the denominator so a single-sample capture does not evaluate
  // 0 / 0 (which would yield NaN and corrupt the sample position).
  float position_scale = 1.0f / Mathf.Max(1, samples_per_face_ - 1);
  for (int position_sample_index = 0; position_sample_index < samples_per_face_; ++position_sample_index) {
    Vector3 headbox_position = new Vector3(
      (float)position_sample_index * position_scale,
      RadicalInverse((ulong)position_sample_index, 2),
      RadicalInverse((ulong)position_sample_index, 3));
    headbox_position.Scale(headbox.size_);
    headbox_position -= headbox.size_ * 0.5f;
    // Headbox samples are in camera space; transform to world space.
    headbox_position = headbox.transform.TransformPoint(headbox_position);
    samples.Add(headbox_position);
  }

  // Sort samples by distance from the center of the headbox. The samples are
  // already in world space here, so the distance must be measured against the
  // headbox's world position; comparing raw magnitudes would instead sort by
  // distance from the world origin and select the wrong "central" sample for
  // any headbox placed away from the origin.
  Vector3 headbox_center = headbox.transform.position;
  samples.Sort(delegate(Vector3 a, Vector3 b) {
    float length_a = (a - headbox_center).sqrMagnitude;
    float length_b = (b - headbox_center).sqrMagnitude;
    return length_a.CompareTo(length_b);
  });

  // Replace the sample closest to the center of the headbox with a sample at
  // exactly the center. This is important because Seurat requires
  // sampling information at the center of the headbox.
  samples[0] = headbox_center;
  samples_ = samples;

  // Note this uses a modified version of Unity's standard internal depth
  // capture shader. See the shader in Assets/builtin_shaders/
  // DefaultResourcesExtra/Internal-DepthNormalsTexture.shader.
  render_depth_shader_ = Shader.Find("GoogleVR/Seurat/CaptureEyeDepth");
  capture_manifest_ = new JsonManifest.Capture();

  // Setup cameras.
  color_camera_ = headbox_.ColorCamera;
  depth_camera_object_ = new GameObject("Depth Camera");
  depth_camera_ = depth_camera_object_.AddComponent<Camera>();
  // Checks if we are using HDRP; if so, we need to add additional components.
#if UNITY_RENDER_PIPELINE_HDRP
  OverrideMaterialRenderer overrideMaterialRenderer = depth_camera_object_.AddComponent<OverrideMaterialRenderer>();
  overrideMaterialRenderer.EnableOverride();
#endif
}
/// <summary>
/// Captures one cube-map face for the current headbox sample. Each call
/// renders a single face, stores the resulting view in the manifest view
/// group, and advances to the next face; after the sixth face the view group
/// is committed and the sample is finalized via EndCaptureSample().
/// </summary>
private void CaptureSample() {
  // Transforms all cameras from world space to the eye space
  // of the reference camera.
  Matrix4x4 reference_from_world = color_camera_.worldToCameraMatrix;
  const string base_image_name = "Cube";
  string[] cube_face_names = {
    "Front",
    "Back",
    "Right",
    "Left",
    "Top",
    "Bottom",
  };
  int num_sides = cube_face_names.Length;

  if (current_side_ == 0) {
    // First face of this sample: reset per-sample capture state.
    StartCaptureSamples();
    view_group_ = new JsonManifest.ViewGroup();
    view_group_.views = new JsonManifest.View[6];
  }

  int side = current_side_;
  // Camera orientation for each cube face, in the order of cube_face_names.
  Quaternion face_rotation;
  switch (side) {
    case 0:
      face_rotation = Quaternion.identity;
      break;
    case 1:
      face_rotation = Quaternion.AngleAxis(180f, Vector3.up);
      break;
    case 2:
      face_rotation = Quaternion.AngleAxis(90f, Vector3.up);
      break;
    case 3:
      face_rotation = Quaternion.AngleAxis(-90f, Vector3.up);
      break;
    case 4:
      face_rotation = Quaternion.AngleAxis(-90f, Vector3.right);
      break;
    case 5:
    default:
      face_rotation = Quaternion.AngleAxis(90f, Vector3.right);
      break;
  }

  string progress_status = "Baking " + (sample_index_ + 1) + "/" +
      samples_per_face_ + " Frame " + (capture_frame_ + 1) + "/" + max_frames_;
  // Include completed frames in the task index so the progress bar spans the
  // whole capture. Previously the frame offset was missing while the
  // denominator still counted all frames, so progress reset to zero at the
  // start of every frame and never exceeded 1/max_frames_.
  int capture_task_index =
      (capture_frame_ * samples_per_face_ + sample_index_) * num_sides + side;
  int total_capture_tasks = samples_per_face_ * num_sides * max_frames_;
  status_interface_.SendProgress(
      progress_status, (float)capture_task_index / total_capture_tasks);
  if (!status_interface_.TaskContinuing()) {
    return;
  }

  // Use cached samples.
  JsonManifest.View view = Capture(
      base_image_name + "_" + cube_face_names[side] + "_" + sample_index_,
      face_rotation,
      samples_[sample_index_],
      reference_from_world,
      export_path_);
  // Shows the task is complete.
  status_interface_.SendProgress(
      progress_status, (float)(capture_task_index + 1) / total_capture_tasks);

  // Store the view into the manifest. Note the slot order deliberately
  // differs from the capture order (right/left and top/bottom are swapped) —
  // presumably the manifest consumer expects a different face ordering;
  // verify against the Seurat pipeline before changing.
  switch (side) {
    case 0:
      view_group_.views[0] = view;
      break;
    case 1:
      view_group_.views[1] = view;
      break;
    case 2:
      view_group_.views[3] = view;
      break;
    case 3:
      view_group_.views[2] = view;
      break;
    case 4:
      view_group_.views[5] = view;
      break;
    case 5:
    default:
      view_group_.views[4] = view;
      break;
  }

  ++current_side_;
  if (current_side_ == num_sides) {
    if (sample_index_ == 0) {
      // Forces recreation of render targets at the normal resolution after
      // capturing the center headbox at the typically-higher resolution.
      DestroyRenderTargets();
    }
    current_side_ = 0;
    capture_manifest_.view_groups.Add(view_group_);
    EndCaptureSample();
  }
}