Example #1
0
 /// <summary>
 /// Lazily registers this ego with <see cref="DatasetCapture"/>: the handle is
 /// created on the first call and reused afterwards.
 /// </summary>
 void EnsureEgoInitialized()
 {
     // Already registered — nothing to do.
     if (m_EgoHandle != default)
     {
         return;
     }

     m_EgoHandle = DatasetCapture.RegisterEgo(Description);
 }
Example #2
0
        /// <inheritdoc/>
        protected override void Setup()
        {
            // A label config is mandatory for this labeler — fail fast if missing.
            if (idLabelConfig == null)
            {
                throw new InvalidOperationException("BoundingBox2DLabeler's idLabelConfig field must be assigned");
            }

            m_AsyncAnnotations = new Dictionary<int, AsyncAnnotation>();

            // Register the bounding-box annotation definition with the dataset.
            m_BoundingBoxAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
                "bounding box",
                idLabelConfig.GetAnnotationSpecification(),
                "Bounding box for each labeled object visible to the sensor",
                id: new Guid(annotationId));

            // Produce boxes each frame once rendered-object visibility is resolved.
            perceptionCamera.RenderedObjectInfosCalculated += OnRenderedObjectInfosCalculated;

            visualizationEnabled = supportsVisualization;

            // Capture the screen size at setup time: it can change during play, and the
            // visual bounding boxes must be rescaled against this original size.
            m_OriginalScreenSize = new Vector2(Screen.width, Screen.height);

            // Textures used to draw the on-screen box outline and label background.
            m_BoundingBoxTexture = Resources.Load<Texture>("outline_box");
            m_LabelTexture = Resources.Load<Texture>("solid_white");

            // Styling for the visualization label text.
            m_Style = new GUIStyle
            {
                fontSize = 16,
                padding = new RectOffset(4, 4, 4, 4),
                contentOffset = new Vector2(4, 0),
                alignment = TextAnchor.MiddleLeft
            };
            m_Style.normal.textColor = Color.black;
        }
        /// <inheritdoc/>
        protected override void Setup()
        {
            // Size the segmentation output to the camera's current pixel dimensions.
            var myCamera = perceptionCamera.GetComponent <Camera>();
            var width    = myCamera.pixelWidth;
            var height   = myCamera.pixelHeight;

            // A label config is mandatory for this labeler — fail fast if missing.
            if (labelConfig == null)
            {
                throw new InvalidOperationException(
                          "SemanticSegmentationLabeler's LabelConfig must be assigned");
            }

            m_AsyncAnnotations = new Dictionary <int, AsyncAnnotation>();

            // 8-bit RGBA color target; the trailing argument is the depth-buffer bit count.
            var renderTextureDescriptor = new RenderTextureDescriptor(width, height, GraphicsFormat.R8G8B8A8_UNorm, 8);

            // Reuse the user-supplied texture if one was assigned; otherwise allocate our own.
            if (targetTexture != null)
            {
                targetTexture.descriptor = renderTextureDescriptor;
            }
            else
            {
                m_TargetTextureOverride = new RenderTexture(renderTextureDescriptor);
            }

            // NOTE(review): targetTexture is dereferenced unconditionally below, so it is
            // presumably a property that falls back to m_TargetTextureOverride when no
            // explicit texture was assigned — confirm; otherwise the else-branch above
            // would be followed by a null dereference here.
            targetTexture.Create();
            targetTexture.name = "Labeling";

#if HDRP_PRESENT
            // HDRP path: inject a custom pass that renders label colors into targetTexture
            // before the main camera render.
            var gameObject       = perceptionCamera.gameObject;
            var customPassVolume = gameObject.GetComponent <CustomPassVolume>() ?? gameObject.AddComponent <CustomPassVolume>();
            customPassVolume.injectionPoint = CustomPassInjectionPoint.BeforeRendering;
            customPassVolume.isGlobal       = true;
            m_SemanticSegmentationPass      = new SemanticSegmentationPass(myCamera, targetTexture, labelConfig)
            {
                name = "Labeling Pass"
            };
            customPassVolume.customPasses.Add(m_SemanticSegmentationPass);
#endif
#if URP_PRESENT
            // URP path: same idea via a scriptable render pass on the perception camera.
            perceptionCamera.AddScriptableRenderPass(new SemanticSegmentationUrpPass(myCamera, targetTexture, labelConfig));
#endif

            // Build the annotation spec from the label config: one (label name, pixel color)
            // pair per configured label entry.
            var specs = labelConfig.labelEntries.Select((l) => new SemanticSegmentationSpec()
            {
                label_name  = l.label,
                pixel_value = l.color
            }).ToArray();

            m_SemanticSegmentationAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
                "semantic segmentation",
                specs,
                "pixel-wise semantic segmentation label",
                "PNG",
                id: Guid.Parse(annotationId));

            // Read the rendered segmentation texture back each frame and forward the
            // pixel data for annotation reporting.
            m_SemanticSegmentationTextureReader = new RenderTextureReader <Color32>(targetTexture, myCamera,
                                                                                    (frameCount, data, tex) => OnSemanticSegmentationImageRead(frameCount, data));
        }
Example #4
0
        // Unity lifecycle: runs once, before the first frame update.
        void Awake()
        {
            // Prefer the parent Ego's handle; otherwise register an anonymous ego.
            m_EgoMarker = GetComponentInParent<Ego>();
            var ego = m_EgoMarker != null ? m_EgoMarker.EgoHandle : DatasetCapture.RegisterEgo("");

            SensorHandle = DatasetCapture.RegisterSensor(ego, "camera", description, period, startTime);

            SetupInstanceSegmentation();

            // Hook into render-pipeline and capture lifecycle events.
            RenderPipelineManager.beginCameraRendering += OnBeginCameraRendering;
            RenderPipelineManager.endCameraRendering += CheckForRendererFeature;
            DatasetCapture.SimulationEnding += OnSimulationEnding;
        }
        /// <inheritdoc/>
        protected override void Setup()
        {
            // A label config is mandatory for this labeler — fail fast if missing.
            if (idLabelConfig == null)
            {
                throw new InvalidOperationException("BoundingBox2DLabeler's idLabelConfig field must be assigned");
            }

            m_AsyncAnnotations = new Dictionary<int, AsyncAnnotation>();

            // Register the bounding-box annotation definition with the dataset.
            m_BoundingBoxAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
                "bounding box",
                idLabelConfig.GetAnnotationSpecification(),
                "Bounding box for each labeled object visible to the sensor",
                id: new Guid(annotationId));

            // Produce boxes each frame once rendered-object visibility is resolved.
            perceptionCamera.RenderedObjectInfosCalculated += OnRenderedObjectInfosCalculated;
        }
Example #6
0
        // Unity lifecycle: runs once, before the first frame update.
        void Awake()
        {
            // Prefer the parent Ego's handle; otherwise register an anonymous ego.
            m_EgoMarker = GetComponentInParent<Ego>();
            var ego = m_EgoMarker != null ? m_EgoMarker.EgoHandle : DatasetCapture.RegisterEgo("");

            SensorHandle = DatasetCapture.RegisterSensor(ego, "camera", description, period, startTime);

            // Jobs are not chained to one another in any way, so maximize parallelism...
            AsyncRequest.maxJobSystemParallelism = 0;
            // ...and bound request age so readbacks happen before Allocator.TempJob
            // allocations go stale.
            AsyncRequest.maxAsyncRequestFrameAge = 4;

            SetupInstanceSegmentation();
            var sensorCamera = GetComponent<Camera>();

#if UNITY_EDITOR || DEVELOPMENT_BUILD
            SetupVisualizationCamera(sensorCamera);
#endif

            DatasetCapture.SimulationEnding += OnSimulationEnding;
        }
        /// <inheritdoc/>
        protected override void Setup()
        {
            // A label config is mandatory for this labeler — fail fast if missing.
            if (idLabelConfig == null)
            {
                throw new InvalidOperationException("BoundingBox2DLabeler's idLabelConfig field must be assigned");
            }

            m_AsyncAnnotations = new Dictionary<int, AsyncAnnotation>();

            // Register the bounding-box annotation definition with the dataset.
            m_BoundingBoxAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
                "bounding box",
                idLabelConfig.GetAnnotationSpecification(),
                "Bounding box for each labeled object visible to the sensor",
                id: new Guid(annotationId));

            // Produce boxes each frame once rendered-object visibility is resolved.
            perceptionCamera.RenderedObjectInfosCalculated += OnRenderedObjectInfosCalculated;

            visualizationEnabled = supportsVisualization;

            // Capture the screen size at setup time: it can change during play, and the
            // visual bounding boxes must be rescaled against this original size.
            originalScreenSize = new Vector2(Screen.width, Screen.height);
        }