Example #1
        // Awake is called when the script instance is being loaded
        void Awake()
        {
            m_EgoMarker = this.GetComponentInParent<Ego>();
            var ego = m_EgoMarker == null ? DatasetCapture.RegisterEgo("") : m_EgoMarker.EgoHandle;

            SensorHandle = DatasetCapture.RegisterSensor(ego, "camera", description, period, startTime);

            SetupInstanceSegmentation();

            RenderPipelineManager.beginCameraRendering += OnBeginCameraRendering;
            RenderPipelineManager.endCameraRendering   += CheckForRendererFeature;
            DatasetCapture.SimulationEnding            += OnSimulationEnding;
        }
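For symmetry, the callbacks registered in this Awake are normally unsubscribed when the component is torn down. A minimal sketch of that counterpart, not part of the original example; the handler names are simply the ones referenced above:

        void OnDestroy()
        {
            // Unsubscribe the render-pipeline hooks and the simulation-ending handler
            // registered in Awake so they do not fire on a destroyed component.
            RenderPipelineManager.beginCameraRendering -= OnBeginCameraRendering;
            RenderPipelineManager.endCameraRendering   -= CheckForRendererFeature;
            DatasetCapture.SimulationEnding            -= OnSimulationEnding;
        }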
Example #2
        // Awake is called when the script instance is being loaded
        void Awake()
        {
            m_EgoMarker = this.GetComponentInParent<Ego>();
            var ego = m_EgoMarker == null ? DatasetCapture.RegisterEgo("") : m_EgoMarker.EgoHandle;

            SensorHandle = DatasetCapture.RegisterSensor(ego, "camera", description, period, startTime);

            AsyncRequest.maxJobSystemParallelism = 0; // Jobs are not chained to one another in any way, maximizing parallelism
            AsyncRequest.maxAsyncRequestFrameAge = 4; // Ensure that readbacks happen before Allocator.TempJob allocations get stale

            SetupInstanceSegmentation();
            var cam = GetComponent<Camera>();

#if UNITY_EDITOR || DEVELOPMENT_BUILD
            SetupVisualizationCamera(cam);
#endif

            DatasetCapture.SimulationEnding += OnSimulationEnding;
        }
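The two AsyncRequest assignments above are static, process-wide settings, so they affect every async readback in the player, not just this camera. A hedged sketch of pulling them out into serialized fields so they can be tuned per scene; the field and method names here are hypothetical, not from the Perception source:

        // Hypothetical fields mirroring the static AsyncRequest settings used in Awake.
        [SerializeField] int m_MaxJobSystemParallelism = 0;
        [SerializeField] int m_MaxAsyncRequestFrameAge = 4;

        void ConfigureAsyncRequests()
        {
            // Apply the per-component values to the global AsyncRequest configuration.
            AsyncRequest.maxJobSystemParallelism = m_MaxJobSystemParallelism;
            AsyncRequest.maxAsyncRequestFrameAge = m_MaxAsyncRequestFrameAge;
        }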
Example #3
        // Awake is called when the script instance is being loaded
        void Awake()
        {
            //CaptureOptions.useAsyncReadbackIfSupported = false;

            m_EgoMarker = this.GetComponentInParent<Ego>();
            var ego = m_EgoMarker == null ? SimulationManager.RegisterEgo("") : m_EgoMarker.EgoHandle;

            SensorHandle = SimulationManager.RegisterSensor(ego, "camera", description, period, startTime);

            var myCamera = GetComponent<Camera>();
            var width    = myCamera.pixelWidth;
            var height   = myCamera.pixelHeight;

            if ((produceSegmentationImages || produceObjectCountAnnotations || produceBoundingBoxAnnotations) && LabelingConfiguration == null)
            {
                Debug.LogError("LabelingConfiguration must be set if producing ground truth data");
                produceSegmentationImages     = false;
                produceObjectCountAnnotations = false;
                produceBoundingBoxAnnotations = false;
            }

            segmentationTexture      = new RenderTexture(new RenderTextureDescriptor(width, height, GraphicsFormat.R8G8B8A8_UNorm, 8));
            segmentationTexture.name = "Segmentation";
            labelingTexture          = new RenderTexture(new RenderTextureDescriptor(width, height, GraphicsFormat.R8G8B8A8_UNorm, 8));
            labelingTexture.name     = "Labeling";
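            // Two offscreen targets: instance-segmentation IDs render into segmentationTexture,
            // per-class (semantic) labels render into labelingTexture.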

#if HDRP_PRESENT
            var customPassVolume = this.GetComponent<CustomPassVolume>() ?? gameObject.AddComponent<CustomPassVolume>();
            customPassVolume.injectionPoint = CustomPassInjectionPoint.BeforeRendering;
            customPassVolume.isGlobal       = true;
            m_SegmentationPass = new InstanceSegmentationPass()
            {
                name          = "Segmentation Pass",
                targetCamera  = myCamera,
                targetTexture = segmentationTexture
            };
            m_SegmentationPass.EnsureInit();
            m_SemanticSegmentationPass = new SemanticSegmentationPass(myCamera, labelingTexture, LabelingConfiguration)
            {
                name = "Labeling Pass"
            };

            SetupPasses(customPassVolume);
#endif
#if URP_PRESENT
            instanceSegmentationUrpPass = new InstanceSegmentationUrpPass(myCamera, segmentationTexture);
            semanticSegmentationUrpPass = new SemanticSegmentationUrpPass(myCamera, labelingTexture, LabelingConfiguration);
#endif
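            // From here on, Awake registers the annotation and metric definitions that were
            // enabled above and attaches RenderTextureReader callbacks that read the two
            // textures back from the GPU and turn them into annotations and metrics.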

            if (produceSegmentationImages)
            {
                var specs = LabelingConfiguration.LabelEntries.Select((l) => new SemanticSegmentationSpec()
                {
                    label_id    = l.id,
                    label_name  = l.label,
                    pixel_value = l.value
                }).ToArray();

                m_SegmentationAnnotationDefinition = SimulationManager.RegisterAnnotationDefinition("semantic segmentation", specs, "pixel-wise semantic segmentation label", "PNG");

                m_ClassLabelingTextureReader = new RenderTextureReader<short>(labelingTexture, myCamera,
                                                                               (frameCount, data, tex) => OnSemanticSegmentationImageRead(frameCount, data));
            }

            if (produceObjectCountAnnotations || produceBoundingBoxAnnotations || produceRenderedObjectInfoMetric)
            {
                var labelingMetricSpec = LabelingConfiguration.LabelEntries.Select((l) => new ObjectCountSpec()
                {
                    label_id   = l.id,
                    label_name = l.label,
                }).ToArray();

                if (produceObjectCountAnnotations)
                {
                    m_ObjectCountMetricDefinition = SimulationManager.RegisterMetricDefinition("object count", labelingMetricSpec, "Counts of objects for each label in the sensor's view", id: new Guid(objectCountId));
                }

                if (produceBoundingBoxAnnotations)
                {
                    m_BoundingBoxAnnotationDefinition = SimulationManager.RegisterAnnotationDefinition("bounding box", labelingMetricSpec, "Bounding box for each labeled object visible to the sensor", id: new Guid(boundingBoxId));
                }

                if (produceRenderedObjectInfoMetric)
                {
                    m_RenderedObjectInfoMetricDefinition = SimulationManager.RegisterMetricDefinition("rendered object info", labelingMetricSpec, "Information about each labeled object visible to the sensor", id: new Guid(renderedObjectInfoId));
                }

                m_RenderedObjectInfoGenerator = new RenderedObjectInfoGenerator(LabelingConfiguration);
                World.DefaultGameObjectInjectionWorld.GetExistingSystem<GroundTruthLabelSetupSystem>().Activate(m_RenderedObjectInfoGenerator);

                m_SegmentationReader = new RenderTextureReader<uint>(segmentationTexture, myCamera, (frameCount, data, tex) =>
                {
                    if (segmentationImageReceived != null)
                    {
                        segmentationImageReceived(frameCount, data);
                    }

                    m_RenderedObjectInfoGenerator.Compute(data, tex.width, boundingBoxOrigin, out var renderedObjectInfos, out var classCounts, Allocator.Temp);

                    using (s_RenderedObjectInfosCalculatedEvent.Auto())
                        renderedObjectInfosCalculated?.Invoke(frameCount, renderedObjectInfos);

                    if (produceObjectCountAnnotations)
                    {
                        OnObjectCountsReceived(classCounts, LabelingConfiguration.LabelEntries, frameCount);
                    }

                    if (produceBoundingBoxAnnotations)
                    {
                        ProduceBoundingBoxesAnnotation(renderedObjectInfos, LabelingConfiguration.LabelEntries, frameCount);
                    }

                    if (produceRenderedObjectInfoMetric)
                    {
                        ProduceRenderedObjectInfoMetric(renderedObjectInfos, frameCount);
                    }
                });
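Example #3 cuts off partway through Awake, but the resources it allocates (two RenderTextures and two RenderTextureReader instances) imply a matching teardown elsewhere in the class. A hedged sketch of what that cleanup could look like, assuming RenderTextureReader exposes Dispose() as its use as a GPU-readback helper suggests:

        void OnDisable()
        {
            // Assumption: RenderTextureReader is disposable; verify against your Perception package version.
            m_ClassLabelingTextureReader?.Dispose();
            m_ClassLabelingTextureReader = null;
            m_SegmentationReader?.Dispose();
            m_SegmentationReader = null;

            // Release the GPU memory backing the segmentation and labeling targets.
            if (segmentationTexture != null)
                segmentationTexture.Release();
            if (labelingTexture != null)
                labelingTexture.Release();
        }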