public bool Resolve(string directive, string value, bool codeEntered)
        {
            // Only directives that begin with a known annotation name are handled here;
            // everything else is left for other resolvers.
            bool isAnnotation = _knownAnnotations.Any(directive.StartsWith);

            if (!isAnnotation)
            {
                return false;
            }

            // Annotation directives must precede any executable code lines.
            if (codeEntered)
            {
                throw new CompilerException("Директивы аннотаций должны предшествовать строкам кода");
            }

            // Build the annotation record; the parameter list is optional.
            var annotation = new AnnotationDefinition { Name = directive };

            if (value != null)
            {
                annotation.Parameters = ParseAnnotationParameters(value);
            }

            _annotations.Add(annotation);

            return true;
        }
Example #2
0
 public AnnotationData(AnnotationDefinition annotationDefinition, string path, JArray valuesJson)
     : this()
 {
     // Run the parameterless constructor first, then capture the supplied
     // definition, source path and raw JSON values.
     this.AnnotationDefinition = annotationDefinition;
     this.Path = path;
     this.ValuesJson = valuesJson;
 }
Example #3
0
 public void AddAnnotation(AnnotationDefinition annotation)
 {
     // Wrap the annotation in a user-annotation attribute and register it
     // with this holder's attribute list.
     var attribute = new UserAnnotationAttribute();
     attribute.Annotation = annotation;
     _attributes.Add(attribute);
 }
        private static object MapHttpMethod(AnnotationDefinition anno)
        {
            // Expects one or two parameters: <Method> (a comma-separated list of
            // HTTP method names) and an optional second value passed through to
            // the attribute.
            if (anno.ParamCount < 1)
            {
                throw new AnnotationException(anno, "Missing parameter <Method>");
            }

            if (anno.ParamCount > 2)
            {
                throw new AnnotationException(anno, "Too many parameters");
            }

            var methods = anno.Parameters[0].RuntimeValue.AsString()
                .Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries);

            if (anno.ParamCount == 1)
            {
                return new CustomHttpMethodAttribute(methods);
            }

            // Exactly two parameters: forward the second one as well.
            return new CustomHttpMethodAttribute(methods, anno.Parameters[1].RuntimeValue.AsString());
        }
Example #5
0
 public void DefineViewPortAnnotation()
 {
     // Register the view-port annotation definition once, up front, so that
     // subsequent captures can report values against its fixed id.
     var definitionId = Guid.Parse("C0B4A22C-0420-4D9F-BAFC-954B8F7B35A7");
     plantAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
         "View_Port_Position",
         "The position relative to the screen",
         id: definitionId);
 }
Example #6
0
        /// <inheritdoc/>
        protected override void Setup()
        {
            // A label configuration is mandatory for producing bounding-box ground truth.
            if (idLabelConfig == null)
            {
                throw new InvalidOperationException("BoundingBox2DLabeler's idLabelConfig field must be assigned");
            }

            m_AsyncAnnotations = new Dictionary<int, AsyncAnnotation>();

            // Register the bounding-box annotation definition with the dataset.
            m_BoundingBoxAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
                "bounding box",
                idLabelConfig.GetAnnotationSpecification(),
                "Bounding box for each labeled object visible to the sensor",
                id: new Guid(annotationId));

            perceptionCamera.RenderedObjectInfosCalculated += OnRenderedObjectInfosCalculated;

            visualizationEnabled = supportsVisualization;

            // Remember the screen size at setup time. The screen size can change
            // during play, and the visual bounding boxes are rescaled accordingly.
            m_OriginalScreenSize = new Vector2(Screen.width, Screen.height);

            // Textures and text style used when drawing the visualized boxes.
            m_BoundingBoxTexture = Resources.Load<Texture>("outline_box");
            m_LabelTexture = Resources.Load<Texture>("solid_white");

            m_Style = new GUIStyle
            {
                fontSize = 16,
                padding = new RectOffset(4, 4, 4, 4),
                contentOffset = new Vector2(4, 0),
                alignment = TextAnchor.MiddleLeft
            };
            m_Style.normal.textColor = Color.black;
        }
        /// <inheritdoc/>
        protected override void Setup()
        {
            // Cache the camera dimensions; the segmentation render target must match them.
            var myCamera = perceptionCamera.GetComponent <Camera>();
            var width    = myCamera.pixelWidth;
            var height   = myCamera.pixelHeight;

            // A label configuration is mandatory for producing segmentation ground truth.
            if (labelConfig == null)
            {
                throw new InvalidOperationException(
                          "SemanticSegmentationLabeler's LabelConfig must be assigned");
            }

            m_AsyncAnnotations = new Dictionary <int, AsyncAnnotation>();

            var renderTextureDescriptor = new RenderTextureDescriptor(width, height, GraphicsFormat.R8G8B8A8_UNorm, 8);

            // Either resize a user-provided texture or create an internal override.
            if (targetTexture != null)
            {
                targetTexture.descriptor = renderTextureDescriptor;
            }
            else
            {
                m_TargetTextureOverride = new RenderTexture(renderTextureDescriptor);
            }

            // NOTE(review): if targetTexture is a plain field this dereferences null when the
            // override branch above was taken — presumably targetTexture is a property backed
            // by m_TargetTextureOverride; confirm against the full class.
            targetTexture.Create();
            targetTexture.name = "Labeling";

#if HDRP_PRESENT
            // HDRP: inject a custom pass that renders label colors into the target texture.
            var gameObject       = perceptionCamera.gameObject;
            var customPassVolume = gameObject.GetComponent <CustomPassVolume>() ?? gameObject.AddComponent <CustomPassVolume>();
            customPassVolume.injectionPoint = CustomPassInjectionPoint.BeforeRendering;
            customPassVolume.isGlobal       = true;
            m_SemanticSegmentationPass      = new SemanticSegmentationPass(myCamera, targetTexture, labelConfig)
            {
                name = "Labeling Pass"
            };
            customPassVolume.customPasses.Add(m_SemanticSegmentationPass);
#endif
#if URP_PRESENT
            // URP: the equivalent work is done by a scriptable render pass.
            perceptionCamera.AddScriptableRenderPass(new SemanticSegmentationUrpPass(myCamera, targetTexture, labelConfig));
#endif

            // One spec entry per configured label: its name and the pixel color it maps to.
            var specs = labelConfig.labelEntries.Select((l) => new SemanticSegmentationSpec()
            {
                label_name  = l.label,
                pixel_value = l.color
            }).ToArray();

            m_SemanticSegmentationAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
                "semantic segmentation",
                specs,
                "pixel-wise semantic segmentation label",
                "PNG",
                id: Guid.Parse(annotationId));

            // Read back the rendered segmentation texture each frame and forward the pixels.
            m_SemanticSegmentationTextureReader = new RenderTextureReader <Color32>(targetTexture, myCamera,
                                                                                    (frameCount, data, tex) => OnSemanticSegmentationImageRead(frameCount, data));
        }
Example #8
0
        public object Get(AnnotationDefinition annotation)
        {
            // Look up the mapper registered under the annotation's name;
            // annotations with no registered mapper simply yield null.
            if (_mappers.TryGetValue(annotation.Name, out var mapper))
            {
                return mapper(annotation);
            }

            return null;
        }
 public void Start()
 {
     // Metric and annotation definitions are registered once, up front,
     // under fixed ids so reported values can refer back to them.
     var lightMetricId = Guid.Parse("1F6BFF46-F884-4CC5-A878-DB987278FE35");
     lightMetricDefinition = DatasetCapture.RegisterMetricDefinition(
         "Light position",
         "The world-space position of the light",
         lightMetricId);

     var boundingBoxDefinitionId = Guid.Parse("C0B4A22C-0420-4D9F-BAFC-954B8F7B35A7");
     boundingBoxAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
         "Target bounding box",
         "The position of the target in the camera's local space",
         id: boundingBoxDefinitionId);
 }
        /// <inheritdoc/>
        protected override void Setup()
        {
            // A label configuration is mandatory for producing bounding-box ground truth.
            if (idLabelConfig == null)
            {
                throw new InvalidOperationException("BoundingBox2DLabeler's idLabelConfig field must be assigned");
            }

            // Per-frame asynchronous annotations, keyed by frame number.
            m_AsyncAnnotations = new Dictionary<int, AsyncAnnotation>();

            // Register the bounding-box annotation definition with the dataset.
            var specification = idLabelConfig.GetAnnotationSpecification();
            m_BoundingBoxAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
                "bounding box",
                specification,
                "Bounding box for each labeled object visible to the sensor",
                id: new Guid(annotationId));

            perceptionCamera.RenderedObjectInfosCalculated += OnRenderedObjectInfosCalculated;
        }
        /// <inheritdoc/>
        protected override void Setup()
        {
            // A label configuration is mandatory for producing bounding-box ground truth.
            if (idLabelConfig == null)
            {
                throw new InvalidOperationException("BoundingBox2DLabeler's idLabelConfig field must be assigned");
            }

            m_AsyncAnnotations = new Dictionary<int, AsyncAnnotation>();

            // Register the bounding-box annotation definition with the dataset.
            var annotationGuid = new Guid(annotationId);
            m_BoundingBoxAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
                "bounding box",
                idLabelConfig.GetAnnotationSpecification(),
                "Bounding box for each labeled object visible to the sensor",
                id: annotationGuid);

            perceptionCamera.RenderedObjectInfosCalculated += OnRenderedObjectInfosCalculated;

            visualizationEnabled = supportsVisualization;

            // Remember the screen size at setup time. The screen size can change
            // during play, and the visual bounding boxes are rescaled accordingly.
            originalScreenSize = new Vector2(Screen.width, Screen.height);
        }
        private static object MapAuthorizationAttribute(AnnotationDefinition anno)
        {
            // Build an AuthorizeAttribute. The optional parameters "roles"/"роли"
            // and "policy"/"политика" (accepted in either language) populate the
            // corresponding properties; unrecognized parameters are ignored.
            var attribute = new AuthorizeAttribute();

            if (anno.ParamCount == 0)
            {
                return attribute;
            }

            foreach (var parameter in anno.Parameters)
            {
                if (BiLingualEquals(parameter.Name, "roles", "роли"))
                {
                    attribute.Roles = parameter.RuntimeValue.AsString();
                }
                else if (BiLingualEquals(parameter.Name, "policy", "политика"))
                {
                    attribute.Policy = parameter.RuntimeValue.AsString();
                }
            }

            return attribute;
        }
        // Start is called before the first frame update
        void Awake()
        {
            //CaptureOptions.useAsyncReadbackIfSupported = false;

            // Register this camera as a sensor attached to an ego (the parent
            // Ego marker if present, otherwise a freshly registered empty ego).
            m_EgoMarker = this.GetComponentInParent <Ego>();
            var ego = m_EgoMarker == null?SimulationManager.RegisterEgo("") : m_EgoMarker.EgoHandle;

            SensorHandle = SimulationManager.RegisterSensor(ego, "camera", description, period, startTime);

            // Render targets are sized to match the camera's pixel dimensions.
            var myCamera = GetComponent <Camera>();
            var width    = myCamera.pixelWidth;
            var height   = myCamera.pixelHeight;

            // Ground-truth outputs require a labeling configuration; disable them
            // all rather than failing later if it is missing.
            if ((produceSegmentationImages || produceObjectCountAnnotations || produceBoundingBoxAnnotations) && LabelingConfiguration == null)
            {
                Debug.LogError("LabelingConfiguration must be set if producing ground truth data");
                produceSegmentationImages     = false;
                produceObjectCountAnnotations = false;
                produceBoundingBoxAnnotations = false;
            }

            // Separate render targets for instance segmentation and class labeling.
            segmentationTexture      = new RenderTexture(new RenderTextureDescriptor(width, height, GraphicsFormat.R8G8B8A8_UNorm, 8));
            segmentationTexture.name = "Segmentation";
            labelingTexture          = new RenderTexture(new RenderTextureDescriptor(width, height, GraphicsFormat.R8G8B8A8_UNorm, 8));
            labelingTexture.name     = "Labeling";

#if HDRP_PRESENT
            // HDRP: render both segmentation outputs via custom passes injected
            // before the main rendering.
            var customPassVolume = this.GetComponent <CustomPassVolume>() ?? gameObject.AddComponent <CustomPassVolume>();
            customPassVolume.injectionPoint = CustomPassInjectionPoint.BeforeRendering;
            customPassVolume.isGlobal       = true;
            m_SegmentationPass = new InstanceSegmentationPass()
            {
                name          = "Segmentation Pass",
                targetCamera  = myCamera,
                targetTexture = segmentationTexture
            };
            m_SegmentationPass.EnsureInit();
            m_SemanticSegmentationPass = new SemanticSegmentationPass(myCamera, labelingTexture, LabelingConfiguration)
            {
                name = "Labeling Pass"
            };

            SetupPasses(customPassVolume);
#endif
#if URP_PRESENT
            // URP: equivalent work is done by scriptable render passes.
            instanceSegmentationUrpPass = new InstanceSegmentationUrpPass(myCamera, segmentationTexture);
            semanticSegmentationUrpPass = new SemanticSegmentationUrpPass(myCamera, labelingTexture, LabelingConfiguration);
#endif

            if (produceSegmentationImages)
            {
                // One spec entry per configured label: id, name and pixel value.
                var specs = LabelingConfiguration.LabelEntries.Select((l) => new SemanticSegmentationSpec()
                {
                    label_id    = l.id,
                    label_name  = l.label,
                    pixel_value = l.value
                }).ToArray();

                m_SegmentationAnnotationDefinition = SimulationManager.RegisterAnnotationDefinition("semantic segmentation", specs, "pixel-wise semantic segmentation label", "PNG");

                // Read back the labeling texture each frame and forward the pixels.
                m_ClassLabelingTextureReader = new RenderTextureReader <short>(labelingTexture, myCamera,
                                                                               (frameCount, data, tex) => OnSemanticSegmentationImageRead(frameCount, data));
            }

            if (produceObjectCountAnnotations || produceBoundingBoxAnnotations || produceRenderedObjectInfoMetric)
            {
                // Shared per-label spec used by the count metric, the bounding-box
                // annotation and the rendered-object-info metric definitions.
                var labelingMetricSpec = LabelingConfiguration.LabelEntries.Select((l) => new ObjectCountSpec()
                {
                    label_id   = l.id,
                    label_name = l.label,
                }).ToArray();

                if (produceObjectCountAnnotations)
                {
                    m_ObjectCountMetricDefinition = SimulationManager.RegisterMetricDefinition("object count", labelingMetricSpec, "Counts of objects for each label in the sensor's view", id: new Guid(objectCountId));
                }

                if (produceBoundingBoxAnnotations)
                {
                    m_BoundingBoxAnnotationDefinition = SimulationManager.RegisterAnnotationDefinition("bounding box", labelingMetricSpec, "Bounding box for each labeled object visible to the sensor", id: new Guid(boundingBoxId));
                }

                if (produceRenderedObjectInfoMetric)
                {
                    m_RenderedObjectInfoMetricDefinition = SimulationManager.RegisterMetricDefinition("rendered object info", labelingMetricSpec, "Information about each labeled object visible to the sensor", id: new Guid(renderedObjectInfoId));
                }

                m_RenderedObjectInfoGenerator = new RenderedObjectInfoGenerator(LabelingConfiguration);
                World.DefaultGameObjectInjectionWorld.GetExistingSystem <GroundTruthLabelSetupSystem>().Activate(m_RenderedObjectInfoGenerator);

                // Read back the instance-segmentation texture each frame; derive
                // object infos and class counts, then fan out to the enabled outputs.
                m_SegmentationReader = new RenderTextureReader <uint>(segmentationTexture, myCamera, (frameCount, data, tex) =>
                {
                    if (segmentationImageReceived != null)
                    {
                        segmentationImageReceived(frameCount, data);
                    }

                    m_RenderedObjectInfoGenerator.Compute(data, tex.width, boundingBoxOrigin, out var renderedObjectInfos, out var classCounts, Allocator.Temp);

                    using (s_RenderedObjectInfosCalculatedEvent.Auto())
                        renderedObjectInfosCalculated?.Invoke(frameCount, renderedObjectInfos);

                    if (produceObjectCountAnnotations)
                    {
                        OnObjectCountsReceived(classCounts, LabelingConfiguration.LabelEntries, frameCount);
                    }

                    if (produceBoundingBoxAnnotations)
                    {
                        ProduceBoundingBoxesAnnotation(renderedObjectInfos, LabelingConfiguration.LabelEntries, frameCount);
                    }

                    if (produceRenderedObjectInfoMetric)
                    {
                        ProduceRenderedObjectInfoMetric(renderedObjectInfos, frameCount);
                    }
                });
Example #14
0
 /// <summary>
 /// Thrown when an annotation is applied incorrectly. The base message embeds
 /// the annotation name and the supplied detail text (message text is Russian:
 /// "Invalid use of annotation {name}: {message}").
 /// </summary>
 /// <param name="anno">The annotation definition that was misused.</param>
 /// <param name="message">Detail text appended to the error message.</param>
 public AnnotationException(AnnotationDefinition anno, string message)
     : base($"Неверное применение аннотации {anno.Name}: {message}")
 {
 }
Example #15
0
 /// <summary>
 /// Initializes a new instance of the <see cref="TimeIntervalAnnotationDisplayData"/> class.
 /// </summary>
 /// <param name="parent">The annotations visualization object that owns this display data instance.</param>
 /// <param name="annotation">The annotation event.</param>
 /// <param name="definition">The annotation definition.</param>
 public TimeIntervalAnnotationDisplayData(TimeIntervalAnnotationVisualizationObject parent, Message <TimeIntervalAnnotation> annotation, AnnotationDefinition definition)
 {
     // Capture the annotation message and its definition along with the
     // owning visualization object; the assignments are independent.
     this.Definition = definition;
     this.Annotation = annotation;
     this.parent = parent;
 }