        /// <summary>
        /// FaceSentimentAnalyzerDescriptor constructor
        /// </summary>
        public FaceSentimentAnalyzerDescriptor()
        {
            Name = "FaceSentimentAnalyzer";

            Description = "Finds a face in the image and infers its predominant sentiment from a set of 8 possible labels";

            // {F8D275CE-C244-4E71-8A39-57335D291388}
            Id = new Guid(0xf8d275ce, 0xc244, 0x4e71, 0x8a, 0x39, 0x57, 0x33, 0x5d, 0x29, 0x13, 0x88);

            Version = SkillVersion.Create(
                0,                   // major version
                1,                   // minor version
                "Contoso Developer", // Author name
                "Contoso Publishing" // Publisher name
                );

            // Describe input feature
            m_inputSkillDesc = new List<ISkillFeatureDescriptor>();
            m_inputSkillDesc.Add(
                new SkillFeatureImageDescriptor(
                    FaceSentimentAnalyzerConst.SKILL_INPUTNAME_IMAGE,
                    "the input image onto which the sentiment analysis runs",
                    true, // isRequired (since this is an input, it is required to be bound before the evaluation occurs)
                    -1,   // width
                    -1,   // height
                    -1,   // maxDimension
                    BitmapPixelFormat.Nv12,
                    BitmapAlphaMode.Ignore)
                );

            // Describe first output feature
            m_outputSkillDesc = new List<ISkillFeatureDescriptor>();
            m_outputSkillDesc.Add(
                new SkillFeatureTensorDescriptor(
                    FaceSentimentAnalyzerConst.SKILL_OUTPUTNAME_FACERECTANGLE,
                    "a face bounding box in relative coordinates (left, top, right, bottom)",
                    false, // isRequired (since this is an output, it is automatically populated after evaluation)
                    new List<long>() { 4 }, // tensor shape
                    SkillElementKind.Float)
                );

            // Describe second output feature
            m_outputSkillDesc.Add(
                new SkillFeatureTensorDescriptor(
                    FaceSentimentAnalyzerConst.SKILL_OUTPUTNAME_FACESENTIMENTSCORES,
                    "the prediction score for each class",
                    false, // isRequired (since this is an output, it is automatically populated after evaluation)
                    new List<long>() { 1, 8 }, // tensor shape
                    SkillElementKind.Float)
                );
        }
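
        // Usage sketch (illustrative, not part of the original sample): how a consuming app might
        // drive this descriptor end to end with the Microsoft.AI.Skills.SkillInterfacePreview
        // contract. The companion FaceSentimentAnalyzerSkill/FaceSentimentAnalyzerBinding types and
        // the SetInputImageAsync helper are assumed here; verify the exact names against the skill
        // package you build. Requires System.Threading.Tasks and Windows.Media.
        private static async Task AnalyzeFrameAsync(VideoFrame frame)
        {
            var descriptor = new FaceSentimentAnalyzerDescriptor();

            // Pick the first execution device (e.g. CPU or GPU) the descriptor reports as supported.
            var devices = await descriptor.GetSupportedExecutionDevicesAsync();
            var skill = await descriptor.CreateSkillAsync(devices[0]) as FaceSentimentAnalyzerSkill;

            // Bind the required input image declared above, then evaluate.
            var binding = await skill.CreateSkillBindingAsync() as FaceSentimentAnalyzerBinding;
            await binding.SetInputImageAsync(frame);
            await skill.EvaluateAsync(binding);

            // At this point the two output features declared above (SKILL_OUTPUTNAME_FACERECTANGLE
            // and the 1x8 SKILL_OUTPUTNAME_FACESENTIMENTSCORES tensor) are populated on the binding.
        }
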
        /// <summary>
        /// NeuralStyleTransformerDescriptor constructor
        /// </summary>
        public NeuralStyleTransformerDescriptor(StyleChoices styleChoice = StyleChoices.Candy)
        {
            Name = "NeuralStyleTransformer";

            Description = "Transform your image to an art";

            // {F8D275CE-C244-4E71-8A39-57335D291387}
            Id = new Guid(0xf8d275ce, 0xc244, 0x4e71, 0x8a, 0x39, 0x57, 0x33, 0x5d, 0x29, 0x13, 0x87);

            Version = SkillVersion.Create(
                0,                       // major version
                1,                       // minor version
                "Makers ID",             // Author name
                "Buitenzorg Makers Club" // Publisher name
                );

            // Describe input feature
            m_inputSkillDesc = new List<ISkillFeatureDescriptor>();
            m_inputSkillDesc.Add(
                new SkillFeatureImageDescriptor(
                    NeuralStyleTransformerConst.SKILL_INPUTNAME_IMAGE,
                    "the input image onto which the model runs",
                    true, // isRequired (since this is an input, it is required to be bound before the evaluation occurs)
                    -1,   // width
                    -1,   // height
                    -1,   // maxDimension
                    BitmapPixelFormat.Nv12,
                    BitmapAlphaMode.Ignore)
                );

            // Describe first output feature
            m_outputSkillDesc = new List<ISkillFeatureDescriptor>();
            m_outputSkillDesc.Add(
                new SkillFeatureImageDescriptor(
                    NeuralStyleTransformerConst.SKILL_OUTPUTNAME_IMAGE,
                    "a transformed image",
                    true, // isRequired
                    -1,   // width
                    -1,   // height
                    -1,   // maxDimension
                    BitmapPixelFormat.Nv12,
                    BitmapAlphaMode.Ignore)
                );
            // Store the selected style (Candy when not specified)
            this.styleChoice = styleChoice;
        }
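
        // Illustrative sketch (hypothetical helper, not part of the original sample): the stored
        // styleChoice is typically used to pick which ONNX model the skill loads when it is created.
        // The file name below is a placeholder; map each StyleChoices member to the model actually
        // packaged with the skill.
        private static string GetModelFileName(StyleChoices styleChoice)
        {
            switch (styleChoice)
            {
                case StyleChoices.Candy:
                    return "candy.onnx"; // placeholder file name
                default:
                    // Remaining StyleChoices members would map to their own model files here.
                    return "candy.onnx";
            }
        }
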
 /// <summary> Initializes a new instance of <see cref="SentimentSkill"/>. </summary>
 /// <param name="inputs"> Inputs of the skills could be a column in the source data set, or the output of an upstream skill. </param>
 /// <param name="outputs"> The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. </param>
 /// <param name="skillVersion"> Service version information of the skill. Default value is <see cref="SkillVersion.V1"/>. </param>
 /// <exception cref="ArgumentNullException"> <paramref name="inputs"/> or <paramref name="outputs"/> is null. </exception>
 public SentimentSkill(IEnumerable <InputFieldMappingEntry> inputs, IEnumerable <OutputFieldMappingEntry> outputs, SkillVersion skillVersion) : this(inputs, outputs)
 {
     _skillVersion = skillVersion;
     ODataType     = skillVersion.ToString();
 }
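
        // Usage sketch (illustrative, not taken from this source): building the skill with explicit
        // field mappings and an explicit service version. InputFieldMappingEntry.Source,
        // OutputFieldMappingEntry.TargetName, and the "text"/"score" mapping names follow the
        // Azure.Search.Documents object model for the V1 sentiment skill; verify them against the
        // SDK version you reference.
        private static SentimentSkill CreateSentimentSkill()
        {
            var inputs = new List<InputFieldMappingEntry>
            {
                new InputFieldMappingEntry("text") { Source = "/document/content" }
            };
            var outputs = new List<OutputFieldMappingEntry>
            {
                new OutputFieldMappingEntry("score") { TargetName = "sentimentScore" }
            };

            // Pass a version explicitly instead of relying on the SkillVersion.V1 default.
            return new SentimentSkill(inputs, outputs, SkillVersion.V1);
        }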