/// <summary>
 ///
 /// Performs face detection in a still image.
 ///
 /// The algorithm detects one or more faces. For each detected face, a square facial bounding box is returned.
 ///
 /// The results are returned in the form of VSRect objects. The array of VSRect objects passed to this method as the output parameter should be allocated to the maximum number of faces expected (MAX_FACES in the example below).
 /// For example:
 ///
 /// \code
 /// VSRect[] boundingBoxArray = new VSRect[MAX_FACES];
 /// for (int i = 0; i < MAX_FACES; ++i)
 /// {
 ///     boundingBoxArray[i] = new VSRect();
 /// }
 ///
 /// int numFaces = 0;
 /// if (gVisageFeaturesDetector.Initialized)
 /// {
 ///     numFaces = gVisageFeaturesDetector.DetectFaces(frame, boundingBoxArray);
 /// }
 /// \endcode
 ///
 /// After this call, numFaces contains the number of detected faces. The first numFaces members of the faces array are filled with the resulting bounding boxes, one for each detected face.
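 ///
 /// For example, a minimal sketch of reading the returned bounding boxes (continuing the example above; the VSRect member names mirror OpenCV's CvRect and are an assumption here):
 ///
 /// \code
 /// for (int i = 0; i < numFaces; ++i)
 /// {
 ///     VSRect box = boundingBoxArray[i];
 ///     // box.x and box.y give the top-left corner of the face;
 ///     // box.width and box.height give its size in pixels
 /// }
 /// \endcode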
 ///
 /// VSImage is an image storage class similar to IplImage from OpenCV; it has the same structure and members, so it can be used like IplImage. Please refer to the OpenCV documentation for details on accessing IplImage data members; the basic members are the image size (frame->width, frame->height) and the pointer to the actual pixel data of the image (frame->imageData).
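 ///
 /// For example, a minimal sketch of reading the basic image members (the C# member access below assumes the wrapper mirrors the IplImage layout described above):
 ///
 /// \code
 /// int width = frame.width;    // image width in pixels
 /// int height = frame.height;  // image height in pixels
 /// // frame.imageData points to the raw pixel buffer, stored row by row
 /// \endcode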
 ///
 /// The following image formats are supported:
 /// - VISAGE_FORMAT.RGB: each pixel of the image is represented by three bytes representing red, green and blue channels, respectively.
 /// - VISAGE_FORMAT.RGBA: each pixel of the image is represented by four bytes representing red, green, blue and alpha (ignored) channels, respectively.
 /// - VISAGE_FORMAT.LUMINANCE: each pixel of the image is represented by one byte representing the luminance (gray level) of the image.
 /// Origin must be:
 /// - VISAGE_ORIGIN.TL: Origin is the top left pixel of the image. Pixels are ordered row-by-row starting from top left.
 ///
 /// Note that the input image is internally converted to grayscale.
 ///
 /// </summary>
 /// <param name="frame">The input image.</param>
 /// <param name="faces">Pointer to an array of VSRect objects in which the results will be returned.</param>
 /// <param name="minFaceScale">Scale of smallest face to be searched for, defined as decimal fraction [0.0 - 1.0] of input image size (min(width, height))</param>
 /// <param name="maxFaceScale">Scale of largest face to be searched for, defined as decimal fraction [0.0 - 1.0] of input image size (min(width, height))</param>
 /// <param name="useRefinementStep">If set to true, additional refinement algorithm will be used resulting with more precise facial bounding boxes and lower FPR, but higher detection time </param>
 /// <returns>Number of detected faces (0 or more)</returns>
 public int DetectFaces(VSImage frame, VSRect[] faces, float minFaceScale = 0.1f, float maxFaceScale = 1.0f, bool useRefinementStep = true);
 /// <summary>
 ///
 /// Performs face and facial features detection in a still image.
 ///
 /// The algorithm detects one or more faces and their features. The results are, for each detected face, the 3D head pose, the gaze direction, eye closure, the coordinates of facial feature points (e.g. chin tip, nose tip, lip corners) and a 3D face model fitted to the face.
 ///
 /// The results are returned in the form of FaceData objects. The array of FaceData objects passed to this method as the output parameter should be allocated to the maximum number of faces expected (MAX_FACES in the example below).
 /// For example:
 ///
 /// \code
 /// FaceData[] dataArray = new FaceData[MAX_FACES];
 /// for (int i = 0; i < MAX_FACES; ++i)
 /// {
 ///     dataArray[i] = new FaceData();
 /// }
 ///
 /// int numFaces = 0;
 /// if (gVisageFeaturesDetector.Initialized)
 /// {
 ///     numFaces = gVisageFeaturesDetector.DetectFacialFeatures(frame, dataArray);
 /// }
 /// \endcode
 ///
 /// After this call, numFaces contains the number of faces actually detected. The first numFaces members of the output array are filled with the resulting data for each detected face.
 /// Please refer to the FaceData documentation for a detailed description of the returned parameters.
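 ///
 /// For example, a minimal sketch of consuming the results (continuing the example above; apart from featurePoints2D, named in the parameters below, the FaceData members to read depend on the use case):
 ///
 /// \code
 /// for (int i = 0; i < numFaces; ++i)
 /// {
 ///     FaceData face = dataArray[i];
 ///     // face.featurePoints2D holds the detected 2D facial feature points;
 ///     // see the FaceData documentation for the full set of returned values
 /// }
 /// \endcode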
 ///
 /// The following image formats are supported:
 /// - VISAGE_FORMAT.RGB: each pixel of the image is represented by three bytes representing red, green and blue channels, respectively.
 /// - VISAGE_FORMAT.RGBA: each pixel of the image is represented by four bytes representing red, green, blue and alpha (ignored) channels, respectively.
 /// - VISAGE_FORMAT.LUMINANCE: each pixel of the image is represented by one byte representing the luminance (gray level) of the image.
 /// Origin must be:
 /// - VISAGE_ORIGIN.TL: Origin is the top left pixel of the image. Pixels are ordered row-by-row starting from top left.
 ///
 /// Note that the input image is internally converted to grayscale.
 ///
 /// @see FaceData
 ///
 /// </summary>
 /// <param name="frame">The input image.</param>
 /// <param name="output">Pointer to an array of FaceData objects in which the results will be returned.</param>
 /// <param name="minFaceScale">Scale of smallest face to be searched for, defined as decimal fraction [0.0 - 1.0] of input image size (min(width, height))</param>
 /// <param name="maxFaceScale">Scale of smallest face to be searched for, defined as decimal fraction [0.0 - 1.0] of input image size (min(width, height))</param>
 /// <param name="outputOnly2DFeatures">If set, detection time will be reduced and only featurePoints2D will be returned.</param>
 /// <returns>Number of detected faces (0 or more)</returns>
 public int DetectFacialFeatures(VSImage frame, FaceData[] output, float minFaceScale = 0.1f, float maxFaceScale = 1.0f, bool outputOnly2DFeatures = false);
 /// <summary>
 /// Extracts a face descriptor from the input RGB image and adds it to the gallery.
 /// </summary>
 /// <param name="image"> VsImage pointer that contains the input RGB image. The image should contain only one face and this face will be added to the gallery. In case of multiple faces in the image, it is not defined which face would be used. </param>
 /// <param name="facedata"> Facial data obtained from VisageTracker or VisageFeaturesDetector. </param>
 /// <param name="name"> Name of the face in the image. </param>
 /// <returns> Returns 1 on success, 0 on failure. The function may fail if the face is not found in the image or if the image argument is not a valid RGB image pointer. </returns>
 ///
 /// @see VSImage
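 ///
 /// For example, a minimal sketch (the object names recognition and detector, and an RGB image containing a single face, are assumed here, not part of the API):
 ///
 /// \code
 /// FaceData[] data = new FaceData[1];
 /// data[0] = new FaceData();
 /// int numFaces = detector.DetectFacialFeatures(image, data);
 /// if (numFaces > 0)
 ///     recognition.AddDescriptor(image, data[0], "John");
 /// \endcode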
 int AddDescriptor(VSImage ^ image, FaceData ^ facedata, System::String ^ name);
 /// <summary>
 /// Get normalized face image.
 ///
 /// This function returns a normalized face image with the corresponding feature points.
 /// The size of the normalized face in the image is such that the inter-pupillary distance is approximately a quarter of the image width.
 ///
 /// The face is normalized to a varying degree depending on the normalization type. For example, a rotated
 /// face with an open mouth will only have its pose straightened with normalization type VS_NORM.POSE, while
 /// with the addition of VS_NORM.AU the normalized face will also have its mouth closed.
 ///
 /// Note that the face will always have its pose straightened.
 ///
 /// Types of normalization are:
 ///   - VS_NORM.POSE - face translation and rotation are set to zero thereby normalizing the pose
 ///   - VS_NORM.SU - parameters describing the face shape (shape units) are set to zero thereby normalizing the face shape
 ///   - VS_NORM.AU - parameters describing facial movements (action units) are set to zero, for example open mouth will be closed
 ///
 /// Different types of normalization can be combined with "|" operator, for example VS_NORM.POSE | VS_NORM.SU.
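 ///
 /// For example, a minimal sketch (detector, greyFrame, faceData and a pre-allocated normFace image are assumed here; the ref-style FDP argument follows the signature below):
 ///
 /// \code
 /// FDP normFDP = null;
 /// detector.GetNormalizedFaceImage(greyFrame, normFace, faceData, ref normFDP,
 ///                                 VS_NORM.POSE | VS_NORM.SU, "");
 /// // normFace now holds the face with pose and shape normalized;
 /// // normFDP holds the matching feature points, with coordinates in the 0-1 range
 /// \endcode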
 /// </summary>
 /// <param name="frame">Image containing the face to be normalized, must be grey-scale</param>
 /// <param name="normFace">Image containing the normalized face; it must be allocated before calling the function; face size will depend on this image size</param>
 /// <param name="face_data">FaceData structure containing the information about the face that will be normalized</param>
 /// <param name="normFDP">Features points that correspond to the normalized face; coordinates are normalized to 0-1 range</param>
 /// <param name="norm_type">Normalization type, a binary combination of VS_NORM.POSE - normalizes pose, VS_NORM.SU - normalizes shape units and VS_NORM.AU - normalizes action units</param>
 /// <param name="dataPath">Path to the folder where Face Detector.cfg is located, default values is ""</param>
 ///
 public void GetNormalizedFaceImage(VSImage ^ frame, VSImage ^ normFace, FaceData ^ face_data, FDP ^ % normFDP, VS_NORM norm_type, System::String ^ dataPath);
 /// <summary>
 /// Extracts the face descriptor for face recognition from a facial image. Prior to using this function, it is necessary to process the facial image or video frame using VisageTracker or VisageFeaturesDetector and pass the obtained facial data to this function.
 ///
 /// </summary>
 /// <param name="facedata"> Facial data obtained from VisageTracker or VisageFeaturesDetector. </param>
 /// <param name="image"> VsImage pointer to the input RGB image. </param>
 /// <param name="descriptor"> Pointer to a DESCRIPTOR_SIZE-dimensional array of short. The resulting face descriptor is returned in this array. </param>
 /// <returns> Returns 1 on success, 0 on failure. </returns>
 ///
 /// @see FaceData, VisageTracker, VisageFeaturesDetector
 ///
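 /// For example, a minimal sketch (recognition, faceData and image are assumed to be set up as described above; DESCRIPTOR_SIZE is the descriptor dimension named in the parameter description):
 ///
 /// \code
 /// short[] descriptor = new short[DESCRIPTOR_SIZE];
 /// if (recognition.ExtractDescriptor(faceData, image, descriptor) == 1)
 /// {
 ///     // descriptor now holds the face descriptor and can be stored in a gallery
 ///     // or compared with other descriptors
 /// }
 /// \endcode
 ///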
 int ExtractDescriptor(FaceData ^ facedata, VSImage ^ image, short[] descriptor);