Example #1
        /// <summary>
        /// The main program entry point
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var img = Dlib.LoadImage <RgbPixel>(inputFilePath);

                    // find all faces in the image
                    var faces = fd.Operator(img);
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        // draw the landmark points on the image
                        for (var i = 0; i < shape.Parts; i++)
                        {
                            var point = shape.GetPart((uint)i);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }
                    }

                    // export the modified image
                    Dlib.SaveJpeg(img, "output.jpg");
                }
        }
        public static void DetectFacesAsync(string inputFilePath, string subscriptionKey, string uriBase, IFaceClient client, string vocabularyPath)
        {
            // set up Dlib facedetector
            DirectoryInfo dir = new DirectoryInfo(inputFilePath);

            using (var fd = Dlib.GetFrontalFaceDetector())
            {
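                // process every JPEG file in the input directory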
                foreach (FileInfo files in dir.GetFiles("*.jpg"))
                {
                    string _inputFilePath = inputFilePath + files.Name;

                    // load input image
                    Array2D <RgbPixel> img = Dlib.LoadImage <RgbPixel>(_inputFilePath);

                    // find all faces in the image
                    Rectangle[] faces = fd.Operator(img);
                    if (faces.Length != 0)
                    {
                        Console.WriteLine("Picture " + files.Name + " have faces, sending data to Azure");
                        MakeAnalysisRequestAsync(_inputFilePath, subscriptionKey, uriBase, files.Name, client, vocabularyPath).Wait();
                    }

                    foreach (var face in faces)
                    {
                        // draw a rectangle for each face
                        Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                    }
                    // export the modified image
                    Dlib.SaveJpeg(img, "./Results/" + files.Name);
                }
            }
        }
        public static void DetectFacesOnImage(string sourceImagePath, string destImagePath)
        {
            // set up Dlib facedetector
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                // load input image
                var image = Dlib.LoadImage <RgbPixel>(sourceImagePath);

                DetectFacesOnImage(image);

                // export the modified image
                Dlib.SaveJpeg(image, destImagePath);
            }
        }
Example #4
        public void FindFaces()
        {
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
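                // load the input image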
                var img = Dlib.LoadImage <RgbPixel>(path);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }
                Dlib.SaveJpeg(img, @"D:\output.jpg");
            }
        }
Example #5
        public static void Recognize(string file)
        {
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(file);

                // find all faces in the image
                var faces = fd.Operator(img);

                foreach (var face in faces)
                {
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }


                Dlib.SaveJpeg(img, file);
            }
        }
Example #6
        /// <summary>
        /// The main program entry point
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // set up Dlib facedetector
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                // load input image
                var img = Dlib.LoadImage <RgbPixel>(inputFilePath);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }

                // export the modified image
                Dlib.SaveJpeg(img, "output.jpg");
            }
        }
Example #7
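 /// <summary>
 /// Saves the supplied Dlib image to the given file path as a JPEG.
 /// </summary>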
 public void WriteImageToFilePath(Array2D <RgbPixel> img, string filepath)
 {
     Dlib.SaveJpeg(img, filepath);
 }
Example #8
        private static void Preprocess(string type, string input, FrontalFaceDetector faceDetector, ShapePredictor posePredictor, string output)
        {
            var imageCount = 0;

            var r = new ulong[Size * Size];
            var g = new ulong[Size * Size];
            var b = new ulong[Size * Size];

            var csv       = ReadCsv(Path.Combine(input, $"{type}.csv"));
            var outputDir = Path.Combine(output, type);

            foreach (var kvp in csv)
            {
                using (var tmp = Dlib.LoadImageAsMatrix <RgbPixel>(Path.Combine(input, type, kvp.Key)))
                {
                    var dets = faceDetector.Operator(tmp);
                    if (!dets.Any())
                    {
                        Console.WriteLine($"Warning: Failed to detect face from '{kvp.Key}'");
                        continue;
                    }

                    // Get the largest rectangle; it is most likely the best face.
                    var det = dets.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V.Area > working.V.Area) ? max : working).V;
                    using (var ret = posePredictor.Detect(tmp, det))
                        using (var chip = Dlib.GetFaceChipDetails(ret, Size, 0d))
                            using (var faceChips = Dlib.ExtractImageChip <RgbPixel>(tmp, chip))
                            {
                                var dst    = Path.Combine(outputDir, kvp.Key);
                                var dstDir = Path.GetDirectoryName(dst);
                                Directory.CreateDirectory(dstDir);
                                Dlib.SaveJpeg(faceChips, dst, 100);

                                var index = 0;
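                                // accumulate per-pixel channel sums so the mean image can be computed later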
                                for (var row = 0; row < Size; row++)
                                {
                                    for (var col = 0; col < Size; col++)
                                    {
                                        var rgb = faceChips[row, col];
                                        r[index] += rgb.Red;
                                        g[index] += rgb.Green;
                                        b[index] += rgb.Blue;
                                        index++;
                                    }
                                }
                            }

                    imageCount++;
                }
            }

            using (var mean = new Matrix <RgbPixel>(Size, Size))
            {
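                // average the accumulated channel sums over all processed images to build the mean image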
                var index = 0;
                for (var row = 0; row < Size; row++)
                {
                    for (var col = 0; col < Size; col++)
                    {
                        var red   = (double)r[index] / imageCount;
                        var green = (double)g[index] / imageCount;
                        var blue  = (double)b[index] / imageCount;

                        var newRed   = (byte)Math.Round(red);
                        var newGreen = (byte)Math.Round(green);
                        var newBlue  = (byte)Math.Round(blue);
                        mean[row, col] = new RgbPixel(newRed, newGreen, newBlue);

                        index++;
                    }
                }

                Dlib.SaveBmp(mean, Path.Combine(output, $"{type}.mean.bmp"));
            }
        }
Example #9
        /// <summary>
        /// The main program entry point
        /// </summary>
        /// <param name="args">The command line arguments</param>
        static void Main(string[] args)
        {
            // set up Dlib facedetectors and shapedetectors
            using (var fd = Dlib.GetFrontalFaceDetector())
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var img = Dlib.LoadImage <RgbPixel>(inputFilePath);

                    // find all faces in the image
                    var faces = fd.Operator(img);
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        // build the 3d face model
                        var model = Utility.GetFaceModel();

                        // get the landmark point we need
                        var landmarks = new MatOfPoint2d(1, 6,
                                                         (from i in new int[] { 30, 8, 36, 45, 48, 54 }
                                                          let pt = shape.GetPart((uint)i)
                                                                   select new OpenCvSharp.Point2d(pt.X, pt.Y)).ToArray());

                        // build the camera matrix
                        var cameraMatrix = Utility.GetCameraMatrix((int)img.Rect.Width, (int)img.Rect.Height);

                        // build the coefficient matrix
                        var coeffs = new MatOfDouble(4, 1);
                        coeffs.SetTo(0);

                        // find head rotation and translation
                        Mat rotation    = new MatOfDouble();
                        Mat translation = new MatOfDouble();
                        Cv2.SolvePnP(model, landmarks, cameraMatrix, coeffs, rotation, translation);

                        // find euler angles
                        var euler = Utility.GetEulerMatrix(rotation);

                        // calculate head rotation in degrees
                        var yaw   = 180 * euler.At <double>(0, 2) / Math.PI;
                        var pitch = 180 * euler.At <double>(0, 1) / Math.PI;
                        var roll  = 180 * euler.At <double>(0, 0) / Math.PI;

                        // looking straight ahead wraps at -180/180, so make the range smooth
                        pitch = Math.Sign(pitch) * 180 - pitch;

                        // calculate if the driver is facing forward
                        // the left/right angle must be in the -25..25 range
                        // the up/down angle must be in the -10..10 range
                        var facingForward =
                            yaw >= -25 && yaw <= 25 &&
                            pitch >= -10 && pitch <= 10;

                        // create a new model point in front of the nose, and project it into 2d
                        var poseModel      = new MatOfPoint3d(1, 1, new Point3d(0, 0, 1000));
                        var poseProjection = new MatOfPoint2d();
                        Cv2.ProjectPoints(poseModel, rotation, translation, cameraMatrix, coeffs, poseProjection);

                        // draw the key landmark points in yellow on the image
                        foreach (var i in new int[] { 30, 8, 36, 45, 48, 54 })
                        {
                            var point = shape.GetPart((uint)i);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }

                        // draw a line from the tip of the nose pointing in the direction of head pose
                        var landmark = landmarks.At <Point2d>(0);
                        var p        = poseProjection.At <Point2d>(0);
                        Dlib.DrawLine(
                            img,
                            new DlibDotNet.Point((int)landmark.X, (int)landmark.Y),
                            new DlibDotNet.Point((int)p.X, (int)p.Y),
                            color: new RgbPixel(0, 255, 255));

                        // draw a box around the face if it's facing forward
                        if (facingForward)
                        {
                            Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                        }
                    }

                    // export the modified image
                    Dlib.SaveJpeg(img, "output.jpg");
                }
        }
Example #10
        private static InputDataImages GetFeaturesValuesFromImage(string str)
        {
            var returnClass = new InputDataImages();

            using (var fd = Dlib.GetFrontalFaceDetector())
                // ... and Dlib Shape Detector
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    var img = Dlib.LoadImage <RgbPixel>(str);

                    // find all faces in the image
                    var faces = fd.Operator(img);
                    // for each face draw over the facial landmarks


                    // Create the CSV file and fill in the first line with the header
                    foreach (var face in faces)
                    {
                        // find the landmark points for this face
                        var shape = sp.Detect(img, face);

                        // draw the landmark points on the image
                        for (var i = 0; i < shape.Parts; i++)
                        {
                            var point = shape.GetPart((uint)i);
                            var rect  = new Rectangle(point);
                            Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                        }

                        /////////////// WEEK 9 LAB ////////////////

                        double[] LeftEyebrowDistances  = new double[4];
                        double[] RightEyebrowDistances = new double[4];

                        float LeftEyebrowSum  = 0;
                        float RightEyebrowSum = 0;

                        //LIP VARIABLES
                        double[] LeftLipDistances  = new double[4];
                        double[] RightLipDistances = new double[4];
                        float    LeftLipSum        = 0;
                        float    RightLipSum       = 0;


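                        // distances from the eyebrow landmarks to the corresponding inner eye corner (points 39 and 42)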
                        LeftEyebrowDistances[0] = (shape.GetPart(21) - shape.GetPart(39)).Length;
                        LeftEyebrowDistances[1] = (shape.GetPart(20) - shape.GetPart(39)).Length;
                        LeftEyebrowDistances[2] = (shape.GetPart(19) - shape.GetPart(39)).Length;
                        LeftEyebrowDistances[3] = (shape.GetPart(18) - shape.GetPart(39)).Length;

                        RightEyebrowDistances[0] = (shape.GetPart(22) - shape.GetPart(42)).Length;
                        RightEyebrowDistances[1] = (shape.GetPart(23) - shape.GetPart(42)).Length;
                        RightEyebrowDistances[2] = (shape.GetPart(24) - shape.GetPart(42)).Length;
                        RightEyebrowDistances[3] = (shape.GetPart(25) - shape.GetPart(42)).Length;


                        //LIP
                        LeftLipDistances[0] = (shape.GetPart(51) - shape.GetPart(33)).Length;
                        LeftLipDistances[1] = (shape.GetPart(50) - shape.GetPart(33)).Length;
                        LeftLipDistances[2] = (shape.GetPart(49) - shape.GetPart(33)).Length;
                        LeftLipDistances[3] = (shape.GetPart(48) - shape.GetPart(33)).Length;


                        RightLipDistances[0] = (shape.GetPart(51) - shape.GetPart(33)).Length;
                        RightLipDistances[1] = (shape.GetPart(52) - shape.GetPart(33)).Length;
                        RightLipDistances[2] = (shape.GetPart(53) - shape.GetPart(33)).Length;
                        RightLipDistances[3] = (shape.GetPart(54) - shape.GetPart(33)).Length;


                        for (int i = 0; i < 4; i++)
                        {
                            LeftEyebrowSum  += (float)(LeftEyebrowDistances[i] / LeftEyebrowDistances[0]);
                            RightEyebrowSum += (float)(RightEyebrowDistances[i] / RightEyebrowDistances[0]);
                        }

                        LeftLipSum += (float)(LeftLipDistances[1] / LeftLipDistances[0]);
                        LeftLipSum += (float)(LeftLipDistances[2] / LeftLipDistances[0]);
                        LeftLipSum += (float)(LeftLipDistances[3] / LeftLipDistances[0]);


                        RightLipSum += (float)(RightLipDistances[1] / RightLipDistances[0]);
                        RightLipSum += (float)(RightLipDistances[2] / RightLipDistances[0]);
                        RightLipSum += (float)(RightLipDistances[3] / RightLipDistances[0]);

                        double LipWidth  = (float)((shape.GetPart(48) - shape.GetPart(54)).Length / (shape.GetPart(33) - shape.GetPart(51)).Length);
                        double LipHeight = (float)((shape.GetPart(51) - shape.GetPart(57)).Length / (shape.GetPart(33) - shape.GetPart(51)).Length);

                        returnClass.LeftEyebrow  = LeftEyebrowSum;
                        returnClass.RightEyebrow = RightEyebrowSum;
                        returnClass.LeftLip      = LeftLipSum;
                        returnClass.RightLip     = RightLipSum;
                        returnClass.LipWidth     = (float)LipWidth;
                        returnClass.LipHeight    = (float)LipHeight;


                        // export the modified image
                        string filePath = "output" + ".jpg";
                        Dlib.SaveJpeg(img, filePath);
                    }
                }

            using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"TestingFeatureVectorValues.csv", true))
            {
                DirectoryInfo dr = new DirectoryInfo(str);
                //Console.WriteLine(dr.Parent.Name.ToString());
                string ParentFolderName = dr.Parent.Name.ToString();

                file.WriteLine(ParentFolderName + "," + returnClass.LeftEyebrow.ToString() + "," + returnClass.RightEyebrow.ToString()
                               + "," + returnClass.LeftLip.ToString() + "," + returnClass.RightLip.ToString() + "," + returnClass.LipWidth.ToString()
                               + "," + returnClass.LipHeight.ToString());
            }
            return(returnClass);
        }
Example #11
        static void Main(string[] args)
        {
            // FaceDetectionWith_API
            Location[] coord = TestImage(fileName, Model.Hog);


            // FaceDetectionWith_DLIB
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(fileName);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }

                Dlib.SaveJpeg(img, outputName);
            }


            // The first thing we are going to do is load all our models.  First, since we need to
            // find faces in the image we will need a face detector:
            using (var detector = Dlib.GetFrontalFaceDetector())
                // We will also use a face landmarking model to align faces to a standard pose:  (see face_landmark_detection_ex.cpp for an introduction)
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                    // And finally we load the DNN responsible for face recognition.
                    using (var net = DlibDotNet.Dnn.LossMetric.Deserialize("dlib_face_recognition_resnet_model_v1.dat"))

                        using (var img = Dlib.LoadImageAsMatrix <RgbPixel>(fileName))

                            using (var win = new ImageWindow(img))
                            {
                                var faces = new List <Matrix <RgbPixel> >();
                                foreach (var face in detector.Operator(img))
                                {
                                    var shape          = sp.Detect(img, face);
                                    var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                    var faceChip       = Dlib.ExtractImageChip <RgbPixel>(img, faceChipDetail);

                                    //faces.Add(move(face_chip));
                                    faces.Add(faceChip);

                                    win.AddOverlay(face);
                                }

                                if (!faces.Any())
                                {
                                    Console.WriteLine("No faces found in image!");
                                    return;
                                }

                                // This call asks the DNN to convert each face image in faces into a 128D vector.
                                // In this 128D vector space, images from the same person will be close to each other
                                // but vectors from different people will be far apart.  So we can use these vectors to
                                // identify if a pair of images are from the same person or from different people.
                                var faceDescriptors = net.Operator(faces);

                                // In particular, one simple thing we can do is face clustering.  This next bit of code
                                // creates a graph of connected faces and then uses the Chinese whispers graph clustering
                                // algorithm to identify how many people there are and which faces belong to whom.
                                var edges = new List <SamplePair>();
                                for (uint i = 0; i < faceDescriptors.Count; ++i)
                                {
                                    for (var j = i; j < faceDescriptors.Count; ++j)
                                    {
                                        // Faces are connected in the graph if they are close enough.  Here we check if
                                        // the distance between two face descriptors is less than 0.6, which is the
                                        // decision threshold the network was trained to use.  Although you can
                                        // certainly use any other threshold you find useful.
                                        var diff = faceDescriptors[i] - faceDescriptors[j];
                                        if (Dlib.Length(diff) < 0.6)
                                        {
                                            edges.Add(new SamplePair(i, j));
                                        }
                                    }
                                }

                                Dlib.ChineseWhispers(edges, 100, out var numClusters, out var labels);

                                // This will correctly indicate that there are 4 people in the image.
                                Console.WriteLine($"number of people found in the image: {numClusters}");


                                // Display the result in ImageList
                                var winClusters = new List <ImageWindow>();
                                for (var i = 0; i < numClusters; i++)
                                {
                                    winClusters.Add(new ImageWindow());
                                }
                                var tileImages = new List <Matrix <RgbPixel> >();
                                for (var clusterId = 0ul; clusterId < numClusters; ++clusterId)
                                {
                                    var temp = new List <Matrix <RgbPixel> >();
                                    for (var j = 0; j < labels.Length; ++j)
                                    {
                                        if (clusterId == labels[j])
                                        {
                                            temp.Add(faces[j]);
                                        }
                                    }

                                    winClusters[(int)clusterId].Title = $"face cluster {clusterId}";
                                    var tileImage = Dlib.TileImages(temp);
                                    tileImages.Add(tileImage);
                                    winClusters[(int)clusterId].SetImage(tileImage);
                                }


                                // Finally, let's print one of the face descriptors to the screen.
                                using (var trans = Dlib.Trans(faceDescriptors[0]))
                                {
                                    Console.WriteLine($"face descriptor for one face: {trans}");

                                    // It should also be noted that face recognition accuracy can be improved if jittering
                                    // is used when creating face descriptors.  In particular, to get 99.38% on the LFW
                                    // benchmark you need to use the jitter_image() routine to compute the descriptors,
                                    // like so:
                                    var jitterImages = JitterImage(faces[0]).ToArray();
                                    var ret          = net.Operator(jitterImages);
                                    using (var m = Dlib.Mat(ret))
                                        using (var faceDescriptor = Dlib.Mean <float>(m))
                                            using (var t = Dlib.Trans(faceDescriptor))
                                            {
                                                Console.WriteLine($"jittered face descriptor for one face: {t}");

                                                // If you use the model without jittering, as we did when clustering the bald guys, it
                                                // gets an accuracy of 99.13% on the LFW benchmark.  So jittering makes the whole
                                                // procedure a little more accurate but makes face descriptor calculation slower.

                                                Console.WriteLine("hit enter to terminate");
                                                Console.ReadKey();

                                                foreach (var jitterImage in jitterImages)
                                                {
                                                    jitterImage.Dispose();
                                                }

                                                foreach (var tileImage in tileImages)
                                                {
                                                    tileImage.Dispose();
                                                }

                                                foreach (var edge in edges)
                                                {
                                                    edge.Dispose();
                                                }

                                                foreach (var descriptor in faceDescriptors)
                                                {
                                                    descriptor.Dispose();
                                                }

                                                foreach (var face in faces)
                                                {
                                                    face.Dispose();
                                                }
                                            }
                                }
                            }

            System.Console.ReadLine();
        }
        // The main program entry point
        static void Main(string[] args)
        {
            bool use_mirror = false;

            // file paths
            string[] files = Directory.GetFiles("images", "*.*", SearchOption.AllDirectories);
            List <FullObjectDetection> shapes = new List <FullObjectDetection>();
            List <string> emotions            = new List <string>();

            // Set up Dlib Face Detector
            using (var fd = Dlib.GetFrontalFaceDetector())
                // ... and Dlib Shape Detector
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                {
                    // load input image
                    for (int i = 0; i < files.Length; i++)
                    {
                        var emotion = GetEmotion(files[i]);
                        var img     = Dlib.LoadImage <RgbPixel>(files[i]);

                        // find all faces in the image
                        var faces = fd.Operator(img);
                        // for each face draw over the facial landmarks
                        foreach (var face in faces)
                        {
                            // find the landmark points for this face
                            var shape = sp.Detect(img, face);
                            shapes.Add(shape);
                            emotions.Add(emotion);
                            // draw the landmark points on the image

                            for (var i2 = 0; i2 < shape.Parts; i2++)
                            {
                                var point = shape.GetPart((uint)i2);
                                var rect  = new Rectangle(point);

                                if (point == GetPoint(shape, 40) || point == GetPoint(shape, 22))
                                {
                                    Dlib.DrawRectangle(img, rect, color: new RgbPixel(0, 255, 0), thickness: 4);
                                }
                                else
                                {
                                    Dlib.DrawRectangle(img, rect, color: new RgbPixel(255, 255, 0), thickness: 4);
                                }
                            }
                        }

                        // export the modified image
                        Console.WriteLine(files[i]);
                        Dlib.SaveJpeg(img, "output_" + files[i]);
                    }

                    string header = "leftEyebrow,rightEyebrow,leftLip,rightLip,lipHeight,lipWidth,emotion\n";
                    System.IO.File.WriteAllText(@"feature_vectors.csv", header);
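                    // append one feature vector row per detected face shape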
                    for (var i = 0; i < shapes.Count; i++)
                    {
                        var shape   = shapes[i];
                        var emotion = emotions[i];
                        using (System.IO.StreamWriter file = new System.IO.StreamWriter(@"feature_vectors.csv", true))
                        {
                            file.WriteLine(GetLeftEyebrow(shape) + "," + GetRightEyebrow(shape) + "," +
                                           GetLeftLip(shape) + "," + GetRightLip(shape) + "," + GetLipWidth(shape) + "," + GetLipHeight(shape) +
                                           "," + emotion);
                            if (use_mirror)
                            {
                                file.WriteLine(GetRightEyebrow(shape) + "," + GetLeftEyebrow(shape) + "," +
                                               GetRightLip(shape) + "," + GetLeftLip(shape) + "," + GetLipWidth(shape) + "," + GetLipHeight(shape) +
                                               "," + emotion);
                            }
                        }
                    }
                }
        }