/// <summary>
/// Process the color 32.
/// </summary>
///
/// <remarks>
/// This method is used to process raw data from Unity webcam frame textures.
/// </remarks>
///
/// <param name="pixels"> The pixels. </param>
/// <param name="width">  The width. </param>
/// <param name="height"> The height. </param>
private void ProcessColor32(Color32[] pixels, Int32 width, Int32 height)
{
    //RectTransform face = GameObject.Find("T_Image").GetComponent<RectTransform>();

    // Convert raw ARGB data into a byte array.
    byte[] raw = Color32ArrayToByteArray(pixels);

    // Disable Average and SpikeSuppression. Needed only for single, unrelated images.
    // For video of the same person, adjust these to your needs (or remove both lines
    // for the default settings).
    (eda.Settings as EmotionDetectionAssetSettings).Average = 1;
    (eda.Settings as EmotionDetectionAssetSettings).SuppressSpikes = false;

    // Load the image into the detector and try to detect faces. This is the most
    // time-consuming part. Note that the supported formats are limited to 24 and
    // 32 bits RGB at the moment.
    if (eda.ProcessImage(raw, width, height, true))
    {
        msg.text = String.Format("{0} Face(s) detected.", eda.Faces.Count);

        // Process each detected face by detecting the 68 landmarks in each face.
        if (eda.ProcessFaces())
        {
            // Process landmarks into emotions using fuzzy logic.
            is_checked = true;

            if (eda.ProcessLandmarks())
            {
                // Extract results.
                Dictionary<string, double> emos = new Dictionary<string, double>();

                foreach (String emo in eda.Emotions)
                {
                    // Debug.LogFormat("{0} scores {1}.", emo, eda[0, emo]);

                    // Extract (averaged) emotions of the first face only.
                    emos[emo] = eda[0, emo];

                    //if ((emos[emo] >= 0.86 && ca_app == false) || (emos[emo] >= 0.86 && ca_app == true && ca_button == true))
                    if ((ca_app == false) || (ca_app == true && ca_button == true))
                    {
                        if (!is_waiting)
                        {
                            StartCoroutine("CheckTime");
                        }

                        if (is_waiting)
                        {
                            if (emos[emo] >= 0.86)
                            {
                                // Tally strong emotions, grouping Anger/Fear/Disgust/Sad as
                                // "Negative", and track the most frequent category so far.
                                if (emo == "Anger" || emo == "Fear" || emo == "Disgust" || emo == "Sad")
                                {
                                    neg++;
                                    if (findmax < neg)
                                    {
                                        findmax = neg;
                                        findemo = "Negative";
                                    }
                                }
                                if (emo == "Happy")
                                {
                                    hap++;
                                    if (findmax < hap)
                                    {
                                        findmax = hap;
                                        findemo = emo;
                                    }
                                }
                                if (emo == "Neutral")
                                {
                                    neu++;
                                    if (findmax < neu)
                                    {
                                        findmax = neu;
                                        findemo = emo;
                                    }
                                }
                                if (emo == "Surprise")
                                {
                                    sur++;
                                    if (findmax < sur)
                                    {
                                        findmax = sur;
                                        findemo = emo;
                                    }
                                }
                            }

                            is_start = false;
                        }
                    }

                    if (is_print)
                    {
                        // Show the dominant emotion and append it to today's log file.
                        EmoionStationery(findemo);
                        //GameObject.Find("Emo").GetComponent<Text>().text = emo;
                        string path = @"Assets\Emo_Saving\";
                        string datefile = DateTime.Now.ToString("yyyy_MM_dd");
                        File.AppendAllText(path + datefile + "_emofile.txt", findemo + "\r\n");
                        is_print = false;
                    }
                }

                //foreach (var temp in eda.Faces)
                //{
                //    Debug.Log(temp.Key);
                //    face.offsetMax = new Vector2(temp.Key.Right, temp.Key.Bottom);
                //    face.offsetMin = new Vector2(temp.Key.Left, temp.Key.Top);
                //    if ((temp.Key.Bottom - temp.Key.Top) * (temp.Key.Right - temp.Key.Left) > 80000)
                //    {
                //        webcam.Stop();
                //    }
                //}

                // Create the emotion strings.
                emotions.text = String.Join("\r\n", emos.OrderBy(p => p.Key)
                    .Select(p => String.Format("{0}={1:0.00}", p.Key, p.Value)).ToArray());
                //print("msg: " + emotions.text); // Log the result in the Console.

                is_checked = false;
            }
            else
            {
                emotions.text = "No emotions detected";
            }
        }
        else
        {
            emotions.text = "No landmarks detected";
        }
    }
    else
    {
        msg.text = "No Face(s) detected";
        //Debug.Log("Please show your face to the camera.");
    }
}
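// Illustration (an assumption, not part of the original asset sample): ProcessColor32
// is typically driven once per rendered frame from a MonoBehaviour. The "webcam"
// WebCamTexture field is assumed here; if this class already polls the webcam in
// Update, fold this body into that method instead of adding a new one.
private void PollWebcamFrame()
{
    // Skip frames where the webcam has not produced new data yet.
    if (webcam == null || !webcam.didUpdateThisFrame)
    {
        return;
    }

    // GetPixels32 returns the current frame as a Color32[] in row-major order,
    // matching the width/height reported by the WebCamTexture.
    Color32[] pixels = webcam.GetPixels32();
    ProcessColor32(pixels, webcam.width, webcam.height);
}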
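// A minimal sketch of the Color32ArrayToByteArray helper that ProcessColor32 calls.
// The project already defines this helper elsewhere, so treat this as a reference
// sketch of what such an implementation typically looks like, not code to paste in.
// It would require "using System.Runtime.InteropServices;" at the top of the file.
private static byte[] Color32ArrayToByteArray(Color32[] colors)
{
    if (colors == null || colors.Length == 0)
    {
        return new byte[0];
    }

    // Each Color32 is 4 bytes (r, g, b, a), so the buffer is 4 * pixel count.
    int length = Marshal.SizeOf(typeof(Color32)) * colors.Length;
    byte[] bytes = new byte[length];

    // Pin the managed array so the GC cannot move it while copying from its address.
    GCHandle handle = GCHandle.Alloc(colors, GCHandleType.Pinned);
    try
    {
        Marshal.Copy(handle.AddrOfPinnedObject(), bytes, 0, length);
    }
    finally
    {
        handle.Free();
    }

    return bytes;
}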