/// <summary>
/// Per-frame video capture processor.
/// </summary>
private bool ProcessTexture(WebCamTexture input, ref Texture2D output)
{
    if (!shouldProcessTexure)
    {
        if (faceController != null)
        {
            faceController.SetTrackingState(false);
        }
        return false;
    }

    // detect everything we're interested in
    processor.ProcessTexture(input, TextureParameters);

    if (faceController != null)
    {
        if (processor.Faces.Count > 0)
        {
            // size the tracking area once per detection streak
            if (shouldSetTrackingAreaSize)
            {
                shouldSetTrackingAreaSize = false;
                RectTransform surfaceRect = Surface.GetComponent<RectTransform>();
                faceController.SetTrackingAreaSize(surfaceRect.rect.width, surfaceRect.rect.height);
            }

            faceController.SetTrackingState(true);
            faceController.UpdateFaceElementPositions(processor.Faces[0]);
        }
        else
        {
            shouldSetTrackingAreaSize = true;
            faceController.SetTrackingState(false);
        }
    }

    // mark detected objects
    if (showLines)
    {
        processor.MarkDetected();
    }

    // processor.Image now holds the data we'd like to visualize
    if (showRenderTexture)
    {
        // if output is a valid texture, its buffer is re-used; otherwise it is re-created
        output = OpenCvSharp.Unity.MatToTexture(processor.Image, output);
    }

    return true;
}
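// The method above relies on a separate "faceController" component whose class is not
// shown in this snippet. From the three calls it makes, the contract it assumes looks
// roughly like the hypothetical interface below (names inferred from the calls, not
// taken from the original source):

public interface IFaceController
{
    // toggle whatever visuals track the face on and off
    void SetTrackingState(bool isTracking);

    // tell the controller how large the on-screen tracking area is,
    // so landmark coordinates can be mapped into it
    void SetTrackingAreaSize(float width, float height);

    // reposition the tracked elements to match one detected face
    void UpdateFaceElementPositions(OpenCvSharp.Demo.DetectedFace face);
}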
protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
{
    // detect everything we're interested in
    processor.ProcessTexture(input, TextureParameters);

    // mark detected objects
    processor.MarkDetected();

    // processor.Image now holds the data we'd like to visualize;
    // if output is a valid texture, its buffer is re-used; otherwise it is re-created
    output = Unity.MatToTexture(processor.Image, output);

    return true;
}
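// Each of these methods overrides ProcessTexture from the OpenCV plus Unity demo's
// WebCamera base class, which invokes it once per webcam frame. A minimal host class
// might look like the sketch below; the TextAsset fields and the Initialize call
// follow the shipped FaceDetectorScene demo, so verify the exact signature against
// your version of the asset:

using UnityEngine;
using OpenCvSharp.Demo;

public class FaceDetectorScene : WebCamera
{
    public TextAsset faces;    // Haar cascade XML for frontal faces
    public TextAsset eyes;     // Haar cascade XML for eyes
    public TextAsset shapes;   // facial landmark (shape predictor) data

    private FaceProcessorLive<WebCamTexture> processor;

    protected override void Awake()
    {
        base.Awake();

        processor = new FaceProcessorLive<WebCamTexture>();
        processor.Initialize(faces.text, eyes.text, shapes.bytes);
    }

    protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
    {
        processor.ProcessTexture(input, TextureParameters);
        processor.MarkDetected();
        output = Unity.MatToTexture(processor.Image, output);
        return true;
    }
}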
/// <summary>
/// Per-frame video capture processor.
/// </summary>
protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
{
    // detect everything we're interested in
    processor.ProcessTexture(input, TextureParameters);

    // normalize the reported face position from the processor's 128x96 grid
    // to roughly [-0.5, 0.5] around the frame center
    faceCenter = new Vector2((processor.faceX - 64) / 128f, (processor.faceY - 48) / 96f);

    // a reading at the far corner (~0.5, ~0.5) corresponds to faceX/faceY at the
    // grid maximum, which appears to be the no-detection case; treat it as centered
    if (faceCenter.x > 0.49f && faceCenter.y > 0.49f)
    {
        faceCenter = Vector2.zero;
    }

    // mark detected objects
    processor.MarkDetected();

    // processor.Image now holds the data we'd like to visualize;
    // if output is a valid texture, its buffer is re-used; otherwise it is re-created
    output = Unity.MatToTexture(processor.Image, output);

    return true;
}
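// faceCenter ends up roughly in [-0.5, 0.5] on both axes. A typical consumer of that
// value (hypothetical; parallaxStrength and the smoothing factor are illustrative,
// not from the original source) might ease an object toward it each frame:

void LateUpdate()
{
    Vector3 target = new Vector3(faceCenter.x * parallaxStrength,
                                 -faceCenter.y * parallaxStrength,
                                 transform.localPosition.z);
    transform.localPosition = Vector3.Lerp(transform.localPosition, target, Time.deltaTime * 5f);
}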
/// <summary>
/// Per-frame video capture processor.
/// </summary>
protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
{
    // detect everything we're interested in
    processor.ProcessTexture(input, TextureParameters);

    // mark detected objects
    processor.MarkDetected(false);

    // log the name of every face element that carries landmark marks
    foreach (DetectedFace face in processor.Faces)
    {
        foreach (DetectedObject sub in face.Elements)
        {
            if (sub.Marks != null)
            {
                UnityEngine.Debug.Log(sub.Name);
            }
        }
    }

    // processor.Image now holds the data we'd like to visualize;
    // if output is a valid texture, its buffer is re-used; otherwise it is re-created
    output = Unity.MatToTexture(processor.Image, output);

    return true;
}
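// Beyond logging element names, the landmark points themselves can be consumed. A
// hypothetical helper (assuming Marks is an OpenCvSharp.Point[] in image pixels, as
// in the demo's detector types):

static Vector2 ElementCenter(DetectedObject sub)
{
    float sumX = 0f, sumY = 0f;
    foreach (OpenCvSharp.Point p in sub.Marks)
    {
        sumX += p.X;
        sumY += p.Y;
    }
    return new Vector2(sumX / sub.Marks.Length, sumY / sub.Marks.Length);
}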
protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
{
    InternetConn = Application.internetReachability != NetworkReachability.NotReachable;

    positions.text = "X = " + xPos + " Y = " + yPos +
                     "\nxRat = " + xRatio + " yRat = " + yRatio +
                     "\nScaleShape = " + scaleshape;

    processor.ProcessTexture(input, TextureParameters);

    SpeechDetection speech = voiceCommand.GetComponent<SpeechDetection>();

    if (!speech.showingPhotos)
    {
        try
        {
            // landmark indices (assuming the standard 68-point layout): 19/24 are
            // mid-brow points (a face-width/scale proxy), 62/66 the inner-mouth
            // top/bottom, 28 the nose bridge
            float diffForScale = Mathf.Abs(processor.converted[19].X / xRatio - processor.converted[24].X / xRatio);
            float mouthOpened = Mathf.Abs(processor.converted[62].Y / yRatio - processor.converted[66].Y / yRatio);
            float x = processor.converted[28].X / xRatio;
            float y = processor.converted[28].Y / yRatio;

            if (speech.takingPhoto)
            {
                if (!oneTimePaused)
                {
                    oneTimePaused = true;
                }

                objects.transform.localScale = new Vector3(0.22f, 0.22f, 0.22f) * diffForScale;
                objects.transform.position = new Vector3(processor.converted[28].X / 25.2f, -processor.converted[28].Y / 22.6f, 20f)
                                           + new Vector3(-8.8f - diffForScale / 1.88f, 11.9f + diffForScale / 10, 0f);
            }
            else
            {
                if (oneTimePaused)
                {
                    ispaused = false;
                    oneTimePaused = false;
                }

                objects.transform.localScale = new Vector3(scaleshape, scaleshape, scaleshape) * diffForScale;
                objects.transform.position = new Vector3(x, -y, 20f)
                                           + new Vector3(xPos - diffForScale / 1.88f, yPos + diffForScale / 10, 0f);
            }

            // with no internet (so no speech recognition), an open mouth triggers the photo
            if (!InternetConn && mouthOpened > 1.4f && !photoTaken)
            {
                StartCoroutine(speech.TakeAPicture());
                photoTaken = true;
                Invoke("ResetMouth", 3f);
            }

            if (x > -1 && !ispaused && x != saveX)
            {
                // the face started moving: wake the mirror and reset the idle timer
                startSlideShow = false;
                reset = 0;
                ispaused = true;
                speech.StartMirror();
            }
            else if (x != saveX)
            {
                saveX = x;
                reset = 0;
            }
            else
            {
                // no movement: accumulate idle time, then fall back to idle / slideshow
                reset += Time.fixedDeltaTime;

                if (reset > 2f && ispaused)
                {
                    speech.Idle();
                    ispaused = false;
                }

                if (reset >= 6f && !startSlideShow)
                {
                    speech.StartSlideShow();
                    startSlideShow = true;
                }
            }
        }
        catch
        {
            // landmarks can be missing on frames with no detected face; skip this frame
        }
    }

    // mark detected objects
    if (!speech.takingPhoto)
    {
        processor.MarkDetected();
    }

    // processor.Image now holds the data we'd like to visualize;
    // if output is a valid texture, its buffer is re-used; otherwise it is re-created
    output = Unity.MatToTexture(processor.Image, output);

    return true;
}
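// Invoke("ResetMouth", 3f) above implies a method on the same component that re-arms
// the mouth-open photo trigger after three seconds; its likely minimal form
// (hypothetical, not shown in the original snippet):

private void ResetMouth()
{
    photoTaken = false;
}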