コード例 #1
0
    /// <summary>
    /// Per-frame video capture processor: runs face detection on the webcam
    /// frame, forwards tracking state/positions to the face controller, and
    /// optionally renders the annotated image into <paramref name="output"/>.
    /// </summary>
    /// <param name="input">Current webcam frame.</param>
    /// <param name="output">Destination texture; its buffer is re-used when valid, otherwise re-created.</param>
    /// <returns>True when the frame was processed; false when processing is disabled.</returns>
    private bool ProcessTexture(WebCamTexture input, ref Texture2D output)
    {
        if (!shouldProcessTexure)
        {
            // Processing disabled: make sure the controller knows tracking is off.
            if (faceController != null)
            {
                faceController.SetTrackingState(false);
            }
            return false;
        }

        // detect everything we're interested in
        processor.ProcessTexture(input, TextureParameters);

        if (faceController != null)
        {
            if (processor.Faces.Count > 0)
            {
                if (shouldSetTrackingAreaSize)
                {
                    // Push the surface dimensions once per tracking session;
                    // fetch the RectTransform once instead of twice.
                    shouldSetTrackingAreaSize = false;
                    var surfaceRect = Surface.GetComponent<RectTransform>().rect;
                    faceController.SetTrackingAreaSize(surfaceRect.width, surfaceRect.height);
                }
                faceController.SetTrackingState(true);
                faceController.UpdateFaceElementPositions(processor.Faces[0]);
            }
            else
            {
                // Lost the face: re-send the tracking area size when tracking resumes.
                shouldSetTrackingAreaSize = true;
                faceController.SetTrackingState(false);
            }
        }

        // mark detected objects
        if (showLines)
        {
            processor.MarkDetected();
        }

        // processor.Image now holds data we'd like to visualize.
        if (showRenderTexture)
        {
            // If output is a valid texture its buffer will be re-used, otherwise it will be re-created.
            output = OpenCvSharp.Unity.MatToTexture(processor.Image, output);
        }
        return true;
    }
コード例 #2
0
        /// <summary>
        /// Per-frame video capture processor: detects objects on the incoming
        /// frame, draws their markers, and publishes the annotated image.
        /// </summary>
        /// <param name="input">Current webcam frame.</param>
        /// <param name="output">Destination texture; its buffer is re-used when valid, otherwise re-created.</param>
        /// <returns>Always true (every frame is processed).</returns>
        protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
        {
            // Run detection over the current webcam frame.
            processor.ProcessTexture(input, TextureParameters);

            // Draw markers for everything the processor found.
            processor.MarkDetected();

            // Convert the annotated Mat back to a Unity texture;
            // a valid output texture has its buffer re-used, otherwise a new one is created.
            output = Unity.MatToTexture(processor.Image, output);

            return true;
        }
コード例 #3
0
        /// <summary>
        /// Per-frame video capture processor
        /// </summary>
        /// <summary>
        /// Per-frame video capture processor: detects the face, derives a
        /// normalized face-centre position, and publishes the annotated frame.
        /// </summary>
        /// <param name="input">Current webcam frame.</param>
        /// <param name="output">Destination texture; its buffer is re-used when valid, otherwise re-created.</param>
        /// <returns>Always true (every frame is processed).</returns>
        protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
        {
            // Run detection over the current webcam frame.
            processor.ProcessTexture(input, TextureParameters);

            // Map the raw face coordinates into a normalized, centred range.
            // NOTE(review): 64/128 and 48/96 presumably match the processor's
            // detection resolution — confirm against the processor settings.
            float centreX = (processor.faceX - 64) / 128f;
            float centreY = (processor.faceY - 48) / 96f;
            faceCenter = new Vector2(centreX, centreY);
            if (centreX > 0.49f && centreY > 0.49f)
            {
                // Both coordinates at the far corner: treat as "no face" and recentre.
                faceCenter = new Vector2(0, 0);
            }

            // Draw markers for the detected objects.
            processor.MarkDetected();

            // processor.Image now holds data we'd like to visualize;
            // a valid output texture has its buffer re-used, otherwise it is re-created.
            output = Unity.MatToTexture(processor.Image, output);

            return true;
        }
コード例 #4
0
        /// <summary>
        /// Per-frame video capture processor: detects faces, moves the tracked
        /// object to follow the first detected face, and shows the camera image
        /// on the display canvas.
        /// </summary>
        /// <param name="input">Current webcam frame.</param>
        /// <param name="output">Destination texture; its buffer is re-used when valid, otherwise re-created.</param>
        /// <returns>Always true (every frame is processed).</returns>
        protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
        {
            // detect faces
            processor.ProcessTexture(input, TextureParameters);

            // get regions of detected faces
            List<OpenCvSharp.Rect> faceBounds = processor.getFaceBounds();

            if (faceBounds.Count > 0)
            {
                // Follow the first detected face.
                OpenCvSharp.Rect foundFace = faceBounds[0];
                int faceCenterX = foundFace.TopLeft.X + (foundFace.BottomRight.X - foundFace.TopLeft.X) / 2;   // horizontal middle of the head
                // NOTE(review): -1.57f and -2.3f look like screen-to-world scale factors — confirm against the scene setup.
                object_move.transform.position = new Vector3(faceCenterX / -1.57f, foundFace.TopLeft.Y / -2.3f, 2);
            }

            // BUG FIX: store the converted texture back into `output` so its
            // buffer is re-used on subsequent frames. Previously the result was
            // never assigned, so when `output` started out invalid a brand-new
            // texture was allocated every single frame.
            output = Unity.MatToTexture(processor.Image, output);
            canvas_show.GetComponent<MeshRenderer>().material.mainTexture = output;
            return true;
        }
コード例 #5
0
        /// <summary>
        /// Per-frame video capture processor: runs detection, logs the name of
        /// every face element that carries landmark data, and publishes the
        /// processed frame.
        /// </summary>
        /// <param name="input">Current webcam frame.</param>
        /// <param name="output">Destination texture; its buffer is re-used when valid, otherwise re-created.</param>
        /// <returns>Always true (every frame is processed).</returns>
        protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
        {
            // Run detection over the current webcam frame.
            processor.ProcessTexture(input, TextureParameters);

            // Mark detected objects (passing false — see MarkDetected for the flag's meaning).
            processor.MarkDetected(false);

            // Log every face sub-element that has landmark marks attached.
            foreach (DetectedFace detectedFace in processor.Faces)
            {
                foreach (DetectedObject element in detectedFace.Elements)
                {
                    if (element.Marks == null)
                    {
                        continue;
                    }
                    UnityEngine.Debug.Log(element.Name);
                }
            }

            // processor.Image now holds data we'd like to visualize;
            // a valid output texture has its buffer re-used, otherwise it is re-created.
            output = Unity.MatToTexture(processor.Image, output);

            return true;
        }
コード例 #6
0
        /// <summary>
        /// Per-frame video capture processor for the smart-mirror scene: tracks
        /// face landmarks, positions the overlay objects, drives the photo /
        /// idle / slide-show state machine, and publishes the annotated frame.
        /// </summary>
        /// <param name="input">Current webcam frame.</param>
        /// <param name="output">Destination texture; its buffer is re-used when valid, otherwise re-created.</param>
        /// <returns>Always true (every frame is processed).</returns>
        protected override bool ProcessTexture(WebCamTexture input, ref Texture2D output)
        {
            // Collapse the reachability if/else into a single expression.
            InternetConn = Application.internetReachability != NetworkReachability.NotReachable;

            positions.text = "X = " + xPos.ToString() + "  Y = " + yPos.ToString() +
                             "\nxRat = " + xRatio.ToString() + "  yRat = " + yRatio.ToString() +
                             "\nScaleShape = " + scaleshape.ToString();

            processor.ProcessTexture(input, TextureParameters);

            // PERF FIX: fetch the SpeechDetection component once per frame
            // instead of repeating GetComponent for every one of the seven
            // accesses below.
            SpeechDetection speech = voiceCommand.GetComponent<SpeechDetection>();

            if (!speech.showingPhotos)
            {
                try
                {
                    // Landmark indices used below: 19/24 (scale reference),
                    // 62/66 (mouth opening), 28 (position anchor).
                    // NOTE(review): presumably dlib 68-point indices — confirm.
                    float diffforscale = Mathf.Abs(processor.converted[19].X / xRatio - processor.converted[24].X / xRatio);
                    float mouthOppened = Mathf.Abs(processor.converted[62].Y / yRatio - processor.converted[66].Y / yRatio);
                    float x            = processor.converted[28].X / xRatio;
                    float y            = processor.converted[28].Y / yRatio;
                    if (speech.takingPhoto)
                    {
                        if (!oneTimePaused)
                        {
                            oneTimePaused = true;
                        }
                        // Fixed pose/scale while the photo is being taken.
                        objects.transform.localScale = new Vector3(0.22f, 0.22f, 0.22f) * diffforscale;
                        objects.transform.position   = new Vector3(processor.converted[28].X / 25.2f, -processor.converted[28].Y / 22.6f, 20f) + new Vector3(-8.8f - diffforscale / 1.88f, 11.9f + diffforscale / 10, 0f);
                    }
                    else
                    {
                        if (oneTimePaused)
                        {
                            ispaused      = false;
                            oneTimePaused = false;
                        }
                        objects.transform.localScale = new Vector3(scaleshape, scaleshape, scaleshape) * diffforscale;
                        objects.transform.position   = new Vector3(x, -y, 20f) + new Vector3(xPos - diffforscale / 1.88f, yPos + diffforscale / 10, 0f);
                    }

                    // Offline fallback: opening the mouth triggers photo capture.
                    if (!InternetConn)
                    {
                        if (mouthOppened > 1.4f && photoTaken == false)
                        {
                            StartCoroutine(speech.TakeAPicture());
                            photoTaken = true;
                            Invoke("ResetMouth", 3f);
                        }
                    }

                    if (x > -1 && ispaused == false && x != saveX)
                    {
                        // Face moved while idle: wake the mirror up.
                        // (A duplicated `reset = 0;` assignment was removed here.)
                        startSlideShow = false;
                        reset          = 0;
                        ispaused       = true;
                        speech.StartMirror();
                    }
                    else if (x != saveX)
                    {
                        saveX = x;
                        reset = 0;
                    }
                    else
                    {
                        // Face stationary: accumulate idle time, drop to Idle
                        // after 2 s and start the slide show after 6 s.
                        reset += Time.fixedDeltaTime;
                        if (reset > 2f && ispaused == true)
                        {
                            speech.Idle();
                            ispaused = false;
                        }
                        if (reset >= 6f && startSlideShow == false)
                        {
                            speech.StartSlideShow();
                            startSlideShow = true;
                        }
                    }
                }
                catch (System.Exception)
                {
                    // Best-effort: indexing processor.converted throws when no
                    // face is detected this frame; skip the overlay update then.
                }
            }
            // mark detected objects (skip while a photo is being taken)
            if (!speech.takingPhoto)
            {
                processor.MarkDetected();
            }
            // processor.Image now holds data we'd like to visualize;
            // a valid output texture has its buffer re-used, otherwise it is re-created.
            output = Unity.MatToTexture(processor.Image, output);
            return true;
        }