        /// <summary>
        /// Gets the mat of the current frame.
        /// The Mat object's type is 'CV_8UC4', 'CV_8UC3', or 'CV_8UC1', depending on the outputColorFormat setting.
        /// Please do not dispose of the returned mat as it will be reused.
        /// </summary>
        /// <returns>The mat of the current frame.</returns>
        public override Mat GetMat()
        {
            if (!hasInitDone || !videoCapture.IsStreaming || latestImageBytes == null)
            {
                return((rotatedFrameMat != null) ? rotatedFrameMat : frameMat);
            }

            if (baseColorFormat == outputColorFormat)
            {
                MatUtils.copyToMat <byte>(latestImageBytes, frameMat);
            }
            else
            {
                MatUtils.copyToMat <byte>(latestImageBytes, baseMat);
                Imgproc.cvtColor(baseMat, frameMat, ColorConversionCodes(baseColorFormat, outputColorFormat));
            }

            if (rotatedFrameMat != null)
            {
                Core.rotate(frameMat, rotatedFrameMat, Core.ROTATE_90_CLOCKWISE);
                FlipMat(rotatedFrameMat, _flipVertical, _flipHorizontal);

                return(rotatedFrameMat);
            }
            else
            {
                FlipMat(frameMat, _flipVertical, _flipHorizontal);

                return(frameMat);
            }
        }
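Because GetMat() returns an internally reused buffer, a caller that needs to keep a frame should copy it first. A minimal sketch of hypothetical caller code, assuming a reference named 'helper' to this class:

        void ProcessCurrentFrame()
        {
            Mat frame = helper.GetMat();   // reused internal buffer; never Dispose() it
            Mat snapshot = frame.clone();  // independent copy that can safely outlive this call
            // ... work with snapshot ...
            snapshot.Dispose();
        }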
Code example #2
        public void read(object root_json)
        {
            IDictionary pmodels_json = (IDictionary)root_json;
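            // Expected JSON shape (a sketch inferred from the reads below):
            // { "reference": { "rows", "cols", "data": [...] }, "n_patches", "patch 0", ..., "patch n-1" }.
            // MiniJSON-style parsers surface numbers as long/double, hence the double casts.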

            IDictionary reference_json = (IDictionary)pmodels_json ["reference"];

            reference = new Mat((int)(long)reference_json ["rows"], (int)(long)reference_json ["cols"], CvType.CV_32F);
//              Debug.Log ("reference " + reference.ToString ());

            IList data_json = (IList)reference_json ["data"];

            float[] data  = new float[reference.rows() * reference.cols()];
            int     count = data_json.Count;

            for (int i = 0; i < count; i++)
            {
                data [i] = (float)(double)data_json [i];
            }
            MatUtils.copyToMat(data, reference);
//              Debug.Log ("reference dump " + reference.dump ());


            int n = (int)(long)pmodels_json ["n_patches"];

            patches = new List <PatchModel> (n);

            for (int i = 0; i < n; i++)
            {
                PatchModel patchModel = new PatchModel();
                patchModel.read(pmodels_json ["patch " + i]);

                patches.Add(patchModel);
            }
        }
Code example #3
        public void read(object root_json)
        {
            IDictionary detector_json = (IDictionary)root_json;

            detector_fname = (string)detector_json ["fname"];
//              Debug.Log ("detector_fname " + detector_fname);


            detector_offset = new Vector3((float)(double)detector_json ["x offset"], (float)(double)detector_json ["y offset"], (float)(double)detector_json ["z offset"]);
//              Debug.Log ("detector_offset " + detector_offset.ToString ());


            IDictionary reference_json = (IDictionary)detector_json ["reference"];


            reference = new Mat((int)(long)reference_json ["rows"], (int)(long)reference_json ["cols"], CvType.CV_32F);
//              Debug.Log ("reference " + reference.ToString ());

            IList data_json = (IList)reference_json ["data"];

            float[] data = new float[reference.rows() * reference.cols()];
            for (int i = 0; i < data_json.Count; i++)
            {
                data [i] = (float)(double)data_json [i];
            }
            MatUtils.copyToMat(data, reference);
//              Debug.Log ("reference dump " + reference.dump ());

            detector = new CascadeClassifier(Utils.getFilePath(detector_fname));
            //              detector = new CascadeClassifier (System.IO.Path.Combine (Application.streamingAssetsPath, detector_fname));
        }
Code example #4
        private Task OnRGBCameraUpdate(ZO.Sensors.ZORGBCamera camera, string cameraId, int width, int height, byte[] rgb24)
        {
            Mat rgbMat = new Mat(height, width, CvType.CV_8UC3);

            MatUtils.copyToMat <byte>(rgb24, rgbMat);
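            // Core.flip with flipCode 1 mirrors around the y-axis; 0 would flip vertically, -1 would flip both.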
            Core.flip(rgbMat, rgbMat, 1);  //IMPORTANT OR DETECTION WILL NOT WORK!!!!

            List <ZOArucoTrackerDetection> detectedMarkers = DetectMarkers(rgbMat);

            if (_debug == true)
            {
                if (_rgbMat == null)
                {
                    _rgbMat = new Mat(height, width, CvType.CV_8UC3);
                }
                rgbMat.copyTo(_rgbMat);
            }

            if (OnPublishDelegate != null)
            {
                OnPublishDelegate(this, new List <ZOArucoTrackerDetection>(detectedMarkers));
            }

            return(Task.CompletedTask);
        }
Code example #5
        protected override void UpdateTexture()
        {
            // Get the matrix
            switch (matCaptureMethod)
            {
            case MatCaptureMethod.GetRawTextureData_ByteArray:
                MatUtils.copyToMat(cameraSource.preview.GetRawTextureData(), frameMatrix);
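                // Texture2D raw data is laid out bottom-up while Mat rows run top-down, hence the vertical flip.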
                Core.flip(frameMatrix, frameMatrix, 0);
                break;

            case MatCaptureMethod.GetRawTextureData_NativeArray:

#if OPENCV_USE_UNSAFE_CODE && UNITY_2018_2_OR_NEWER
                // non-memory allocation.
                unsafe
                {
                    var ptr = (IntPtr)NativeArrayUnsafeUtility.GetUnsafeReadOnlyPtr(cameraSource.preview.GetRawTextureData <byte>());
                    MatUtils.copyToMat(ptr, frameMatrix);
                }
                Core.flip(frameMatrix, frameMatrix, 0);
#else
                MatUtils.copyToMat(cameraSource.preview.GetRawTextureData(), frameMatrix);
                Core.flip(frameMatrix, frameMatrix, 0);
                Imgproc.putText(frameMatrix, "NativeArray<T> GetRawTextureData() method can be used from Unity 2018.2 or later.", new Point(5, frameMatrix.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(255, 255, 255, 255), 1, Imgproc.LINE_AA, false);
#endif
                break;
            }

            ProcessImage(frameMatrix, grayMatrix, imageProcessingType);

            // Convert to Texture2D
            Utils.fastMatToTexture2D(frameMatrix, texture);
        }
Code example #6
        void clamp(float c)
        {
            float[] p_float = new float[p.total()];
            MatUtils.copyFromMat <float>(p, p_float);
            int p_cols = p.cols();

            float[] e_float = new float[e.total()];
            MatUtils.copyFromMat <float>(e, e_float);
            int e_cols = e.cols();

            double scale = p_float[0];
            int    rows  = e.rows();

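            // Clamp each coefficient to within c standard deviations of zero: e holds the per-mode
            // variances (negative entries mark the rigid components, which are skipped), and each
            // coefficient is measured relative to the scale term p[0].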
            for (int i = 0; i < rows; i++)
            {
                if (e_float[i * e_cols] < 0)
                {
                    continue;
                }
                float v = c * (float)Math.Sqrt(e_float[i * e_cols]);
                if (Math.Abs(p_float[i * p_cols] / scale) > v)
                {
                    if (p_float[i * p_cols] > 0)
                    {
                        p_float[i * p_cols] = (float)(v * scale);
                    }
                    else
                    {
                        p_float[i * p_cols] = (float)(-v * scale);
                    }
                }
            }
            MatUtils.copyToMat(p_float, p);
        }
Code example #7
        // Pastes faces onto the original frame.
        private void AlphaBlend_pixel(Mat fg, Mat bg, Mat alpha, Mat dst)
        {
            byte[] fg_byte = new byte[fg.total() * fg.channels()];
            MatUtils.copyFromMat <byte>(fg, fg_byte);
            byte[] bg_byte = new byte[bg.total() * bg.channels()];
            MatUtils.copyFromMat <byte>(bg, bg_byte);
            byte[] alpha_byte = new byte[alpha.total() * alpha.channels()];
            MatUtils.copyFromMat <byte>(alpha, alpha_byte);

            int pixel_i  = 0;
            int channels = (int)bg.channels();
            int total    = (int)bg.total();

            for (int i = 0; i < total; i++)
            {
                if (alpha_byte[i] == 0)
                {
                }
                else if (alpha_byte[i] == 255)
                {
                    bg_byte[pixel_i]     = fg_byte[pixel_i];
                    bg_byte[pixel_i + 1] = fg_byte[pixel_i + 1];
                    bg_byte[pixel_i + 2] = fg_byte[pixel_i + 2];
                }
                else
                {
                    bg_byte[pixel_i]     = (byte)(((255 - alpha_byte[i]) * bg_byte[pixel_i] + alpha_byte[i] * fg_byte[pixel_i]) >> 8);
                    bg_byte[pixel_i + 1] = (byte)(((255 - alpha_byte[i]) * bg_byte[pixel_i + 1] + alpha_byte[i] * fg_byte[pixel_i + 1]) >> 8);
                    bg_byte[pixel_i + 2] = (byte)(((255 - alpha_byte[i]) * bg_byte[pixel_i + 2] + alpha_byte[i] * fg_byte[pixel_i + 2]) >> 8);
                }
                pixel_i += channels;
            }

            MatUtils.copyToMat(bg_byte, dst);
        }
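Note that the '>> 8' above divides by 256 rather than 255, a fast approximation that can darken the result by at most one level. For comparison, a sketch of the exact per-channel blend:

        private static byte BlendExact(byte fg, byte bg, byte alpha)
        {
            // Rounded division by 255 instead of the '>> 8' shortcut.
            return (byte)((alpha * fg + (255 - alpha) * bg + 127) / 255);
        }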
Code example #8
        Mat calc_simil(Mat pts)
        {
            float[] pts_float = new float[pts.total()];
            MatUtils.copyFromMat <float>(pts, pts_float);
            int pts_cols = pts.cols();

            //compute translation
            int   n = pts.rows() / 2;
            float mx = 0, my = 0;

            for (int i = 0; i < n; i++)
            {
                mx += pts_float[(2 * pts_cols) * i];
                my += pts_float[((2 * pts_cols) * i) + 1];
            }
            using (Mat p = new Mat(2 * n, 1, CvType.CV_32F))
            {
                float[] p_float = new float[p.total()];
                MatUtils.copyFromMat <float>(p, p_float);
                int p_cols = p.cols();

                mx /= n;
                my /= n;
                for (int i = 0; i < n; i++)
                {
                    p_float[(2 * p_cols) * i]       = pts_float[(2 * pts_cols) * i] - mx;
                    p_float[((2 * p_cols) * i) + 1] = pts_float[((2 * pts_cols) * i) + 1] - my;
                }
                MatUtils.copyToMat(p_float, p);

                //compute rotation and scale
                float[] reference_float = new float[reference.total()];
                MatUtils.copyFromMat <float>(reference, reference_float);
                int reference_cols = reference.cols();

                float a = 0, b = 0, c = 0;
                for (int i = 0; i < n; i++)
                {
                    a += reference_float[(2 * reference_cols) * i] * reference_float[(2 * reference_cols) * i] +
                         reference_float[((2 * reference_cols) * i) + 1] * reference_float[((2 * reference_cols) * i) + 1];
                    b += reference_float[(2 * reference_cols) * i] * p_float[(2 * p_cols) * i] +
                         reference_float[((2 * reference_cols) * i) + 1] * p_float[((2 * p_cols) * i) + 1];
                    c += reference_float[(2 * reference_cols) * i] * p_float[((2 * p_cols) * i) + 1] -
                         reference_float[((2 * reference_cols) * i) + 1] * p_float[(2 * p_cols) * i];
                }
                b /= a;
                c /= a;
                float scale = (float)Math.Sqrt(b * b + c * c), theta = (float)Math.Atan2(c, b);
                float sc = scale * (float)Math.Cos(theta), ss = scale * (float)Math.Sin(theta);

                Mat returnMat = new Mat(2, 3, CvType.CV_32F);
                returnMat.put(0, 0, sc, -ss, mx, ss, sc, my);

                return(returnMat);
            }
        }
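The returned 2x3 matrix is laid out row-major as [sc, -ss, mx; ss, sc, my]. As a sketch, applying such a similarity transform to a point would look like this (applyTransform is a hypothetical helper, shown only for illustration):

        Point applyTransform(Mat S, Point pt)
        {
            float[] s = new float[6];
            MatUtils.copyFromMat <float>(S, s); // row-major: [s0 s1 s2; s3 s4 s5]
            return new Point(s[0] * pt.x + s[1] * pt.y + s[2],
                             s[3] * pt.x + s[4] * pt.y + s[5]);
        }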
Code example #9
        public void CaptureFrame(Mat matrix)
        {
            if (uprightBuffer == null)
            {
                return;
            }

            MatUtils.copyToMat(uprightBuffer, matrix);
            Core.flip(matrix, matrix, 0);
        }
Code example #10
        public void read(object root_json)
        {
            IDictionary smodel_json = (IDictionary)root_json;

            IDictionary V_json = (IDictionary)smodel_json ["V"];

            V = new Mat((int)(long)V_json ["rows"], (int)(long)V_json ["cols"], CvType.CV_32F);
//              Debug.Log ("V " + V.ToString ());

            IList V_data_json = (IList)V_json ["data"];

            float[] V_data = new float[V.rows() * V.cols()];
            for (int i = 0; i < V_data_json.Count; i++)
            {
                V_data [i] = (float)(double)V_data_json [i];
            }
            MatUtils.copyToMat(V_data, V);
//              Debug.Log ("V dump " + V.dump ());



            IDictionary e_json = (IDictionary)smodel_json ["e"];

            e = new Mat((int)(long)e_json ["rows"], (int)(long)e_json ["cols"], CvType.CV_32F);
//              Debug.Log ("e " + e.ToString ());

            IList e_data_json = (IList)e_json ["data"];

            float[] e_data = new float[e.rows() * e.cols()];
            for (int i = 0; i < e_data_json.Count; i++)
            {
                e_data [i] = (float)(double)e_data_json [i];
            }
            MatUtils.copyToMat(e_data, e);
//              Debug.Log ("e dump " + e.dump ());



            IDictionary C_json = (IDictionary)smodel_json ["C"];

            C = new Mat((int)(long)C_json ["rows"], (int)(long)C_json ["cols"], CvType.CV_32S);
//              Debug.Log ("C " + C.ToString ());

            IList C_data_json = (IList)C_json ["data"];

            int[] C_data = new int[C.rows() * C.cols()];
            for (int i = 0; i < C_data_json.Count; i++)
            {
                C_data [i] = (int)(long)C_data_json [i];
            }
            MatUtils.copyToMat(C_data, C);
//              Debug.Log ("C dump " + C.dump ());

            p = Mat.zeros(e.rows(), 1, CvType.CV_32F);
        }
Code example #11
        void DoProcess()
        {
            if (!(mat.Value is OpenCVForUnityPlayMakerActions.Mat))
            {
                LogError("mat is not initialized. Add Action \"newMat\".");
                return;
            }
            OpenCVForUnity.CoreModule.Mat wrapped_mat = OpenCVForUnityPlayMakerActionsUtils.GetWrappedObject <OpenCVForUnityPlayMakerActions.Mat, OpenCVForUnity.CoreModule.Mat>(mat);

            MatUtils.copyToMat <int>(array.intValues, wrapped_mat);
        }
Code example #12
        public Point[] calc_peaks(Mat im, Point[] points, Size ssize)
        {
            int n = points.Length;

            using (Mat pt = (new MatOfPoint2f(points)).reshape(1, 2 * n)) using (Mat S = calc_simil(pt)) using (Mat Si = inv_simil(S))
                    {
                        float[] pt_float = new float[pt.total()];
                        MatUtils.copyFromMat <float>(pt, pt_float);
                        int pt_cols = pt.cols();

                        float[] S_float = new float[S.total()];
                        MatUtils.copyFromMat <float>(S, S_float);
                        int S_cols = S.cols();

                        float[] A_float = new float[2 * 3];

                        Point[] pts = apply_simil(Si, points);

                        for (int i = 0; i < n; i++)
                        {
                            Size wsize = new Size(ssize.width + patches[i].patch_size().width, ssize.height + patches[i].patch_size().height);
                            using (Mat A = new Mat(2, 3, CvType.CV_32F))
                            {
                                MatUtils.copyFromMat <float>(A, A_float);
                                int A_cols = A.cols();

                                A_float[0]                = S_float[0];
                                A_float[1]                = S_float[1];
                                A_float[1 * A_cols]       = S_float[1 * S_cols];
                                A_float[(1 * A_cols) + 1] = S_float[(1 * S_cols) + 1];
                                A_float[2]                = (float)(pt_float[(2 * pt_cols) * i] -
                                                                    (A_float[0] * (wsize.width - 1) / 2 + A_float[1] * (wsize.height - 1) / 2));
                                A_float[(1 * A_cols) + 2] = (float)(pt_float[((2 * pt_cols) * i) + 1] -
                                                                    (A_float[1 * A_cols] * (wsize.width - 1) / 2 + A_float[(1 * A_cols) + 1] * (wsize.height - 1) / 2));

                                MatUtils.copyToMat(A_float, A);

                                using (Mat I = new Mat())
                                {
                                    Imgproc.warpAffine(im, I, A, wsize, Imgproc.INTER_LINEAR + Imgproc.WARP_INVERSE_MAP);
                                    using (Mat R = patches[i].calc_response(I, false))
                                    {
                                        Core.MinMaxLocResult minMaxLocResult = Core.minMaxLoc(R);
                                        pts[i].x = pts[i].x + minMaxLocResult.maxLoc.x - 0.5 * ssize.width;
                                        pts[i].y = pts[i].y + minMaxLocResult.maxLoc.y - 0.5 * ssize.height;
                                    }
                                }
                            }
                        }
                        return(apply_simil(S, pts));
                    }
        }
Code example #13
        void DoProcess()
        {
            System.Int64[] wrapped_array = OpenCVForUnityPlayMakerActionsUtils.GetWrappedObject <OpenCVForUnityPlayMakerActions.LongArray, System.Int64[]>(array);

            if (!(mat.Value is OpenCVForUnityPlayMakerActions.Mat))
            {
                LogError("mat is not initialized. Add Action \"newMat\".");
                return;
            }
            OpenCVForUnity.CoreModule.Mat wrapped_mat = OpenCVForUnityPlayMakerActionsUtils.GetWrappedObject <OpenCVForUnityPlayMakerActions.Mat, OpenCVForUnity.CoreModule.Mat>(mat);

            MatUtils.copyToMat <long>(wrapped_array, wrapped_mat);
        }
Code example #14
        /// <summary>
        /// Handles the event of a new image getting captured.
        /// </summary>
        /// <param name="imageData">The raw data of the image.</param>
        private void OnCaptureRawImageComplete(byte[] imageData)
        {
            lock (_cameraLockObject)
            {
                _isCapturing = false;
            }
            //// Initialize to 8x8 texture so there is no discrepancy
            //// between uninitialized captures and error texture
            //Texture2D texture = new Texture2D(8, 8);
            //bool status = texture.LoadImage(imageData);

            //if (status && (texture.width != 8 && texture.height != 8))
            //{
            //    _previewObject.SetActive(true);
            //    Renderer renderer = _previewObject.GetComponent<Renderer>();
            //    if (renderer != null)
            //    {
            //        renderer.material.mainTexture = texture;
            //    }
            //}

            Mat buff = new Mat(1, imageData.Length, CvType.CV_8UC1);

            MatUtils.copyToMat <byte>(imageData, buff);

            Mat imgMat = Imgcodecs.imdecode(buff, Imgcodecs.IMREAD_COLOR);

            //            Debug.Log ("imgMat.ToString() " + imgMat.ToString ());
            buff.Dispose();

            Run(imgMat);

            Imgproc.cvtColor(imgMat, imgMat, Imgproc.COLOR_BGR2RGB);

            Texture2D outputTexture = new Texture2D(imgMat.width(), imgMat.height(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(imgMat, outputTexture);

            imgMat.Dispose();

            //if (status && (texture.width != 8 && texture.height != 8))
            //{
            _previewObject.SetActive(true);
            Renderer renderer = _previewObject.GetComponent <Renderer>();

            if (renderer != null)
            {
                renderer.material.mainTexture = outputTexture;
            }
            //}
        }
Code example #15
        void Update()
        {
            if (reader != null)
            {
                ColorFrame frame = reader.AcquireLatestFrame();

                if (frame != null)
                {
                    frame.CopyConvertedFrameDataToArray(data, ColorImageFormat.Rgba);

                    frame.Dispose();
                    frame = null;
                }
            }
            else
            {
                return;
            }

            MatUtils.copyToMat(data, rgbaMat);


            if (filterType == FilterTypePreset.NONE)
            {
                Imgproc.putText(rgbaMat, "Filter Type: NONE " + texture.width + "x" + texture.height, new Point(5, texture.height - 5), Imgproc.FONT_HERSHEY_PLAIN, 4.0, new Scalar(255, 0, 0, 255), 3);
            }
            else if (filterType == FilterTypePreset.SEPIA)
            {
                Core.transform(rgbaMat, rgbaMat, sepiaKernel);

                Imgproc.putText(rgbaMat, "Filter Type: SEPIA " + texture.width + "x" + texture.height, new Point(5, texture.height - 5), Imgproc.FONT_HERSHEY_PLAIN, 4.0, new Scalar(255, 0, 0, 255), 3);
            }
            else if (filterType == FilterTypePreset.PIXELIZE)
            {
                Imgproc.resize(rgbaMat, pixelizeIntermediateMat, pixelizeSize0, 0.1, 0.1, Imgproc.INTER_NEAREST);
                Imgproc.resize(pixelizeIntermediateMat, rgbaMat, rgbaMat.size(), 0.0, 0.0, Imgproc.INTER_NEAREST);

                Imgproc.putText(rgbaMat, "Filter Type: PIXELIZE " + texture.width + "x" + texture.height, new Point(5, texture.height - 5), Imgproc.FONT_HERSHEY_PLAIN, 4.0, new Scalar(255, 0, 0, 255), 3);
            }
            else if (filterType == FilterTypePreset.COMIC)
            {
                comicFilter.Process(rgbaMat, rgbaMat);

                Imgproc.putText(rgbaMat, "Filter Type: COMIC " + texture.width + "x" + texture.height, new Point(5, texture.height - 5), Imgproc.FONT_HERSHEY_PLAIN, 4.0, new Scalar(255, 0, 0, 255), 3);
            }

            Utils.matToTexture2D(rgbaMat, texture);
        }
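For reference, the sepiaKernel above is a 4x4 CV_32F matrix that Core.transform applies to every RGBA pixel. A sketch with typical sepia coefficients (the exact values this sample uses are an assumption):

        Mat sepiaKernel = new Mat(4, 4, CvType.CV_32F);
        sepiaKernel.put(0, 0,
            0.393f, 0.769f, 0.189f, 0f,  // R' from (R, G, B, A)
            0.349f, 0.686f, 0.168f, 0f,  // G'
            0.272f, 0.534f, 0.131f, 0f,  // B'
            0f,     0f,     0f,     1f); // A' passes through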
Code example #16
        public void read(object root_json)
        {
            IDictionary pmodel_json = (IDictionary)root_json;

            IDictionary P_json = (IDictionary)pmodel_json["P"];

            P = new Mat((int)(long)P_json["rows"], (int)(long)P_json["cols"], CvType.CV_32F);
            //Debug.Log ("P " + P.ToString ());

            IList P_data_json = (IList)P_json["data"];

            float[] P_data = new float[P.rows() * P.cols()];
            for (int i = 0; i < P_data_json.Count; i++)
            {
                P_data[i] = (float)(double)P_data_json[i];
            }
            MatUtils.copyToMat(P_data, P);
        }
Code example #17
        /// <summary>
        /// Processes the received frame, converts the image to grayscale if requested, and invokes the next photo request.
        /// </summary>
        private void OnCapturedPhotoToMemory(PhotoCapture.PhotoCaptureResult result, PhotoCaptureFrame photoCaptureFrame)
        {
            if (_stopped?.Task != null)
            {
                return;
            }
            if (result.resultType == PhotoCapture.CaptureResultType.UnknownError)
            {
                return;
            }
            if (photoCaptureFrame == null)
            {
                return;
            }
            Size size = new Size(FrameWidth, (double)FrameHeight * 3 / 2); // NV12 stores a full-height luminance (grayscale) plane plus chrominance at half vertical resolution, hence height * 3 / 2 rows. <see href="https://docs.microsoft.com/en-us/windows/win32/medfound/recommended-8-bit-yuv-formats-for-video-rendering#nv12"/>.

            _image = new Mat(size, CvType.CV_8UC1);
            List <byte> imageBuffer = new List <byte>();

            photoCaptureFrame?.CopyRawImageDataIntoBuffer(imageBuffer);
            MatUtils.copyToMat(imageBuffer.ToArray(), _image);

            if (_format == ColorFormat.Grayscale)
            {
                Imgproc.cvtColor(_image, _image, Imgproc.COLOR_YUV2GRAY_NV12);
            }

            Matrix4x4 cameraToWorldMatrix = Matrix4x4.identity;

            photoCaptureFrame?.TryGetCameraToWorldMatrix(out cameraToWorldMatrix);
            CameraExtrinsic extrinsic = new CameraExtrinsic(cameraToWorldMatrix);

            Matrix4x4 projectionMatrix = Matrix4x4.identity;

            photoCaptureFrame?.TryGetProjectionMatrix(out projectionMatrix);
            CameraIntrinsic intrinsic = new CameraIntrinsic(projectionMatrix);

            CameraFrame           cameraFrame = new CameraFrame(_image, intrinsic, extrinsic, FrameWidth, FrameHeight, FrameCount++, _format);
            FrameArrivedEventArgs args        = new FrameArrivedEventArgs(cameraFrame);

            FrameArrived?.Invoke(this, args);

            _photoCaptureObject?.TakePhotoAsync(OnCapturedPhotoToMemory);
        }
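The 'height * 3 / 2' in the Size above follows from the NV12 layout: a full-resolution 8-bit Y (luma) plane followed by an interleaved UV plane at half vertical resolution. As a sketch:

        int Nv12BufferSize(int width, int height)
        {
            int yPlane  = width * height;       // full-resolution luma
            int uvPlane = width * (height / 2); // interleaved chroma, half height
            return yPlane + uvPlane;            // == width * height * 3 / 2
        }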
Code example #18
        public void CaptureFrame(Mat matrix)
        {
            if (preview == null)
            {
                return;
            }

#if OPENCV_USE_UNSAFE_CODE && UNITY_2018_2_OR_NEWER
            unsafe
            {
                var ptr = (IntPtr)NativeArrayUnsafeUtility.GetUnsafeReadOnlyPtr(preview.GetRawTextureData <byte>());
                MatUtils.copyToMat(ptr, matrix);
            }
            Core.flip(matrix, matrix, 0);
#else
            MatUtils.copyToMat(preview.GetRawTextureData(), matrix);
            Core.flip(matrix, matrix, 0);
#endif
        }
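A minimal sketch of hypothetical caller code; the destination Mat must match the preview texture's size and channel count (an RGBA32 preview is assumed here):

        Mat frame = new Mat(preview.height, preview.width, CvType.CV_8UC4);
        CaptureFrame(frame);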
Code example #19
        /// <summary>
        /// Invoked on each received video frame. Extracts the image according to the <see cref="ColorFormat"/> and invokes the <see cref="FrameArrived"/> event containing a <see cref="CameraFrame"/>.
        /// </summary>
        private unsafe void OnFrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
        {
            if (sender == null)
            {
                throw new ArgumentNullException(nameof(sender));
            }
            if (args == null)
            {
                throw new ArgumentNullException(nameof(args));
            }
            using (MediaFrameReference frame = sender.TryAcquireLatestFrame())
            {
                if (frame == null)
                {
                    return;
                }
                SoftwareBitmap originalSoftwareBitmap = frame.VideoMediaFrame?.SoftwareBitmap;
                if (originalSoftwareBitmap == null)
                {
                    _logger.LogWarning("Received frame without image.");
                    return;
                }

                CameraExtrinsic extrinsic = new CameraExtrinsic(frame.CoordinateSystem, WorldOrigin);
                CameraIntrinsic intrinsic = new CameraIntrinsic(frame.VideoMediaFrame.CameraIntrinsics);

                using (var input = originalSoftwareBitmap.LockBuffer(BitmapBufferAccessMode.Read))
                    using (var inputReference = input.CreateReference())
                    {
                        byte *inputBytes;
                        uint  inputCapacity;
                        ((IMemoryBufferByteAccess)inputReference).GetBuffer(out inputBytes, out inputCapacity);
                        MatUtils.copyToMat((IntPtr)inputBytes, _bitmap);
                        int thisFrameCount = Interlocked.Increment(ref FrameCount);

                        // TODO: Check out of using block
                        CameraFrame           cameraFrame = new CameraFrame(_bitmap, intrinsic, extrinsic, FrameWidth, FrameHeight, (uint)thisFrameCount, _format);
                        FrameArrivedEventArgs eventArgs   = new FrameArrivedEventArgs(cameraFrame);
                        FrameArrived?.Invoke(this, eventArgs);
                    }
                originalSoftwareBitmap?.Dispose();
            }
        }
Code example #20
        /// <summary>
        /// Invoked if the NV12 to RGB conversion is complete and the data is ready to be read to the CPU.
        /// </summary>
        private void OnCompleteReadback(AsyncGPUReadbackRequest request)
        {
            if (request.hasError)
            {
                Debug.LogError("GPU readback error");
                return;
            }

            MatUtils.copyToMat(request.GetData <uint>(), _rgb);
            Core.flip(_rgb, _rgb, 0); // image is flipped on x-axis
            CameraFrame           newFrame = new CameraFrame(_rgb, CurrentCameraFrame.Intrinsic, CurrentCameraFrame.Extrinsic, CurrentCameraFrame.Width, CurrentCameraFrame.Height, CurrentCameraFrame.FrameCount, Format);
            FrameArrivedEventArgs args     = new FrameArrivedEventArgs(newFrame);

            _frame = newFrame;
            FrameArrived?.Invoke(this, args);
            FPSUtils.VideoTick();
            NewFrameAvailable = true;
            IsProcessingFrame = false;
        }
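For context, OnCompleteReadback is the completion callback of a Unity AsyncGPUReadback request, issued roughly as below; '_rgbRenderTexture' is an assumed name for the RenderTexture holding the converted RGB image:

        // Requires 'using UnityEngine.Rendering;'.
        AsyncGPUReadback.Request(_rgbRenderTexture, 0, TextureFormat.RGBA32, OnCompleteReadback);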
Code example #21
        public void Update()
        {
            if (!_available)
            {
                return;
            }
            if (!_cameraTexture.didUpdateThisFrame)
            {
                return;
            }
            Color32[] pixels32 = _cameraTexture.GetPixels32();
            Utils.setDebugMode(true);
            Mat argbMat = new Mat(_targetVideoHeight, _targetVideoWidth, CvType.CV_8UC4);

            MatUtils.copyToMat(pixels32, argbMat);
            if (argbMat.empty())
            {
                return;
            }
            // workaround obs cam: drop frame if grey / empty.
            double[] values = argbMat.get(0, 0);
            if (values[0] == 128 && values[1] == 129 && values[2] == 127 && values[3] == 255)
            {
                return;
            }
            Mat yuvMat = new Mat(_targetVideoHeight * 2 / 3, _targetVideoWidth, CvType.CV_8UC1);

            Imgproc.cvtColor(argbMat, yuvMat, Imgproc.COLOR_BGRA2YUV_I420);
            Mat submat = yuvMat.submat(0, _targetVideoHeight, 0, _targetVideoWidth);

            Core.flip(submat, submat, 0);
            Utils.setDebugMode(false);
            CameraIntrinsic       intrinsic = new CameraIntrinsic();
            CameraExtrinsic       extrinsic = new CameraExtrinsic(Matrix4x4.identity);
            CameraFrame           frame     = new CameraFrame(submat, intrinsic, extrinsic, _targetVideoWidth, _targetVideoHeight, frameCount++, ColorFormat.Unknown);
            FrameArrivedEventArgs args      = new FrameArrivedEventArgs(frame);

            FrameArrived?.Invoke(this, args);
        }
Code example #22
        public void read(object root_json)
        {
            IDictionary detector_json = (IDictionary)root_json;

            detector_fname = (string)detector_json["fname"];

            detector_offset = new Vector3((float)(double)detector_json["x offset"], (float)(double)detector_json["y offset"], (float)(double)detector_json["z offset"]);

            IDictionary reference_json = (IDictionary)detector_json["reference"];

            reference = new Mat((int)(long)reference_json["rows"], (int)(long)reference_json["cols"], CvType.CV_32F);

            IList data_json = (IList)reference_json["data"];

            float[] data = new float[reference.rows() * reference.cols()];
            for (int i = 0; i < data_json.Count; i++)
            {
                data[i] = (float)(double)data_json[i];
            }
            MatUtils.copyToMat(data, reference);

            detector = new CascadeClassifier(Utils.getFilePath(detector_fname));
        }
Code example #23
        public ComicFilter(int blackThresh = 60, int grayThresh = 120, int thickness = 5, bool useNoiseFilter = true)
        {
            this.blackThresh    = blackThresh;
            this.drawMainLine   = (thickness != 0);
            this.useNoiseFilter = useNoiseFilter;

            grayLUT = new Mat(1, 256, CvType.CV_8UC1);
            byte[] lutArray = new byte[256];
            for (int i = 0; i < lutArray.Length; i++)
            {
                if (blackThresh <= i && i < grayThresh)
                {
                    lutArray[i] = 255;
                }
            }
            MatUtils.copyToMat(lutArray, grayLUT);

            if (drawMainLine)
            {
                kernel_dilate = new Mat(thickness, thickness, CvType.CV_8UC1, new Scalar(1));

                int erode = (thickness >= 5) ? 2 : 1;
                kernel_erode = new Mat(erode, erode, CvType.CV_8UC1, new Scalar(1));

                int blur = (thickness >= 4) ? thickness - 1 : 3;
                blurSize = new Size(blur, blur);

                contrastAdjustmentsLUT = new Mat(1, 256, CvType.CV_8UC1);
                byte[] contrastAdjustmentsLUTArray = new byte[256];
                for (int i = 0; i < contrastAdjustmentsLUTArray.Length; i++)
                {
                    int a = (int)(i * 1.5f);
                    contrastAdjustmentsLUTArray[i] = (a > byte.MaxValue) ? (byte)255 : (byte)a;
                }
                MatUtils.copyToMat(contrastAdjustmentsLUTArray, contrastAdjustmentsLUT);
            }
        }
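The 1x256 LUTs built above are intended for Core.LUT, which maps every pixel value v of the source to lut[v]. A usage sketch (grayMat and maskMat are hypothetical CV_8UC1 Mats):

        // Pixels in [blackThresh, grayThresh) map to 255 (white); all others map to 0.
        Core.LUT(grayMat, grayLUT, maskMat);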
Code example #24
        Mat inv_simil(Mat S)
        {
            float[] S_float = new float[S.total()];
            MatUtils.copyFromMat <float> (S, S_float);
            int S_cols = S.cols();

            Mat   Si = new Mat(2, 3, CvType.CV_32F);
            float d  = S_float [0] * S_float [(1 * S_cols) + 1] - S_float [1 * S_cols] * S_float [1];

            float[] Si_float = new float[Si.total()];
            MatUtils.copyFromMat <float> (Si, Si_float);
            int Si_cols = Si.cols();

            Si_float [0] = S_float [(1 * S_cols) + 1] / d;
            Si_float [1] = -S_float [1] / d;
            Si_float [(1 * Si_cols) + 1] = S_float [0] / d;
            Si_float [1 * Si_cols]       = -S_float [1 * S_cols] / d;

            MatUtils.copyToMat(Si_float, Si);

            Mat Ri = new Mat(Si, new OpenCVForUnity.CoreModule.Rect(0, 0, 2, 2));


            Mat negaRi = new Mat();

            Core.multiply(Ri, new Scalar(-1), negaRi);
            Mat t = new Mat();

            Core.gemm(negaRi, S.col(2), 1, new Mat(negaRi.rows(), negaRi.cols(), negaRi.type()), 0, t);

            Mat St = Si.col(2);

            t.copyTo(St);

            return(Si);
        }
Code example #25
        protected virtual void OnFrameSampleAcquired(VideoCaptureSample sample)
        {
            lock (latestImageBytesLockObject)
            {
                //When copying the bytes out of the buffer, you must supply a byte[] that is appropriately sized.
                //You can reuse this byte[] until you need to resize it (for whatever reason).
                if (_latestImageBytes == null || _latestImageBytes.Length < sample.dataLength)
                {
                    _latestImageBytes = new byte[sample.dataLength];
                }
                sample.CopyRawImageDataIntoBuffer(_latestImageBytes);
            }

            float[] cameraToWorldMatrixAsFloat;
            if (sample.TryGetCameraToWorldMatrix(out cameraToWorldMatrixAsFloat) == false)
            {
                sample.Dispose();
                return;
            }

            float[] projectionMatrixAsFloat;
            if (sample.TryGetProjectionMatrix(out projectionMatrixAsFloat) == false)
            {
                sample.Dispose();
                return;
            }

            // Right now we pass things across the pipe as a float array then convert them back into UnityEngine.Matrix using a utility method
            projectionMatrix    = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(projectionMatrixAsFloat);
            cameraToWorldMatrix = LocatableCameraUtils.ConvertFloatArrayToMatrix4x4(cameraToWorldMatrixAsFloat);

            cameraIntrinsics = sample.cameraIntrinsics;

            sample.Dispose();

            didUpdateThisFrame = true;
            didUpdateImageBufferInCurrentFrame = true;

            if (hasInitEventCompleted && frameMatAcquired != null)
            {
                Mat mat = new Mat(cameraParams.cameraResolutionHeight, cameraParams.cameraResolutionWidth, CvType.CV_8UC(Channels(outputColorFormat)));
                if (baseColorFormat == outputColorFormat)
                {
                    MatUtils.copyToMat <byte>(latestImageBytes, mat);
                }
                else
                {
                    Mat baseMat = new Mat(cameraParams.cameraResolutionHeight, cameraParams.cameraResolutionWidth, CvType.CV_8UC(Channels(baseColorFormat)));
                    MatUtils.copyToMat <byte>(latestImageBytes, baseMat);
                    Imgproc.cvtColor(baseMat, mat, ColorConversionCodes(baseColorFormat, outputColorFormat));
                }

                if (_rotate90Degree)
                {
                    Mat rotatedFrameMat = new Mat(cameraParams.cameraResolutionWidth, cameraParams.cameraResolutionHeight, CvType.CV_8UC(Channels(outputColorFormat)));
                    Core.rotate(mat, rotatedFrameMat, Core.ROTATE_90_CLOCKWISE);
                    mat.Dispose();

                    FlipMat(rotatedFrameMat, _flipVertical, _flipHorizontal);

                    frameMatAcquired.Invoke(rotatedFrameMat, projectionMatrix, cameraToWorldMatrix, cameraIntrinsics);
                }
                else
                {
                    FlipMat(mat, _flipVertical, _flipHorizontal);

                    frameMatAcquired.Invoke(mat, projectionMatrix, cameraToWorldMatrix, cameraIntrinsics);
                }
            }
        }
Code example #26
        protected virtual Mat generatePriors(int width = 320, int height = 240)
        {
            double[][] shrinkage_list   = new double[2][];
            double[][] feature_map_list = new double[2][];

            var feature_map_w = new double[strides.Length];

            for (int i = 0; i < strides.Length; i++)
            {
                feature_map_w[i] = (int)Math.Floor(width / strides[i]);
            }
            feature_map_list[0] = feature_map_w;

            var feature_map_h = new double[strides.Length];

            for (int i = 0; i < strides.Length; i++)
            {
                feature_map_h[i] = (int)Math.Floor(height / strides[i]);
            }
            feature_map_list[1] = feature_map_h;

            for (int i = 0; i < 2; i++)
            {
                shrinkage_list[i] = strides;
            }


            int priors_size = 0;

            for (int index = 0; index < feature_map_list[0].Length; index++)
            {
                priors_size += (int)(feature_map_list[1][index] * feature_map_list[0][index] * min_boxes[index].Length);
            }

            float[] priors_arr = new float[priors_size * 4];

            int count = 0;

            for (int index = 0; index < feature_map_list[0].Length; index++)
            {
                var scale_w = width / shrinkage_list[0][index];
                var scale_h = height / shrinkage_list[1][index];
                for (int j = 0; j < feature_map_list[1][index]; j++)
                {
                    for (int i = 0; i < feature_map_list[0][index]; i++)
                    {
                        var x_center = (i + 0.5) / scale_w;
                        var y_center = (j + 0.5) / scale_h;

                        foreach (var min_box in min_boxes[index])
                        {
                            var w = min_box / width;
                            var h = min_box / height;

                            priors_arr[count * 4]     = (float)x_center;
                            priors_arr[count * 4 + 1] = (float)y_center;
                            priors_arr[count * 4 + 2] = (float)w;
                            priors_arr[count * 4 + 3] = (float)h;

                            count++;
                        }
                    }
                }
            }

            Mat priors = new Mat(priors_size, 4, CvType.CV_32FC1);

            MatUtils.copyToMat(priors_arr, priors);

            // Clamp values
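            // THRESH_TOZERO with thresh 0 zeroes out negative entries; THRESH_TRUNC with
            // thresh 1.0 caps entries at 1.0, so together they clamp the priors to [0, 1].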
            Imgproc.threshold(priors, priors, 0, 0, Imgproc.THRESH_TOZERO);
            Imgproc.threshold(priors, priors, 1.0, 0, Imgproc.THRESH_TRUNC);

            return(priors);
        }
Code example #27
        private Point[] fit(Mat image,
                            Point[] init,
                            Size ssize,
                            bool robust,
                            int itol,
                            double ftol)
        {
            int n = smodel.npts();

//      assert((int(init.size())==n) && (pmodel.n_patches()==n));
//              Debug.Log ("init.size())==n " + init.Length + " " + n);
//              Debug.Log ("pmodel.n_patches()==n " + pmodel.n_patches () + " " + n);
            smodel.calc_params(init, new Mat(), 3.0f);
            Point[] pts = smodel.calc_shape();

            //find facial features in image around current estimates
            Point[] peaks = pmodel.calc_peaks(image, pts, ssize);

            //optimise
            if (!robust)
            {
                smodel.calc_params(peaks, new Mat(), 3.0f); //compute shape model parameters
                pts = smodel.calc_shape();                  //update shape
            }
            else
            {
                using (Mat weight = new Mat(n, 1, CvType.CV_32F)) using (Mat weight_sort = new Mat(n, 1, CvType.CV_32F)) {
                        float[] weight_float = new float[weight.total()];
                        MatUtils.copyFromMat <float> (weight, weight_float);
                        float[] weight_sort_float = new float[weight_sort.total()];

                        Point[] pts_old = pts;
                        for (int iter = 0; iter < itol; iter++)
                        {
                            //compute robust weight
                            for (int i = 0; i < n; i++)
                            {
                                using (MatOfPoint tmpMat = new MatOfPoint(new Point(pts [i].x - peaks [i].x, pts [i].y - peaks [i].y))) {
                                    weight_float [i] = (float)Core.norm(tmpMat);
                                }
                            }
                            MatUtils.copyToMat(weight_float, weight);

                            Core.sort(weight, weight_sort, Core.SORT_EVERY_COLUMN | Core.SORT_ASCENDING);


                            MatUtils.copyFromMat <float> (weight_sort, weight_sort_float);
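                            // 1.4826 * the median of the distances is a robust (MAD-style)
                            // estimate of their standard deviation under Gaussian noise.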
                            double var = 1.4826 * weight_sort_float [n / 2];


                            if (var < 0.1)
                            {
                                var = 0.1;
                            }

                            Core.pow(weight, 2, weight);


                            Core.multiply(weight, new Scalar(-0.5 / (var * var)), weight);

                            Core.exp(weight, weight);

                            //compute shape model parameters
                            smodel.calc_params(peaks, weight, 3.0f);


                            //update shape
                            pts = smodel.calc_shape();

                            //check for convergence
                            float v = 0;
                            for (int i = 0; i < n; i++)
                            {
                                using (MatOfPoint tmpMat = new MatOfPoint(new Point(pts [i].x - pts_old [i].x, pts [i].y - pts_old [i].y))) {
                                    v += (float)Core.norm(tmpMat);
                                }
                            }
                            if (v < ftol)
                            {
                                break;
                            }
                            else
                            {
                                pts_old = pts;
                            }
                        }
                    }
            }
            return(pts);
        }
Code example #28
        // Use this for initialization
        void Run()
        {
            //if true, error logs from the native OpenCV side will be displayed in the Unity Editor Console.
            Utils.setDebugMode(true);


            classNames = readClassNames(classes_filepath);
            if (classNames == null)
            {
                Debug.LogError(classes_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
            }

            classColors = new List <Scalar>();
            for (int i = 0; i < classNames.Count; i++)
            {
                classColors.Add(new Scalar(UnityEngine.Random.Range(0, 255), UnityEngine.Random.Range(0, 255), UnityEngine.Random.Range(0, 255)));
            }


            Mat img = Imgcodecs.imread(image_filepath);

            if (img.empty())
            {
                Debug.LogError(image_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
                img = new Mat(height, width, CvType.CV_8UC3, new Scalar(0, 0, 0));
            }



            //Adjust Quad.transform.localScale.
            gameObject.transform.localScale = new Vector3(img.width(), img.height(), 1);
            Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float imageWidth  = img.width();
            float imageHeight = img.height();

            float widthScale  = (float)Screen.width / imageWidth;
            float heightScale = (float)Screen.height / imageHeight;

            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = imageHeight / 2;
            }


            Net net = null;

            if (string.IsNullOrEmpty(model_filepath) || string.IsNullOrEmpty(config_filepath))
            {
                Debug.LogError(model_filepath + " or " + config_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
            }
            else
            {
                net = Dnn.readNetFromTensorflow(model_filepath, config_filepath);
            }

            if (net == null)
            {
                Imgproc.putText(img, "model file is not loaded.", new Point(5, img.rows() - 30), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
                Imgproc.putText(img, "Please read console message.", new Point(5, img.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
            }
            else
            {
                float frameWidth  = img.cols();
                float frameHeight = img.rows();

                Mat blob = Dnn.blobFromImage(img, 1.0, new Size(width, height), new Scalar(0, 0, 0), true, false);


                net.setInput(blob);

                List <Mat>    outputBlobs = new List <Mat>();
                List <string> outputName  = new List <string>();
                outputName.Add("detection_out_final");
                outputName.Add("detection_masks");

                net.forward(outputBlobs, outputName);

                Mat boxes = outputBlobs[0];
                Mat masks = outputBlobs[1];


                //int numClasses = masks.size (1);
                //int numDetections = boxes.size (2);


                Debug.Log("boxes.size(0) " + boxes.size(0));
                Debug.Log("boxes.size(1) " + boxes.size(1));
                Debug.Log("boxes.size(2) " + boxes.size(2));
                Debug.Log("boxes.size(3) " + boxes.size(3));
                Debug.Log("masks.size(0) " + masks.size(0));
                Debug.Log("masks.size(1) " + masks.size(1));
                Debug.Log("masks.size(2) " + masks.size(2));
                Debug.Log("masks.size(3) " + masks.size(3));


                //reshape from 4D to two 2D.
                float[] data = new float[boxes.size(3)];
                boxes = boxes.reshape(1, (int)boxes.total() / boxes.size(3));
                //Debug.Log ("boxes.ToString() " + boxes.ToString ());

                //reshape from 4D to two 2D.
                float[] mask_data = new float[masks.size(2) * masks.size(3)];
                masks = masks.reshape(1, (int)masks.total() / (masks.size(2) * masks.size(3)));
                //Debug.Log ("masks.ToString(): " + masks.ToString ());


                for (int i = 0; i < boxes.rows(); i++)
                {
                    boxes.get(i, 0, data);

                    float score = data[2];

                    if (score > thr)
                    {
                        int class_id = (int)(data[1]);


                        float left   = (float)(data[3] * frameWidth);
                        float top    = (float)(data[4] * frameHeight);
                        float right  = (float)(data[5] * frameWidth);
                        float bottom = (float)(data[6] * frameHeight);

                        left   = (int)Mathf.Max(0, Mathf.Min(left, frameWidth - 1));
                        top    = (int)Mathf.Max(0, Mathf.Min(top, frameHeight - 1));
                        right  = (int)Mathf.Max(0, Mathf.Min(right, frameWidth - 1));
                        bottom = (int)Mathf.Max(0, Mathf.Min(bottom, frameHeight - 1));

                        Debug.Log("class_id: " + class_id + " class_name " + classNames[class_id] + " left: " + left + " top: " + top + " right: " + right + " bottom: " + bottom);



                        //draw masks

                        masks.get((i * 90) + class_id, 0, mask_data);

                        Mat objectMask = new Mat(15, 15, CvType.CV_32F);
                        MatUtils.copyToMat <float>(mask_data, objectMask);

                        Imgproc.resize(objectMask, objectMask, new Size(right - left + 1, bottom - top + 1));

                        Core.compare(objectMask, new Scalar(mask_thr), objectMask, Core.CMP_GT);
                        //Debug.Log ("objectMask.ToString(): " + objectMask.ToString ());
                        //Debug.Log ("objectMask.dump(): " + objectMask.dump ());


                        Mat roi = new Mat(img, new OpenCVForUnity.CoreModule.Rect(new Point(left, top), new Point(right + 1, bottom + 1)));

                        Mat coloredRoi = new Mat(roi.size(), CvType.CV_8UC3);

                        Imgproc.rectangle(coloredRoi, new Point(0, 0), new Point(coloredRoi.width(), coloredRoi.height()), classColors[class_id], -1);

                        Core.addWeighted(coloredRoi, 0.7, roi, 0.3, 0, coloredRoi);
                        //Debug.Log ("coloredRoi.ToString(): " + coloredRoi.ToString ());
                        //Debug.Log ("roi.ToString(): " + roi.ToString ());

                        coloredRoi.copyTo(roi, objectMask);
                        coloredRoi.Dispose();

                        objectMask.Dispose();



                        //draw boxes

                        Imgproc.rectangle(img, new Point(left, top), new Point(right, bottom), new Scalar(0, 255, 0), 2);

                        string label = score.ToString();
                        if (classNames != null && classNames.Count != 0)
                        {
                            if (class_id < (int)classNames.Count)
                            {
                                label = classNames[class_id] + ": " + label;
                            }
                        }

                        int[] baseLine  = new int[1];
                        Size  labelSize = Imgproc.getTextSize(label, Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, 1, baseLine);

                        top = Mathf.Max(top, (int)labelSize.height);
                        Imgproc.rectangle(img, new Point(left, top - labelSize.height),
                                          new Point(left + labelSize.width, top + baseLine[0]), Scalar.all(255), Core.FILLED);
                        Imgproc.putText(img, label, new Point(left, top), Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 0, 255));
                    }
                }

                boxes.Dispose();
                masks.Dispose();
                blob.Dispose();
            }

            Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);

            Texture2D texture = new Texture2D(img.cols(), img.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(img, texture);

            gameObject.GetComponent <Renderer>().material.mainTexture = texture;

            if (net != null)
            {
                net.Dispose();
            }


            Utils.setDebugMode(false);
        }
Code example #29
        // Histogram-equalize separately for the left and right sides of the face.
        private static void equalizeLeftAndRightHalves(Mat faceImg)
        {
            // It is common that there is stronger light from one half of the face than the other. In that case,
            // if you simply did histogram equalization on the whole face then it would make one half dark and
            // one half bright. So we will do histogram equalization separately on each face half, so they will
            // both look similar on average. But this would cause a sharp edge in the middle of the face, because
            // the left half and right half would be suddenly different. So we also histogram equalize the whole
            // image, and in the middle part we blend the 3 images together for a smooth brightness transition.

            int w = faceImg.cols();
            int h = faceImg.rows();

            // 1) First, equalize the whole face.
            using (Mat wholeFace = new Mat(h, w, CvType.CV_8UC1))
            {
                Imgproc.equalizeHist(faceImg, wholeFace);

                // 2) Equalize the left half and the right half of the face separately.
                int midX = w / 2;
                using (Mat leftSide = new Mat(faceImg, new Rect(0, 0, midX, h)))
                    using (Mat rightSide = new Mat(faceImg, new Rect(midX, 0, w - midX, h)))
                    {
                        Imgproc.equalizeHist(leftSide, leftSide);
                        Imgproc.equalizeHist(rightSide, rightSide);

                        // 3) Combine the left half and right half and whole face together, so that it has a smooth transition.
                        byte[] wholeFace_byte = new byte[wholeFace.total() * wholeFace.elemSize()];
                        MatUtils.copyFromMat <byte>(wholeFace, wholeFace_byte);
                        byte[] leftSide_byte = new byte[leftSide.total() * leftSide.elemSize()];
                        MatUtils.copyFromMat <byte>(leftSide, leftSide_byte);
                        byte[] rightSide_byte = new byte[rightSide.total() * rightSide.elemSize()];
                        MatUtils.copyFromMat <byte>(rightSide, rightSide_byte);

                        int leftSide_w  = leftSide.cols();
                        int rightSide_w = rightSide.cols();

                        for (int y = 0; y < h; y++)
                        {
                            for (int x = 0; x < w; x++)
                            {
                                byte wv = wholeFace_byte[y * w + x];
                                if (x < w / 4)
                                { // Left 25%: just use the left face.
                                    wv = leftSide_byte[y * leftSide_w + x];
                                }
                                else if (x < w * 2 / 4)
                                { // Mid-left 25%: blend the left face & whole face.
                                    byte lv = leftSide_byte[y * leftSide_w + x];

                                    // Blend more of the whole face as it moves further right along the face.
                                    float f = (x - w * 1 / 4) / (w * 0.25f);
                                    wv = (byte)Mathf.Round((1.0f - f) * lv + f * wv);
                                }
                                else if (x < w * 3 / 4)
                                { // Mid-right 25%: blend the right face & whole face.
                                    byte rv = rightSide_byte[y * rightSide_w + x - midX];

                                    // Blend more of the right-side face as it moves further right along the face.
                                    float f = (x - w * 2 / 4) / (w * 0.25f);
                                    wv = (byte)Mathf.Round((1.0f - f) * wv + f * rv);
                                }
                                else
                                { // Right 25%: just use the right face.
                                    wv = rightSide_byte[y * rightSide_w + x - midX];
                                }
                                // Store the blended value back so the final copy into faceImg picks it up.
                                wholeFace_byte[y * w + x] = wv;
                            } // end x loop
                        }     //end y loop
                        MatUtils.copyToMat(wholeFace_byte, faceImg);
                    }
            }
        }
Code example #30
        // Calculates source image histogram and changes target_image to match source hist.
        private void specifyHistogram(Mat source_image, Mat target_image, Mat mask)
        {
            byte[][] LUT = new byte[3][];
            for (int i = 0; i < LUT.Length; i++)
            {
                LUT[i] = new byte[256];
            }
            double[][] source_hist = new double[3][];
            for (int i = 0; i < source_hist.Length; i++)
            {
                source_hist[i] = new double[256];
            }
            double[][] target_hist = new double[3][];
            for (int i = 0; i < target_hist.Length; i++)
            {
                target_hist[i] = new double[256];
            }
            double[][] source_cdf = new double[3][];
            for (int i = 0; i < source_cdf.Length; i++)
            {
                source_cdf[i] = new double[256];
            }
            double[][] target_cdf = new double[3][];
            for (int i = 0; i < target_cdf.Length; i++)
            {
                target_cdf[i] = new double[256];
            }

            double[] source_histMax = new double[3];
            double[] target_histMax = new double[3];

            byte[] mask_byte = new byte[mask.total() * mask.channels()];
            MatUtils.copyFromMat <byte>(mask, mask_byte);
            byte[] source_image_byte = new byte[source_image.total() * source_image.channels()];
            MatUtils.copyFromMat <byte>(source_image, source_image_byte);
            byte[] target_image_byte = new byte[target_image.total() * target_image.channels()];
            MatUtils.copyFromMat <byte>(target_image, target_image_byte);

            int pixel_i  = 0;
            int channels = (int)source_image.channels();
            int total    = (int)source_image.total();

            for (int i = 0; i < total; i++)
            {
                if (mask_byte[i] != 0)
                {
                    byte c = source_image_byte[pixel_i];
                    source_hist[0][c]++;
                    if (source_hist[0][c] > source_histMax[0])
                    {
                        source_histMax[0] = source_hist[0][c];
                    }

                    c = source_image_byte[pixel_i + 1];
                    source_hist[1][c]++;
                    if (source_hist[1][c] > source_histMax[1])
                    {
                        source_histMax[1] = source_hist[1][c];
                    }

                    c = source_image_byte[pixel_i + 2];
                    source_hist[2][c]++;
                    if (source_hist[2][c] > source_histMax[2])
                    {
                        source_histMax[2] = source_hist[2][c];
                    }

                    c = target_image_byte[pixel_i];
                    target_hist[0][c]++;
                    if (target_hist[0][c] > target_histMax[0])
                    {
                        target_histMax[0] = target_hist[0][c];
                    }

                    c = target_image_byte[pixel_i + 1];
                    target_hist[1][c]++;
                    if (target_hist[1][c] > target_histMax[1])
                    {
                        target_histMax[1] = target_hist[1][c];
                    }

                    c = target_image_byte[pixel_i + 2];
                    target_hist[2][c]++;
                    if (target_hist[2][c] > target_histMax[2])
                    {
                        target_histMax[2] = target_hist[2][c];
                    }
                }
                // Advance to next pixel
                pixel_i += channels;
            }

            // Normalize hist
            for (int i = 0; i < 256; i++)
            {
                source_hist[0][i] /= source_histMax[0];
                source_hist[1][i] /= source_histMax[1];
                source_hist[2][i] /= source_histMax[2];

                target_hist[0][i] /= target_histMax[0];
                target_hist[1][i] /= target_histMax[1];
                target_hist[2][i] /= target_histMax[2];
            }

            // Calc cumulative distribution function (CDF)
            source_cdf[0][0] = source_hist[0][0];
            source_cdf[1][0] = source_hist[1][0];
            source_cdf[2][0] = source_hist[2][0];
            target_cdf[0][0] = target_hist[0][0];
            target_cdf[1][0] = target_hist[1][0];
            target_cdf[2][0] = target_hist[2][0];
            for (int i = 1; i < 256; i++)
            {
                source_cdf[0][i] = source_cdf[0][i - 1] + source_hist[0][i];
                source_cdf[1][i] = source_cdf[1][i - 1] + source_hist[1][i];
                source_cdf[2][i] = source_cdf[2][i - 1] + source_hist[2][i];

                target_cdf[0][i] = target_cdf[0][i - 1] + target_hist[0][i];
                target_cdf[1][i] = target_cdf[1][i - 1] + target_hist[1][i];
                target_cdf[2][i] = target_cdf[2][i - 1] + target_hist[2][i];
            }

            // Normalize CDF
            for (int i = 0; i < 256; i++)
            {
                source_cdf[0][i] /= source_cdf[0][255];
                source_cdf[1][i] /= source_cdf[1][255];
                source_cdf[2][i] /= source_cdf[2][255];

                target_cdf[0][i] /= target_cdf[0][255];
                target_cdf[1][i] /= target_cdf[1][255];
                target_cdf[2][i] /= target_cdf[2][255];
            }

            // Create lookup table (LUT)
            const double HISTMATCH_EPSILON = 0.000001;

            for (int i = 0; i < 3; i++)
            {
                int last = 0;
                for (int j = 0; j < 256; j++)
                {
                    double F1j = target_cdf[i][j];

                    for (int k = last; k < 256; k++)
                    {
                        double F2k = source_cdf[i][k];
                        if (Math.Abs(F2k - F1j) < HISTMATCH_EPSILON || F2k > F1j)
                        {
                            LUT[i][j] = (byte)k;
                            last      = k;
                            break;
                        }
                    }
                }
            }

            // Repaint pixels
            pixel_i = 0;
            for (int i = 0; i < total; i++)
            {
                if (mask_byte[i] != 0)
                {
                    target_image_byte[pixel_i]     = LUT[0][target_image_byte[pixel_i]];
                    target_image_byte[pixel_i + 1] = LUT[1][target_image_byte[pixel_i + 1]];
                    target_image_byte[pixel_i + 2] = LUT[2][target_image_byte[pixel_i + 2]];
                }
                // Advance to next pixel
                pixel_i += channels;
            }

            MatUtils.copyToMat(target_image_byte, target_image);
        }
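A minimal sketch of hypothetical caller code: repaint the swapped-in face so its colors match the underlying frame within the face mask, typically before alpha blending.

        // source_image supplies the reference histogram; target_image is modified in place.
        specifyHistogram(frameFaceRegion, swappedFaceRegion, faceMask);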