Example #1
        /// <summary>
        /// Binarizes by Niblack's method
        /// </summary>
        /// <param name="src">Input image</param>
        /// <param name="dst">Output image</param>
        /// <param name="kernelSize">Window size</param>
        /// <param name="k">Coefficient multiplied by the local standard deviation</param>
        public static void Niblack(Mat src, Mat dst, int kernelSize, double k)
        {
            if (src == null)
                throw new ArgumentNullException(nameof(src));
            if (dst == null)
                throw new ArgumentNullException(nameof(dst));

            // Grayscale images only
            if (src.Type() != MatType.CV_8UC1)
                throw new ArgumentException("src must be a grayscale image");
            if (dst.Type() != MatType.CV_8UC1)
                throw new ArgumentException("dst must be a grayscale image");

            // Validate the kernel size
            if (kernelSize < 3)
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be 3 or greater");
            if (kernelSize % 2 == 0)
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be an odd number");

            int width = src.Width;
            int height = src.Height;
            dst.Create(src.Size(), src.Type());

            using (var tSrcMat = new MatOfByte(src))
            using (var tDstMat = new MatOfByte(dst))
            {
                var tSrc = tSrcMat.GetIndexer();
                var tDst = tDstMat.GetIndexer();

                // Process each row in parallel (MyParallel is a project-local wrapper around Parallel.For)
                MyParallel.For(0, height, delegate(int y)
                {
                    for (int x = 0; x < width; x++)
                    {
                        double m, s;
                        MeanStddev(src, x, y, kernelSize, out m, out s);
                        double threshold = m + k * s;

                        if (tSrc[y, x] < threshold)
                            tDst[y, x] = 0;
                        else
                            tDst[y, x] = 255;
                    }
                }
                );
            }
        }
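A minimal usage sketch for the method above; the Binarizer class name (borrowed from OpenCvSharp's extension samples) and the file paths are assumptions, not part of the snippet:

using (var src = Cv2.ImRead("input.png", ImreadModes.Grayscale))
using (var dst = new Mat())
{
    // Niblack thresholds each pixel at mean + k * stddev of its window;
    // k is typically negative (around -0.2) for dark text on a light background
    Binarizer.Niblack(src, dst, kernelSize: 51, k: -0.2);
    dst.ImWrite("binarized.png");
}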
Example #2
        /// <summary>
        /// Raises the web cam texture to mat helper inited event.
        /// </summary>
        public void OnWebCamTextureToMatHelperInited()
        {
            Debug.Log ("OnWebCamTextureToMatHelperInited");

            Mat webCamTextureMat = webCamTextureToMatHelper.GetMat ();

            colors = new Color32[webCamTextureMat.cols () * webCamTextureMat.rows ()];
            texture = new Texture2D (webCamTextureMat.cols (), webCamTextureMat.rows (), TextureFormat.RGBA32, false);

            matOpFlowThis = new Mat ();
            matOpFlowPrev = new Mat ();
            MOPcorners = new MatOfPoint ();
            mMOP2fptsThis = new MatOfPoint2f ();
            mMOP2fptsPrev = new MatOfPoint2f ();
            mMOP2fptsSafe = new MatOfPoint2f ();
            mMOBStatus = new MatOfByte ();
            mMOFerr = new MatOfFloat ();

            gameObject.transform.localScale = new Vector3 (webCamTextureMat.cols (), webCamTextureMat.rows (), 1);

            Debug.Log ("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            // Fit the quad to the screen, letterboxing on the axis that would overflow
            float width = gameObject.transform.localScale.x;
            float height = gameObject.transform.localScale.y;

            float widthScale = (float)Screen.width / width;
            float heightScale = (float)Screen.height / height;
            if (widthScale < heightScale) {
                Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
            } else {
                Camera.main.orthographicSize = height / 2;
            }

            gameObject.GetComponent<Renderer> ().material.mainTexture = texture;

            //webCamTextureToMatHelper.Play ();
        }
Example #3
        /// <summary>
        /// Binarizes by Niblack's method (faster, but memory-hogging version)
        /// </summary>
        /// <param name="src">Input image</param>
        /// <param name="dst">Output image</param>
        /// <param name="kernelSize">Window size</param>
        /// <param name="k">Coefficient multiplied by the local standard deviation</param>
        public static void NiblackFast(Mat src, Mat dst, int kernelSize, double k)
        {
            if (src == null)
                throw new ArgumentNullException(nameof(src));
            if (dst == null)
                throw new ArgumentNullException(nameof(dst));

            // Grayscale images only
            if (src.Type() != MatType.CV_8UC1)
                throw new ArgumentException("src must be a grayscale image");
            if (dst.Type() != MatType.CV_8UC1)
                throw new ArgumentException("dst must be a grayscale image");

            // Validate the kernel size
            if (kernelSize < 3)
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be 3 or greater");
            if (kernelSize % 2 == 0)
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be an odd number");

            int borderSize = kernelSize / 2;
            int width = src.Width;
            int height = src.Height;
            dst.Create(src.Size(), src.Type());

            using (var tempMat = new Mat(height + (borderSize * 2), width + (borderSize * 2), src.Type()))
            using (var sumMat = new Mat(tempMat.Height + 1, tempMat.Width + 1, MatType.CV_64FC1, 1))
            using (var sqSumMat = new Mat(tempMat.Height + 1, tempMat.Width + 1, MatType.CV_64FC1, 1))
            {
                Cv2.CopyMakeBorder(src, tempMat, borderSize, borderSize, borderSize, borderSize, BorderTypes.Replicate, Scalar.All(0));
                Cv2.Integral(tempMat, sumMat, sqSumMat);

                using (var tSrcMat = new MatOfByte(src))
                using (var tDstMat = new MatOfByte(dst))
                using (var tSumMat = new MatOfDouble(sumMat))
                using (var tSqSumMat = new MatOfDouble(sqSumMat))
                {
                    var tSrc = tSrcMat.GetIndexer();
                    var tDst = tDstMat.GetIndexer();
                    var tSum = tSumMat.GetIndexer();
                    var tSqSum = tSqSumMat.GetIndexer();

                    int ylim = height + borderSize;
                    int xlim = width + borderSize;
                    int kernelPixels = kernelSize * kernelSize;
                    for (int y = borderSize; y < ylim; y++)
                    {
                        for (int x = borderSize; x < xlim; x++)
                        {
                            int x1 = x - borderSize;
                            int y1 = y - borderSize;
                            int x2 = x + borderSize + 1;
                            int y2 = y + borderSize + 1;
                            // Window sum via the integral image: S = I(y2,x2) - I(y2,x1) - I(y1,x2) + I(y1,x1)
                            double sum = tSum[y2, x2] - tSum[y2, x1] - tSum[y1, x2] + tSum[y1, x1];
                            double sqsum = tSqSum[y2, x2] - tSqSum[y2, x1] - tSqSum[y1, x2] + tSqSum[y1, x1];
                            double mean = sum / kernelPixels;
                            // Var(X) = E[X^2] - E[X]^2, clamped against tiny negative floating-point error
                            double var = (sqsum / kernelPixels) - (mean * mean);
                            if (var < 0.0) var = 0.0;
                            double stddev = Math.Sqrt(var);

                            double threshold = mean + k * stddev;
                            if (tSrc[y - borderSize, x - borderSize] < threshold)
                                tDst[y - borderSize, x - borderSize] = 0;
                            else
                                tDst[y - borderSize, x - borderSize] = 255;
                        }
                    }
                }
            }
        }
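The four-corner lookups above rely on the summed-area (integral) image identity: the sum over the half-open window [y1, y2) x [x1, x2) equals I(y2,x2) - I(y2,x1) - I(y1,x2) + I(y1,x1). A quick sanity check against a brute-force sum, as a minimal OpenCvSharp sketch (the test dimensions and window are arbitrary):

using System;
using OpenCvSharp;

static class IntegralSanityCheck
{
    static void Main()
    {
        using (var img = new Mat(32, 32, MatType.CV_8UC1))
        using (var sum = new Mat())
        {
            Cv2.Randu(img, Scalar.All(0), Scalar.All(256)); // random test pixels
            Cv2.Integral(img, sum);                         // 33x33; CV_32S for 8-bit input

            // Window [y1, y2) x [x1, x2)
            int x1 = 5, y1 = 7, x2 = 20, y2 = 25;
            var idx = sum.GetGenericIndexer<int>();
            int viaIntegral = idx[y2, x2] - idx[y2, x1] - idx[y1, x2] + idx[y1, x1];

            int bruteForce = 0;
            for (int y = y1; y < y2; y++)
                for (int x = x1; x < x2; x++)
                    bruteForce += img.Get<byte>(y, x);

            Console.WriteLine(viaIntegral == bruteForce); // True
        }
    }
}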
Example #4
        /// <summary>
        /// Finds the minimum and maximum values in the neighborhood of the pixel of interest
        /// </summary>
        /// <param name="img">Pixel data of the image</param>
        /// <param name="x">x coordinate</param>
        /// <param name="y">y coordinate</param>
        /// <param name="size">Neighborhood search size; must be an odd number</param>
        /// <param name="min">Receives the minimum value</param>
        /// <param name="max">Receives the maximum value</param>
        private static void MinMax(Mat img, int x, int y, int size, out byte min, out byte max)
        {
            int size2 = size / 2;
            min = byte.MaxValue;
            max = byte.MinValue;
            int xs = Math.Max(x - size2, 0);
            int xe = Math.Min(x + size2, img.Width);
            int ys = Math.Max(y - size2, 0);
            int ye = Math.Min(y + size2, img.Height);

            using (var tImg = new MatOfByte(img))
            {
                var indexer = tImg.GetIndexer();

                for (int xx = xs; xx < xe; xx++)
                {
                    for (int yy = ys; yy < ye; yy++)
                    {
                        byte v = indexer[yy, xx];
                        // Two independent checks: with "else if", a window whose values only ever
                        // raise max would leave min stuck at byte.MaxValue
                        if (max < v)
                            max = v;
                        if (min > v)
                            min = v;
                    }
                }
            }
        }
Example #5
        public static float[] GetArray(this Mat self, float[] buffer = null, bool bgr2rgb = true)
        {
            int width  = self.Width;
            int height = self.Height;

            float[] f;
            if (buffer == null)
            {
                f = new float[width * height * self.Channel];
            }
            else
            {
                if (buffer.Length < width * height * self.Channel)
                {
                    throw new ArgumentOutOfRangeException(nameof(buffer));
                }
                f = buffer;
            }

            if (self.Channel == 3)
            {
                using (MatOfByte3 matByte = new MatOfByte3())
                {
                    self.CopyTo(matByte);

                    var indexer = matByte.GetIndexer();
                    int i       = 0;
                    for (int y = 0; y < height; y++)
                    {
                        for (int x = 0; x < width; x++)
                        {
                            Vec3b color = indexer[y, x];
                            if (bgr2rgb)
                            {
                                f[i] = color.Item2;
                                i++;
                                f[i] = color.Item1;
                                i++;
                                f[i] = color.Item0;
                                i++;
                            }
                            else
                            {
                                f[i] = color.Item0;
                                i++;
                                f[i] = color.Item1;
                                i++;
                                f[i] = color.Item2;
                                i++;
                            }
                        }
                    }
                }
            }
            else if (self.Channel == 1)
            {
                using (var matByte = new MatOfByte())
                {
                    self.CopyTo(matByte);

                    var indexer = matByte.GetIndexer();
                    int i       = 0;
                    for (int y = 0; y < height; y++)
                    {
                        for (int x = 0; x < width; x++)
                        {
                            f[i] = indexer[y, x];
                            i++;
                        }
                    }
                }
            }
            else
            {
                throw new Exception("not supported channel");
            }

            return(f);
        }
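A minimal usage sketch; note that Channel is assumed to be a channel-count member defined alongside this extension in the same codebase (it is not a stock OpenCvSharp API), and the file name is a placeholder:

using (var img = Cv2.ImRead("frame.png", ImreadModes.Color)) // 3-channel BGR
{
    // One-shot conversion: allocates a new float[] in interleaved RGB order
    float[] pixels = img.GetArray(bgr2rgb: true);

    // Steady-state: reuse a preallocated buffer to avoid per-frame allocations
    var buffer = new float[img.Width * img.Height * 3];
    img.GetArray(buffer);
}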
Example #6
        /// <summary>
        /// Binarizes by Niblack's method (faster, but memory-hogging version)
        /// </summary>
        /// <param name="src">Input image</param>
        /// <param name="dst">Output image</param>
        /// <param name="kernelSize">Window size</param>
        /// <param name="k">Coefficient multiplied by the local standard deviation</param>
        public static void NiblackFast(Mat src, Mat dst, int kernelSize, double k)
        {
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }

            // Grayscale images only
            if (src.Type() != MatType.CV_8UC1)
            {
                throw new ArgumentException("src must be a grayscale image");
            }
            if (dst.Type() != MatType.CV_8UC1)
            {
                throw new ArgumentException("dst must be a grayscale image");
            }

            // Validate the kernel size
            if (kernelSize < 3)
            {
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be 3 or greater");
            }
            if (kernelSize % 2 == 0)
            {
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be an odd number");
            }

            int borderSize = kernelSize / 2;
            int width      = src.Width;
            int height     = src.Height;

            dst.Create(src.Size(), src.Type());

            using (var tempMat = new Mat(height + (borderSize * 2), width + (borderSize * 2), src.Type()))
            using (var sumMat = new Mat(tempMat.Height + 1, tempMat.Width + 1, MatType.CV_64FC1, 1))
            using (var sqSumMat = new Mat(tempMat.Height + 1, tempMat.Width + 1, MatType.CV_64FC1, 1))
            {
                Cv2.CopyMakeBorder(src, tempMat, borderSize, borderSize, borderSize, borderSize, BorderTypes.Replicate, Scalar.All(0));
                Cv2.Integral(tempMat, sumMat, sqSumMat);

                using (var tSrcMat = new MatOfByte(src))
                using (var tDstMat = new MatOfByte(dst))
                using (var tSumMat = new MatOfDouble(sumMat))
                using (var tSqSumMat = new MatOfDouble(sqSumMat))
                {
                    var tSrc   = tSrcMat.GetIndexer();
                    var tDst   = tDstMat.GetIndexer();
                    var tSum   = tSumMat.GetIndexer();
                    var tSqSum = tSqSumMat.GetIndexer();

                    int ylim         = height + borderSize;
                    int xlim         = width + borderSize;
                    int kernelPixels = kernelSize * kernelSize;
                    for (int y = borderSize; y < ylim; y++)
                    {
                        for (int x = borderSize; x < xlim; x++)
                        {
                            int x1 = x - borderSize;
                            int y1 = y - borderSize;
                            int x2 = x + borderSize + 1;
                            int y2 = y + borderSize + 1;
                            // Window sums via the integral image corners
                            double sum   = tSum[y2, x2] - tSum[y2, x1] - tSum[y1, x2] + tSum[y1, x1];
                            double sqsum = tSqSum[y2, x2] - tSqSum[y2, x1] - tSqSum[y1, x2] + tSqSum[y1, x1];
                            double mean  = sum / kernelPixels;
                            double var   = (sqsum / kernelPixels) - (mean * mean);
                            if (var < 0.0)
                            {
                                var = 0.0;
                            }
                            double stddev = Math.Sqrt(var);

                            double threshold = mean + k * stddev;
                            if (tSrc[y - borderSize, x - borderSize] < threshold)
                            {
                                tDst[y - borderSize, x - borderSize] = 0;
                            }
                            else
                            {
                                tDst[y - borderSize, x - borderSize] = 255;
                            }
                        }
                    }
                }
            }
        }
Example #7
        /// <summary>
        /// Binarizes by Bernsen's method
        /// </summary>
        /// <param name="src">Input image</param>
        /// <param name="dst">Output image</param>
        /// <param name="kernelSize">Window size</param>
        /// <param name="constrastMin">Regions whose local contrast is below this value are treated as background</param>
        /// <param name="bgThreshold">Threshold applied to background regions (elsewhere the threshold is computed adaptively)</param>
        public static void Bernsen(Mat src, Mat dst, int kernelSize, byte constrastMin, byte bgThreshold)
        {
            if (src == null)
            {
                throw new ArgumentNullException(nameof(src));
            }
            if (dst == null)
            {
                throw new ArgumentNullException(nameof(dst));
            }

            // Grayscale images only
            if (src.Type() != MatType.CV_8UC1)
            {
                throw new ArgumentException("src must be a grayscale image");
            }

            // Validate the kernel size
            if (kernelSize < 3)
            {
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be 3 or greater");
            }
            if (kernelSize % 2 == 0)
            {
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be an odd number");
            }

            int width  = src.Width;
            int height = src.Height;

            dst.Create(src.Size(), src.Type());

            using (var tSrcMat = new MatOfByte(src))
            using (var tDstMat = new MatOfByte(dst))
            {
                var tSrc = tSrcMat.GetIndexer();
                var tDst = tDstMat.GetIndexer();

                for (int y = 0; y < height; y++)
                {
                    for (int x = 0; x < width; x++)
                    {
                        byte min, max;
                        MinMax(src, x, y, kernelSize, out min, out max);

                        // Low local contrast marks a background window: apply the fixed threshold there,
                        // otherwise threshold at the midpoint of the local range
                        int  contrast = max - min;
                        byte threshold;
                        if (contrast < constrastMin)
                        {
                            threshold = bgThreshold;
                        }
                        else
                        {
                            threshold = (byte)((max + min) / 2);
                        }

                        if (tSrc[y, x] <= threshold)
                        {
                            tDst[y, x] = 0;
                        }
                        else
                        {
                            tDst[y, x] = 255;
                        }
                    }
                }
            }
        }
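A minimal usage sketch for Bernsen above, under the same assumptions as the Niblack example (a static Binarizer-style class and placeholder paths):

using (var src = Cv2.ImRead("page.png", ImreadModes.Grayscale))
using (var dst = new Mat())
{
    // Windows with contrast below 15 are treated as background and get the fixed threshold 127
    Binarizer.Bernsen(src, dst, kernelSize: 31, constrastMin: 15, bgThreshold: 127);
    dst.ImWrite("page_bin.png");
}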
Example #8
    /// <summary>
    /// Refines the matches with homography.
    /// </summary>
    /// <returns><c>true</c>, if matches with homography was refined, <c>false</c> otherwise.</returns>
    /// <param name="queryKeypoints">Query keypoints.</param>
    /// <param name="trainKeypoints">Train keypoints.</param>
    /// <param name="reprojectionThreshold">Reprojection threshold.</param>
    /// <param name="matches">Matches.</param>
    /// <param name="homography">Homography.</param>
    static bool refineMatchesWithHomography
    (
        MatOfKeyPoint queryKeypoints,
        MatOfKeyPoint trainKeypoints,
        float reprojectionThreshold,
        MatOfDMatch matches,
        Mat homography
    )
    {
//              Debug.Log ("matches " + matches.ToString ());

        int minNumberMatchesAllowed = 8;

        List <KeyPoint> queryKeypointsList = queryKeypoints.toList();
        List <KeyPoint> trainKeypointsList = trainKeypoints.toList();
        List <DMatch>   matchesList        = matches.toList();

        if (matchesList.Count < minNumberMatchesAllowed)
        {
            return(false);
        }

        // Prepare data for cv::findHomography
        List <Point> srcPointsList = new List <Point> (matchesList.Count);
        List <Point> dstPointsList = new List <Point> (matchesList.Count);

        for (int i = 0; i < matchesList.Count; i++)
        {
            srcPointsList.Add(trainKeypointsList [matchesList [i].trainIdx].pt);
            dstPointsList.Add(queryKeypointsList [matchesList [i].queryIdx].pt);
        }

        // Find homography matrix and get inliers mask
        int inliersCount = 0;
        using (MatOfPoint2f srcPoints = new MatOfPoint2f())
        using (MatOfPoint2f dstPoints = new MatOfPoint2f())
        using (MatOfByte inliersMask = new MatOfByte(new byte[srcPointsList.Count]))
        {
            srcPoints.fromList(srcPointsList);
            dstPoints.fromList(dstPointsList);

            Calib3d.findHomography(srcPoints,
                                   dstPoints,
                                   Calib3d.FM_RANSAC,
                                   reprojectionThreshold,
                                   inliersMask, 2000, 0.955).copyTo(homography);

            if (homography.rows() != 3 || homography.cols() != 3)
            {
                return(false);
            }

            List <byte> inliersMaskList = inliersMask.toList();

            List <DMatch> inliers = new List <DMatch> ();
            for (int i = 0; i < inliersMaskList.Count; i++)
            {
                if (inliersMaskList [i] == 1)
                {
                    inliers.Add(matchesList [i]);
                }
            }

            // Keep only the RANSAC inliers
            matches.fromList(inliers);
            inliersCount = inliers.Count;
        }

        // Compare the refined (inlier) count, not the pre-RANSAC count held in matchesList
        return(inliersCount > minNumberMatchesAllowed);
    }
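A hedged sketch of the surrounding pipeline in OpenCV for Unity's Java-style API; queryImg and trainImg are assumed to be 8-bit grayscale Mats loaded elsewhere:

    // Detect keypoints and descriptors in both images
    ORB orb = ORB.create();
    MatOfKeyPoint queryKp = new MatOfKeyPoint(), trainKp = new MatOfKeyPoint();
    Mat queryDesc = new Mat(), trainDesc = new Mat();
    orb.detectAndCompute(queryImg, new Mat(), queryKp, queryDesc);
    orb.detectAndCompute(trainImg, new Mat(), trainKp, trainDesc);

    // Brute-force Hamming matching, then drop outliers with the RANSAC homography above
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
    MatOfDMatch matches = new MatOfDMatch();
    matcher.match(queryDesc, trainDesc, matches);

    Mat homography = new Mat();
    bool found = refineMatchesWithHomography(queryKp, trainKp, 3.0f, matches, homography);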
Example #9
        public override int Run()
        {
            int device = 0;

            var argument = new StringList {
                "./"
            };
            FaceModelParameters det_parameters = new FaceModelParameters(argument);

            //vector<string> files, depth_directories, output_video_files, out_dummy;
            StringList files = new StringList(), output_video_files = new StringList(), out_dummy = new StringList();
            bool       u;
            string     output_codec;

            LandmarkDetector.get_video_input_output_params(files, out_dummy, output_video_files, out u, out output_codec, argument);

            CLNF clnf_model = new CLNF(det_parameters.model_location);

            float fx = 0, fy = 0, cx = 0, cy = 0;

            LandmarkDetector.get_camera_params(out device, out fx, out fy, out cx, out cy, argument);

            // If cx (optical axis centre) is undefined will use the image size/2 as an estimate
            bool cx_undefined = false;
            bool fx_undefined = false;

            if (cx == 0 || cy == 0)
            {
                cx_undefined = true;
            }
            if (fx == 0 || fy == 0)
            {
                fx_undefined = true;
            }

            // Do some grabbing
            INFO_STREAM("Attempting to capture from device: " + device);
            using (VideoCapture video_capture = new VideoCapture(device))
            {
                using (Mat dummy = new Mat())
                    video_capture.Read(dummy);

                if (!video_capture.IsOpened())
                {
                    FATAL_STREAM("Failed to open video source");
                    return(1);
                }
                else
                {
                    INFO_STREAM("Device or file opened");
                }

                int frame_count    = 0;
                Mat captured_image = new Mat();
                video_capture.Read(captured_image);
                Size = new Size(captured_image.Width / SizeFactor, captured_image.Height / SizeFactor);
                using (var resized_image = captured_image.Resize(Size))
                {
                    // If optical centers are not defined just use center of image
                    if (cx_undefined)
                    {
                        cx = resized_image.Cols / 2.0f;
                        cy = resized_image.Rows / 2.0f;
                    }
                    // Use a rough guess-timate of focal length
                    if (fx_undefined)
                    {
                        fx = (float)(500 * (resized_image.Cols / 640.0));
                        fy = (float)(500 * (resized_image.Rows / 480.0));

                        fx = (float)((fx + fy) / 2.0);
                        fy = fx;
                    }
                }

                // Use for timestamping if using a webcam
                long t_initial = Cv2.GetTickCount();

                INFO_STREAM("Starting tracking");
                while (video_capture.Read(captured_image))
                {
                    using (var resized_image = captured_image.Resize(Size))
                    {
                        // Reading the images
                        MatOfByte grayscale_image = new MatOfByte();

                        if (resized_image.Channels() == 3)
                        {
                            Cv2.CvtColor(resized_image, grayscale_image, ColorConversionCodes.BGR2GRAY);
                        }
                        else
                        {
                            grayscale_image = (MatOfByte)resized_image.Clone();
                        }

                        // The actual facial landmark detection / tracking
                        bool detection_success = LandmarkDetector.DetectLandmarksInVideo(new SWIGTYPE_p_cv__Mat_T_uchar_t(grayscale_image.CvPtr), new SWIGTYPE_p_CLNF(CLNF.getCPtr(clnf_model)), new SWIGTYPE_p_FaceModelParameters(FaceModelParameters.getCPtr(det_parameters)));

                        // Visualising the results
                        // Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
                        double detection_certainty = clnf_model.detection_certainty;

                        visualise_tracking(resized_image, ref clnf_model, ref det_parameters, frame_count, fx, fy, cx, cy);

                        // detect key presses
                        char character_press = (char)Cv2.WaitKey(15);
                        switch (character_press)
                        {
                        case 'r':
                            clnf_model.Reset();
                            break;

                        case 'q':
                            return(0);
                        }

                        // Update the frame count
                        frame_count++;

                        grayscale_image.Dispose();
                        grayscale_image = null;
                    }
                }
            }

            return(0);
        }
Example #10
        private void BeamDetection(string outputfilename, bool isup)
        { // beam detection
            int BinarizeThreshold   = 60;
            int BrightnessThreshold = 4;
            int nop = 7;

            double dz = 0;

            if (isup == true)
            {
                dz = -0.003;
            }
            else
            {
                dz = 0.003;
            }

            Camera         camera    = Camera.GetInstance();
            MotorControler mc        = MotorControler.GetInstance(parameterManager);
            Vector3        InitPoint = mc.GetPoint();
            Vector3        p         = new Vector3();
            TracksManager  tm        = parameterManager.TracksManager;
            int            mod       = parameterManager.ModuleNo;
            int            pl        = parameterManager.PlateNo;
            Track          myTrack   = tm.GetTrack(tm.TrackingIndex);

            string[] sp = myTrack.IdString.Split('-');

            //string datfileName = string.Format("{0}.dat", System.DateTime.Now.ToString("yyyyMMdd_HHmmss"));
            string       datfileName = string.Format(@"c:\test\bpm\{0}\{1}-{2}-{3}-{4}-{5}.dat", mod, mod, pl, sp[0], sp[1], System.DateTime.Now.ToString("ddHHmmss"));
            BinaryWriter writer      = new BinaryWriter(File.Open(datfileName, FileMode.Create));

            byte[] bb = new byte[440 * 512 * nop];

            string       fileName = string.Format("{0}", outputfilename);
            StreamWriter twriter  = File.CreateText(fileName);
            string       stlog    = "";


            List <ImageTaking> LiIT = TakeSequentialImage(0.0, 0.0, dz, nop);

            Mat sum = Mat.Zeros(440, 512, MatType.CV_8UC1);

            for (int i = 0; i < LiIT.Count; i++)
            {
                Mat bin = (Mat)DogContrastBinalize(LiIT[i].img, 31, BinarizeThreshold);
                Cv2.Add(sum, bin, sum);
                //byte[] b = LiIT[i].img.ToBytes();//format is .png
                MatOfByte mob = new MatOfByte(LiIT[i].img);
                byte[]    b   = mob.ToArray();
                b.CopyTo(bb, 440 * 512 * i);
            }

            mc.MovePointZ(InitPoint.Z);
            mc.Join();


            Cv2.Threshold(sum, sum, BrightnessThreshold, 1, ThresholdType.Binary);


            // Using Cv2.FindContours causes an AccessViolationException (in both Release and Debug builds), so this is written in the C-API style
            using (CvMemStorage storage = new CvMemStorage())
            {
                using (CvContourScanner scanner = new CvContourScanner(sum.ToIplImage(), storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
                {
                    //string fileName = string.Format(@"c:\img\{0}.txt",
                    //        System.DateTime.Now.ToString("yyyyMMdd_HHmmss_fff"));

                    foreach (CvSeq <CvPoint> c in scanner)
                    {
                        CvMoments mom = new CvMoments(c, false);
                        if (c.ElemSize < 2)
                        {
                            continue;
                        }
                        if (mom.M00 == 0.0)
                        {
                            continue;
                        }
                        double mx = mom.M10 / mom.M00;
                        double my = mom.M01 / mom.M00;
                        stlog += string.Format("{0:F} {1:F}\n", mx, my);
                    }
                }
            }

            twriter.Write(stlog);
            twriter.Close();

            writer.Write(bb);
            writer.Flush();
            writer.Close();

            sum *= 255;
            sum.ImWrite(String.Format(@"c:\img\{0}_{1}_{2}.bmp",
                                      System.DateTime.Now.ToString("yyyyMMdd_HHmmss"),
                                      (int)(p.X * 1000),
                                      (int)(p.Y * 1000)));
        }//BeamDetection
Example #11
        /**
         * Draws the found matches of keypoints from two images.
         *
         * param img1 First source image.
         * param keypoints1 Keypoints from the first source image.
         * param img2 Second source image.
         * param keypoints2 Keypoints from the second source image.
         * param matches1to2 Matches from the first image to the second one, which means that keypoints1[i]
         * has a corresponding point in keypoints2[matches[i]] .
         * param outImg Output image. Its content depends on the flags value defining what is drawn in the
         * output image. See possible flags bit values below.
         * param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1)
         * , the color is generated randomly.
         * param singlePointColor Color of single keypoints (circles), which means that keypoints do not
         * have the matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly.
         * param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are
         * drawn.
         *
         * This function draws matches of keypoints from two images in the output image. Match is a line
         * connecting two keypoints (circles). See cv::DrawMatchesFlags.
         */
        public static void drawMatches(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, Mat outImg, Scalar matchColor, Scalar singlePointColor, MatOfByte matchesMask)
        {
            if (img1 != null)
            {
                img1.ThrowIfDisposed();
            }
            if (keypoints1 != null)
            {
                keypoints1.ThrowIfDisposed();
            }
            if (img2 != null)
            {
                img2.ThrowIfDisposed();
            }
            if (keypoints2 != null)
            {
                keypoints2.ThrowIfDisposed();
            }
            if (matches1to2 != null)
            {
                matches1to2.ThrowIfDisposed();
            }
            if (outImg != null)
            {
                outImg.ThrowIfDisposed();
            }
            if (matchesMask != null)
            {
                matchesMask.ThrowIfDisposed();
            }
            Mat keypoints1_mat  = keypoints1;
            Mat keypoints2_mat  = keypoints2;
            Mat matches1to2_mat = matches1to2;
            Mat matchesMask_mat = matchesMask;

            features2d_Features2d_drawMatches_11(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj, matchColor.val[0], matchColor.val[1], matchColor.val[2], matchColor.val[3], singlePointColor.val[0], singlePointColor.val[1], singlePointColor.val[2], singlePointColor.val[3], matchesMask_mat.nativeObj);
        }
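Continuing the matching sketch from Example #8, the refined matches can be rendered with this overload; resultImg is a placeholder output Mat, Scalar.all(-1) requests random colors, and an empty MatOfByte mask draws every match:

    Mat resultImg = new Mat();
    Features2d.drawMatches(queryImg, queryKp, trainImg, trainKp, matches, resultImg,
                           Scalar.all(-1), Scalar.all(-1), new MatOfByte());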
Example #12
        //
        // C++:  void cv::drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_DMatch matches1to2, Mat& outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_char matchesMask = std::vector<char>(), DrawMatchesFlags flags = DrawMatchesFlags::DEFAULT)
        //

        //javadoc: drawMatches(img1, keypoints1, img2, keypoints2, matches1to2, outImg, matchColor, singlePointColor, matchesMask)
        public static void drawMatches(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, Mat outImg, Scalar matchColor, Scalar singlePointColor, MatOfByte matchesMask)
        {
            if (img1 != null)
            {
                img1.ThrowIfDisposed();
            }
            if (keypoints1 != null)
            {
                keypoints1.ThrowIfDisposed();
            }
            if (img2 != null)
            {
                img2.ThrowIfDisposed();
            }
            if (keypoints2 != null)
            {
                keypoints2.ThrowIfDisposed();
            }
            if (matches1to2 != null)
            {
                matches1to2.ThrowIfDisposed();
            }
            if (outImg != null)
            {
                outImg.ThrowIfDisposed();
            }
            if (matchesMask != null)
            {
                matchesMask.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat keypoints1_mat  = keypoints1;
            Mat keypoints2_mat  = keypoints2;
            Mat matches1to2_mat = matches1to2;
            Mat matchesMask_mat = matchesMask;
            features2d_Features2d_drawMatches_10(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj, matchColor.val[0], matchColor.val[1], matchColor.val[2], matchColor.val[3], singlePointColor.val[0], singlePointColor.val[1], singlePointColor.val[2], singlePointColor.val[3], matchesMask_mat.nativeObj);

            return;
#else
            return;
#endif
        }
Example #13
        public Dictionary <string, BitmapSource> GenerateQRCore(string inputMessage, QRCodeProperties qrCodeProps, QRColorMode colorMode)
        {
            outputImages      = new Dictionary <string, BitmapSource>();
            outputMats        = new Dictionary <string, Mat>();
            matSearchPixels   = new List <Point>();
            this.inputMessage = inputMessage;

            CalculateSearchPoint(qrCodeProps);

            if (colorMode == QRColorMode.Grayscale)
            {
                // Generate an empty (all-black) mat; cells for '1' bits are painted white below
                qrMatMono = Mat.Zeros(new Size(qrCodeProps.ImgSize.Width, qrCodeProps.ImgSize.Height), MatType.CV_8UC1);

                // Get mat indexer
                MatOfByte         mob1        = new MatOfByte(qrMatMono);
                MatIndexer <byte> indexerByte = mob1.GetIndexer();

                int stringIndex = -1;
                for (int y = 0; y < qrCodeProps.CellsPerDim * qrCodeProps.CellSize; y += qrCodeProps.CellSize)
                {
                    for (int x = 0; x < qrCodeProps.CellsPerDim * qrCodeProps.CellSize; x += qrCodeProps.CellSize)
                    {
                        // If message is done reading
                        if (++stringIndex + 1 > inputMessage.Length)
                        {
                            break;
                        }

                        // If bit is 0, skip this cell
                        if (inputMessage[stringIndex].Equals('0'))
                        {
                            continue;
                        }

                        // If bit is 1, color the cell
                        else if (inputMessage[stringIndex].Equals('1'))
                        {
                            for (int i = y; i < y + qrCodeProps.CellSize; i++)
                            {
                                for (int j = x; j < x + qrCodeProps.CellSize; j++)
                                {
                                    indexerByte[i, j] = LUM_INTENSITY_MAX;
                                }
                            }
                        }
                    }
                }

                // Add image and mat to output lists
                outputImages.Add(QR_TYPE_MONOCHROME, Utils.MatToImage(qrMatMono));
                outputMats.Add(QR_TYPE_MONOCHROME, qrMatMono);

                // Return images to UI
                return(outputImages);
            }
            else if (colorMode == QRColorMode.Color)
            {
                // Generate empty (all-black) mats; one color component is painted into each below
                Mat qrRedMat   = Mat.Zeros(new Size(qrCodeProps.ImgSize.Width, qrCodeProps.ImgSize.Height), MatType.CV_8UC3);
                Mat qrGreenMat = Mat.Zeros(new Size(qrCodeProps.ImgSize.Width, qrCodeProps.ImgSize.Height), MatType.CV_8UC3);
                Mat qrBlueMat  = Mat.Zeros(new Size(qrCodeProps.ImgSize.Width, qrCodeProps.ImgSize.Height), MatType.CV_8UC3);

                // Get mat indexers
                MatOfByte3 mobRed   = new MatOfByte3(qrRedMat);
                MatOfByte3 mobGreen = new MatOfByte3(qrGreenMat);
                MatOfByte3 mobBlue  = new MatOfByte3(qrBlueMat);

                MatIndexer <Vec3b> indexerMobRed   = mobRed.GetIndexer();
                MatIndexer <Vec3b> indexerMobGreen = mobGreen.GetIndexer();
                MatIndexer <Vec3b> indexerMobBlue  = mobBlue.GetIndexer();

                // Split message thrice
                int    bitsPerChar     = 7;
                int    messageChars    = inputMessage.Length / bitsPerChar;
                string messageForRed   = string.Empty;
                string messageForGreen = string.Empty;
                string messageForBlue  = string.Empty;

                for (int i = 0; i < messageChars; i++)
                {
                    if (i % 3 == 0)
                    {
                        for (int j = 0; j < bitsPerChar; j++)
                        {
                            messageForRed += inputMessage[(i * bitsPerChar) + j];
                        }
                    }

                    else if (i % 3 == 1)
                    {
                        for (int j = 0; j < bitsPerChar; j++)
                        {
                            messageForGreen += inputMessage[(i * bitsPerChar) + j];
                        }
                    }

                    else if (i % 3 == 2)
                    {
                        for (int j = 0; j < bitsPerChar; j++)
                        {
                            messageForBlue += inputMessage[(i * bitsPerChar) + j];
                        }
                    }
                }

                indexerMobRed   = WriteColorComponent(messageForRed, qrCodeProps, indexerMobRed, COLOR_RED);
                indexerMobGreen = WriteColorComponent(messageForGreen, qrCodeProps, indexerMobGreen, COLOR_GREEN);
                indexerMobBlue  = WriteColorComponent(messageForBlue, qrCodeProps, indexerMobBlue, COLOR_BLUE);

                Mat combinedMat = qrRedMat + qrGreenMat + qrBlueMat;

                // Add image and mats to output lists
                outputImages.Add(QR_TYPE_COMBINED, Utils.MatToImage(combinedMat));
                outputImages.Add(QR_TYPE_RED, Utils.MatToImage(qrRedMat));
                outputImages.Add(QR_TYPE_GREEN, Utils.MatToImage(qrGreenMat));
                outputImages.Add(QR_TYPE_BLUE, Utils.MatToImage(qrBlueMat));
                outputMats.Add(QR_TYPE_COMBINED, combinedMat);
                outputMats.Add(QR_TYPE_RED, qrRedMat);
                outputMats.Add(QR_TYPE_GREEN, qrGreenMat);
                outputMats.Add(QR_TYPE_BLUE, qrBlueMat);

                return(outputImages);
            }

            return(null);
        }
Example #14
        public Dictionary <string, string> DecodeQRCode(QRCodeProperties qrCodeProps, QRColorMode colorMode)
        {
            outputMessages = new Dictionary <string, string>();

            if (colorMode == QRColorMode.Grayscale)
            {
                string decodedBinary = string.Empty;
                outputMats.TryGetValue(QR_TYPE_MONOCHROME, out Mat qrMatMono);

                // Get mat indexer
                MatOfByte         mob1        = new MatOfByte(qrMatMono);
                MatIndexer <byte> indexerByte = mob1.GetIndexer();

                // Read decoded strings
                int     bitsPerChar   = 7;
                int     messageLength = inputMessage.Length;
                decimal messageChars  = messageLength / bitsPerChar;

                // Read decoded binary
                for (int i = 0; i < messageChars; i++)
                {
                    decodedBinary += ReadMonochromePixels(indexerByte, i, bitsPerChar);
                }

                // Add decoded message to the output list
                outputMessages.Add(QR_TYPE_MONOCHROME, decodedBinary);

                return(outputMessages);
            }
            else if (colorMode == QRColorMode.Color)
            {
                string decodedBinaryRed      = string.Empty;
                string decodedBinaryGreen    = string.Empty;
                string decodedBinaryBlue     = string.Empty;
                string decodedBinaryCombined = string.Empty;

                outputMats.TryGetValue(QR_TYPE_COMBINED, out Mat qrMatCombined);

                // Get mat indexer
                MatOfByte3         mobComb        = new MatOfByte3(qrMatCombined);
                MatIndexer <Vec3b> indexerMobComb = mobComb.GetIndexer();

                // Read decoded strings
                int     bitsPerChar          = 7;
                int     messageLength        = inputMessage.Length;
                decimal messageChars         = messageLength / bitsPerChar;
                int     coloredMessageLength = (int)Math.Ceiling(messageChars / 3);

                for (int i = 0; i < coloredMessageLength; i++)
                {
                    string tempRed   = ReadColorPixels(indexerMobComb, QR_TYPE_RED, i, bitsPerChar);
                    string tempGreen = ReadColorPixels(indexerMobComb, QR_TYPE_GREEN, i, bitsPerChar);
                    string tempBlue  = ReadColorPixels(indexerMobComb, QR_TYPE_BLUE, i, bitsPerChar);

                    decodedBinaryRed   += tempRed;
                    decodedBinaryGreen += tempGreen;
                    decodedBinaryBlue  += tempBlue;

                    decodedBinaryCombined += tempRed;
                    decodedBinaryCombined += tempGreen;
                    decodedBinaryCombined += tempBlue;
                }

                // Add output messages
                outputMessages.Add(QR_TYPE_RED, decodedBinaryRed);
                outputMessages.Add(QR_TYPE_GREEN, decodedBinaryGreen);
                outputMessages.Add(QR_TYPE_BLUE, decodedBinaryBlue);
                outputMessages.Add(QR_TYPE_COMBINED, decodedBinaryCombined);

                return(outputMessages);
            }

            return(null);
        }
Example #15
        //javadoc: calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts, status, err, winSize, maxLevel, criteria)
        public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel, TermCriteria criteria)
        {
            if (prevImg != null)
            {
                prevImg.ThrowIfDisposed();
            }
            if (nextImg != null)
            {
                nextImg.ThrowIfDisposed();
            }
            if (prevPts != null)
            {
                prevPts.ThrowIfDisposed();
            }
            if (nextPts != null)
            {
                nextPts.ThrowIfDisposed();
            }
            if (status != null)
            {
                status.ThrowIfDisposed();
            }
            if (err != null)
            {
                err.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat prevPts_mat = prevPts;
            Mat nextPts_mat = nextPts;
            Mat status_mat  = status;
            Mat err_mat     = err;
            video_Video_calcOpticalFlowPyrLK_12(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel, criteria.type, criteria.maxCount, criteria.epsilon);

            return;
#else
            return;
#endif
        }
Example #16
        //javadoc: calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts, status, err)
        public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err)
        {
            if (prevImg != null)
            {
                prevImg.ThrowIfDisposed();
            }
            if (nextImg != null)
            {
                nextImg.ThrowIfDisposed();
            }
            if (prevPts != null)
            {
                prevPts.ThrowIfDisposed();
            }
            if (nextPts != null)
            {
                nextPts.ThrowIfDisposed();
            }
            if (status != null)
            {
                status.ThrowIfDisposed();
            }
            if (err != null)
            {
                err.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat prevPts_mat = prevPts;
            Mat nextPts_mat = nextPts;
            Mat status_mat  = status;
            Mat err_mat     = err;
            video_Video_calcOpticalFlowPyrLK_15(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj);

            return;
#else
            return;
#endif
        }
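A minimal sparse-flow sketch built on the wrappers above (OpenCV for Unity style; prevGray and currGray are assumed consecutive 8-bit grayscale frames):

    // Seed points in the previous frame, then track them into the current one
    MatOfPoint corners = new MatOfPoint();
    Imgproc.goodFeaturesToTrack(prevGray, corners, 100, 0.05, 20);

    MatOfPoint2f prevPts = new MatOfPoint2f(corners.toArray());
    MatOfPoint2f nextPts = new MatOfPoint2f();
    MatOfByte status = new MatOfByte();   // 1 where a point was tracked successfully
    MatOfFloat err = new MatOfFloat();

    Video.calcOpticalFlowPyrLK(prevGray, currGray, prevPts, nextPts, status, err);

    byte[] statusArr = status.toArray();
    Point[] tracked = nextPts.toArray();
    for (int i = 0; i < statusArr.Length; i++)
    {
        if (statusArr [i] == 1)
        {
            // tracked [i] is the new position of the i-th seed point
        }
    }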
Example #17
        private IEnumerator init()
        {
            if (webCamTexture != null)
            {
                webCamTexture.Stop();
                initDone = false;

                rgbaMat.Dispose();

                matOpFlowThis.Dispose();
                matOpFlowPrev.Dispose();
                MOPcorners.Dispose();
                mMOP2fptsThis.Dispose();
                mMOP2fptsPrev.Dispose();
                mMOP2fptsSafe.Dispose();
                mMOBStatus.Dispose();
                mMOFerr.Dispose();
            }

            // Checks how many and which cameras are available on the device
            for (int cameraIndex = 0; cameraIndex < WebCamTexture.devices.Length; cameraIndex++)
            {
                if (WebCamTexture.devices [cameraIndex].isFrontFacing == shouldUseFrontFacing)
                {
                    Debug.Log(cameraIndex + " name " + WebCamTexture.devices [cameraIndex].name + " isFrontFacing " + WebCamTexture.devices [cameraIndex].isFrontFacing);

                    webCamDevice = WebCamTexture.devices [cameraIndex];

                    webCamTexture = new WebCamTexture(webCamDevice.name, width, height);


                    break;
                }
            }

            if (webCamTexture == null)
            {
                webCamDevice  = WebCamTexture.devices [0];
                webCamTexture = new WebCamTexture(webCamDevice.name, width, height);
            }

            Debug.Log("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);



            // Starts the camera
            webCamTexture.Play();
            while (true)
            {
                // On iOS, webCamTexture.width/height stay at 16 until didUpdateThisFrame is true
                // (http://forum.unity3d.com/threads/webcamtexture-and-error-0x0502.123922/)
#if UNITY_IOS && !UNITY_EDITOR && (UNITY_4_6_3 || UNITY_4_6_4 || UNITY_5_0_0 || UNITY_5_0_1)
                if (webCamTexture.width > 16 && webCamTexture.height > 16)
                {
#else
                if (webCamTexture.didUpdateThisFrame)
                {
#endif

                    Debug.Log("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);
                    Debug.Log("videoRotationAngle " + webCamTexture.videoRotationAngle + " videoVerticallyMirrored " + webCamTexture.videoVerticallyMirrored + " isFrongFacing " + webCamDevice.isFrontFacing);

                    colors = new Color32[webCamTexture.width * webCamTexture.height];

                    rgbaMat = new Mat(webCamTexture.height, webCamTexture.width, CvType.CV_8UC4);

                    matOpFlowThis = new Mat();
                    matOpFlowPrev = new Mat();
                    MOPcorners    = new MatOfPoint();
                    mMOP2fptsThis = new MatOfPoint2f();
                    mMOP2fptsPrev = new MatOfPoint2f();
                    mMOP2fptsSafe = new MatOfPoint2f();
                    mMOBStatus    = new MatOfByte();
                    mMOFerr       = new MatOfFloat();

                    texture = new Texture2D(webCamTexture.width, webCamTexture.height, TextureFormat.RGBA32, false);

                    gameObject.GetComponent <Renderer> ().material.mainTexture = texture;

                    updateLayout();

                    screenOrientation = Screen.orientation;
                    initDone          = true;

                    break;
                }
                else
                {
                    yield return(0);
                }
            }
        }
Example #18
        public static SWIGTYPE_p_cv__Mat_T_uchar_t ToSwig(this MatOfByte obj)
        {
            return new SWIGTYPE_p_cv__Mat_T_uchar_t(obj.CvPtr);
        }
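A short sketch of where this helper fits, assuming the SWIG-generated LandmarkDetector bindings from the tracking example earlier (frame is an assumed BGR input Mat):

        var gray = new MatOfByte();
        Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY);

        // Wraps the same native cv::Mat_<uchar> pointer for the SWIG-generated API; no pixel copy is made
        SWIGTYPE_p_cv__Mat_T_uchar_t swigMat = gray.ToSwig();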
Example #19
        /// <summary>
        /// Binarizes by Bernsen's method
        /// </summary>
        /// <param name="src">Input image</param>
        /// <param name="dst">Output image</param>
        /// <param name="kernelSize">Window size</param>
        /// <param name="constrastMin">Regions whose local contrast is below this value are treated as background</param>
        /// <param name="bgThreshold">Threshold applied to background regions (elsewhere the threshold is computed adaptively)</param>
        public static void Bernsen(Mat src, Mat dst, int kernelSize, byte constrastMin, byte bgThreshold)
        {
            if (src == null)
                throw new ArgumentNullException(nameof(src));
            if (dst == null)
                throw new ArgumentNullException(nameof(dst));

            // Grayscale images only
            if (src.Type() != MatType.CV_8UC1)
                throw new ArgumentException("src must be a grayscale image");
            if (dst.Type() != MatType.CV_8UC1)
                throw new ArgumentException("dst must be a grayscale image");

            // Validate the kernel size
            if (kernelSize < 3)
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be 3 or greater");
            if (kernelSize % 2 == 0)
                throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be an odd number");

            int width = src.Width;
            int height = src.Height;
            dst.Create(src.Size(), src.Type());

            using (var tSrcMat = new MatOfByte(src))
            using (var tDstMat = new MatOfByte(dst))
            {
                var tSrc = tSrcMat.GetIndexer();
                var tDst = tDstMat.GetIndexer();

                for (int y = 0; y < height; y++)
                {
                    for (int x = 0; x < width; x++)
                    {
                        byte min, max;
                        MinMax(src, x, y, kernelSize, out min, out max);

                        int contrast = max - min;
                        byte threshold;
                        if (contrast < constrastMin)
                            threshold = bgThreshold;
                        else
                            threshold = (byte)((max + min) / 2);

                        if (tSrc[y, x] <= threshold)
                            tDst[y, x] = 0;
                        else
                            tDst[y, x] = 255;
                    }
                }
            }
        }
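
A minimal usage sketch for the Bernsen binarizer above, assuming the method lives in a static `Binarizer` class; the class name and file names are illustrative assumptions:

        // Illustrative only: `Binarizer` and the file names are assumptions.
        using (var src = new Mat("scan.png", ImreadModes.GrayScale))
        using (var dst = new Mat(src.Size(), MatType.CV_8UC1))
        {
            // 31x31 window; regions with contrast below 15 fall back to a fixed threshold of 127
            Binarizer.Bernsen(src, dst, kernelSize: 31, constrastMin: 15, bgThreshold: 127);
            dst.SaveImage("scan_bin.png");
        }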
Example #20
        private IEnumerator init()
        {
            if (webCamTexture != null)
            {
                webCamTexture.Stop();
                initDone = false;

                rgbaMat.Dispose();

                matOpFlowThis.Dispose();
                matOpFlowPrev.Dispose();
                MOPcorners.Dispose();
                mMOP2fptsThis.Dispose();
                mMOP2fptsPrev.Dispose();
                mMOP2fptsSafe.Dispose();
                mMOBStatus.Dispose();
                mMOFerr.Dispose();
            }

            // Checks how many and which cameras are available on the device
            for (int cameraIndex = 0; cameraIndex < WebCamTexture.devices.Length; cameraIndex++)
            {
                if (WebCamTexture.devices[cameraIndex].isFrontFacing == shouldUseFrontFacing)
                {
                    Debug.Log(cameraIndex + " name " + WebCamTexture.devices[cameraIndex].name + " isFrontFacing " + WebCamTexture.devices[cameraIndex].isFrontFacing);

                    webCamDevice = WebCamTexture.devices[cameraIndex];
                    webCamTexture = new WebCamTexture(webCamDevice.name, width, height);

                    break;
                }
            }

            if (webCamTexture == null)
            {
                webCamDevice = WebCamTexture.devices[0];
                webCamTexture = new WebCamTexture(webCamDevice.name, width, height);
            }

            Debug.Log("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);

            // Starts the camera
            webCamTexture.Play();
            while (true)
            {
                // On iOS, webCamTexture.width and webCamTexture.height stay at 16 until the first
                // frame has arrived, so wait for didUpdateThisFrame before reading them.
                // (http://forum.unity3d.com/threads/webcamtexture-and-error-0x0502.123922/)
                #if UNITY_IOS && !UNITY_EDITOR && (UNITY_4_6_3 || UNITY_4_6_4 || UNITY_5_0_0 || UNITY_5_0_1)
                if (webCamTexture.width > 16 && webCamTexture.height > 16) {
                #else
                if (webCamTexture.didUpdateThisFrame) {
                    #if UNITY_IOS && !UNITY_EDITOR && UNITY_5_2
                    while (webCamTexture.width <= 16)
                    {
                        webCamTexture.GetPixels32();
                        yield return new WaitForEndOfFrame();
                    }
                    #endif
                #endif

                    Debug.Log("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);
                    Debug.Log("videoRotationAngle " + webCamTexture.videoRotationAngle + " videoVerticallyMirrored " + webCamTexture.videoVerticallyMirrored + " isFrontFacing " + webCamDevice.isFrontFacing);

                    colors = new Color32[webCamTexture.width * webCamTexture.height];

                    rgbaMat = new Mat(webCamTexture.height, webCamTexture.width, CvType.CV_8UC4);

                    matOpFlowThis = new Mat();
                    matOpFlowPrev = new Mat();
                    MOPcorners = new MatOfPoint();
                    mMOP2fptsThis = new MatOfPoint2f();
                    mMOP2fptsPrev = new MatOfPoint2f();
                    mMOP2fptsSafe = new MatOfPoint2f();
                    mMOBStatus = new MatOfByte();
                    mMOFerr = new MatOfFloat();

                    texture = new Texture2D(webCamTexture.width, webCamTexture.height, TextureFormat.RGBA32, false);

                    gameObject.GetComponent<Renderer>().material.mainTexture = texture;

                    updateLayout();

                    screenOrientation = Screen.orientation;
                    initDone = true;

                    break;
                }
                else
                {
                    yield return 0;
                }
            }
        }
Example #21
        /// <summary>
        /// Computes the mean and standard deviation of the pixels around the pixel of interest.
        /// </summary>
        /// <param name="img">Image pixel data</param>
        /// <param name="x">X coordinate</param>
        /// <param name="y">Y coordinate</param>
        /// <param name="size">Size of the neighborhood window; must be odd</param>
        /// <param name="mean">Resulting mean</param>
        /// <param name="stddev">Resulting standard deviation</param>
        private static void MeanStddev(Mat img, int x, int y, int size, out double mean, out double stddev)
        {
            int count = 0;
            long sum = 0;
            long sqsum = 0; // long: the sum of squares can overflow int for large windows
            int size2 = size / 2;
            int xs = Math.Max(x - size2, 0);
            int xe = Math.Min(x + size2 + 1, img.Width);  // +1 so the window spans the full size x size region
            int ys = Math.Max(y - size2, 0);
            int ye = Math.Min(y + size2 + 1, img.Height);
            byte v;

            // NOTE: wrapping the Mat in a new MatOfByte here means callers such as
            // Niblack pay the wrapping cost once per pixel.
            using (var tImg = new MatOfByte(img))
            {
                var indexer = tImg.GetIndexer();
                for (int xx = xs; xx < xe; xx++)
                {
                    for (int yy = ys; yy < ye; yy++)
                    {
                        v = indexer[yy, xx];
                        sum += v;
                        sqsum += v*v;
                        count++;
                    }
                }
            }

            mean = (double)sum / count;
            double var = ((double)sqsum / count) - (mean * mean);
            if (var < 0.0) var = 0.0;
            stddev = Math.Sqrt(var);
        }
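
A quick sanity-check sketch for MeanStddev: for an interior pixel, its result should agree with OpenCV's own Cv2.MeanStdDev computed over the same window. The file name and coordinates are assumptions, and MeanStddev is assumed accessible from the calling scope (it is private above):

        // Illustrative check; "input.png" and the coordinates are assumptions.
        using (var img = new Mat("input.png", ImreadModes.GrayScale))
        {
            int x = 50, y = 50, size = 15;
            double m, s;
            MeanStddev(img, x, y, size, out m, out s);

            // Measure the same 15x15 window with OpenCV's built-in routine.
            var roi = new Rect(x - size / 2, y - size / 2, size, size);
            using (var window = new Mat(img, roi))
            {
                Scalar mean, stddev;
                Cv2.MeanStdDev(window, out mean, out stddev);
                Console.WriteLine("local " + m + "/" + s + " vs OpenCV " + mean.Val0 + "/" + stddev.Val0);
            }
        }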
Example #22
        private IEnumerator init()
        {
            if (webCamTexture != null)
            {
                webCamTexture.Stop();
                initDone = false;

                rgbaMat.Dispose();

                matOpFlowThis.Dispose();
                matOpFlowPrev.Dispose();
                MOPcorners.Dispose();
                mMOP2fptsThis.Dispose();
                mMOP2fptsPrev.Dispose();
                mMOP2fptsSafe.Dispose();
                mMOBStatus.Dispose();
                mMOFerr.Dispose();
            }

            // Checks how many and which cameras are available on the device
            for (int cameraIndex = 0; cameraIndex < WebCamTexture.devices.Length; cameraIndex++)
            {
                if (WebCamTexture.devices[cameraIndex].isFrontFacing == isFrontFacing)
                {
                    Debug.Log(cameraIndex + " name " + WebCamTexture.devices[cameraIndex].name + " isFrontFacing " + WebCamTexture.devices[cameraIndex].isFrontFacing);

                    webCamDevice = WebCamTexture.devices[cameraIndex];
                    webCamTexture = new WebCamTexture(webCamDevice.name, width, height);

                    break;
                }
            }

            if (webCamTexture == null)
            {
                webCamDevice = WebCamTexture.devices[0];
                webCamTexture = new WebCamTexture(webCamDevice.name, width, height);
            }

            Debug.Log("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);

            // Starts the camera
            webCamTexture.Play();
            while (true)
            {
                // On iOS, webCamTexture.width and webCamTexture.height stay at 16 until the first
                // frame has arrived, so wait for didUpdateThisFrame before reading them.
                // (http://forum.unity3d.com/threads/webcamtexture-and-error-0x0502.123922/)
                #if UNITY_IOS && !UNITY_EDITOR && (UNITY_4_6_3 || UNITY_4_6_4 || UNITY_5_0_0 || UNITY_5_0_1)
                if (webCamTexture.width > 16 && webCamTexture.height > 16) {
                #else
                if (webCamTexture.didUpdateThisFrame) {
                #endif

                    Debug.Log("width " + webCamTexture.width + " height " + webCamTexture.height + " fps " + webCamTexture.requestedFPS);
                    Debug.Log("videoRotationAngle " + webCamTexture.videoRotationAngle + " videoVerticallyMirrored " + webCamTexture.videoVerticallyMirrored + " isFrontFacing " + webCamDevice.isFrontFacing);

                    colors = new Color32[webCamTexture.width * webCamTexture.height];

                    rgbaMat = new Mat(webCamTexture.height, webCamTexture.width, CvType.CV_8UC4);

                    matOpFlowThis = new Mat();
                    matOpFlowPrev = new Mat();
                    MOPcorners = new MatOfPoint();
                    mMOP2fptsThis = new MatOfPoint2f();
                    mMOP2fptsPrev = new MatOfPoint2f();
                    mMOP2fptsSafe = new MatOfPoint2f();
                    mMOBStatus = new MatOfByte();
                    mMOFerr = new MatOfFloat();

                    texture = new Texture2D(webCamTexture.width, webCamTexture.height, TextureFormat.RGBA32, false);

                    gameObject.transform.eulerAngles = new Vector3(0, 0, 0);
                    #if (UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR
                    gameObject.transform.eulerAngles = new Vector3(0, 0, -90);
                    #endif
                    //gameObject.transform.rotation = gameObject.transform.rotation * Quaternion.AngleAxis (webCamTexture.videoRotationAngle, Vector3.back);

                    gameObject.transform.localScale = new Vector3(webCamTexture.width, webCamTexture.height, 1);

                    //bool videoVerticallyMirrored = webCamTexture.videoVerticallyMirrored;
                    //float scaleX = 1;
                    //float scaleY = videoVerticallyMirrored ? -1.0f : 1.0f;
                    //if (webCamTexture.videoRotationAngle == 270)
                    //    scaleY = -1.0f;
                    //gameObject.transform.localScale = new Vector3 (scaleX * gameObject.transform.localScale.x, scaleY * gameObject.transform.localScale.y, 1);

                    gameObject.GetComponent<Renderer>().material.mainTexture = texture;

                    #if (UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR
                    Camera.main.orthographicSize = webCamTexture.width / 2;
                    #else
                    Camera.main.orthographicSize = webCamTexture.height / 2;
                    #endif

                    initDone = true;

                    break;
                }
                else
                {
                    yield return 0;
                }
            }
        }

        // Update is called once per frame
        void Update()
        {
            if (!initDone)
                return;

            #if UNITY_IOS && !UNITY_EDITOR && (UNITY_4_6_3 || UNITY_4_6_4 || UNITY_5_0_0 || UNITY_5_0_1)
            if (webCamTexture.width > 16 && webCamTexture.height > 16) {
            #else
            if (webCamTexture.didUpdateThisFrame) {
            #endif

                Utils.webCamTextureToMat(webCamTexture, rgbaMat, colors);

                // Compensate for the camera's rotation/mirroring so the Mat is upright.
                // The flips needed are the same whether or not the frame is vertically
                // mirrored in this sample, so both cases are handled together.
                if (webCamDevice.isFrontFacing)
                {
                    if (webCamTexture.videoRotationAngle == 0)
                        Core.flip(rgbaMat, rgbaMat, 1);
                    else if (webCamTexture.videoRotationAngle == 90)
                        Core.flip(rgbaMat, rgbaMat, 0);
                    else if (webCamTexture.videoRotationAngle == 270)
                        Core.flip(rgbaMat, rgbaMat, 1);
                }
                else
                {
                    if (webCamTexture.videoRotationAngle == 270)
                        Core.flip(rgbaMat, rgbaMat, -1);
                }

                if (mMOP2fptsPrev.rows() == 0)
                {
                    // first time through the loop, so we need the prev and this mats
                    // plus the prev points
                    // get this mat
                    Imgproc.cvtColor(rgbaMat, matOpFlowThis, Imgproc.COLOR_RGBA2GRAY);

                    // copy that to the prev mat
                    matOpFlowThis.copyTo(matOpFlowPrev);

                    // get prev corners
                    Imgproc.goodFeaturesToTrack(matOpFlowPrev, MOPcorners, iGFFTMax, 0.05, 20);
                    mMOP2fptsPrev.fromArray(MOPcorners.toArray());

                    // get a safe copy of these corners
                    mMOP2fptsPrev.copyTo(mMOP2fptsSafe);
                }
                else
                {
                    // we've been through before, so this mat is valid; copy it to the prev mat
                    matOpFlowThis.copyTo(matOpFlowPrev);

                    // get this mat
                    Imgproc.cvtColor(rgbaMat, matOpFlowThis, Imgproc.COLOR_RGBA2GRAY);

                    // get the corners for this mat
                    Imgproc.goodFeaturesToTrack(matOpFlowThis, MOPcorners, iGFFTMax, 0.05, 20);
                    mMOP2fptsThis.fromArray(MOPcorners.toArray());

                    // retrieve the corners from the prev mat (saves calculating them again)
                    mMOP2fptsSafe.copyTo(mMOP2fptsPrev);

                    // and save these corners for the next time through
                    mMOP2fptsThis.copyTo(mMOP2fptsSafe);
                }

                /*
                Parameters:
                prevImg  first 8-bit input image
                nextImg  second input image
                prevPts  vector of 2D points for which the flow needs to be found; point coordinates must be single-precision floating-point numbers.
                nextPts  output vector of 2D points (with single-precision floating-point coordinates) containing the calculated new positions of input features in the second image; when the OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as the input.
                status   output status vector (of unsigned chars); each element of the vector is set to 1 if the flow for the corresponding feature has been found, otherwise it is set to 0.
                err      output vector of errors; each element is set to an error for the corresponding feature; the type of the error measure can be set in the flags parameter; if the flow wasn't found the error is undefined (use the status parameter to find such cases).
                */
                Video.calcOpticalFlowPyrLK(matOpFlowPrev, matOpFlowThis, mMOP2fptsPrev, mMOP2fptsThis, mMOBStatus, mMOFerr);

                if (!mMOBStatus.empty())
                {
                    List<Point> cornersPrev = mMOP2fptsPrev.toList();
                    List<Point> cornersThis = mMOP2fptsThis.toList();
                    List<byte> byteStatus = mMOBStatus.toList();

                    // draw each successfully tracked point and a line back to its previous position
                    for (int i = 0; i < byteStatus.Count; i++)
                    {
                        if (byteStatus[i] == 1)
                        {
                            Point pt = cornersThis[i];
                            Point pt2 = cornersPrev[i];

                            Core.circle(rgbaMat, pt, 5, colorRed, iLineThickness - 1);
                            Core.line(rgbaMat, pt, pt2, colorRed, iLineThickness);
                        }
                    }
                }

                Utils.matToTexture2D(rgbaMat, texture, colors);

                gameObject.GetComponent<Renderer>().material.mainTexture = texture;
            }
        }

        void OnDisable()
        {
            webCamTexture.Stop();
        }

        void OnGUI()
        {
            float screenScale = Screen.width / 240.0f;
            Matrix4x4 scaledMatrix = Matrix4x4.Scale(new Vector3(screenScale, screenScale, screenScale));
            GUI.matrix = scaledMatrix;

            GUILayout.BeginVertical();
            if (GUILayout.Button("back"))
            {
                Application.LoadLevel("OpenCVForUnitySample");
            }
            if (GUILayout.Button("change camera"))
            {
                isFrontFacing = !isFrontFacing;
                StartCoroutine(init());
            }

            GUILayout.EndVertical();
        }
    }
}
Example #23
        public static FormExtractionResult ProcessImage(string filename, FormExtractionOptions options = null)
        {
            if (options == null)
            {
                // Assume recommended parameters.
                options = new FormExtractionOptions();
            }

            var orig  = new Mat(filename);
            var image = new Mat(filename, ImreadModes.GrayScale);

            Cv2.AdaptiveThreshold(image, image, 255, AdaptiveThresholdTypes.MeanC, ThresholdTypes.Binary, 9, 4);

            // Resize image if too large.
            if (image.Width > options.ResizeWidth)
            {
                var height = options.ResizeWidth * image.Height / image.Width;
                Cv2.Resize(image, image, new Size(options.ResizeWidth, height));
            }

            Cv2.BitwiseNot(image, image);
            Cv2.Dilate(image, image, Cv2.GetStructuringElement(MorphShapes.Cross, new Size(2, 2)));

            MatOfByte        mat     = new MatOfByte(image);
            MatIndexer<byte> indexer = mat.GetIndexer();

            var row      = image.Height;
            var col      = image.Width;
            Mat newImage = new Mat(row, col, MatType.CV_8UC3);

            newImage.SetTo(Scalar.Black);

            // We must determine if it "may" be an interesting blob.
            Stopwatch watch = new Stopwatch();

            watch.Start();

            // Flatten the image in column-major order (index = y + x * row),
            // presumably the layout HasBoxes expects.
            int[] imgData = new int[row * col];
            for (int y = 0; y < row; y++)
            {
                for (int x = 0; x < col; x++)
                {
                    imgData[y + x * row] = indexer[y, x];
                }
            }

            var result = HasBoxes(imgData, row, col, options);

            watch.Stop();
            result.Duration = watch.Elapsed;

            // Preview
            if (result.Boxes.Any() && image.Width != 0 && options.ShowDebugImage)
            {
                var img = CreateImage(result.DebugImg, hasColor: true);
                Cv2.BitwiseOr(newImage, img, newImage);

                Cv2.BitwiseNot(image, image);
                int width  = 400;
                var height = width * image.Height / image.Width;
                Cv2.Resize(orig, orig, new Size(width, height));
                Cv2.Resize(image, image, new Size(width, height));
                Cv2.Resize(newImage, newImage, new Size(width, height));

                using (new Window("orig", orig))
                    using (new Window("pre", image))
                        using (new Window("post", newImage))
                        {
                            Cv2.WaitKey();
                            Cv2.DestroyAllWindows();
                        }
            }

            // Dispose.
            orig.Dispose();
            image.Dispose();
            newImage.Dispose();
            mat.Dispose();

            return result;
        }
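
A hedged call-site sketch for ProcessImage: the containing class name `FormExtraction` and the file name are assumptions, while the ShowDebugImage option and the Boxes and Duration result members are the ones used in the code above (Count() requires System.Linq):

        // Hypothetical call site: `FormExtraction` and "form.png" are assumptions.
        var options = new FormExtractionOptions { ShowDebugImage = true };
        var result = FormExtraction.ProcessImage("form.png", options);
        Console.WriteLine("boxes: " + result.Boxes.Count() + ", took " + result.Duration);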