public void cariX(IplImage imgSrc, ref int min, ref int max)
        {
            bool minTemu = false;

            data = new CvMat();

            CvScalar maxVal = cvlib.cvRealScalar(imgSrc.width * 255);
            CvScalar val = cvlib.cvRealScalar(0);

            //For each column, compute the sum; if sum < width * 255 then we have found min,
            //then continue to the end: whenever sum < width * 255 we have found a new max
            for (int i = 0; i < imgSrc.width; i++)
            {
                cvlib.cvGetCol( imgSrc,  data, i); //col
                val = cvlib.cvSum( data);
                if (val.Val < maxVal.Val)
                {
                    max = i;
                    if (!minTemu)
                    {
                        min = i;
                        minTemu = true;
                    }
                }
            }
        }
        public FlannColoredModelPoints(List<Tuple<CvPoint3D64f, CvColor>> modelPoints, IndexParams indexParams, SearchParams searchParams, double colorScale)
        {
            _modelPoints = modelPoints;

            _modelMat = new CvMat(_modelPoints.Count, 6, MatrixType.F32C1);
            unsafe
            {
                float* modelArr = _modelMat.DataSingle;
                foreach (var tuple in _modelPoints)
                {
                    *(modelArr++) = (float)tuple.Item1.X;
                    *(modelArr++) = (float)tuple.Item1.Y;
                    *(modelArr++) = (float)tuple.Item1.Z;
                    *(modelArr++) = (float)(tuple.Item2.R * colorScale / 255);
                    *(modelArr++) = (float)(tuple.Item2.G * colorScale / 255);
                    *(modelArr++) = (float)(tuple.Item2.B * colorScale / 255);
                }
            }
            _colorScale = colorScale;
            _modelDataMat = new Mat(_modelMat);
            _indexParam = indexParams;
            _searchParam = searchParams;
            _indexParam.IsEnabledDispose = false;
            _searchParam.IsEnabledDispose = false;
            _flannIndex = new Index(_modelDataMat, _indexParam);
        }
        public void Read(XmlElement me)
        {
            foreach (XmlNode node in me.ChildNodes)
            {
                if (node is XmlElement)
                {

                    if (node.Name == "rotation")
                    {
                        CvMat mat;
                        OpenCVUtil.Read(out mat, node as XmlElement);
                        Rotation = mat;
                    }
                    else if (node.Name == "translation")
                    {
                        CvMat mat;
                        OpenCVUtil.Read(out mat, node as XmlElement);
                        Translation = mat;
                    }
                    else if (node.Name == "intrinsic")
                    {
                        CvMat mat;
                        OpenCVUtil.Read(out mat, node as XmlElement);
                        Intrinsic = mat;
                    }
                    else if (node.Name == "distortion")
                    {
                        CvMat mat;
                        OpenCVUtil.Read(out mat, node as XmlElement);
                        Distortion = mat;
                    }
                }
            }
        }
        public void cariY(IplImage imgSrc, ref int min, ref int max)
        {
            bool minFound = false;

            data = new CvMat();

            CvScalar maxVal = cvlib.cvRealScalar(imgSrc.width * 255);
            CvScalar val = cvlib.cvRealScalar(0);

            //For each row, compute the sum; if sum < width * 255 then we have found min,
            //then continue to the end: whenever sum < width * 255 we have found a new max
            for (int i = 0; i < imgSrc.height; i++)
            {
                cvlib.cvGetRow( imgSrc,  data, i); //row
                val = cvlib.cvSum( data);
                if (val.val1 < maxVal.val1)
                {
                    max = i;
                    if (!minFound)
                    {
                        min = i;
                        minFound = true;
                    }
                }
            }
        }
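The two scans above only report the leftmost/rightmost column and topmost/bottommost row that contain dark pixels. A minimal usage sketch (an assumption, not part of the original snippet: it presumes a binarized white-background image and an OpenCvSharp-style CvRect(x, y, width, height) constructor) that turns those indices into a crop rectangle:

        public CvRect GetBoundingBox(IplImage binaryImg)
        {
            int xMin = 0, xMax = 0, yMin = 0, yMax = 0;
            cariX(binaryImg, ref xMin, ref xMax); // leftmost / rightmost column containing dark pixels
            cariY(binaryImg, ref yMin, ref yMax); // topmost / bottommost row containing dark pixels
            // min and max are inclusive indices, hence the +1
            return new CvRect(xMin, yMin, xMax - xMin + 1, yMax - yMin + 1);
        }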
 void CalcPoint(CvMat velx, CvMat vely, IplImage rez)
 {
     int sX = 0;
     int sY = 0;
     int coun = 0;
     for (int x = 0; x < imWidth; x += 10)
     {
         for (int y = 0; y < imHeight; y += 10)
         {
             int dx = (int)Cv.GetReal2D(velx, y, x);
             int dy = (int)Cv.GetReal2D(vely, y, x);
             if (dx > 15 || dy > 15)
             {
                 Cv.Line(rez, Cv.Point(x, y), Cv.Point(x + dx, y + dy), Cv.RGB(0, 0, 255), 1, Cv.AA, 0);
                 sX += x;
                 sY += y;
                 coun++;
             }
             if (dx < -15 || dy < -15)
             {
                 Cv.Line(rez, Cv.Point(x, y), Cv.Point(x + dx, y + dy), Cv.RGB(0, 255, 0), 1, Cv.AA, 0);
                 sX += x;
                 sY += y;
                 coun++;
             }
         }
     }
     if (coun > 10)
     {
         moveVec.Set(sX / coun, sY / coun, 0);
     }
 }
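For context, CalcPoint expects dense per-pixel velocity fields velx/vely. A hedged sketch of one common way to produce them with OpenCvSharp's Lucas-Kanade wrapper (prevGray, currGray, rez, imWidth and imHeight are assumed to exist and are not part of the snippet above):

     // Sketch only: dense LK optical flow between two 8-bit grayscale frames of size imWidth x imHeight.
     CvMat velx = new CvMat(imHeight, imWidth, MatrixType.F32C1);
     CvMat vely = new CvMat(imHeight, imWidth, MatrixType.F32C1);
     Cv.CalcOpticalFlowLK(prevGray, currGray, new CvSize(15, 15), velx, vely);
     CalcPoint(velx, vely, rez); // draw the flow vectors and update moveVec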
Example #6
        /// <summary>
        /// Training constructor
        /// </summary>
        /// <param name="trainData"></param>
        /// <param name="tflag"></param>
        /// <param name="responses"></param>
        /// <param name="varIdx"></param>
        /// <param name="sampleIdx"></param>
        /// <param name="varType"></param>
        /// <param name="missingMask"></param>
        /// <param name="param"></param>
        /// <param name="shared"></param>
        /// <param name="addLabels"></param>
        /// <returns></returns>
		public CvDTreeTrainData(
            CvMat trainData,
            DTreeDataLayout tflag, 
            CvMat responses, 
            CvMat varIdx = null, 
            CvMat sampleIdx = null, 
            CvMat varType = null, 
            CvMat missingMask = null,
			CvDTreeParams param = null, 
            bool shared = false, 
            bool addLabels = false)
		{
            if (trainData == null)
                throw new ArgumentNullException("trainData");
            if (responses == null)
                throw new ArgumentNullException("responses");
            trainData.ThrowIfDisposed();
            responses.ThrowIfDisposed();

            if(param == null)
				param = new CvDTreeParams();

            ptr = NativeMethods.ml_CvDTreeTrainData_new2(
				trainData.CvPtr, 
				(int)tflag, 
				responses.CvPtr, 
				Cv2.ToPtr(varIdx), 
                Cv2.ToPtr(sampleIdx),
                Cv2.ToPtr(varType), 
                Cv2.ToPtr(missingMask), 
				param.CvPtr, 
				shared ? 1 : 0, 
                addLabels ? 1 : 0
			);
		}
Example #7
        public static void MainLoop()
        {
            PROCESS.SetWindowPos(0, 0, 800, 600);
            Thread.Sleep(200);
            foreach (var image in ImageList)
            {
                CvMat screen = Utils.TakeScreenshot().ToMat().ToCvMat();
                Screenshot = new CvMat(screen.Rows, screen.Cols, MatrixType.U8C1);
                screen.CvtColor(Screenshot, ColorConversion.BgraToGray);

                Result =
                    Cv.CreateImage(Cv.Size(Screenshot.Width - image.Width + 1, Screenshot.Height - image.Height + 1),
                        BitDepth.F32, 1);
                Cv.MatchTemplate(Screenshot, image, Result, MatchTemplateMethod.CCoeffNormed);
                /*Screenshot.SaveImage("data/screenshot.png");
                image.SaveImage("data/image.png");*/
                Cv.Normalize(Result, Result, 0, 1, NormType.MinMax);
                Cv.MinMaxLoc(Result, out MinAcc, out MaxAcc, out MinPos, out MaxPos, null);
                Console.WriteLine(MaxAcc);
                if (MaxAcc >= 0.75)
                {
                    Position = new Point(MaxPos.X, MaxPos.Y);
                    Utils.MoveMouse(Position);
                    Thread.Sleep(15);
                    Utils.LeftClick();
                    Thread.Sleep(100);
                    MaxAcc = 0;
                }
                Result.Dispose();
            }
        }
 public static byte[] GetBytesFromData(CvMat mat)
 {
     int byteLength = mat.ElemDepth * mat.Cols * mat.Rows * mat.ElemChannels / 8;
     byte[] ret = new byte[byteLength];
     Marshal.Copy(mat.Data, ret, 0, byteLength);
     return ret;
 }
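A small companion sketch for GetBytesFromData: the reverse direction, copying the raw bytes back into a freshly allocated matrix of the same size and element type (a hypothetical helper, not part of the snippet above):

 public static CvMat CreateMatFromBytes(byte[] raw, int rows, int cols, MatrixType elemType)
 {
     CvMat mat = new CvMat(rows, cols, elemType);
     Marshal.Copy(raw, 0, mat.Data, raw.Length); // copy managed bytes into the unmanaged matrix buffer
     return mat;
 }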
Example #9
        /// <summary>
        /// Training constructor
        /// </summary>
        /// <param name="trainData"></param>
        /// <param name="responses"></param>
        /// <param name="varIdx"></param>
        /// <param name="sampleIdx"></param>
        /// <param name="param"></param>
		public CvSVM(
            CvMat trainData, 
            CvMat responses, 
            CvMat varIdx = null, 
            CvMat sampleIdx = null, 
            CvSVMParams param = null)
	    {
            if (trainData == null)
                throw new ArgumentNullException("trainData");
            if (responses == null)
                throw new ArgumentNullException("responses");

			if(param == null)
				param = new CvSVMParams();
            trainData.ThrowIfDisposed();
            responses.ThrowIfDisposed();

            ptr = NativeMethods.ml_CvSVM_new2_CvMat(
				trainData.CvPtr, 
				responses.CvPtr, 
				Cv2.ToPtr(varIdx), 
				Cv2.ToPtr(sampleIdx), 
				param.NativeStruct
			);
        }
Example #10
        public static void Read(out CvMat matrix, XmlElement element)
        {
            int cols = int.Parse(element.GetAttribute("cols"));
            int rows = int.Parse(element.GetAttribute("rows"));
            MatrixType matType = (MatrixType)Enum.Parse(typeof(MatrixType), element.GetAttribute("type"));
            string terms = element.GetAttribute("values");
            List<double> values = new List<double>();
            string[] words = terms.Split(',');
            foreach (string w in words)
                values.Add(double.Parse(w.Trim()));

            switch (matType)
            {
                case MatrixType.F32C1:
                    break;
                case MatrixType.F64C1:
                    break;
                default:
                    throw new Exception("Read unsupported MatrixType " + matType.ToString());
            }
            matrix = new CvMat(rows, cols, matType);

            // Fill the matrix popping values off
            for (int x = 0; x < cols; ++x)
            {
                for (int y = 0; y < rows; ++y)
                {
                    matrix.Set2D(x, y, new CvScalar(values[0]));
                    values.RemoveAt(0);
                }
            }
        }
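For reference, a hypothetical Write counterpart that produces exactly the attribute layout Read expects (cols/rows/type plus a comma-separated values list, mirroring the same Get2D/Set2D index order used above):

        public static void Write(CvMat matrix, XmlElement element)
        {
            element.SetAttribute("cols", matrix.Cols.ToString());
            element.SetAttribute("rows", matrix.Rows.ToString());
            element.SetAttribute("type", matrix.ElemType.ToString());

            List<string> values = new List<string>();
            for (int x = 0; x < matrix.Cols; ++x)
            {
                for (int y = 0; y < matrix.Rows; ++y)
                {
                    values.Add(matrix.Get2D(x, y).Val0.ToString());
                }
            }
            element.SetAttribute("values", string.Join(",", values));
        }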
 public SerializableCvMat(CvMat mat)
 {
     this.Cols = mat.Cols;
     this.Rows = mat.Rows;
     this.ElemType = mat.ElemType;
     this.Array = mat.ToArray();
 }
        static public Rotation RotationMatrixToEulerZXY(CvMat R)
        {
            var i = 2;
            var j = 0; // EULER_NEXT[2]
            var k = 1; // EULER_NEXT[3]

            var cos_beta = Math.Sqrt(Math.Pow(R[i, i], 2) + Math.Pow(R[j, i], 2));

            double alpha, beta, gamma;
            if (cos_beta > EULER_EPSILON)
            {
                alpha = Math.Atan2(R[k, j], R[k, k]);
                beta = Math.Atan2(-R[k, i], cos_beta);
                gamma = Math.Atan2(R[j, i], R[i, i]);
            }
            else
            {
                alpha = Math.Atan2(-R[j, k], R[j, j]);
                beta = Math.Atan2(-R[k, i], cos_beta);
                gamma = 0.0;
            }

            alpha = wrap_angles(alpha, 0.0, 2.0 * Math.PI); // Z
            beta = wrap_angles(beta, 0.0, 2.0 * Math.PI); // X
            gamma = wrap_angles(gamma, 0.0, 2.0 * Math.PI); // Y

            // errr... 180 - Z result seems right. Why?
            return new Rotation(RadianToDegree(beta), RadianToDegree(gamma), 180.0 - RadianToDegree(alpha));
        }
        private static void applyLinearFilter()
        {
            using (var src = new IplImage(@"..\..\Images\Penguin.Png", LoadMode.AnyDepth | LoadMode.AnyColor))
            using (var dst = new IplImage(src.Size, src.Depth, src.NChannels))
            {
                float[] data = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
                var kernel = new CvMat(rows: 1, cols: 21, type: MatrixType.F32C1, elements: data);

                Cv.Normalize(src: kernel, dst: kernel, a: 1.0, b: 0, normType: NormType.L1);

                double sum = 0;
                foreach (var item in data)
                {
                    sum += Math.Abs(item);
                }
                Console.WriteLine(sum); // => .999999970197678

                Cv.Filter2D(src, dst, kernel, anchor: new CvPoint(0, 0));

                using (new CvWindow("src", image: src))
                using (new CvWindow("dst", image: dst))
                {
                    Cv.WaitKey(0);
                }
            }
        }
Example #14
        public Solve()
        {
            //  x +  y +  z = 6
            // 2x - 3y + 4z = 8
            // 4x + 4y - 4z = 0

            double[] A = new double[]{
                1, 1, 1,
                2, -3, 4,
                4, 4, -4
            };
            double[] B = new double[]{
                6,
                8,
                0
            };

            CvMat matA = new CvMat(3, 3, MatrixType.F64C1, A);
            CvMat matB = new CvMat(3, 1, MatrixType.F64C1, B);

            // X = inv(A) * B
            CvMat matAInv = matA.Clone();
            matA.Inv(matAInv);

            CvMat matX = matAInv * matB;

            Console.WriteLine("X = {0}", matX[0].Val0);
            Console.WriteLine("Y = {0}", matX[1].Val0);
            Console.WriteLine("Z = {0}", matX[2].Val0);
            Console.Read();
        }
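As a side note, the same system can be solved without forming the inverse explicitly; a small sketch using OpenCvSharp's Cv.Solve (LU decomposition) on the matA and matB defined above:

            CvMat matX2 = new CvMat(3, 1, MatrixType.F64C1);
            Cv.Solve(matA, matB, matX2, InvertMethod.LU); // solves A * X = B directly
            Console.WriteLine("X = {0}", matX2[0].Val0);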
	void CalcPoint(CvMat velx, CvMat vely, IplImage rez)
	{
		int sX = 0;
		int sY = 0;
		int coun = 0;
		for (int x = 0; x < cols; x += 10) {
			for (int y = 0; y < rows; y += 10) {
				int dx = (int)Cv.GetReal2D (velx, y, x);
				int dy = (int)Cv.GetReal2D (vely, y, x);
				if(Mathf.Abs(dx)>8 && Mathf.Abs(dy)>8 && Mathf.Abs(dx)<75 && Mathf.Abs(dy)<75)
				{
					Cv.Line (rez, Cv.Point (x, y), Cv.Point (x + dx, y + dy), Cv.RGB (0, 0, 255), 1, Cv.AA, 0);
					sX += x;
					sY += y;
					coun++;
				}	


                if(y == 0 && x == cols/8*3 || y == 0 && x == cols/8*5 )
                {
                    Debug.Log("wewe");
                    Cv.Line(rez, Cv.Point(x, 0), Cv.Point(x, rows), Cv.RGB(255, 0, 0), 3, Cv.AA, 0);
                }

                if (x == 0 && y == rows / 3 || x == 0 && y == rows / 3 * 2)
                {
                    Cv.Line(rez, Cv.Point(0, y), Cv.Point(cols, y), Cv.RGB(255, 0, 0), 3, Cv.AA, 0);
                }
			}
		}
		if (coun >15) {
            Cv.Circle(rez, Cv.Point(sX / coun, sY / coun),30, Cv.RGB(255, 255, 0),5);
			moveVec.Set (sX / coun, sY / coun, 0);
		}
	}
        public void cariX(IplImage imgSrc, ref int min, ref int max)
        {
            bool minTemu = false;

            data = new CvMat();

            CvScalar maxVal = cxtypes.cvRealScalar(imgSrc.width * 255);
            CvScalar val = cxtypes.cvRealScalar(0);

            //for each column, compute the sum; if sum < width * 255 then we have found min,
            //then continue to the end to find max: if sum < width * 255 then a new max is found
            for (int i = 0; i < imgSrc.width; i++)
            {
                cxcore.CvGetCol(ref imgSrc, ref data, i); //col
                val = cxcore.CvSum(ref data);
                if (val.val1 < maxVal.val1)
                {
                    max = i;
                    if (!minTemu)
                    {
                        min = i;
                        minTemu = true;
                    }
                }
            }
        }
Example #17
 public CvMatProxy(CvMat mat)
 {
     Data = mat.ToRectangularArray();
     Rows = mat.Rows;
     Cols = mat.Cols;
     ElemChannels = mat.ElemChannels;
 }
Example #18
        public Filter2D()
        {
            // cvFilter2D
            // Filtering with a user-defined kernel

            // (1) Load the image
            using (IplImage srcImg = new IplImage(Const.ImageFruits, LoadMode.AnyDepth | LoadMode.AnyColor))
            using (IplImage dstImg = new IplImage(srcImg.Size, srcImg.Depth, srcImg.NChannels))
            {
                // (2) Normalize the kernel and apply the filter
                float[] data = {    2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
                };
                CvMat kernel = new CvMat(1, 21, MatrixType.F32C1, data);

                Cv.Normalize(kernel, kernel, 1.0, 0, NormType.L1);
                Cv.Filter2D(srcImg, dstImg, kernel, new CvPoint(0, 0));

                // (3) Display the result
                using (CvWindow window = new CvWindow("Filter2D", dstImg))
                {
                    Cv.WaitKey(0);
                }
            }

        }
Example #19
        /// <summary>
        /// Training constructor
        /// </summary>
        /// <param name="trainData"></param>
        /// <param name="tflag"></param>
        /// <param name="responses"></param>
        /// <param name="varIdx"></param>
        /// <param name="sampleIdx"></param>
        /// <param name="varType"></param>
        /// <param name="missingMask"></param>
        /// <param name="param"></param>
		public CvBoost(
            CvMat trainData, 
            DTreeDataLayout tflag, 
            CvMat responses, 
			CvMat varIdx = null, 
            CvMat sampleIdx = null, 
            CvMat varType = null, 
            CvMat missingMask = null, 
            CvBoostParams param = null)
		{    
			if (trainData == null)
                throw new ArgumentNullException("trainData");
            if (responses == null)
                throw new ArgumentNullException("responses");
            trainData.ThrowIfDisposed();
            responses.ThrowIfDisposed();

            if (param == null)
                param = new CvBoostParams();

			ptr = NativeMethods.ml_CvBoost_new_CvMat(
				trainData.CvPtr, 
				(int)tflag,
				responses.CvPtr,
				Cv2.ToPtr(varIdx), 
                Cv2.ToPtr(sampleIdx), 
                Cv2.ToPtr(varType), 
                Cv2.ToPtr(missingMask),
				param.CvPtr
			);
		}
 private static CvMat CreatePointCountMatrix(int numPoints)
 {
     int[] pointCountsValue = new int[_numImages];
     pointCountsValue[0] = numPoints;
     CvMat pointCounts = new CvMat(_numImages, 1, MatrixType.S32C1, pointCountsValue);
     return pointCounts;
 }
 private void ApplyCalibrationToUnityCamera(CvMat intrinsic, CvMat rotation, CvMat translation)
 {
     CvMat rotationInverse = GetRotationMatrixFromRotationVector(rotation).Transpose(); // transpose is same as inverse for rotation matrix
     CvMat transFinal = (rotationInverse * -1) * translation.Transpose();
     _mainCamera.projectionMatrix = LoadProjectionMatrix((float)intrinsic[0, 0], (float)intrinsic[1, 1], (float)intrinsic[0, 2], (float)intrinsic[1, 2]);
     ApplyTranslationAndRotationToCamera(transFinal, RotationConversion.RotationMatrixToEulerZXY(rotationInverse));
 }
    // Convert the image to HSV values
    CvMat ConvertToHSV(CvMat img)
    {
        CvMat imgHSV = img.EmptyClone();  // Assign destination matrix of same size and type

        Cv.CvtColor(img, imgHSV, ColorConversion.BgrToHsv);

        return(imgHSV);
    }
Example #23
    private static CvMat CreatePointCountMatrix(int numPoints)
    {
        int[] pointCountsValue = new int[_numImages];
        pointCountsValue[0] = numPoints;
        CvMat pointCounts = new CvMat(_numImages, 1, MatrixType.S32C1, pointCountsValue);

        return(pointCounts);
    }
Example #24
    private void ApplyCalibrationToUnityCamera(CvMat intrinsic, CvMat rotation, CvMat translation)
    {
        CvMat rotationInverse = GetRotationMatrixFromRotationVector(rotation).Transpose(); // transpose is same as inverse for rotation matrix
        CvMat transFinal      = (rotationInverse * -1) * translation.Transpose();

        _mainCamera.projectionMatrix = LoadProjectionMatrix((float)intrinsic[0, 0], (float)intrinsic[1, 1], (float)intrinsic[0, 2], (float)intrinsic[1, 2]);
        ApplyTranslationAndRotationToCamera(transFinal, RotationConversion.RotationMatrixToEulerZXY(rotationInverse));
    }
        public static byte[] GetBytesFromData(CvMat mat)
        {
            int byteLength = mat.ElemDepth * mat.Cols * mat.Rows * mat.ElemChannels / 8;

            byte[] ret = new byte[byteLength];
            Marshal.Copy(mat.Data, ret, 0, byteLength);
            return(ret);
        }
 public static SerializableCvMat CreateOrNull(CvMat mat)
 {
     if (mat == null)
     {
         return(null);
     }
     return(new SerializableCvMat(mat));
 }
 /// <summary>
 /// Constructor that specifies the dimension
 /// </summary>
 /// <param name="dimension"></param>
 public MemorySavingLeastSquare(int dimension)
 {
     _dimension = dimension;
     _left = CvEx.InitCvMat(dimension, dimension, MatrixType.F64C1);
     _right = CvEx.InitCvMat(dimension, 1, MatrixType.F64C1);
     _left.Zero();
     _right.Zero();
 }
Example #28
        /// <summary>
        /// Returns a pointer to the matrix element at (row, col), given the element size in bytes (no bounds checking).
        /// </summary>
        /// <param name="mat"></param>
        /// <param name="row"></param>
        /// <param name="col"></param>
        /// <param name="pixSize"></param>
        /// <returns></returns>
        public static unsafe byte *MAT_ELEM_PTR_FAST(CvMat mat, int row, int col, int pixSize)
        {
            /*if ((uint)row < (uint)mat.Rows && (uint)col < (uint)mat.Cols)
             * {
             *  throw new ArgumentException();
             * }*/
            return(mat.DataByte + (uint)(mat.Step * row) + (pixSize * col));
        }
Example #29
        static void showrite(String s, CvMat image)
        {
            CvWindow window = new CvWindow(showiteCounter.ToString() + s);
            window.ShowImage(image);
            image.SaveImage(showiteCounter.ToString() + s + ".png");

            showiteCounter++;
        }
 /// <summary>
 /// Constructor that specifies the dimension
 /// </summary>
 /// <param name="dimension"></param>
 public MemorySavingLeastSquare(int dimension)
 {
     _dimension = dimension;
     _left      = CvEx.InitCvMat(dimension, dimension, MatrixType.F64C1);
     _right     = CvEx.InitCvMat(dimension, 1, MatrixType.F64C1);
     _left.Zero();
     _right.Zero();
 }
Example #31
        /// <summary>
        ///
        /// </summary>
        /// <param name="data"></param>
        /// <param name="missing"></param>
        /// <param name="responses"></param>
        /// <param name="pWeight"></param>
        /// <returns></returns>
        private CvDTree MushroomCreateDTree(CvMat data, CvMat missing, CvMat responses, float pWeight)
        {
            float[] priors = { 1, pWeight };

            CvMat varType = new CvMat(data.Cols + 1, 1, MatrixType.U8C1);

            Cv.Set(varType, CvScalar.ScalarAll(CvStatModel.CV_VAR_CATEGORICAL)); // all the variables are categorical

            CvDTree dtree = new CvDTree();

            CvDTreeParams p = new CvDTreeParams(8,     // max depth
                                                10,    // min sample count
                                                0,     // regression accuracy: N/A here
                                                true,  // compute surrogate split, as we have missing data
                                                15,    // max number of categories (use sub-optimal algorithm for larger numbers)
                                                10,    // the number of cross-validation folds
                                                true,  // use 1SE rule => smaller tree
                                                true,  // throw away the pruned tree branches
                                                priors // the array of priors, the bigger p_weight, the more attention
                                                       // to the poisonous mushrooms
                                                       // (a mushroom will be judged to be poisonous with bigger chance)
                                                );

            dtree.Train(data, DTreeDataLayout.RowSample, responses, null, null, varType, missing, p);

            // compute hit-rate on the training database, demonstrates predict usage.
            int hr1 = 0, hr2 = 0, pTotal = 0;

            for (int i = 0; i < data.Rows; i++)
            {
                CvMat sample, mask;
                Cv.GetRow(data, out sample, i);
                Cv.GetRow(missing, out mask, i);
                double r = dtree.Predict(sample, mask).Value;
                bool   d = Math.Abs(r - responses.DataArraySingle[i]) >= float.Epsilon;
                if (d)
                {
                    if (r != 'p')
                    {
                        hr1++;
                    }
                    else
                    {
                        hr2++;
                    }
                }
                //Console.WriteLine(responses.DataArraySingle[i]);
                pTotal += (responses.DataArraySingle[i] == (float)'p') ? 1 : 0;
            }

            Console.WriteLine("Results on the training database");
            Console.WriteLine("\tPoisonous mushrooms mis-predicted: {0} ({1}%)", hr1, (double)hr1 * 100 / pTotal);
            Console.WriteLine("\tFalse-alarms: {0} ({1}%)", hr2, (double)hr2 * 100 / (data.Rows - pTotal));

            varType.Dispose();

            return(dtree);
        }
Example #32
        /// <summary>
        /// Rearranges the image quadrants so that the origin (the DC component) is at the image center.
        /// srcArr and dstArr must be arrays of the same size and type.
        /// </summary>
        /// <param name="srcArr"></param>
        /// <param name="dstArr"></param>
        private static void ShiftDFT(CvArr srcArr, CvArr dstArr)
        {
            CvSize size    = Cv.GetSize(srcArr);
            CvSize dstSize = Cv.GetSize(dstArr);

            if (dstSize.Width != size.Width || dstSize.Height != size.Height)
            {
                throw new ApplicationException("Source and Destination arrays must have equal sizes");
            }
            // (9) Temporary buffer for in-place mode
            CvMat tmp = null;

            if (srcArr == dstArr)
            {
                tmp = Cv.CreateMat(size.Height / 2, size.Width / 2, Cv.GetElemType(srcArr));
            }
            int cx = size.Width / 2;   /* image center */
            int cy = size.Height / 2;

            // (10) Matrices for quadrants 1-4 of the source, and their destination counterparts
            CvMat q1stub, q2stub;
            CvMat q3stub, q4stub;
            CvMat d1stub, d2stub;
            CvMat d3stub, d4stub;
            CvMat q1 = Cv.GetSubRect(srcArr, out q1stub, new CvRect(0, 0, cx, cy));
            CvMat q2 = Cv.GetSubRect(srcArr, out q2stub, new CvRect(cx, 0, cx, cy));
            CvMat q3 = Cv.GetSubRect(srcArr, out q3stub, new CvRect(cx, cy, cx, cy));
            CvMat q4 = Cv.GetSubRect(srcArr, out q4stub, new CvRect(0, cy, cx, cy));
            CvMat d1 = Cv.GetSubRect(dstArr, out d1stub, new CvRect(0, 0, cx, cy));
            CvMat d2 = Cv.GetSubRect(dstArr, out d2stub, new CvRect(cx, 0, cx, cy));
            CvMat d3 = Cv.GetSubRect(dstArr, out d3stub, new CvRect(cx, cy, cx, cy));
            CvMat d4 = Cv.GetSubRect(dstArr, out d4stub, new CvRect(0, cy, cx, cy));

            if (srcArr != dstArr)
            {
                if (!Cv.ARE_TYPES_EQ(q1, d1))
                {
                    throw new ApplicationException("Source and Destination arrays must have the same format");
                }
                Cv.Copy(q3, d1, null);
                Cv.Copy(q4, d2, null);
                Cv.Copy(q1, d3, null);
                Cv.Copy(q2, d4, null);
            }
            else
            {
                Cv.Copy(q3, tmp, null);
                Cv.Copy(q1, q3, null);
                Cv.Copy(tmp, q1, null);
                Cv.Copy(q4, tmp, null);
                Cv.Copy(q2, q4, null);
                Cv.Copy(tmp, q2, null);
            }
            if (tmp != null)
            {
                tmp.Dispose();
            }
        }
Example #33
        // Digit recognition
        internal static int recognizeDigit(CvMat image)
        {
            int nonzero = 0;

            nonzero = image.GetCols( image.Cols-2, image.Cols ).CountNonZero();
            if ( image.Rows * 2 == nonzero )
                // 1: the rightmost two columns are all bright pixels
                return 1;

            nonzero = image.GetRows( image.Rows-2, image.Rows ).CountNonZero();
            if ( image.Cols * 2 == nonzero )
                // 2: the bottom two rows are all bright pixels
                return 2;

            nonzero = image.GetRows ( 0, 2 ).CountNonZero();
            if ( image.Cols * 2 - 2 < nonzero )
                // 7: the top two rows are all bright pixels (up to one pixel may be missing)
                return 7;

            nonzero = image.GetCols ( image.Cols-3, image.Cols-1 ).CountNonZero();
            if ( image.Rows * 2 == nonzero )
                // 4: the two columns just left of the rightmost column are all bright pixels
                return 4;

            CvRect rect = new CvRect( 0, 0, 1, image.Rows*2/3 );
            CvMat subarr;
            nonzero = image.GetSubArr ( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 3: the upper two thirds of the leftmost column are all dark pixels
                return 3;

            rect = new CvRect ( 0, image.Rows/2, 3, 2 );
            nonzero = image.GetSubArr ( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 5: the 3x2 block at the left edge, right at the start of the lower half, is all dark pixels
                return 5;

            rect = new CvRect ( image.Cols/2, image.Rows/2-1, 1, 3 );
            nonzero = image.GetSubArr( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 0: the three middle pixels of the center column are all dark pixels
                return 0;

            rect = new CvRect ( image.Cols-1, 0, 1, image.Rows*2/5 );
            nonzero = image.GetSubArr( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 6: the upper two fifths of the rightmost column are all dark pixels
                return 6;

            rect = new CvRect ( image.Cols-1, image.Rows-3, 1, 3 );
            nonzero = image.GetSubArr( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 9: the bottom three pixels of the rightmost column are all dark pixels
                return 9;

            // 8: none of the conditions above matched
            return 8;
        }
Example #34
        public PixelSampling()
        {
            // Pixel sampling for translation: cvGetRectSubPix

            // (1) Load the image and allocate the output image
            using (IplImage srcImg = new IplImage(Const.ImageLenna, LoadMode.AnyDepth | LoadMode.AnyColor))
                using (IplImage dstImg = srcImg.Clone())
                {
                    // (2) Specify the position center in src_img that will become the center of dst_img
                    CvPoint2D32f center = new CvPoint2D32f
                    {
                        X = srcImg.Width - 1,
                        Y = srcImg.Height - 1
                    };
                    // (3) Shift the whole image with GetRectSubPix so that center becomes the image center
                    Cv.GetRectSubPix(srcImg, dstImg, center);
                    // (4) Display the result
                    using (CvWindow wSrc = new CvWindow("src"))
                        using (CvWindow wDst = new CvWindow("dst"))
                        {
                            wSrc.Image = srcImg;
                            wDst.Image = dstImg;
                            Cv.WaitKey(0);
                        }
                }


            // Pixel sampling for rotation: cvGetQuadrangleSubPix

            const int angle = 45;

            // (1) Load the image and allocate the output image
            using (IplImage srcImg = new IplImage(Const.ImageLenna, LoadMode.AnyDepth | LoadMode.AnyColor))
                using (IplImage dstImg = srcImg.Clone())
                {
                    // (2) Set the elements of the rotation (affine) matrix and initialize the CvMat matrix M
                    float[] m = new float[6];
                    m[0] = (float)(Math.Cos(angle * Cv.PI / 180.0));
                    m[1] = (float)(-Math.Sin(angle * Cv.PI / 180.0));
                    m[2] = srcImg.Width * 0.5f;
                    m[3] = -m[1];
                    m[4] = m[0];
                    m[5] = srcImg.Height * 0.5f;
                    using (CvMat mat = new CvMat(2, 3, MatrixType.F32C1, m))
                    {
                        // (3) Rotate the whole image with GetQuadrangleSubPix using the specified rotation matrix
                        Cv.GetQuadrangleSubPix(srcImg, dstImg, mat);
                        // (4) Display the result
                        using (CvWindow wSrc = new CvWindow("src"))
                            using (CvWindow wDst = new CvWindow("dst"))
                            {
                                wSrc.Image = srcImg;
                                wDst.Image = dstImg;
                                Cv.WaitKey(0);
                            }
                    }
                }
        }
Example #35
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="svmType">Type of SVM</param>
        /// <param name="kernelType">The kernel type</param>
        /// <param name="degree">for poly</param>
        /// <param name="gamma">for poly/rbf/sigmoid</param>
        /// <param name="coef0">for poly/sigmoid</param>
        /// <param name="c">for SVMType.CSvc, SVMType.EpsSvr and SVMType.NuSvr</param>
        /// <param name="nu">for SVMType.NuSvc, SVMType.OneClass and SVMType.NuSvr</param>
        /// <param name="p">for SVMType.EpsSvr</param>
        /// <param name="classWeights">for SVMType.CSvc</param>
        /// <param name="termCrit">Termination criteria</param>
        public CvSVMParams(SVMType svmType, SVMKernelType kernelType, double degree,
                           double gamma, double coef0, double c, double nu, double p,
                           CvMat classWeights, CvTermCriteria termCrit)
        {
            data = new WCvSVMParams();
            NativeMethods.ml_CvSVMParams_new2(
                ref data, (int)svmType, (int)kernelType, degree, gamma, coef0,
                c, nu, p, Cv2.ToPtr(classWeights), termCrit);
        }
        /// <summary>
        /// Obtains the solution by least squares
        /// </summary>
        /// <returns></returns>
        public double[] Solve()
        {
            CvMat invLeft = CvEx.InitCvMat(_left);

            _left.Invert(invLeft, InvertMethod.Cholesky);
            CvMat ret = invLeft * _right;

            return(ret.Select(r => r.Val0).ToArray());
        }
Example #37
        /// <summary>
        /// Predicts the response for sample
        /// </summary>
        /// <param name="sample"></param>
        /// <param name="returnDfVal"></param>
        /// <returns></returns>
        public virtual float Predict(CvMat sample, bool returnDfVal = false)
        {
            if (sample == null)
            {
                throw new ArgumentNullException(nameof(sample));
            }
            sample.ThrowIfDisposed();
            return(NativeMethods.ml_CvSVM_predict_CvMat1(ptr, sample.CvPtr, returnDfVal ? 1 : 0));
        }
Example #38
        static int WhatCard(bool black)
        {
            List <CvMat> templates = new List <CvMat>();
            var          image = new CvMat(path + @"bin/Debug/temp.png");
            double       maximum = 0, maxVal, minVal;
            CvPoint      maxLoc, minLoc;
            int          num = 0;
            CvMat        template;
            IplImage     result;

            if (black)
            {
                templates.AddRange(new CvMat[] { new CvMat(path + "B2.png"),
                                                 new CvMat(path + "B3.png"),
                                                 new CvMat(path + "B4.png"),
                                                 new CvMat(path + "B5.png"),
                                                 new CvMat(path + "B6.png"),
                                                 new CvMat(path + "B7.png"),
                                                 new CvMat(path + "B8.png"),
                                                 new CvMat(path + "B9.png"),
                                                 new CvMat(path + "B10.png"),
                                                 new CvMat(path + "BJack.png"),
                                                 new CvMat(path + "BQueen.png"),
                                                 new CvMat(path + "BKing.png"),
                                                 new CvMat(path + "BAce.png"), });
            }
            else
            {
                templates.AddRange(new CvMat[] { new CvMat(path + "R2.png"),
                                                 new CvMat(path + "R3.png"),
                                                 new CvMat(path + "R4.png"),
                                                 new CvMat(path + "R5.png"),
                                                 new CvMat(path + "R6.png"),
                                                 new CvMat(path + "R7.png"),
                                                 new CvMat(path + "R8.png"),
                                                 new CvMat(path + "R9.png"),
                                                 new CvMat(path + "R10.png"),
                                                 new CvMat(path + "RJack.png"),
                                                 new CvMat(path + "RQueen.png"),
                                                 new CvMat(path + "RKing.png"),
                                                 new CvMat(path + "RAce.png"), });
            }

            for (int i = 0; i < 13; i++)
            {
                template = templates[i];
                result   = new IplImage(new CvSize(image.Cols - template.Cols + 1, image.Rows - template.Rows + 1), BitDepth.F32, 1);
                Cv.MatchTemplate(image, template, result, MatchTemplateMethod.CCoeff);
                Cv.MinMaxLoc(result, out minVal, out maxVal, out minLoc, out maxLoc);
                if (maxVal > maximum)
                {
                    maximum = maxVal;
                    num     = i;
                }
            }
            return(num);
        }
Example #39
        CvMat getDepthUndistortMatImage(out float maxValue, out float minValue, CvMat src, float?center)
        {
            CvMat         ret    = CvEx.InitCvMat(src, MatrixType.U8C1);
            List <double> values = new List <double>();

            for (int y = 0; y < src.Rows; y++)
            {
                for (int x = 0; x < src.Cols; x++)
                {
                    if (!_undistortion.CameraStruct.IsInFocalLength(x, y))
                    {
                        continue;
                    }
                    int   i     = y * src.Cols + x;
                    float value = src.DataArraySingle[i];
                    // if (value < 2)
                    //     continue;
                    values.Add(value);
                }
            }
            float max = 0;
            float min = float.MaxValue;

            if (values.Count >= 1)
            {
                max = (float)CalcEx.GetNth(values, (int)(values.Count * 0.99));
                min = (float)CalcEx.GetNth(values, (int)(values.Count * 0.01));
            }
            max = (max - 1) * 1.5f + 1;
            min = (min - 1) * 1.5f + 1;
            if (center.HasValue)
            {
                float maxRange = Math.Max(max - center.Value, center.Value - min);
                max = center.Value + maxRange;
                min = center.Value - maxRange;
            }

            //max = 1.05f;
            //min = 0.95f;


            maxValue = max;
            minValue = min;
            if (max == min)
            {
                max += 0.5f;
                min -= 0.5f;
            }

            for (int i = 0; i < src.Rows * src.Cols; i++)
            {
                float value  = src.DataArraySingle[i];
                float output = 255 * (value - min) / (max - min);
                ret.DataArrayByte[i] = (byte)output;
            }
            return(ret);
        }
Example #40
File: Blob.cs  Project: 0sv/opencvsharp
 private void GetEnclosingCircle(
     IEnumerable<CvPoint> points, out CvPoint2D32f center, out float radius)
 {
     var pointsArray = points.ToArray();
     using (var pointsMat = new CvMat(pointsArray.Length, 1, MatrixType.S32C2, pointsArray))
     {
         Cv.MinEnclosingCircle(pointsMat, out center, out radius);
     }
 }
Example #41
        /// <summary>
        /// Constructs a tree of feature vectors
        /// </summary>
        /// <param name="desc">n x d matrix of n d-dimensional feature vectors (CV_32FC1 or CV_64FC1).</param>
        public CvFeatureTree(CvMat desc)
        {
            if (desc == null)
                throw new ArgumentNullException("desc");
            
            ptr = NativeMethods.cvCreateFeatureTree(desc.CvPtr);
            if (ptr == IntPtr.Zero)
                throw new OpenCvSharpException("Failed to create CvFeatureTree");
        }
Example #42
        public static void EdgeEnhancement(IplImage gray, ref IplImage enhancedImage)
        {
            float[] data = { -1, -1, -1, -1, -1, -1, 2, 2, 2, -1,
                             -1,  2,  8,  2, -1, -1, 2, 2, 2, -1,-1, -1, -1, -1, -1 };
            CvMat   kernel = new CvMat(5, 5, MatrixType.F32C1, data); // 32-bit float type to match the float[] kernel data

            Cv.Normalize(kernel, kernel, 8, 0, NormType.L1);
            Cv.Filter2D(gray, enhancedImage, kernel);
        }
Example #43
        /// <summary>
        /// Returns the matrix element at (row, col) as a value of type T.
        /// </summary>
        /// <typeparam name="T"></typeparam>
        /// <param name="mat"></param>
        /// <param name="row"></param>
        /// <param name="col"></param>
        /// <returns></returns>
        public static T CV_MAT_ELEM <T>(CvMat mat, int row, int col) where T : struct
        {
            unsafe
            {
                Type  type   = typeof(T);
                byte *result = MAT_ELEM_PTR_FAST(mat, row, col, Marshal.SizeOf(type));
                return((T)Marshal.PtrToStructure(new IntPtr(result), type));
            }
        }
Example #44
        /// <summary>
        /// Adjusts model state
        /// </summary>
        /// <param name="kalman">Kalman filter</param>
        /// <param name="measurement">CvMat containing the measurement vector.</param>
        /// <returns>The function stores the adjusted state at kalman->state_post and returns it on output.</returns>
        public static CvMat KalmanCorrect(CvKalman kalman, CvMat measurement)
        {
            if (kalman == null)
                throw new ArgumentNullException("kalman");
            if (measurement == null)
                throw new ArgumentNullException("measurement");
            IntPtr result = CvInvoke.cvKalmanCorrect(kalman.CvPtr, measurement.CvPtr);
            return new CvMat(result, false);
        }
Example #45
        public PixelSampling()
        {
            // Pixel sampling for translation: cvGetRectSubPix

            // (1) Load the image and allocate the output image
            using (IplImage srcImg = new IplImage(Const.ImageLenna, LoadMode.AnyDepth | LoadMode.AnyColor))
            using (IplImage dstImg = srcImg.Clone())
            {
                // (2) Specify the position center in src_img that will become the center of dst_img
                CvPoint2D32f center = new CvPoint2D32f
                {
                    X = srcImg.Width - 1,
                    Y = srcImg.Height - 1
                };
                // (3) Shift the whole image with GetRectSubPix so that center becomes the image center
                Cv.GetRectSubPix(srcImg, dstImg, center);
                // (4) Display the result
                using (CvWindow wSrc = new CvWindow("src"))
                using (CvWindow wDst = new CvWindow("dst"))
                {
                    wSrc.Image = srcImg;
                    wDst.Image = dstImg;
                    Cv.WaitKey(0);
                }
            }


            // Pixel sampling for rotation: cvGetQuadrangleSubPix

            const int angle = 45;
            // (1) Load the image and allocate the output image
            using (IplImage srcImg = new IplImage(Const.ImageLenna, LoadMode.AnyDepth | LoadMode.AnyColor))
            using (IplImage dstImg = srcImg.Clone())
            {
                // (2) Set the elements of the rotation (affine) matrix and initialize the CvMat matrix M
                float[] m = new float[6];
                m[0] = (float)(Math.Cos(angle * Cv.PI / 180.0));
                m[1] = (float)(-Math.Sin(angle * Cv.PI / 180.0));
                m[2] = srcImg.Width * 0.5f;
                m[3] = -m[1];
                m[4] = m[0];
                m[5] = srcImg.Height * 0.5f;
                using (CvMat mat = new CvMat(2, 3, MatrixType.F32C1, m))
                {
                    // (3) Rotate the whole image with GetQuadrangleSubPix using the specified rotation matrix
                    Cv.GetQuadrangleSubPix(srcImg, dstImg, mat);
                    // (4) Display the result
                    using (CvWindow wSrc = new CvWindow("src"))
                    using (CvWindow wDst = new CvWindow("dst"))
                    {
                        wSrc.Image = srcImg;
                        wDst.Image = dstImg;
                        Cv.WaitKey(0);
                    }
                }
            }
        }
Example #46
    private void ApplyTranslationAndRotationToCamera(CvMat translation, Rotation r)
    {
        double tx = translation[0, 0];
        double ty = translation[0, 1];
        double tz = translation[0, 2];

        _mainCamera.transform.position    = new Vector3((float)tx, (float)ty, (float)tz);
        _mainCamera.transform.eulerAngles = new Vector3((float)r.X, (float)r.Y, (float)r.Z);
    }
Example #47
        /// <summary>
        /// Encodes this image into a memory buffer using cvEncodeImage
        /// </summary>
        /// <param name="ext">Image extension used to decide the encoding format (e.g. ".jpg", ".png").</param>
        /// <param name="encodingParams">Encoding options.</param>
        /// <returns></returns>
        public byte[] ToBytes(string ext, params ImageEncodingParam[] encodingParams)
        {
            using (CvMat mat = Cv.EncodeImage(ext, this, encodingParams))
            {
                byte[] bytes = new byte[mat.Rows * mat.Cols];
                Marshal.Copy(mat.Data, bytes, 0, bytes.Length);
                return(bytes);
            }
        }
Example #48
        /// <summary>
        /// Creates a 4x4 translation matrix for column vectors
        /// </summary>
        /// <param name="translation"></param>
        /// <returns></returns>
        public static CvMat GetTranslation(CvPoint3D64f translation)
        {
            CvMat ret = CvMat.Identity(4, 4, MatrixType.F64C1);

            ret[0, 3] = translation.X;
            ret[1, 3] = translation.Y;
            ret[2, 3] = translation.Z;
            return(ret);
        }
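A quick usage sketch for the helper above, applying the 4x4 matrix to a homogeneous column vector (the values are illustrative only):

        CvMat T = GetTranslation(new CvPoint3D64f(1, 2, 3));
        CvMat p = new CvMat(4, 1, MatrixType.F64C1, new double[] { 10, 20, 30, 1 });
        CvMat moved = T * p; // => (11, 22, 33, 1)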
Example #49
        // => input = 24bit rgb
        public void calcNextFrame(CvMat input)
        {
            ++currentFrame;

            // IDEA 1 :
            // Get Hue out of input (get rid of lighting)
            // TODO : make following functions into multi-core
            // NOTE : Both below give alternatively good results for different pictures
            CvMat hue       = null;
            CvMat normalize = null;

            // no estimation of box color yet; if user hasn't set a hint himself, we'll just get input's center
            if (!hintPosSet && boxEstimationType == BoxEstimationType.NONE)
            {
                setBoxHint(input.Cols / 2, input.Rows / 2);
            }

            // new hint set
            if (hintPosSet)
            {
                hintPosSet = false;                 // don't ever re-enter, unless required
                resetBoxEstimation(input, ref hue, ref normalize);
            }


            CvLineSegmentPoint[] lines;
            {
                // IDEA 2 :
                // Before trying to extract any features, lets try to set a good ROI. The ROI returned by floodfill is certainly not good, as it may as well
            // hold only part of the box. We want to keep the whole region that contains pixels close to the box's estimated color.
                CvMat roi = detectROI(input, ref hue, ref normalize);

                // IDEA 3 (ABANDONED) :
                // detect features from hue/roi (GoodFeaturesToTrack), then get lines from features
                // NOTE : maybe this idea will make a comeback in combination with lines detected, although it sounds a bit OTT
#if false
                lines = detectLinesFromFeatures(hue, roi);
#endif

                // IDEA 4 :
                // INSTEAD of "GoodFeaturesToTrack", go straight for Canny edges and HoughLinesP
                lines = detectLinesFromCanny(roi);
            }

            // IDEA 5 :
            // Distill discovered lines:
            // - merge ones that are very close
            // - find the top-most (higher-Y!) that form a convex quadrilateral
            // - find intersection points; if we have 4 and a convex shape we are finished
            // - if we only have 2, check how they can be connected to form a convex quadrilateral
            lines = distillLines(lines);

            // IDEA 6 :
            // If we are somewhat confident about our trapezium, we can start tracking the 4 points
            // instead of re-discovering them every frame.
        }
        /// <summary>
        ///
        /// </summary>
        /// <param name="m1"></param>
        /// <param name="m2"></param>
        /// <param name="model"></param>
        /// <param name="maxIters"></param>
        /// <returns></returns>
        public override unsafe bool Refine(CvMat m1, CvMat m2, CvMat model, int maxIters)
        {
            CvLevMarq     solver    = new CvLevMarq(8, 0, new CvTermCriteria(maxIters, double.Epsilon));
            int           count     = m1.Rows * m1.Cols;
            CvPoint2D64f *M         = (CvPoint2D64f *)m1.DataByte;
            CvPoint2D64f *m         = (CvPoint2D64f *)m2.DataByte;
            CvMat         modelPart = new CvMat(solver.Param.Rows, solver.Param.Cols, model.ElemType, model.Data);

            Cv.Copy(modelPart, solver.Param);

            for (; ;)
            {
                CvMat  _param = null;
                CvMat  _JtJ = null, _JtErr = null;
                double _errNorm = 0;

                if (!solver.UpdateAlt(out _param, out _JtJ, out _JtErr, out _errNorm))
                {
                    break;
                }

                for (int i = 0; i < count; i++)
                {
                    double * h = _param.DataDouble;
                    double   Mx = M[i].X, My = M[i].Y;
                    double   ww  = 1.0 / (h[6] * Mx + h[7] * My + 1.0);
                    double   _xi = (h[0] * Mx + h[1] * My + h[2]) * ww;
                    double   _yi = (h[3] * Mx + h[4] * My + h[5]) * ww;
                    double[] err = { _xi - m[i].X, _yi - m[i].Y };
                    if (_JtJ != null || _JtErr != null)
                    {
                        double[,] J = new double[2, 8]
                        {
                            { Mx *ww, My *ww, ww, 0, 0, 0, -Mx *ww *_xi, -My *ww *_xi },
                            { 0, 0, 0, Mx *ww, My *ww, ww, -Mx * ww * _yi, -My * ww * _yi }
                        };

                        for (int j = 0; j < 8; j++)
                        {
                            for (int k = j; k < 8; k++)
                            {
                                _JtJ.DataDouble[j * 8 + k] += J[0, j] * J[0, k] + J[1, j] * J[1, k];
                            }
                            _JtErr.DataDouble[j] += J[0, j] * err[0] + J[1, j] * err[1];
                        }
                    }
                    if (_errNorm != 0)
                    {
                        solver.ErrNorm += err[0] * err[0] + err[1] * err[1];
                    }
                }
            }

            Cv.Copy(solver.Param, modelPart);
            return(true);
        }
Example #51
        /// <summary>
        /// Constructor called during deserialization
        /// </summary>
        /// <param name="info"></param>
        /// <param name="context"></param>
        protected CoordinatesTransformer(SerializationInfo info, StreamingContext context)
        {
            Type t = typeof(CvMatSerializator);

            Intrinsic            = ((CvMatSerializator)info.GetValue("Intrinsic", t)).BuildMatrix();
            Distortion           = ((CvMatSerializator)info.GetValue("Distortion", t)).BuildMatrix();
            Rotation             = ((CvMatSerializator)info.GetValue("Rotation", t)).BuildMatrix();
            Translation          = ((CvMatSerializator)info.GetValue("Translation", t)).BuildMatrix();
            TransformationMatrix = ((CvMatSerializator)info.GetValue("TransformationMatrix", t)).BuildMatrix();
        }
Example #52
 void putImage(CvMat mat, PixelFormat format)
 {
     if (!this.Dispatcher.CheckAccess())
     {
         this.Dispatcher.Invoke(new Action <CvMat, PixelFormat>(putImage), mat, format);
         return;
     }
     CvEx.GetBmpFromMat(ref _bmp, mat, format);
     imageTrack.Source = _bmp;
 }
Example #53
        void drawUndistortedCornerFrame(CvMat displayMat, CvPoint2D32f[] corners, CvSize boardSize)
        {
            CvMat cornerMat = new CvMat(1, corners.Length, MatrixType.F32C2);

            CvEx.FillCvMat(cornerMat, corners.Select(x => new CvScalar(x.X, x.Y)).ToList());
            CvMat undistMat = CvEx.InitCvMat(cornerMat);

            Cv.UndistortPoints(cornerMat, undistMat, this.UndistortionData.CameraStruct.CreateCvMat(), this.UndistortionData.DistortStruct.CreateCvMat(true), null, this.UndistortionData.CameraStruct.CreateCvMat());
            CvEx.DrawChessboardCornerFrame(displayMat, boardSize, undistMat.Select(x => new CvPoint2D32f(x.Val0, x.Val1)).ToArray(), new CvScalar(216, 216, 216));
        }
Example #54
        private void GetEnclosingCircle(
            IEnumerable <CvPoint> points, out CvPoint2D32f center, out float radius)
        {
            var pointsArray = points.ToArray();

            using (var pointsMat = new CvMat(pointsArray.Length, 1, MatrixType.S32C2, pointsArray))
            {
                Cv.MinEnclosingCircle(pointsMat, out center, out radius);
            }
        }
Example #55
 public static Bitmap ChangeBrighness(Bitmap src, float[] kernel)
 {
     using (IplImage dst = Cv.CloneImage(BitmapConverter.ToIplImage(src)))
     {
         CvMat kernel_matrix = Cv.Mat(3, 3, MatrixType.F32C1, kernel);
         Cv.Filter2D(BitmapConverter.ToIplImage(src), dst, kernel_matrix, Cv.Point(-1, -1));
         using (Mat res = new Mat(dst, true))
             return(res.ToBitmap());
     }
 }
Example #56
        /// <summary>
        /// Decodes an image stored in a memory buffer
        /// </summary>
        /// <param name="buf">The input buffer (byte array wrapped in a CvMat)</param>
        /// <param name="iscolor">Specifies the color type of the loaded image</param>
        /// <returns></returns>
        public static CvMat DecodeImageM(CvMat buf, LoadMode iscolor)
        {
            if (buf == null)
                throw new ArgumentNullException("buf");
            IntPtr ptr = NativeMethods.cvDecodeImageM(buf.CvPtr, iscolor);
            if (ptr == IntPtr.Zero)
                return null;
            else
                return new CvMat(ptr, true);
        }
    // Return a region of interest (_rect_roi) from within the image _image
    //  This doesn't need to be its own function, but I had so much trouble
    //  finding a method that didn't crash the program that I separated it.
    CvMat GetROI(CvMat _image, CvRect rect_roi)
    {
        // Get the region of interest
        CvMat img_roi;  // Get the region of interest

        // Grab the region of interest using the mouse-drawn box
        _image.GetSubRect(out img_roi, rect_roi);

        return(img_roi);
    }
Example #58
        static public CvMat BGRtoHueCV(CvMat input)
        {
            CvMat hsl = MatOps.ConvertChannels(input, MatrixType.U8C3, ColorConversion.BgrToHsv_Full);
            CvMat hue = MatOps.CopySize(input, MatrixType.U8C1);

            //CvMat lum = hue.EmptyClone();
            //hsl.Split( hue, null, lum, null );
            hsl.Split(hue, null, null, null);
            return(hue);
        }
Example #59
        public KNearest(MainForm form)
        {
            this.form = form;

            trainData = cvlib.cvCreateMat(train_samples * classes, size * size, cvlib.CV_32FC1);
            trainClasses = cvlib.cvCreateMat(train_samples * classes, 1, cvlib.CV_32FC1);

            K = int.Parse(form.txtK.Text);

            p = new preprocessing(form);
        }
Example #60
        /// <summary>
        /// Training constructor
        /// </summary>
        /// <param name="layerSizes">The integer vector specifies the number of neurons in each layer including the input and output layers.</param>
        /// <param name="activFunc">Specifies the activation function for each neuron</param>
        /// <param name="fParam1">Free parameter α of the activation function</param>
        /// <param name="fParam2">Free parameter β of the activation function</param>
		public CvANN_MLP(
            CvMat layerSizes, 
            MLPActivationFunc activFunc = MLPActivationFunc.SigmoidSym, 
            double fParam1 = 0, double fParam2 = 0)
		{
            if (layerSizes == null)
                throw new ArgumentNullException("layerSizes");

            ptr = NativeMethods.ml_CvANN_MLP_new2_CvMat(
                layerSizes.CvPtr, (int)activFunc, fParam1, fParam2);
		}