/// <summary>
/// Reconstructs a CvMat from an XML element carrying "cols", "rows", "type"
/// and a comma-separated "values" attribute. Only single-channel F32/F64
/// matrices are supported; anything else throws.
/// </summary>
/// <param name="matrix">Receives the newly allocated, filled matrix.</param>
/// <param name="element">Element whose attributes describe the matrix.</param>
public static void Read(out CvMat matrix, XmlElement element)
        {
            // Invariant culture: the attributes are machine-written data and
            // must parse identically regardless of the host locale (a
            // comma-decimal locale would otherwise mis-read "1.5").
            int cols = int.Parse(element.GetAttribute("cols"), System.Globalization.CultureInfo.InvariantCulture);
            int rows = int.Parse(element.GetAttribute("rows"), System.Globalization.CultureInfo.InvariantCulture);
            MatrixType matType = (MatrixType)Enum.Parse(typeof(MatrixType), element.GetAttribute("type"));
            string terms = element.GetAttribute("values");
            List<double> values = new List<double>();
            string[] words = terms.Split(',');
            foreach (string w in words)
                values.Add(double.Parse(w.Trim(), System.Globalization.CultureInfo.InvariantCulture));

            // Only single-channel float/double matrices are supported.
            switch (matType)
            {
                case MatrixType.F32C1:
                    break;
                case MatrixType.F64C1:
                    break;
                default:
                    throw new Exception("Read unsupported MatrixType " + matType.ToString());
            }

            // Fail with a clear message instead of an index error during the fill.
            if (values.Count < rows * cols)
                throw new Exception("Read expected " + (rows * cols) + " values but found " + values.Count);

            matrix = new CvMat(rows, cols, matType);

            // Fill in row-major order. The previous code used the column
            // counter as Set2D's first (row) index, which reads past the
            // matrix bounds whenever cols > rows; for square matrices this
            // order is identical. A running index replaces the former
            // O(n^2) RemoveAt(0) pattern.
            int next = 0;
            for (int y = 0; y < rows; ++y)
            {
                for (int x = 0; x < cols; ++x)
                {
                    matrix.Set2D(y, x, new CvScalar(values[next++]));
                }
            }
        }
Beispiel #2
0
        /// <summary>
        /// Fills every cell of a CvMat with the supplied scalar values,
        /// consumed in row-major order.
        /// </summary>
        /// <param name="mat">Destination matrix; all cells are overwritten.</param>
        /// <param name="arr">Source values; must hold at least mat.Rows * mat.Cols entries.</param>
        public static void FillCvMat(CvMat mat, IList <CvScalar> arr)
        {
            int next = 0;

            for (int row = 0; row < mat.Rows; row++)
            {
                for (int col = 0; col < mat.Cols; col++)
                {
                    mat.Set2D(row, col, arr[next]);
                    next++;
                }
            }
        }
 /// <summary>
 /// Builds a new CvMat of this object's dimensions and element type,
 /// populated from this.Array in row-major order.
 /// </summary>
 public CvMat CreateCvMat()
 {
     CvMat result = new CvMat(this.Rows, this.Cols, this.ElemType);
     int next = 0;
     for (int row = 0; row < this.Rows; row++)
     {
         for (int col = 0; col < this.Cols; col++)
         {
             result.Set2D(row, col, this.Array[next]);
             next++;
         }
     }
     return result;
 }
Beispiel #4
0
        /// <summary>
        /// Kalman filter update step. On the first call the filter is
        /// initialized (kalman_init) and seeded with the current position;
        /// every call then feeds the (az2_c, alt2_c) measurement through a
        /// correct/predict cycle and stores the filtered estimate in
        /// kvaz / kvalt.
        /// </summary>
        public void kalman_update()
        {
            if (kalman_id == 0)
            {
                kalman_init();
                // Set initial values: seed the posterior state with the
                // current azimuth/altitude, error covariance to a provisional 1.0.
                double errcov = 1.0; // provisional value
                kv_kalman.StatePost.Set1D(0, (float)az2_c);
                kv_kalman.StatePost.Set1D(1, (float)alt2_c);
                Cv.SetIdentity(kv_kalman.ErrorCovPost, Cv.RealScalar(errcov));
            }
            kalman_id++;

            // Measurement vector (kalman): 2x1 column of [azimuth; altitude].
            //float[] m = { (float)(az2_c), (float)(alt2_c) };
            //measurement = Cv.Mat(2, 1, MatrixType.F32C1, m);
            measurement.Set2D(0, 0, (float)az2_c);
            measurement.Set2D(1, 0, (float)alt2_c);

            // Measurement error estimate, FWHM = 2.35 sigma.
            // (0.005 * v / 2 presumably converts the velocity terms to a FWHM
            //  in measurement units -- TODO confirm against the caller.)
            double fwhm_az   = 0.005 * vaz2_kv / 2.0;
            double fwhm_alt  = 0.005 * valt2_kv / 2.0;
            double sigma_az  = (fwhm_az / 2.35);
            double sigma_alt = (fwhm_alt / 2.35);

            // Diagonal measurement-noise covariance from the per-axis sigmas.
            kv_kalman.MeasurementNoiseCov.Set2D(0, 0, sigma_az * sigma_az);
            kv_kalman.MeasurementNoiseCov.Set2D(1, 1, sigma_alt * sigma_alt);
            //Cv.SetIdentity(kv_kalman.MeasurementNoiseCov, Cv.RealScalar(0.001));

            // Correction phase (kalman)
            correction = Cv.KalmanCorrect(kv_kalman, measurement);
            // Prediction phase (kalman)
            prediction = Cv.KalmanPredict(kv_kalman);
            kvaz       = prediction.DataArraySingle[0]; // answer: filtered azimuth
            kvalt      = prediction.DataArraySingle[1]; // answer: filtered altitude
            //kvx = prediction.DataArraySingle[2];
            //kvy = prediction.DataArraySingle[3];
        }
        /// <summary>
        /// Creates a CvMat sized and typed like this object and copies
        /// this.Array into it, walking the cells row by row.
        /// </summary>
        public CvMat CreateCvMat()
        {
            CvMat result = new CvMat(this.Rows, this.Cols, this.ElemType);
            int next = 0;

            for (int row = 0; row < this.Rows; row++)
            {
                for (int col = 0; col < this.Cols; col++)
                {
                    result.Set2D(row, col, this.Array[next]);
                    next++;
                }
            }
            return result;
        }
        /// <summary>
        /// Reconstructs a CvMat from an XML element carrying "cols", "rows",
        /// "type" and a comma-separated "values" attribute. Only
        /// single-channel F32/F64 matrices are supported; anything else throws.
        /// </summary>
        /// <param name="matrix">Receives the newly allocated, filled matrix.</param>
        /// <param name="element">Element whose attributes describe the matrix.</param>
        public static void Read(out CvMat matrix, XmlElement element)
        {
            // Invariant culture: the attributes are machine-written data and
            // must parse identically regardless of the host locale (a
            // comma-decimal locale would otherwise mis-read "1.5").
            int           cols    = int.Parse(element.GetAttribute("cols"), System.Globalization.CultureInfo.InvariantCulture);
            int           rows    = int.Parse(element.GetAttribute("rows"), System.Globalization.CultureInfo.InvariantCulture);
            MatrixType    matType = (MatrixType)Enum.Parse(typeof(MatrixType), element.GetAttribute("type"));
            string        terms   = element.GetAttribute("values");
            List <double> values  = new List <double>();

            string[] words = terms.Split(',');
            foreach (string w in words)
            {
                values.Add(double.Parse(w.Trim(), System.Globalization.CultureInfo.InvariantCulture));
            }

            // Only single-channel float/double matrices are supported.
            switch (matType)
            {
            case MatrixType.F32C1:
                break;

            case MatrixType.F64C1:
                break;

            default:
                throw new Exception("Read unsupported MatrixType " + matType.ToString());
            }

            // Fail with a clear message instead of an index error during the fill.
            if (values.Count < rows * cols)
            {
                throw new Exception("Read expected " + (rows * cols) + " values but found " + values.Count);
            }

            matrix = new CvMat(rows, cols, matType);

            // Fill in row-major order. The previous code used the column
            // counter as Set2D's first (row) index, which reads past the
            // matrix bounds whenever cols > rows; for square matrices this
            // order is identical. A running index replaces the former
            // O(n^2) RemoveAt(0) pattern.
            int next = 0;
            for (int y = 0; y < rows; ++y)
            {
                for (int x = 0; x < cols; ++x)
                {
                    matrix.Set2D(y, x, new CvScalar(values[next++]));
                }
            }
        }
Beispiel #7
0
        // NOTE : Also seems not well written and craves optimization at places. P.A.N.A.R.G.O.
        // => frame = 8 bit greyscale CvMat
        //
        // Histogram-based contrast enhancement, in place:
        //   1. build the 256-bin histogram of 'frame',
        //   2. collect the non-zero bins and find peaks among them (3-bin window),
        //   3. derive upper/lower clipping thresholds from the peaks,
        //   4. clip the histogram and accumulate it into an intensity lookup table,
        //   5. remap every pixel of 'frame' through the lookup table.
        static public void ContrastEnhancement(CvMat frame)
        {
            //CvMat originalFrame = frame; // return this if cannot enhance
            //if (frame.ElemType != MatrixType.U8C1)
            //	frame = MatOps.Convert(frame, MatrixType.U8C1, 1 / 255.0 );

            /////original histogram
            const int HistBinSize = 256;

            int[] histSizes = new int[1];
            histSizes[0] = HistBinSize;
            CvHistogram hist = new CvHistogram(histSizes, HistogramFormat.Array);

            Cv.CalcArrHist(frame, hist, false);               // size = 256 implied

            // Work on a copy of the bins while collecting non-zero values, so
            // the original histogram stays untouched until the clipping step.
            CvHistogram newHist    = MatOps.CopyHistogram(hist);
            CvArr       newHistBin = newHist.Bins;

            //double[] origVals = new double[hist.Bins.GetDims( 0 )];
            List <double> origVals = new List <double>(HistBinSize);

            for (int i = 0; i < HistBinSize; i++)
            {
                double elem = newHistBin.GetReal1D(i);
                if (elem != 0)
                {
                    origVals.Add(elem);
                }
            }

            // FIX : See no need for histL, since we have origVals
            //////histogram with only nonzero bins
            //CvMat histL = new CvMat( imageRows, imageCols, MatrixType.F32C1, new CvScalar( 0 ) );
            //for (i = 0; i < origVals.size(); i++)
            //	histL.at<float>( i, 0 ) = origVals.at( i );

            List <double> peakValues = new List <double>(HistBinSize);             //std::vector<int> peakValues;

            //////////3 bin search window
            // A bin counts as a peak when strictly greater than both neighbours.
            // NOTE(review): the bound 'origVals.Count - 2' leaves the last
            // interior element unchecked; a full 3-bin scan would run to
            // Count - 1. Confirm against the C++ original before changing.
            for (int i = 1; i < origVals.Count - 2; ++i)
            {
                double elem = origVals[i];
                if (elem > origVals[i - 1] && elem > origVals[i + 1])
                {
                    peakValues.Add(elem);
                }
            }

            if (peakValues.Count == 0)
            {
                //Console.Out.WriteLine( "Cannot enhance" );
                return;                 // cannot enhance?
            }

            //////Upper threshold : mean of the peak values
            double threshUP = 0;

            for (int i = 0; i < peakValues.Count; ++i)
            {
                threshUP += peakValues[i];
            }
            threshUP /= peakValues.Count;

            //////Lower threshold
            double threshDOWN = Math.Min((frame.Cols * frame.Rows), threshUP * origVals.Count) / 256.0;
            //Console.Out.WriteLine( "Enhance thresholds " + threshUP + "/" + threshDOWN );

            //////histogram reconstruction : clip every non-empty bin into
            ////// [threshDOWN, threshUP]; empty bins stay empty.
            CvArr histBins = hist.Bins;

            for (int i = 0; i < HistBinSize; ++i)
            {
                double histElem = histBins.GetReal1D(i);
                if (histElem > threshUP)
                {
                    histBins.SetReal1D(i, threshUP);
                }
                else if (histElem <= threshUP && histElem >= threshDOWN)
                {
                    continue;
                }
                else if (histElem < threshDOWN && histElem > 0)
                {
                    histBins.SetReal1D(i, threshDOWN);
                }
                else if (histElem == 0)
                {
                    continue;
                }
            }
            // accumulated values(?) -- cumulative histogram used to build the LUT
            double[] accVals = new double[HistBinSize];             //std::vector<int> accVals;
            accVals[0] = (histBins.GetReal1D(0));
            for (int i = 1; i < HistBinSize; ++i)
            {
                // NOTE(review): uses the indexer 'histBins[i]' while the code
                // above uses GetReal1D -- presumably equivalent for a 1-D
                // histogram; confirm the indexer semantics.
                accVals[i] = (accVals[i - 1] + histBins[i]);
            }

            // Normalized cumulative histogram becomes the intensity lookup table.
            byte[] lookUpTable = new byte[HistBinSize];             //cv::Mat lookUpTable = cv::Mat::zeros( hist.size(), CV_8UC1 );
            for (int i = 0; i < HistBinSize; ++i)
            {
                lookUpTable[i] = (byte)(255.0 * accVals[i] / accVals[255]);
            }

            // assign computed values to input frame
            //Console.Out.Write( "Enhance-->" );
            for (int i = 0; i < frame.Cols; ++i)
            {
                for (int j = 0; j < frame.Rows; ++j)
                {
                    // there is NO mask, thus no need to check for; was: "if (mask.data)..."
                    byte oldValue = (byte)frame.Get2D(j, i);
                    byte newValue = lookUpTable[oldValue];
                    //if ((newValue <1 || newValue > 254) && (newValue != oldValue)) Console.Out.Write( oldValue + " " + newValue + "|");
                    frame.Set2D(j, i, newValue);
                    //frame.SetReal2D( j, i, lookUpTable[ (int)(255.0 * frame.GetReal2D( j, i )) ] / 255.0);
                }
            }
            //Console.Out.WriteLine();

            //frame = MatOps.Convert( frame, MatrixType.U8C1, 255.0 );
        }
        // Isolated bright-pixel removal:
        // if a bright pixel has no bright neighbour in its 3x3 neighbourhood, clear it.
        static CvMat removeNoize(CvMat image)
        {
            // Working copy with a 1-pixel constant border on every side, so the
            // 3x3 window never reads outside the image. A 1-px border on each
            // side requires the work image to be 2 pixels larger per dimension;
            // the previous "+1" left no bottom/right border, so the 3x3
            // sub-rect overran the work image for pixels on the last row/column.
            CvMat workImage = new CvMat( image.Rows+2, image.Cols+2, MatrixType.U8C1 );
            image.CopyMakeBorder( workImage, new CvPoint(1, 1), BorderType.Constant );

            // Scan every pixel of the original image.
            for ( int row = 0; row < image.Rows; row++ )
            {
                for ( int col = 0; col < image.Cols; col++ )
                {
                    // Dark pixels are left untouched.
                    if ( 0 == image.Get2D( row, col ))
                        continue;

                    // The 3x3 window around (row, col) in 'image' starts at
                    // (col, row) in the bordered work image. If it contains
                    // exactly one bright pixel, that pixel is the center
                    // itself -- it is isolated, so clear it in the output.
                    CvRect rect = new CvRect( col, row, 3, 3 );
                    CvMat area;
                    workImage.GetSubArr ( out area, rect );
                    int nonzero = area.CountNonZero();
                    if ( 1 == nonzero )
                        image.Set2D( row, col, 0 );
                }
            }
            return image;
        }
Beispiel #9
0
 /// <summary>
 /// Populates a CvMat from a list of scalar values, visiting cells in
 /// row-major order.
 /// </summary>
 /// <param name="mat">Matrix whose cells are overwritten.</param>
 /// <param name="arr">Values to write; needs at least mat.Rows * mat.Cols entries.</param>
 public static void FillCvMat(CvMat mat, IList<CvScalar> arr)
 {
     int next = 0;
     for (int row = 0; row < mat.Rows; row++)
     {
         for (int col = 0; col < mat.Cols; col++)
         {
             mat.Set2D(row, col, arr[next]);
             next++;
         }
     }
 }
Beispiel #10
0
        /// <summary>
        /// Converts CCD coordinates (cx, cy) to horizontal coordinates (az, alt).
        /// </summary>
        //
        //   CCD coords (cx,cy): offset from the CCD center [pix], referenced to the Std. Cam (cx = x-xc, cy = y-yc)
        //   Center position (az_c, alt_c) and field rotation (theta_c)
        //   fl: focal length [mm]; ccdpx, ccdpy: pixel pitch [mm]

        public void cxcy2azalt(double cx, double cy,
                               double az_c, double alt_c, int mode, double theta_c,
                               double fl, double ccdpx, double ccdpy,
                               ref double az, ref double alt)
        {
            double rad = Math.PI / 180.0;
            double cxmm, cymm;

            // Direction cosines of the target: flip the pixel axes according
            // to the mount side (East/West) while converting pixels to mm.
            if (mode == mmEast)
            {
                cxmm = -cx * ccdpx; // +ccd -az
                cymm = +cy * ccdpy; // +ccd +alt
            }
            else
            {                       //mmWest
                cxmm = +cx * ccdpx; // +ccd +az
                cymm = -cy * ccdpy; // +ccd -alt
            }
            // View vector in camera coordinates: x along the optical axis (fl).
            CvMat v1 = new CvMat(3, 1, MatrixType.F64C1);

            v1.Set2D(0, 0, fl);
            v1.Set2D(1, 0, -cxmm);
            v1.Set2D(2, 0, cymm);
            v1.Normalize(v1);// normalize to direction cosines

            CvMat v2 = new CvMat(3, 1, MatrixType.F64C1);
            CvMat Rx = new CvMat(3, 3, MatrixType.F64C1);
            CvMat Rz = new CvMat(3, 3, MatrixType.F64C1);
            CvMat Ry = new CvMat(3, 3, MatrixType.F64C1);

            //Rx.rotX(-theta_c * rad); // set the rotation matrix
            double sin = Math.Sin(-theta_c * rad);
            double cos = Math.Cos(-theta_c * rad);

            // Rotation about x by -theta_c (presumably undoing the field rotation).
            Rx.Set2D(0, 0, 1); Rx.Set2D(0, 1, 0); Rx.Set2D(0, 2, 0);
            Rx.Set2D(1, 0, 0); Rx.Set2D(1, 1, cos); Rx.Set2D(1, 2, -sin);
            Rx.Set2D(2, 0, 0); Rx.Set2D(2, 1, sin); Rx.Set2D(2, 2, cos);


            //Rz.rotZ(-az_c   *rad ); // negative: rotation direction is opposite to the celestial coordinate system
            sin = Math.Sin(-az_c * rad);
            cos = Math.Cos(-az_c * rad);
            Rz.Set2D(0, 0, cos); Rz.Set2D(0, 1, -sin); Rz.Set2D(0, 2, 0);
            Rz.Set2D(1, 0, sin); Rz.Set2D(1, 1, cos); Rz.Set2D(1, 2, 0);
            Rz.Set2D(2, 0, 0); Rz.Set2D(2, 1, 0); Rz.Set2D(2, 2, 1);

            //Ry.rotY(-alt_c  *rad ); // set the rotation matrix
            sin = Math.Sin(-alt_c * rad);
            cos = Math.Cos(-alt_c * rad);
            Ry.Set2D(0, 0, cos); Ry.Set2D(0, 1, 0); Ry.Set2D(0, 2, sin);
            Ry.Set2D(1, 0, 0); Ry.Set2D(1, 1, 1); Ry.Set2D(1, 2, 0);
            Ry.Set2D(2, 0, -sin); Ry.Set2D(2, 1, 0); Ry.Set2D(2, 2, cos);
            v2 = Rz * (Ry * (Rx * v1)); // order matters (start: image center aligned with the x axis)

            // Return values: azimuth normalized into [0, 360), altitude from asin.
            az = Math.Atan2(-v2.Get2D(1, 0), v2.Get2D(0, 0)) / rad;
            if (az < 0)
            {
                az += 360;
            }
            alt = Math.Asin(v2.Get2D(2, 0)) / rad;

            //    // Adopt the azimuth value closest to az_c
            //    double az2 = az - 360;
            //    double az3 = az + 360;
            //    double dis1 = Math.Abs(az - az_c);
            //    double dis2 = Math.Abs(az2 - az_c);
            //    double dis3 = Math.Abs(az3 - az_c);
            //    if (dis1 > dis2) az = az2;
            //    if (dis1 > dis3) az = az3;
        }
Beispiel #11
0
        /// <summary>
        /// Loads the fixed camera-calibration constants into the _camera (3x3
        /// intrinsic matrix, row-major) and _distortion (5x1) matrices.
        /// </summary>
        void Setting()
        {
            // Intrinsic camera matrix, row-major.
            var cameraSetting = new[]
            {
                1.1695652991231043e+003,
                0,
                6.4021949743978928e+002,
                0,
                1.1681614889356322e+003,
                3.9445679497160154e+002,
                0,
                0,
                1
            };

            // Lens distortion coefficients (presumably OpenCV's
            // k1, k2, p1, p2, k3 ordering -- confirm against usage).
            var distortionSetting = new[]
            {
                2.6841557745990408e-001,
                -1.2536292326179452e+000,
                -5.1949672285252447e-003,
                3.4873826852522883e-003,
                1.6164774281680905e+000
            };


            /*
             * var cameraSetting = new[]
             * {
             *  1.1442471246455027e+003,
             *  0,
             *  6.4812020158754228e+002,
             *  0,
             *  1.1443391554606812e+003,
             *  3.8794788696276186e+002,
             *  0,
             *  0,
             *  1
             * };
             *
             * var distortionSetting = new[]
             * {
             *    2.0156935445340091e-001,
             *    -1.0814314304293935e+000,
             *    -6.7653166226057113e-003,
             *    3.7355841949211124e-003,
             *    1.7023014402148473e+000
             * };
             */

            // Copy the flat camera array into the 3x3 matrix row by row.
            int next = 0;
            for (int row = 0; row < 3; row++)
            {
                for (int col = 0; col < 3; col++)
                {
                    _camera.Set2D(row, col, cameraSetting[next]);
                    next++;
                }
            }

            // Distortion coefficients form a 5x1 column vector.
            for (int k = 0; k < 5; k++)
            {
                _distortion.Set2D(k, 0, distortionSetting[k]);
            }
        }
    // Convert the Texture2D type of Unity to OpenCV's CvMat
    // This uses Adcock's parallel C# code to parallelize the conversion and make it faster
    // I found the code execution dropped from 180 msec per frame to 70 msec per frame with parallelization
    void Texture2DToCvMat()
    {
        //float startTime = Time.realtimeSinceStartup;

        Color[] pixels = _webcamTexture.GetPixels();

        // Parallel for loop: each row converts independently -- every Set2D
        // targets a distinct (i, j) cell, so rows can run on separate threads
        // without contention.
        Parallel.For(0, imHeight, i =>
        {
            for (var j = 0; j < imWidth; j++)
            {
                var pixel = pixels[j + i * imWidth];
                // Scale the (presumably 0..1 float) Unity color channels to
                // 0..255 and store them BGR-ordered (Val0 = blue), matching
                // OpenCV's channel convention.
                var col   = new CvScalar
                {
                    Val0 = (double)pixel.b * 255,
                    Val1 = (double)pixel.g * 255,
                    Val2 = (double)pixel.r * 255
                };

                videoSourceImage.Set2D(i, j, col);
            }
        });



        //				CvScalar col;
        //				Color pixel;
        //				int i, j;
        //
        //				// Non-parallelized code
        //				for (i = 0; i < imHeight; i++) {
        //						for (j = 0; j < imWidth; j++) {
        //								pixel = pixels [j + i * imWidth];
        //
        //								col = new CvScalar
        //								{
        //									Val0 = (double)pixel.b * 255,
        //									Val1 = (double)pixel.g * 255,
        //									Val2 = (double)pixel.r * 255
        //								};
        //
        //								videoSourceImage.Set2D (i, j, col);
        //						}
        //
        //				}

        // Flip up/down dimension and right/left dimension
        // NOTE(review): the conditions look inverted relative to the flag
        // names (!FlipUpDownAxis && FlipLeftRightAxis => FlipMode.XY;
        // FlipUpDownAxis && !FlipLeftRightAxis => no flip). This may be
        // deliberate compensation for the texture's origin convention --
        // confirm before changing.
        if (!FlipUpDownAxis && FlipLeftRightAxis)
        {
            Cv.Flip(videoSourceImage, videoSourceImage, FlipMode.XY);
        }
        else if (!FlipUpDownAxis)
        {
            Cv.Flip(videoSourceImage, videoSourceImage, FlipMode.X);
        }
        else if (FlipLeftRightAxis)
        {
            Cv.Flip(videoSourceImage, videoSourceImage, FlipMode.Y);
        }

        // Test difference in time between parallel and non-parallel code
        //Debug.Log (Time.realtimeSinceStartup - startTime);
    }
    //  Use the CamShift algorithm to track to base histogram throughout the
    // succeeding frames
    //
    // Pipeline per frame: back-project the tracked histogram, clean it with
    // erode/dilate, run CamShift seeded by _rectToTrack, and fuse the result
    // with a Kalman prediction. On success the Kalman model is corrected with
    // the CamShift estimate; on failure the ROI is re-centered on the
    // prediction alone.
    void CalculateCamShift(CvMat _image)
    {
        CvMat _backProject = CalculateBackProjection(_image, _histogramToTrack);

        // Create convolution kernel for erosion and dilation
        IplConvKernel elementErode  = Cv.CreateStructuringElementEx(10, 10, 5, 5, ElementShape.Rect, null);
        IplConvKernel elementDilate = Cv.CreateStructuringElementEx(4, 4, 2, 2, ElementShape.Rect, null);

        // Try eroding and then dilating the back projection
        // Hopefully this will get rid of the noise in favor of the blob objects.
        Cv.Erode(_backProject, _backProject, elementErode, 1);
        Cv.Dilate(_backProject, _backProject, elementDilate, 1);


        if (backprojWindowFlag)
        {
            Cv.ShowImage("Back Projection", _backProject);
        }

        // Parameters returned by Camshift algorithm
        CvBox2D         _outBox;
        CvConnectedComp _connectComp;

        // Set the criteria for the CamShift algorithm
        // Maximum 10 iterations and at least 1 pixel change in centroid
        CvTermCriteria term_criteria = Cv.TermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 10, 1);

        // Draw object center based on Kalman filter prediction
        // (prediction is a column vector; rows 0 and 1 hold x and y).
        CvMat _kalmanPrediction = _kalman.Predict();

        int predictX = Mathf.FloorToInt((float)_kalmanPrediction.GetReal2D(0, 0));
        int predictY = Mathf.FloorToInt((float)_kalmanPrediction.GetReal2D(1, 0));

        // Run the CamShift algorithm; a positive return means the object was found.
        if (Cv.CamShift(_backProject, _rectToTrack, term_criteria, out _connectComp, out _outBox) > 0)
        {
            // Use the CamShift estimate of the object center to update the Kalman model
            CvMat _kalmanMeasurement = Cv.CreateMat(2, 1, MatrixType.F32C1);
            // Update Kalman model with raw data from Camshift estimate
            _kalmanMeasurement.Set2D(0, 0, _outBox.Center.X); // Raw X position
            _kalmanMeasurement.Set2D(1, 0, _outBox.Center.Y); // Raw Y position
                                                              //_kalmanMeasurement.Set2D (2, 0, _outBox.Center.X - lastPosition.X);
                                                              //_kalmanMeasurement.Set2D (3, 0, _outBox.Center.Y - lastPosition.Y);

            lastPosition.X = Mathf.FloorToInt(_outBox.Center.X);
            lastPosition.Y = Mathf.FloorToInt(_outBox.Center.Y);

            _kalman.Correct(_kalmanMeasurement); // Correct Kalman model with raw data

            // CamShift function returns two values: _connectComp and _outBox.

            //	_connectComp contains is the newly estimated position and size
            //  of the region of interest. This is passed into the subsequent
            // call to CamShift
            // Update the ROI rectangle with CamShift's new estimate of the ROI
            _rectToTrack = CheckROIBounds(_connectComp.Rect);

            // Draw a rectangle over the tracked ROI
            // This method will draw the rectangle but won't rotate it.
            _image.DrawRect(_rectToTrack, CvColor.Aqua);
            _image.DrawMarker(predictX, predictY, CvColor.Aqua);

            // _outBox contains a rotated rectangle esimating the position, size, and orientation
            // of the object we want to track (specified by the initial region of interest).
            // We then take this estimation and draw a rotated bounding box.
            // This method will draw the rotated rectangle
            rotatedBoxToTrack = _outBox;

            // Draw a rotated rectangle representing Camshift's estimate of the
            // object's position, size, and orientation.
            _image.DrawPolyLine(rectangleBoxPoint(_outBox.BoxPoints()), true, CvColor.Red);
        }
        else
        {
            //Debug.Log ("Object lost by Camshift tracker");

            // Fall back to the Kalman prediction: mark the predicted center and
            // recenter the (unchanged-size) ROI on it for the next frame.
            _image.DrawMarker(predictX, predictY, CvColor.Purple, MarkerStyle.CircleLine);

            _rectToTrack = CheckROIBounds(new CvRect(predictX - Mathf.FloorToInt(_rectToTrack.Width / 2),
                                                     predictY - Mathf.FloorToInt(_rectToTrack.Height / 2),
                                                     _rectToTrack.Width, _rectToTrack.Height));
            _image.DrawRect(_rectToTrack, CvColor.Purple);
        }

        if (trackWindowFlag)
        {
            Cv.ShowImage("Image", _image);
        }
    }