Example #1
        void Start()
        {
            List <AzureKinectSensor> kinectSensors = _AzureKinectManager.SensorList;

            if (_DeviceNumber < kinectSensors.Count)
            {
                _KinectSensor = _AzureKinectManager.SensorList[_DeviceNumber];
                if (_KinectSensor != null)
                {
                    Debug.Log("ColorResolution: " + _KinectSensor.ColorImageWidth + "x" + _KinectSensor.ColorImageHeight);
                    Debug.Log("DepthResolution: " + _KinectSensor.DepthImageWidth + "x" + _KinectSensor.DepthImageHeight);

                    _DepthRawData = new byte[_KinectSensor.DepthImageWidth * _KinectSensor.DepthImageHeight * sizeof(ushort)];

                    CameraCalibration deviceDepthCameraCalibration = _KinectSensor.DeviceCalibration.DepthCameraCalibration;
                    CameraCalibration deviceColorCameraCalibration = _KinectSensor.DeviceCalibration.ColorCameraCalibration;

                    K4A.Calibration calibration = new K4A.Calibration();
                    calibration.DepthCameraCalibration = CreateCalibrationCamera(deviceDepthCameraCalibration, _KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight);
                    calibration.ColorCameraCalibration = CreateCalibrationCamera(deviceColorCameraCalibration, _KinectSensor.ColorImageWidth, _KinectSensor.ColorImageHeight);

                    _PointCloudRenderer = GetComponent <PointCloudRenderer>();
                    _PointCloudRenderer.GenerateMesh(calibration, K4A.CalibrationType.Depth);

                    _AzureKinectManager.OpenDevice(_DeviceNumber);
                }
            }
        }
Example #2
        void Start()
        {
            _KinectSensor = _AzureKinectManager.Sensor;
            if (_KinectSensor != null)
            {
                Debug.Log("ColorResolution: " + _KinectSensor.ColorImageWidth + "x" + _KinectSensor.ColorImageHeight);
                Debug.Log("DepthResolution: " + _KinectSensor.DepthImageWidth + "x" + _KinectSensor.DepthImageHeight);

                _TransformedColorImageTexture = new Texture2D(_KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight, TextureFormat.BGRA32, false);
                _DepthRawData      = new byte[_KinectSensor.DepthImageWidth * _KinectSensor.DepthImageHeight * sizeof(ushort)];
                _DepthImageTexture = new Texture2D(_KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight, TextureFormat.R16, false);

                _ColorImageTexture            = new Texture2D(_KinectSensor.ColorImageWidth, _KinectSensor.ColorImageHeight, TextureFormat.BGRA32, false);
                _TransformedDepthRawData      = new byte[_KinectSensor.ColorImageWidth * _KinectSensor.ColorImageHeight * sizeof(ushort)];
                _TransformedDepthImageTexture = new Texture2D(_KinectSensor.ColorImageWidth, _KinectSensor.ColorImageHeight, TextureFormat.R16, false);

                _PointCloudRenderer = GetComponent <PointCloudRenderer>();

                CameraCalibration deviceDepthCameraCalibration = _KinectSensor.DeviceCalibration.DepthCameraCalibration;
                CameraCalibration deviceColorCameraCalibration = _KinectSensor.DeviceCalibration.ColorCameraCalibration;

                K4A.Calibration calibration = new K4A.Calibration();
                calibration.DepthCameraCalibration = CreateCalibrationCamera(deviceDepthCameraCalibration, _KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight);
                calibration.ColorCameraCalibration = CreateCalibrationCamera(deviceColorCameraCalibration, _KinectSensor.ColorImageWidth, _KinectSensor.ColorImageHeight);

#if COLOR_TO_DEPTH
                _PointCloudRenderer.GenerateMesh(calibration, K4A.CalibrationType.Depth);
#else
                _PointCloudRenderer.GenerateMesh(calibration, K4A.CalibrationType.Color);
#endif
            }
        }
 // Use this for initialization
 void Start()
 {
     camCal = GetComponentInParent<CameraCalibration>();
     mtCal = GetComponent<MedientheaterCalibration>();
     char mc = mode == SCALE ? 'S' : 'V';
     mtCal.texts[selection].text = mc + mtCal.texts[selection].text;
 }
        override public void Process()
        {
            if (!Enabled)
            {
                return;
            }

            FInput.Image.GetImage(TColourFormat.L8, FGrayscale);

            Size SizeNow = BoardSize;

            PointF[] points = CameraCalibration.FindChessboardCorners(FGrayscale.GetImage() as Image <Gray, byte>, SizeNow, CALIB_CB_TYPE.ADAPTIVE_THRESH);

            lock (FFoundPointsLock)
            {
                if (points == null)
                {
                    FFoundPoints.SliceCount = 0;
                }
                else
                {
                    FFoundPoints.SliceCount = SizeNow.Width * SizeNow.Height;
                    for (int i = 0; i < FFoundPoints.SliceCount; i++)
                    {
                        FFoundPoints[i] = new Vector2D(points[i].X, points[i].Y);
                    }
                }
            }
        }
        public void Calibrate()
        {
            MCvPoint3D32f[][] cornerPointsOfImages = new MCvPoint3D32f[this.AcceptedImagesCount][];
            for (int i = 0; i < this.AcceptedImagesCount; i++)
            {
                // for each captured image the physical coordinates of the corner points of the chess board are the same
                // in the coordinate system of the chess board.
                cornerPointsOfImages[i] = this.ChessBoard.CornerPoints;
            }

            IntrinsicCameraParameters intrinsicCameraParameters = new IntrinsicCameraParameters();

            // We initialize the intrinsic matrix such that the two focal lengths have a ratio of 1.0
            intrinsicCameraParameters.IntrinsicMatrix[0, 0] = 1.0;
            intrinsicCameraParameters.IntrinsicMatrix[1, 1] = 1.0;

            ExtrinsicCameraParameters[] extrinsicCameraParametersForImages;             // will hold the rotation and translation for each captured chess board
            CameraCalibration.CalibrateCamera(
                cornerPointsOfImages,
                m_AcceptedImageCorners.ToArray(),
                this.OriginalImage.Size,
                intrinsicCameraParameters,
                Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_FIX_ASPECT_RATIO | Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_FIX_K3 | Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_ZERO_TANGENT_DIST,
                out extrinsicCameraParametersForImages);

            m_IntrinsicCameraParameters = intrinsicCameraParameters;
            this.State = CalibratorState.Calibrated;
        }
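A note on the CV_CALIB_FIX_ASPECT_RATIO flag used above: OpenCV then treats only f_y as a free parameter and keeps the focal-length ratio at the value seeded in the input matrix (when CV_CALIB_USE_INTRINSIC_GUESS is not set, only that ratio is taken from the seed values). Initializing both diagonal entries to 1.0 therefore constrains the result to

\[
\frac{f_x}{f_y} = \frac{f_x^{(0)}}{f_y^{(0)}} = 1.0 \;\Longrightarrow\; f_x = f_y .
\]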
Example #6
        private void FindChessBoardCorners()
        {
            this.GrayImage = this.CalibrationImage.Convert <Gray, Byte>();

            bool foundAllCorners = CameraCalibration.FindChessboardCorners(
                this.GrayImage,
                m_PatternSize,
                Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH | Emgu.CV.CvEnum.CALIB_CB_TYPE.FILTER_QUADS,
                out m_FoundCorners);

            PointF[][] foundPointsForChannels = new PointF[][] { m_FoundCorners };
            if (foundAllCorners)
            {
                MCvTermCriteria terminationCriteria;
                terminationCriteria.max_iter = 30;
                terminationCriteria.epsilon  = 0.1;
                terminationCriteria.type     = Emgu.CV.CvEnum.TERMCRIT.CV_TERMCRIT_EPS | Emgu.CV.CvEnum.TERMCRIT.CV_TERMCRIT_ITER;

                this.GrayImage.FindCornerSubPix(foundPointsForChannels, new Size(11, 11), new Size(-1, -1), terminationCriteria);

                CameraCalibration.DrawChessboardCorners(this.GrayImage, m_PatternSize, m_FoundCorners, foundAllCorners);

                CircleF circle = new CircleF(m_FoundCorners[1], 3.0f);
                this.GrayImage.Draw(circle, new Gray(255), 2);
                PrintCorners();
            }
            m_ImageBoxWithHeading.ImageBox.Image = this.GrayImage;
        }
        // orients the image so that the plane of the sign is parallel to the camera
        Image <Bgr, Byte> orient(Image <Bgr, Byte> i)
        {
            if (annotation_file == "")
            {
                return(i);
            }

            image = i.Clone();
            List <Point> points = (new ReadAnnotationPoints.AnnotationPoints(annotation_file)).Points;

            PointF[] src = new PointF[4];
            PointF[] des = new PointF[4];

            src[0] = new PointF(30, 200f);
            src[1] = new PointF(200, 30f);
            src[2] = new PointF(370, 200f);
            src[3] = new PointF(200, 370f);

            for (int x = 0; x < 4; x++)
            {
                des[x] = points[x];
            }

            HomographyMatrix homography = CameraCalibration.GetPerspectiveTransform(src, des);

            image = image.WarpPerspective(homography, image.Width, image.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR,
                                          Emgu.CV.CvEnum.WARP.CV_WARP_INVERSE_MAP, new Bgr(200, 0, 0));

            image.ROI = new Rectangle(0, 0, 400, 400);

            return(image);
        }
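For reference, GetPerspectiveTransform as used above solves (up to scale) for the 3×3 homography H that maps each of the four src points (x, y) to the corresponding des point (x', y'):

\[
\begin{pmatrix} x' \\ y' \\ 1 \end{pmatrix} \sim
\begin{pmatrix} h_{11} & h_{12} & h_{13} \\ h_{21} & h_{22} & h_{23} \\ h_{31} & h_{32} & h_{33} \end{pmatrix}
\begin{pmatrix} x \\ y \\ 1 \end{pmatrix},
\qquad
x' = \frac{h_{11}x + h_{12}y + h_{13}}{h_{31}x + h_{32}y + h_{33}}, \quad
y' = \frac{h_{21}x + h_{22}y + h_{23}}{h_{31}x + h_{32}y + h_{33}} .
\]

The CV_WARP_INVERSE_MAP flag tells WarpPerspective to treat the supplied matrix as the destination-to-source mapping, which is consistent with computing the transform from the fixed src diamond to the annotated des points and then warping the annotated sign back onto that diamond.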
Example #8
        public void Update(Image <Bgr, byte> nowImage)
        {
            DebugImage = nowImage;
            DerivePitchEdges(nowImage);

            TopLeft     = GetTopLeft();
            TopRight    = GetTopRight();
            BottomLeft  = GetBottomLeft();
            BottomRight = GetBottomRight();

            PointF[] sourcePoints = { TopLeft, TopRight, BottomLeft, BottomRight };
            PointF[] destPoints   =
            {
                new PointF(Instep,              Border),
                new PointF(PitchWidth - Instep, Border),
                new PointF(Instep,              PitchHeight + Border),
                new PointF(PitchWidth - Instep, PitchHeight + Border)
            };

            m_WarpMat    = CameraCalibration.GetPerspectiveTransform(sourcePoints, destPoints);
            m_WarpMatInv = CameraCalibration.GetPerspectiveTransform(destPoints, sourcePoints);

            PerspImage = nowImage.WarpPerspective(m_WarpMat, 1205, (int)(PitchHeight + Border * 2),
                                                  Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC,
                                                  Emgu.CV.CvEnum.WARP.CV_WARP_FILL_OUTLIERS,
                                                  new Bgr(200, 200, 200)).Convert <Bgr, byte>();
            ThresholdedPerspImage = ImageProcess.ThresholdHsv(PerspImage, 22, 89, 33, 240, 40, 250).
                                    ThresholdBinaryInv(new Gray(100), new Gray(255));

            //DerivePolePositions();
        }
Example #9
        public void TestChessboardCalibration()
        {
            Size patternSize = new Size(9, 6);

            Image <Gray, Byte> chessboardImage = EmguAssert.LoadImage <Gray, byte>("left01.jpg");

            Util.VectorOfPointF corners = new Util.VectorOfPointF();
            bool patternWasFound        = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

            chessboardImage.FindCornerSubPix(
                new PointF[][] { corners.ToArray() },
                new Size(10, 10),
                new Size(-1, -1),
                new MCvTermCriteria(0.05));

            MCvPoint3D32f[]           objectPts = CalcChessboardCorners(patternSize, 1.0f);
            IntrinsicCameraParameters intrisic  = new IntrinsicCameraParameters(8);

            ExtrinsicCameraParameters[] extrinsic;
            double error = CameraCalibration.CalibrateCamera(new MCvPoint3D32f[][] { objectPts }, new PointF[][] { corners.ToArray() },
                                                             chessboardImage.Size, intrisic, CvEnum.CalibType.Default, new MCvTermCriteria(30, 1.0e-10), out extrinsic);

            CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
            //CameraCalibration.DrawChessboardCorners(chessboardImage, patternSize, corners);
            Image <Gray, Byte> undistorted = intrisic.Undistort(chessboardImage);
            //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));
        }
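CalcChessboardCorners is referenced but not shown in the example above. A minimal sketch of such a helper, assuming a row-major grid of inner corners on the Z = 0 plane with a uniform square size (the same layout that Example #30 below builds inline), might look like this:

        private static MCvPoint3D32f[] CalcChessboardCorners(Size patternSize, float squareSize)
        {
            // One physical 3D point per inner corner, row-major, on the Z = 0 plane,
            // in the same order in which FindChessboardCorners reports image corners.
            MCvPoint3D32f[] corners = new MCvPoint3D32f[patternSize.Width * patternSize.Height];
            for (int row = 0; row < patternSize.Height; row++)
            {
                for (int col = 0; col < patternSize.Width; col++)
                {
                    corners[row * patternSize.Width + col] =
                        new MCvPoint3D32f(col * squareSize, row * squareSize, 0.0f);
                }
            }
            return corners;
        }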
Example #10
        static void Main(string[] args)
        {
            // get the chess board width
            Console.WriteLine("Chess board width");
            Int32 width = Convert.ToInt32(Console.ReadLine());

            // get the chess board height
            Console.WriteLine("Chess board height");
            Int32 height = Convert.ToInt32(Console.ReadLine());

            // define the chess board size
            Size patternSize = new Size(width, height);

            // get the input gray scale image
            Image <Gray, Byte> InputImage = new Image <Gray, Byte>("calib-checkerboard.png");

            // show the input image
            CvInvoke.cvShowImage("gray scale input image", InputImage.Ptr);

            // create a buffer to store the chess board corner locations
            PointF[] corners = new PointF[] { };

            // find the chess board corners
            corners = CameraCalibration.FindChessboardCorners(InputImage, patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH | Emgu.CV.CvEnum.CALIB_CB_TYPE.FILTER_QUADS);

            // draw the chess board corner markers on the image
            CameraCalibration.DrawChessboardCorners(InputImage, patternSize, corners);

            // show the result
            CvInvoke.cvShowImage("Result", InputImage.Ptr);

            // pause
            CvInvoke.cvWaitKey(0);
        }
Example #11
                public DeviceCalibration(bool dummy = true)
                {
                    source   = 0;
                    date     = "1/17/2019; 2:24PM";
                    baseline = 0.059f;
                    cameras  = new List <CameraCalibration>();
                    for (int i = 0; i < 2; i++)
                    {
                        CameraCalibration calib = new CameraCalibration();
                        calib.distCoeffs          = new List <float>();
                        calib.cameraMatrix        = new List <float>();
                        calib.rectificationMatrix = new List <float>();
                        calib.newCameraMatrix     = new List <float>();

                        for (int j = 0; j < 12; j++)
                        {
                            if (j < 8)
                            {
                                calib.distCoeffs.Add(j);
                            }
                            if (j < 9)
                            {
                                calib.rectificationMatrix.Add(j);
                            }
                            if (j < 9)
                            {
                                calib.cameraMatrix.Add(j);
                            }
                            calib.newCameraMatrix.Add(j);
                        }
                        cameras.Add(calib);
                    }
                }
        private PointF[] CollectImageCorners(FileInfo calibrationImageFile)
        {
            Image <Bgr, Byte>  calibrationImage = new Image <Bgr, Byte>(calibrationImageFile.FullName);
            Image <Gray, Byte> grayImage        = calibrationImage.Convert <Gray, Byte>();

            PointF[] foundCorners;
            bool     foundAllCorners = CameraCalibration.FindChessboardCorners(
                grayImage,
                m_MainModel.CaptureSettings.ChessBoard.PatternSize,
                Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH | Emgu.CV.CvEnum.CALIB_CB_TYPE.FILTER_QUADS,
                out foundCorners);

            PointF[][] foundPointsForChannels = new PointF[][] { foundCorners };
            if (foundAllCorners)
            {
                MCvTermCriteria terminationCriteria;
                terminationCriteria.max_iter = 30;
                terminationCriteria.epsilon  = 0.1;
                terminationCriteria.type     = Emgu.CV.CvEnum.TERMCRIT.CV_TERMCRIT_EPS | Emgu.CV.CvEnum.TERMCRIT.CV_TERMCRIT_ITER;

                grayImage.FindCornerSubPix(foundPointsForChannels, new Size(11, 11), new Size(-1, -1), terminationCriteria);
            }

            return(foundCorners);
        }
        public void Initialize(AzureKinectSensor kinectSensor)
        {
            if (!_Initialized)
            {
                _KinectSensor = kinectSensor;
                if (_KinectSensor != null)
                {
                    Debug.Log("ColorResolution: " + _KinectSensor.ColorImageWidth + "x" + _KinectSensor.ColorImageHeight);
                    Debug.Log("DepthResolution: " + _KinectSensor.DepthImageWidth + "x" + _KinectSensor.DepthImageHeight);

                    // _LowPassFilter = new ExponentialSmoothingLowPassFilter((uint)_Accel.Length, 0.05f);
                    // _LowPassFilter = new DoubleExponentialSmoothingLowPassFilter((uint)_Accel.Length, 0.3f, 0.3f);
                    _LowPassFilter = new ButterworthFilter(_Order, _SamplingFrequency, _CutoffFrequency, (uint)_Accel.Length);

                    _DepthRawData       = new byte[_KinectSensor.DepthImageWidth * _KinectSensor.DepthImageHeight * sizeof(ushort)];
                    _PointCloudRenderer = GetComponent <PointCloudRenderer>();

                    CameraCalibration deviceDepthCameraCalibration = _KinectSensor.DeviceCalibration.DepthCameraCalibration;
                    CameraCalibration deviceColorCameraCalibration = _KinectSensor.DeviceCalibration.ColorCameraCalibration;

                    K4A.Calibration calibration = new K4A.Calibration();
                    calibration.DepthCameraCalibration = CreateCalibrationCamera(deviceDepthCameraCalibration, _KinectSensor.DepthImageWidth, _KinectSensor.DepthImageHeight);
                    calibration.ColorCameraCalibration = CreateCalibrationCamera(deviceColorCameraCalibration, _KinectSensor.ColorImageWidth, _KinectSensor.ColorImageHeight);

                    _PointCloudRenderer.GenerateMesh(calibration, K4A.CalibrationType.Depth);

                    _Initialized = true;
                }
            }
        }
Example #14
        public MainForm()
        {
            InitializeComponent();
            m_CameraParameters = CameraParameters.Load(@"..\..\..\..\CalibrationFiles\MicrosoftCinema\Focus14\1280x720\MicrosoftCinemaFocus14_1280x720.txt");

            m_RawImage         = new Image <Bgr, byte>(@"..\..\..\..\CalibrationFiles\MicrosoftCinema\Focus14\1280x720\GroundProjectionCalibration.jpg");
            this.CurrentImage  = m_RawImage.Clone();
            this.BirdsEyeImage = m_RawImage.Clone();

            InitializeUndistortMap(m_RawImage);

            Undistort(m_RawImage, this.CurrentImage);

            this.ChessBoard = new ChessBoard(8, 10);
            PointF[] foundCorners = CollectImageCorners();

            DrawFoundCorners(this.CurrentImage, foundCorners);

            // We pick four corners for perspective transform
            PointF[] outerCorners = new PointF[4];
            outerCorners[0] = foundCorners[0];
            outerCorners[1] = foundCorners[this.ChessBoard.PatternSize.Width - 1];
            outerCorners[2] = foundCorners[this.ChessBoard.PatternSize.Width * this.ChessBoard.PatternSize.Height - this.ChessBoard.PatternSize.Width];
            outerCorners[3] = foundCorners[this.ChessBoard.PatternSize.Width * this.ChessBoard.PatternSize.Height - 1];
            DrawOuterCorners(this.CurrentImage, outerCorners);

            float side;
            float bottom;
            float centerX;

            side   = 25.0f;
            bottom = 310.0f;

            PointF[] physicalPointsForCalculation = new PointF[4];
            physicalPointsForCalculation[0] = new PointF(-3 * side, bottom + 8 * side);
            physicalPointsForCalculation[1] = new PointF(+3 * side, bottom + 8 * side);
            physicalPointsForCalculation[2] = new PointF(-3 * side, bottom);
            physicalPointsForCalculation[3] = new PointF(+3 * side, bottom);

            m_BirdsEyeViewTransformationForCalculation = CameraCalibration.GetPerspectiveTransform(outerCorners, physicalPointsForCalculation);
            HomographyMatrixSupport.Save(m_BirdsEyeViewTransformationForCalculation, "BirdsEyeViewTransformationForCalculation.txt");

            side    = 8f;
            bottom  = 700.0f;
            centerX = (float)m_CameraParameters.Intrinsic.Cx;

            PointF[] physicalPointsForUI = new PointF[4];
            physicalPointsForUI[0] = new PointF(-3 * side + centerX, bottom - 8 * side);
            physicalPointsForUI[1] = new PointF(+3 * side + centerX, bottom - 8 * side);
            physicalPointsForUI[2] = new PointF(-3 * side + centerX, bottom);
            physicalPointsForUI[3] = new PointF(+3 * side + centerX, bottom);

            m_BirdsEyeViewTransformationForUI = CameraCalibration.GetPerspectiveTransform(outerCorners, physicalPointsForUI);
            HomographyMatrixSupport.Save(m_BirdsEyeViewTransformationForUI, "BirdsEyeViewTransformationForUI.txt");

            //m_BirdsEyeViewTransformationForCalculation.ProjectPoints(outerCorners);

            CreateAndDrawBirdsEyeView();
        }
Example #15
 /// <summary>
 /// single camera calibration
 /// </summary>
 /// <param name="objectPoints">corner values in world coordinates</param>
 /// <param name="imagePoints">corner values in image coordinates</param>
 /// <param name="imageSize">image size</param>
 /// <param name="intrinsicParam">camera intrinsics</param>
 /// <param name="extrinsicParams">camera extrinsics</param>
 /// <returns>reprojection error</returns>
 private double singleCameraCalibration(MCvPoint3D32f[][] objectPoints,
                                        PointF[][] imagePoints,
                                        Size imageSize,
                                        IntrinsicCameraParameters intrinsicParam,
                                        ExtrinsicCameraParameters[] extrinsicParams
                                        )
 {
     return(CameraCalibration.CalibrateCamera(objectPoints, imagePoints, imageSize, intrinsicParam,
                                              CALIB_TYPE.CV_CALIB_FIX_K3, criteria, out extrinsicParams));
 }
Example #16
        private static PointF[] FindChessboardCorners(Image <Gray, byte> image, Size patternSize)
        {
            var corners = CameraCalibration.FindChessboardCorners(image, patternSize, CALIB_CB_TYPE.ADAPTIVE_THRESH);

            if (corners != null)
            {
                image.FindCornerSubPix(new[] { corners }, new Size(11, 11), new Size(-1, -1),
                                       new MCvTermCriteria(30, 0.01));
            }
            return(corners);
        }
Example #17
    public double calibrateFromCorrespondences(List <Vector3> imagePoints, List <Vector3> worldPoints)
    {
        PointF[][]                imagePointsCvMat = PutImagePointsIntoArray(imagePoints, imagePoints.Count);
        MCvPoint3D32f[][]         worldPointsCvMat = PutObjectPointsIntoArray(worldPoints, imagePoints.Count, _numImages);
        IntrinsicCameraParameters intrinsic        = CreateIntrinsicGuess();

        ExtrinsicCameraParameters[] extrinsics;
        var calibrationError = CameraCalibration.CalibrateCamera(worldPointsCvMat, imagePointsCvMat, _size, intrinsic, _flags, _termCriteria, out extrinsics);

        ApplyCalibrationToUnityCamera(intrinsic, extrinsics[0]);
        return(calibrationError);
    }
Example #18
        //called when data for any output pin is requested
        public void Evaluate(int SpreadMax)
        {
            if (FPinInDo[0])
            {
                bool useVVVVCoords = FPinInCoordSystem[0] == TCoordinateSystem.VVVV;

                SpreadMax = Math.Max(FPinInObject.SliceCount, FPinInImage.SliceCount);

                FPinOutExtrinsics.SliceCount = SpreadMax;
                FPinOutStatus.SliceCount     = SpreadMax;

                for (int i = 0; i < SpreadMax; i++)
                {
                    try
                    {
                        if (FPinInObject[i].SliceCount == 0 || FPinInImage[i].SliceCount == 0)
                        {
                            throw new Exception("No datapoints");
                        }
                        if (FPinInImage[i].SliceCount == 1)
                        {
                            throw new Exception("Only 1 image point is being input per board, check SliceCount!");
                        }
                        if (FPinInObject[i].SliceCount == 1)
                        {
                            throw new Exception("Only 1 object point is being input per board, check SliceCount!");
                        }
                        if (FPinInIntrinsics[i].intrinsics == null)
                        {
                            throw new Exception("Waiting for camera calibration intrinsics");
                        }

                        ExtrinsicCameraParameters extrinsics = CameraCalibration.FindExtrinsicCameraParams2(MatrixUtils.ObjectPoints(FPinInObject[i], useVVVVCoords), MatrixUtils.ImagePoints(FPinInImage[i]), FPinInIntrinsics[i].intrinsics);
                        FPinOutExtrinsics[i] = new Extrinsics(extrinsics);

                        if (useVVVVCoords)
                        {
                            FPinOutView[i] = MatrixUtils.ConvertToVVVV(FPinOutExtrinsics[i].Matrix);
                        }
                        else
                        {
                            FPinOutView[i] = FPinOutExtrinsics[i].Matrix;
                        }

                        FPinOutStatus[i] = "OK";
                    }
                    catch (Exception e)
                    {
                        FPinOutStatus[i] = e.Message;
                    }
                }
            }
        }
Example #19
        public void homographie(List <IntPoint> LimiteTerain, bool imgCol)
        {
            /* AForge => works, but slow:
             * UnImgReel = UnmanagedImage.FromManagedImage(imgReel);
             *
             * // Replace the image with the playing field detected in it
             * if (LimiteTerain.Count == 4)
             * {
             *   QuadrilateralTransformation quadrilateralTransformation = new QuadrilateralTransformation(LimiteTerain, UnImgReel.Width, UnImgReel.Height);
             *   UnImgReel = quadrilateralTransformation.Apply(UnImgReel);
             * }
             */

            if (LimiteTerain.Count == 4)
            {
                int wid = max(LimiteTerain[0].X, LimiteTerain[1].X, LimiteTerain[2].X, LimiteTerain[3].X) - min(LimiteTerain[0].X, LimiteTerain[1].X, LimiteTerain[2].X, LimiteTerain[3].X);
                int hei = max(LimiteTerain[0].Y, LimiteTerain[1].Y, LimiteTerain[2].Y, LimiteTerain[3].Y) - min(LimiteTerain[0].Y, LimiteTerain[1].Y, LimiteTerain[2].Y, LimiteTerain[3].Y);

                Image <Bgr, Byte> a    = new Image <Bgr, byte>(wid, hei);
                PointF[]          pts1 = new PointF[4];
                PointF[]          pts2 = new PointF[4];

                pts1[0] = new PointF(0, 0);
                pts1[1] = new PointF(wid, 0);
                pts1[3] = new PointF(wid, hei);
                pts1[2] = new PointF(0, hei);

                pts2[0] = new PointF(LimiteTerain[0].X, LimiteTerain[0].Y);
                pts2[1] = new PointF(LimiteTerain[1].X, LimiteTerain[1].Y);
                pts2[3] = new PointF(LimiteTerain[2].X, LimiteTerain[2].Y);
                pts2[2] = new PointF(LimiteTerain[3].X, LimiteTerain[3].Y);

                homography = CameraCalibration.GetPerspectiveTransform(pts2, pts1);
                MCvScalar s = new MCvScalar(0, 0, 0);

                //CvInvoke.cvFindHomography(matSource, matDest, homography, Emgu.CV.CvEnum.HOMOGRAPHY_METHOD.DEFAULT, 3.0, maskMat);
                CvInvoke.cvWarpPerspective(imgRecu, a, homography, (int)Emgu.CV.CvEnum.INTER.CV_INTER_NN, s);
                // CvInvoke.cvWarpAffine(imgRecu, a, homography, (int)Emgu.CV.CvEnum.INTER.CV_INTER_NN, s);

                imgRecu   = a;
                UnImgReel = UnmanagedImage.FromManagedImage(a.ToBitmap());
            }
            else
            {
                UnImgReel = UnmanagedImage.FromManagedImage(imgReel);
            }


            if (imgCol)
            {
                ImgColor = UnImgReel.Clone();
            }
        }
Example #20
        /// <summary>
        /// Recover the homography matrix using RANSAC. If the matrix cannot be recovered, null is returned.
        /// </summary>
        /// <param name="matchedFeatures">The matched features; only the first ModelFeature will be considered</param>
        /// <returns>The homography matrix; if it cannot be found, null is returned</returns>
        public static HomographyMatrix GetHomographyMatrixFromMatchedFeatures(MatchedSURFFeature[] matchedFeatures)
        {
            if (matchedFeatures.Length < 4)
            {
                return(null);
            }

            HomographyMatrix homography;

            if (matchedFeatures.Length < _randsacRequiredMatch)
            { // Too few points for RANSAC, use 4 points only
                PointF[] pts1 = new PointF[4];
                PointF[] pts2 = new PointF[4];
                for (int i = 0; i < 4; i++)
                {
                    pts1[i] = matchedFeatures[i].SimilarFeatures[0].Feature.Point.pt;
                    pts2[i] = matchedFeatures[i].ObservedFeature.Point.pt;
                }
                homography = CameraCalibration.GetPerspectiveTransform(pts1, pts2);
            }
            else
            {
                // use RANSAC to find the homography matrix
                PointF[] pts1 = new PointF[matchedFeatures.Length];
                PointF[] pts2 = new PointF[matchedFeatures.Length];
                for (int i = 0; i < matchedFeatures.Length; i++)
                {
                    pts1[i] = matchedFeatures[i].SimilarFeatures[0].Feature.Point.pt;
                    pts2[i] = matchedFeatures[i].ObservedFeature.Point.pt;
                }

                homography = CameraCalibration.FindHomography(
                    pts1, //points on the model image
                    pts2, //points on the observed image
                    CvEnum.HOMOGRAPHY_METHOD.RANSAC,
                    3);
                if (homography == null)
                {
                    return(null);
                }
            }

            if (homography.IsValid(10))
            {
                return(homography);
            }
            else
            {
                homography.Dispose();
                return(null);
            }
        }
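The trailing argument 3 passed to FindHomography above is the RANSAC reprojection threshold in pixels: a correspondence (p_i, p_i') counts as an inlier of a candidate homography H only if

\[
\left\lVert \, p_i' - \pi(H\,\tilde{p}_i) \, \right\rVert_2 < 3 ,
\]

where \(\tilde{p}_i\) is p_i in homogeneous coordinates and \(\pi\) divides by the third component.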
        private static void WriteCalibration(BinaryWriter file, string prefix, CameraCalibration cal)
        {
            var rs = string.Join(" ", from f in cal.Extrinsics.Rotation select f.ToString("R"));

            file.Write(Encoding.ASCII.GetBytes($"device-{prefix}-calibration-rotation: {rs}\n"));
            var ts = string.Join(" ", from f in cal.Extrinsics.Translation select f.ToString("R"));

            file.Write(Encoding.ASCII.GetBytes($"device-{prefix}-calibration-translation: {ts}\n"));
            file.Write(Encoding.ASCII.GetBytes($"device-{prefix}-calibration-type: {cal.Intrinsics.Type}\n"));
            var ps = string.Join(" ", from f in cal.Intrinsics.Parameters select f.ToString("R"));

            file.Write(Encoding.ASCII.GetBytes($"device-{prefix}-calibration-parameters: {ps}\n"));
        }
Example #22
    // Use this for initialization
    void Start()
    {
        Renderer renderer = GetComponent<Renderer>();
        StreamReader sr = new StreamReader(Scene + ".txt");
        string[] s = sr.ReadLine().Split(' ');
        height = System.Convert.ToInt32(s[0]);
        width = System.Convert.ToInt32(s[1]);
        CameraCalibration cc = new CameraCalibration();
        cc.pathXML = "View_" + Scene + ".xml";
        cc.GetDataFromXML();
        cc.GetMatrixesOfCameras();

        //Texture2D texture = new Texture2D(Mathf.CeilToInt(max_x - min_x), Mathf.CeilToInt(max_y - min_y));
        Vector2 hor = cc.FindHorizont();
        float k = -hor[0] / hor[1];
        float b = -1 / hor[1];

        float im_min_x = width, im_min_y = height, im_max_x = 0, im_max_y = 0;
        for (int x = 0; x < width; x++)
            for (int y = 0; y < height; y++)
                if (OnGround(x, y, k, b))
                {
                    if (im_min_x > x)
                        im_min_x = x;
                    if (im_min_y > y)
                        im_min_y = y;
                    if (im_max_x < x)
                        im_max_x = x;
                    if (im_max_y < y)
                        im_max_y = y;
                }
        disp (im_min_x, im_min_y, im_max_x, im_max_y);
        // manual override of the lower bound
        im_min_y = 150.0f;
        Vector3 leftUp, leftDown, rightUp, rightDown, center;
        leftUp = cc.Img2World(im_min_x, im_max_y);
        leftDown = cc.Img2World(im_min_x, im_min_y);
        rightDown = cc.Img2World(im_max_x, im_min_y);
        rightUp = cc.Img2World(im_max_x, im_max_y);
        center = cc.Img2World(width / 2, height / 2);
        float min_x = min (leftUp.x, leftDown.x, rightUp.x, rightDown.x);
        float min_y = min (leftUp.y, leftDown.y, rightUp.y, rightDown.y);
        float max_x = max (leftUp.x, leftDown.x, rightUp.x, rightDown.x);
        float max_y = max (leftUp.y, leftDown.y, rightUp.y, rightDown.y);
        transform.position = center;
        disp (Mathf.CeilToInt(max_x - min_x), Mathf.CeilToInt(max_y - min_y));
        transform.localScale = new Vector3((max_x - min_x) / 2,1.0f, (max_y - min_y) / 2);
        //Texture2D texture = new Texture2D(Mathf.CeilToInt(max_x - min_x), Mathf.CeilToInt(max_y - min_y));
        Texture2D texture = new Texture2D(10000, 10000);
    }
Example #23
        public bool OpenSensor(int deviceIndex = 0)
        {
            if (_KinectSensor != null)
            {
                Debug.Log("AzureKinect [" + _KinectSensor.SerialNum + "] has already been opened.");
                return(false);
            }

            _KinectSensor = Device.Open(deviceIndex);
            if (_KinectSensor == null)
            {
                Debug.Log("AzureKinect cannot be opened.");
                return(false);
            }

            DeviceConfiguration kinectConfig = new DeviceConfiguration();

            kinectConfig.ColorFormat     = _ColorImageFormat;
            kinectConfig.ColorResolution = (ColorResolution)_ColorCameraMode;
            kinectConfig.DepthMode       = (DepthMode)_DepthCameraMode;

            if (_ColorCameraMode != ColorCameraMode._4096_x_3072_15fps &&
                _DepthCameraMode != DepthCameraMode._1024x1024_15fps)
            {
                kinectConfig.CameraFPS = FPS.FPS30;
            }
            else
            {
                kinectConfig.CameraFPS = FPS.FPS15;
            }

            _KinectSensor.StartCameras(kinectConfig);
            _IsCameraStarted = true;

            _KinectSensor.StartImu();

            _DeviceCalibration = _KinectSensor.GetCalibration();
            _Transformation    = _DeviceCalibration.CreateTransformation();

            CameraCalibration colorCamera = _DeviceCalibration.ColorCameraCalibration;

            _ColorImageWidth  = colorCamera.ResolutionWidth;
            _ColorImageHeight = colorCamera.ResolutionHeight;

            CameraCalibration depthCamera = _DeviceCalibration.DepthCameraCalibration;

            _DepthImageWidth  = depthCamera.ResolutionWidth;
            _DepthImageHeight = depthCamera.ResolutionHeight;

            return(true);
        }
Example #24
        public void Evaluate(int SpreadMax)
        {
            if (FSource.IsChanged || FTarget.IsChanged)
            {
                SpreadMax = Math.Max(FSource.SliceCount, FTarget.SliceCount);
                FTransformOut.SliceCount = SpreadMax;
                FStatus.SliceCount       = SpreadMax;

                for (int slice = 0; slice < SpreadMax; slice++)
                {
                    try
                    {
                        var source = FSource[slice];
                        var target = FTarget[slice];

                        if (source.SliceCount < 4 || target.SliceCount < 4)
                        {
                            throw(new Exception("You need at least 4 source and 4 target points"));
                        }

                        int      sliceSpreadMax = Math.Max(source.SliceCount, target.SliceCount);
                        PointF[] sourcePoints   = new PointF[sliceSpreadMax];
                        PointF[] targetPoints   = new PointF[sliceSpreadMax];

                        for (int i = 0; i < sliceSpreadMax; i++)
                        {
                            sourcePoints[i].X = (float)source[i].x;
                            sourcePoints[i].Y = (float)source[i].y;

                            targetPoints[i].X = (float)target[i].x;
                            targetPoints[i].Y = (float)target[i].y;
                        }

                        var matrix = CameraCalibration.FindHomography(sourcePoints, targetPoints, Emgu.CV.CvEnum.HOMOGRAPHY_METHOD.LMEDS, 0.0);

                        FTransformOut[slice] = new Matrix4x4(matrix[0, 0], matrix[1, 0], 0.0, matrix[2, 0],
                                                             matrix[0, 1], matrix[1, 1], 0.0, matrix[2, 1],
                                                             0.0, 0.0, 1.0, 0.0,
                                                             matrix[0, 2], matrix[1, 2], 0.0, matrix[2, 2]);

                        FStatus[slice] = "OK";
                    }
                    catch (Exception e)
                    {
                        FTransformOut[slice] = VMath.IdentityMatrix;
                        FStatus[slice]       = e.Message;
                    }
                }
            }
        }
        //called when data for any output pin is requested
        public void Evaluate(int SpreadMax)
        {
            if (FPinInDo[0])
            {
                int nPointsPerImage = FPinInObject.SliceCount;
                if (nPointsPerImage == 0)
                {
                    FStatus[0] = "Insufficient points";
                    return;
                }
                int nImages = Math.Max(FPinInImage1.SliceCount, FPinInImage2.SliceCount) / nPointsPerImage;

                CALIB_TYPE        flags        = CALIB_TYPE.DEFAULT;
                MCvTermCriteria   termCrit     = new MCvTermCriteria(100, 0.001);
                MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[nImages][];
                PointF[][]        imagePoints1 = new PointF[nImages][];
                PointF[][]        imagePoints2 = new PointF[nImages][];
                Size imageSize = new Size((int)FPinInSensorSize[0].x, (int)FPinInSensorSize[0].y);
                ExtrinsicCameraParameters interCameraExtrinsics;
                Matrix <double>           foundamentalMatrix;
                Matrix <double>           essentialMatrix;
                IntrinsicCameraParameters intrinsics1 = FPinInIntrinsics1[0].intrinsics;
                IntrinsicCameraParameters intrinsics2 = FPinInIntrinsics2[0].intrinsics;

                imagePoints1 = MatrixUtils.ImagePoints(FPinInImage1, nPointsPerImage);
                imagePoints2 = MatrixUtils.ImagePoints(FPinInImage2, nPointsPerImage);

                for (int i = 0; i < nImages; i++)
                {
                    objectPoints[i] = MatrixUtils.ObjectPoints(FPinInObject, true);
                }

                try
                {
                    CameraCalibration.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, intrinsics1, intrinsics2, imageSize, flags, termCrit, out interCameraExtrinsics, out foundamentalMatrix, out essentialMatrix);

                    Extrinsics extrinsics = new Extrinsics(interCameraExtrinsics);
                    FPinOutExtrinsics[0] = extrinsics;
                    FPinOutTransform[0]  = extrinsics.Matrix;

                    FPinOutSuccess[0] = true;
                    FStatus[0]        = "OK";
                }
                catch (Exception e)  {
                    FPinOutSuccess[0] = false;
                    FStatus[0]        = e.Message;
                }
            }
        }
 void Start()
 {
     CameraCalibration cc = new CameraCalibration();
     cc.pathXML = "View_" + Scene + ".xml";
     cc.GetDataFromXML();
     cc.GetMatrixesOfCameras();
     disp (transform.position);
     disp (GameObject.Find("CubeR").transform.localPosition);
     disp (cc.Img2World(cc.width, cc.height));
     //Vector3 pos = cc.Img2World(cc.width, cc.height);
     //Vector3 pos = cc.Img2World(cc.width, 0);
     //Vector3 pos = cc.Img2World(0, cc.height);
     Vector3 pos = cc.Img2World(0, 0);
     transform.position = new Vector3(pos.x, 0, pos.y);
 }
Example #27
        public CalibrationResult SolvePnP(Bitmap image, MCvPoint3D32f[] objpoints, IntrinsicCameraParameters camIntrinsic, CheckBoardDefinition pattern)
        {
            //cvCvtColor(image, cv2.COLOR_BGR2GRAY)
            Image <Gray, Byte> gray = new Image <Gray, byte>(image);

            // the fast-check flag significantly reduces computation time when the pattern is not in view
            PointF[] corners = CameraCalibration.FindChessboardCorners(gray, pattern.Pattern, Emgu.CV.CvEnum.CalibCbType.FastCheck);
            ExtrinsicCameraParameters ret = null;

            if (corners != null)
            {
                ret = CameraCalibration.SolvePnP(objpoints, corners, camIntrinsic);
            }
            return(new CalibrationResult(ret != null, ret));
        }
        public void ProcessFrame()
        {
            if (this.State == CalibratorState.CornersRecognized || this.State == CalibratorState.Calibrated)
            {
                return;
            }

            this.OriginalImage = m_Capture.QueryFrame();
            this.GrayImage     = this.OriginalImage.Convert <Gray, Byte>();

            bool foundAllCorners = CameraCalibration.FindChessboardCorners(
                this.GrayImage,
                this.ChessBoard.PatternSize,
                Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH | Emgu.CV.CvEnum.CALIB_CB_TYPE.FILTER_QUADS,
                out m_FoundCorners);

            PointF[][] foundPointsForChannels = new PointF[][] { m_FoundCorners };
            if (foundAllCorners)
            {
                MCvTermCriteria terminationCriteria;
                terminationCriteria.max_iter = 30;
                terminationCriteria.epsilon  = 0.1;
                terminationCriteria.type     = Emgu.CV.CvEnum.TERMCRIT.CV_TERMCRIT_EPS | Emgu.CV.CvEnum.TERMCRIT.CV_TERMCRIT_ITER;

                this.GrayImage.FindCornerSubPix(foundPointsForChannels, new Size(11, 11), new Size(-1, -1), terminationCriteria);

                CameraCalibration.DrawChessboardCorners(this.GrayImage, this.ChessBoard.PatternSize, m_FoundCorners, foundAllCorners);
            }

            // we are done with processing. If needed we flip the images for display purposes only.
            Emgu.CV.CvEnum.FLIP flipType = Emgu.CV.CvEnum.FLIP.NONE;
            if (this.FlipHorizontal)
            {
                flipType = Emgu.CV.CvEnum.FLIP.HORIZONTAL;
            }
            if (this.FlipVertical)
            {
                flipType |= Emgu.CV.CvEnum.FLIP.VERTICAL;
            }

            this.OriginalImage._Flip(flipType);
            this.GrayImage._Flip(flipType);

            if (foundAllCorners && this.State == CalibratorState.WaitingForCornersRecognition)
            {
                this.State = CalibratorState.CornersRecognized;
            }
        }
Example #29
    public void calibrate()
    {
        if (objectPoints[0].Length < 4)
        {
            Debug.LogWarning("Need at least 4 points for calibration, not calibrating");
            return;
        }

        setIntrinsics();
        calibrationError = CameraCalibration.CalibrateCamera(objectPoints, imagePoints, imageSize, intrinsics, calibrationType, termCriteria, out extrinsics);

        cvExtrinsics = convertExtrinsics(extrinsics[0].ExtrinsicMatrix);
        cvIntrinsics = convertIntrinsics(intrinsics.IntrinsicMatrix);

        updateCameraParams();
    }
Example #30
        private void ComputeCameraMatrix(Mat frame_S1, Mat frame_S2)
        {
            // fill the MCvPoint3D32f arrays with the correct measurements
            for (int k = 0; k < buffer_length; k++)
            {
                // Fill our object list with the real-world measurements for the intrinsic calculations
                List <MCvPoint3D32f> object_list = new List <MCvPoint3D32f>();
                for (int i = 0; i < patternModel.patternSize.Height; i++)
                {
                    for (int j = 0; j < patternModel.patternSize.Width; j++)
                    {
                        object_list.Add(new MCvPoint3D32f(j * patternModel.distance, i * patternModel.distance, 0.0F));
                    }
                }
                corners_object_Points[k] = object_list.ToArray();
            }
            //If Emgu.CV.CvEnum.CALIB_TYPE == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO is specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
            //If you use the FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used by default.
            CameraCalibration.StereoCalibrate(corners_object_Points, corners_points_Left, corners_points_Right, calibrationModel.IntrinsicCam1, calibrationModel.IntrinsicCam2, frame_S1.Size,
                                              CalibType.Default, new MCvTermCriteria(0.1e5),
                                              out EX_Param, out fundamental, out essential);

            calibrationModel.EX_Param    = EX_Param;
            calibrationModel.fundamental = fundamental;
            calibrationModel.essential   = essential;
            MessageBox.Show("Intrinsic Calculation Complete"); //display that the method has been successful
                                                               //currentMode = Mode.Calibrated;

            //Computes rectification transforms for each head of a calibrated stereo camera.
            CvInvoke.StereoRectify(calibrationModel.IntrinsicCam1.IntrinsicMatrix,
                                   calibrationModel.IntrinsicCam1.DistortionCoeffs, calibrationModel.IntrinsicCam2.IntrinsicMatrix, calibrationModel.IntrinsicCam2.DistortionCoeffs,
                                   frame_S1.Size,
                                   calibrationModel.EX_Param.RotationVector.RotationMatrix, calibrationModel.EX_Param.TranslationVector,
                                   calibrationModel.R1, calibrationModel.R2, calibrationModel.P1, calibrationModel.P2, calibrationModel.Q,
                                   StereoRectifyType.Default, 0,
                                   frame_S1.Size, ref Rec1, ref Rec2);

            calibrationModel.Rec1 = Rec1;
            calibrationModel.Rec2 = Rec2;

            InitUndistortMatrix();

            //This will Show us the usable area from each camera
            //MessageBox.Show("Left: " + Rec1.ToString() + " \nRight: " + Rec2.ToString());
            currentMode = ECalibrationMode.Calibrated;
        }
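The comment inside ComputeCameraMatrix notes that CV_CALIB_USE_INTRINSIC_GUESS and CV_CALIB_FIX_ASPECT_RATIO expect fx, fy, cx, cy to be seeded before calibration. A minimal sketch of such seeding, assuming the Emgu IntrinsicCameraParameters type used elsewhere in these examples and taking the principal-point guess from the frame size (the focal-length guesses are illustrative placeholders):

            // Hypothetical pre-initialization; only required when the calibration flags
            // include CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO.
            IntrinsicCameraParameters intrinsicGuess = new IntrinsicCameraParameters();
            intrinsicGuess.IntrinsicMatrix[0, 0] = 1000.0;                      // fx guess, in pixels
            intrinsicGuess.IntrinsicMatrix[1, 1] = 1000.0;                      // fy guess, in pixels
            intrinsicGuess.IntrinsicMatrix[0, 2] = frame_S1.Size.Width / 2.0;   // cx guess: image center
            intrinsicGuess.IntrinsicMatrix[1, 2] = frame_S1.Size.Height / 2.0;  // cy guess: image center
            intrinsicGuess.IntrinsicMatrix[2, 2] = 1.0;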
        /// <summary>
        /// Stereo camera calibration method
        /// </summary>
        private void DoCalibrate()
        {
            try
            {
                // Left camera calibration
                double x = CameraCalibration.CalibrateCamera(
                    _objectPoints.ToArray(),
                    _imagePointsL.ToArray(),
                    _imageSize,
                    _intrParamL, //intrinsic parameters
                    _calibType,
                    _termCriteria,
                    out _extrParamsL);

                // Right camera calibration
                double y = CameraCalibration.CalibrateCamera(
                    _objectPoints.ToArray(),
                    _imagePointsR.ToArray(),
                    _imageSize,
                    _intrParamR,
                    _calibType,
                    _termCriteria,
                    out _extrParamsR);

                // Stereo calibration
                CameraCalibration.StereoCalibrate(
                    _objectPoints.ToArray(),
                    _imagePointsL.ToArray(),
                    _imagePointsR.ToArray(),
                    _intrParamL,             //left intrinsic parameters
                    _intrParamR,             //right intrinsic parameters
                    _imageSize,              //image size
                    _calibType,              //calibration type
                    _termCriteria,           //termination criteria
                    out _extrParamsS,        //stereo extrinsic parameters
                    out _foundamentalMatrix, //fundamental matrix
                    out _essentialMatrix);   //essential matrix
            }
            catch (System.NullReferenceException e)
            {
                MessageBox.Show("Exception during calibration: " + e.Message);
                return;
            }
        }
Example #32
        /// <summary>
        /// corner detection
        /// </summary>
        /// <param name="chessboardImage">chessboard image</param>
        /// <param name="cornersDetected">corner values in image coordinates</param>
        /// <returns>
        /// true if successful
        /// </returns>
        private bool findCorners(ref Image <Gray, byte> chessboardImage, out PointF[] cornersDetected)
        {
            cornersDetected = new PointF[nPoints];

            cornersDetected = CameraCalibration.FindChessboardCorners(chessboardImage, patternSize,
                                                                      CALIB_CB_TYPE.ADAPTIVE_THRESH | CALIB_CB_TYPE.NORMALIZE_IMAGE);

            if (null == cornersDetected)
            {
                return(false);
            }
            else
            {
                CvInvoke.cvFindCornerSubPix(chessboardImage, cornersDetected, nPoints,
                                            new Size(5, 5), new Size(-1, -1), criteria);
                CameraCalibration.DrawChessboardCorners(chessboardImage, patternSize, cornersDetected);
                return(true);
            }
        }