/// <summary>
/// Tries to find the composite pattern and returns the output parameter image_points.
/// In case of success the boolean value 'true' is returned.
/// Note that CompositePatterns can only be found if the cameras' intrinsics are set.
///
/// The algorithm works as follows:
/// If the main pattern 'patternA' can be found, the algorithm is finished already and the resulting
/// image_points are known and returned.
/// If only 'patternB' can be found, the given object_points of 'patternA' are transformed into the
/// 'patternB' coordinate system, using the predefined transformation matrix.
/// Furthermore, an extrinsic calibration is performed in order to find the extrinsic matrix, which
/// describes the relation between the camera coordinate system and the coordinate system of 'patternB'.
/// Finally, the library function 'ProjectPoints' is called in order to project the transformed
/// object_points (expressed in 'patternB' coordinates) into the camera image plane.
/// The projected points correspond to the image_points of 'patternA'.
/// ==> To sum up: the predefined transformation is used to calculate the image_points of 'patternA',
/// even if 'patternA' is invisible.
/// </summary>
/// <param name="img"> Input grayscale image. </param>
/// <param name="image_points"> 2D output image points. </param>
/// <returns> true... if pattern has been found; false... otherwise. </returns>
public override bool FindPattern(Emgu.CV.Image<Emgu.CV.Structure.Gray, byte> img, out System.Drawing.PointF[] image_points)
{
  // Intrinsics and both sub-patterns are mandatory for a composite search.
  if (this.IntrinsicParameters == null || _patternA == null || _patternB == null)
  {
    _logger.Warn("Error: Intrinsics are needed to find a Composite Pattern but not available.");
    image_points = null;
    return false;
  }

  // The composite pattern's object points are those of the main pattern 'patternA'.
  this.ObjectPoints = _patternA.ObjectPoints;

  // Case 1: the main pattern is directly visible — its image points are the result.
  System.Drawing.PointF[] currentImagePointsA;
  if (_patternA.FindPattern(img, out currentImagePointsA))
  {
    image_points = currentImagePointsA;
    //_logger.Info("Pattern found.");
    return true;
  }

  // Case 2: fall back to 'patternB'; if neither pattern is visible, give up.
  System.Drawing.PointF[] currentImagePointsB;
  if (!_patternB.FindPattern(img, out currentImagePointsB))
  {
    // Reset the image_points if the pattern could not be found.
    image_points = null;
    return false;
  }

  try
  {
    // 'patternB' has been found: compute the extrinsics relating the camera
    // coordinate system to the coordinate system of 'patternB'.
    ExtrinsicCalibration ec_B = new ExtrinsicCalibration(_patternB.ObjectPoints, this.IntrinsicParameters);
    Emgu.CV.ExtrinsicCameraParameters ecp_B = ec_B.Calibrate(currentImagePointsB);
    if (ecp_B == null)
    {
      _logger.Warn("Error calculating extrinsic parameters.");
      image_points = null;
      return false;
    }

    // Transform the object points of 'patternA' into the 'patternB' coordinate
    // system via the inverse of the predefined B->A transformation.
    // (Note: the original code also built an unused homogeneous 4x4 copy of the
    // extrinsic matrix here; that dead computation has been removed.)
    Emgu.CV.Structure.MCvPoint3D32f[] transformedCornerPoints =
      MatrixTransformation.TransformVectorToEmgu(_transformationBToA.Inverse(), 1.0, _patternA.ObjectPoints)
        .ToArray<Emgu.CV.Structure.MCvPoint3D32f>();

    // Project the transformed 3D points into the 2D camera image plane; these
    // projections are the image_points of the (invisible) 'patternA'.
    image_points = Emgu.CV.CameraCalibration.ProjectPoints(transformedCornerPoints, ecp_B, this.IntrinsicParameters);
    return true;
  }
  catch (Exception e)
  {
    // Best-effort: calibration/projection failures are logged, not propagated.
    _logger.Warn("Caught Exception: {0}.", e);
    image_points = null;
    return false;
  }
}