// Use this for initialization
void Start()
{
    displayCameraPreviewToggle.isOn = displayCameraPreview;
    useSeparateDetectionToggle.isOn = useSeparateDetection;
    displayAxesToggle.isOn = displayAxes;
    displayHeadToggle.isOn = displayHead;
    displayEffectsToggle.isOn = displayEffects;

    imageOptimizationHelper = gameObject.GetComponent<ImageOptimizationHelper>();
    webCamTextureToMatHelper = gameObject.GetComponent<HololensCameraStreamToMatHelper>();
#if NETFX_CORE
    webCamTextureToMatHelper.frameMatAcquired += OnFrameMatAcquired;
#endif
    webCamTextureToMatHelper.Initialize();

    rectangleTracker = new RectangleTracker();

    // faceLandmarkDetector = new FaceLandmarkDetector(DlibFaceLandmarkDetector.Utils.getFilePath("sp_human_face_68.dat"));
    faceLandmarkDetector = new FaceLandmarkDetector(DlibFaceLandmarkDetector.Utils.getFilePath("sp_human_face_68_for_mobile.dat"));

    // The 3D coordinates (in mm) of the detection object in real-world space, matched to the 2D pixel coordinates.
    objectPoints = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 120), //nose (Nose top)
        new Point3(-26, 15, 83),  //l mouth (Mouth breadth)
        new Point3(26, 15, 83),   //r mouth (Mouth breadth)
        new Point3(-79, 90, 0.0), //l ear (Bitragion breadth)
        new Point3(79, 90, 0.0)   //r ear (Bitragion breadth)
    );
    imagePoints = new MatOfPoint2f();
    rotMat = new Mat(3, 3, CvType.CV_64FC1);
}
private void Run()
{
    //set 3d face object points.
    objectPoints = new MatOfPoint3f(
        new Point3(-31, 72, 86),  //l eye
        new Point3(31, 72, 86),   //r eye
        new Point3(0, 40, 114),   //nose
        new Point3(-20, 15, 90),  //l mouth
        new Point3(20, 15, 90)    //r mouth
        // ,
        // new Point3(-70, 60, -9), //l ear
        // new Point3(70, 60, -9)   //r ear
    );
    imagePoints = new MatOfPoint2f();
    rvec = new Mat();
    tvec = new Mat();
    rotM = new Mat(3, 3, CvType.CV_64FC1);

    //initialize FaceTracker
    faceTracker = new FaceTracker(tracker_model_json_filepath);
    //initialize FaceTrackerParams
    faceTrackerParams = new FaceTrackerParams();

    cascade = new CascadeClassifier();
    cascade.load(haarcascade_frontalface_alt_xml_filepath);
    // if (cascade.empty())
    // {
    //     Debug.LogError("cascade file is not loaded. Please copy from “FaceTrackerExample/StreamingAssets/” to “Assets/StreamingAssets/” folder.");
    // }

    webCamTextureToMatHelper.Initialize();
}
// Use this for initialization
void Start()
{
    //set 3d face object points.
    objectPoints = new MatOfPoint3f(
        new Point3(-31, 72, 86),  //l eye
        new Point3(31, 72, 86),   //r eye
        new Point3(0, 40, 114),   //nose
        new Point3(-20, 15, 90),  //l mouth
        new Point3(20, 15, 90)    //r mouth
        // ,
        // new Point3(-70, 60, -9), //l ear
        // new Point3(70, 60, -9)   //r ear
    );
    imagePoints = new MatOfPoint2f();
    rvec = new Mat();
    tvec = new Mat();
    rotM = new Mat(3, 3, CvType.CV_64FC1);

    //initialize FaceTracker
    faceTracker = new FaceTracker(Utils.getFilePath("tracker_model.json"));
    //initialize FaceTrackerParams
    faceTrackerParams = new FaceTrackerParams();

    webCamTextureToMatHelper = gameObject.GetComponent<WebCamTextureToMatHelper>();
    webCamTextureToMatHelper.Init();

    autoResetModeToggle.isOn = autoResetMode;
}
// Use this for initialization
void Start()
{
    isUsingSeparateDetectionToggle.isOn = isUsingSeparateDetection;
    isShowingAxesToggle.isOn = isShowingAxes;
    isShowingHeadToggle.isOn = isShowingHead;
    isShowingEffectsToggle.isOn = isShowingEffects;

    webCamTextureToMatHelper = gameObject.GetComponent<OptimizationWebCamTextureToMatHelper>();
    webCamTextureToMatHelper.Init();

    rectangleTracker = new RectangleTracker();
    faceLandmarkDetector = new FaceLandmarkDetector(DlibFaceLandmarkDetector.Utils.getFilePath("shape_predictor_68_face_landmarks.dat"));

    // The 3D coordinates (in mm) of the detection object in real-world space, matched to the 2D pixel coordinates.
    objectPoints = new MatOfPoint3f(
        new Point3(-31, 72, 86),  //l eye (Interpupillary breadth)
        new Point3(31, 72, 86),   //r eye (Interpupillary breadth)
        new Point3(0, 40, 114),   //nose (Nose top)
        new Point3(-20, 15, 90),  //l mouth (Mouth breadth)
        new Point3(20, 15, 90),   //r mouth (Mouth breadth)
        new Point3(-69, 76, -2),  //l ear (Bitragion breadth)
        new Point3(69, 76, -2)    //r ear (Bitragion breadth)
    );
    imagePoints = new MatOfPoint2f();
    rvec = new Mat();
    tvec = new Mat();
    rotMat = new Mat(3, 3, CvType.CV_64FC1);
}
private void Run()
{
    //set 3d face object points.
    objectPoints = new MatOfPoint3f(
        new Point3(-31, 72, 86),  //l eye
        new Point3(31, 72, 86),   //r eye
        new Point3(0, 40, 114),   //nose
        new Point3(-20, 15, 90),  //l mouth
        new Point3(20, 15, 90),   //r mouth
        new Point3(-70, 60, -9),  //l ear
        new Point3(70, 60, -9)    //r ear
    );
    imagePoints = new MatOfPoint2f();
    rvec = new Mat();
    tvec = new Mat();
    rotM = new Mat(3, 3, CvType.CV_64FC1);

    //initialize FaceTracker
    faceTracker = new FaceTracker(tracker_model_json_filepath);
    //initialize FaceTrackerParams
    faceTrackerParams = new FaceTrackerParams();

    cascade = new CascadeClassifier();
    cascade.load(haarcascade_frontalface_alt_xml_filepath);
    //if (cascade.empty())
    //{
    //    Debug.LogError("cascade file is not loaded. Please copy from “FaceTrackerExample/StreamingAssets/” to “Assets/StreamingAssets/” folder.");
    //}

#if UNITY_ANDROID && !UNITY_EDITOR
    // Avoids the front camera low light issue that occurs in only some Android devices (e.g. Google Pixel, Pixel2).
    webCamTextureToMatHelper.avoidAndroidFrontCameraLowLightIssue = true;
#endif
    webCamTextureToMatHelper.Initialize();
}
/// <summary>
/// Updates a collection of points constituting a pattern for recognition. Zero point is at the center.
/// </summary>
/// <param name="patternSize">For a chessboard, count the inner corners</param>
/// <param name="patternToWorldMatrix">The transform must be scaled to fit the aspect of the pattern</param>
/// <param name="patternType">OpenCV-supported pattern type</param>
/// <param name="patternBorderSizeUV">Border around the pattern, in UV space</param>
/// <param name="pointsWorldSpace">Point collection</param>
public static void UpdateWorldSpacePatternPoints(Vector2Int patternSize, Matrix4x4 patternToWorldMatrix, PatternType patternType, Vector2 patternBorderSizeUV, ref MatOfPoint3f pointsWorldSpace)
{
    bool isAsym = patternType == PatternType.AsymmetricCircleGrid;

    // Instantiate array.
    int cornerCount = patternSize.y * patternSize.x;
    if (pointsWorldSpace == null || pointsWorldSpace.rows() != cornerCount)
    {
        pointsWorldSpace = new MatOfPoint3f();
        pointsWorldSpace.alloc(cornerCount);
    }

    // Fill.
    int c = 0;
    Vector2 size = Vector2.one - patternBorderSizeUV * 2;
    Vector2 step = new Vector2(size.x / (patternSize.x - 1f + (isAsym ? 0.5f : 0)), size.y / (patternSize.y - 1f));
    for (int ny = 0; ny < patternSize.y; ny++)
    {
        float y = 1 - patternBorderSizeUV.y - ny * step.y - 0.5f;
        for (int nx = 0; nx < patternSize.x; nx++, c++)
        {
            float x = patternBorderSizeUV.x + nx * step.x - 0.5f;
            Vector3 point = new Vector3(x, y, 0);
            if (isAsym && ny % 2 == 1)
            {
                point.x += step.x * 0.5f;
            }
            point = patternToWorldMatrix.MultiplyPoint3x4(point);
            pointsWorldSpace.WriteVector3(point, c);
        }
    }
}
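A minimal usage sketch for the helper above; the quad, the 7x4 corner count, and the 25 mm tile size are illustrative assumptions, not values from the original project:

// Hypothetical setup: a quad scaled to a chessboard with 7x4 inner corners and
// 25 mm tiles, i.e. (cols - 1) x (rows - 1) tiles across, converted to meters.
void ExamplePatternPointsSetup()
{
    Transform boardTransform = GameObject.CreatePrimitive(PrimitiveType.Quad).transform;
    boardTransform.localScale = new Vector3(6 * 0.025f, 3 * 0.025f, 1f);

    MatOfPoint3f worldPoints = null;
    UpdateWorldSpacePatternPoints(new Vector2Int(7, 4), boardTransform.localToWorldMatrix,
        PatternType.Chessboard, Vector2.zero, ref worldPoints);
    // worldPoints now holds 28 corner positions in world space, centered on the quad.
}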
private double computeReprojectionErrors(List<Mat> objectPoints, List<Mat> rvecs, List<Mat> tvecs, Mat perViewErrors)
{
    MatOfPoint2f cornersProjected = new MatOfPoint2f();
    double totalError = 0;
    double error;
    float[] viewErrors = new float[objectPoints.Count];
    MatOfDouble distortionCoefficients = new MatOfDouble(mDistortionCoefficients);
    int totalPoints = 0;
    for (int i = 0; i < objectPoints.Count; i++)
    {
        MatOfPoint3f points = new MatOfPoint3f(objectPoints[i]);
        Calib3d.Calib3d.ProjectPoints(points, rvecs[i], tvecs[i], mCameraMatrix, distortionCoefficients, cornersProjected);
        error = Core.Core.Norm(mCornersBuffer[i], cornersProjected, Core.Core.NormL2);
        int n = objectPoints[i].Rows();
        viewErrors[i] = (float)Math.Sqrt(error * error / n);
        totalError += error * error;
        totalPoints += n;
    }
    perViewErrors.Create(objectPoints.Count, 1, CvType.Cv32fc1);
    perViewErrors.Put(0, 0, viewErrors);
    return Math.Sqrt(totalError / totalPoints);
}
public void FishEyeCalibrate()
{
    var patternSize = new Size(10, 7);
    using (var image = Image("calibration/00.jpg"))
    using (var corners = new MatOfPoint2f())
    {
        Cv2.FindChessboardCorners(image, patternSize, corners);
        var objectPointsArray = Create3DChessboardCorners(patternSize, 1.0f).ToArray();
        var imagePointsArray = corners.ToArray();

        using (var objectPoints = MatOfPoint3f.FromArray(objectPointsArray))
        using (var imagePoints = MatOfPoint2f.FromArray(imagePointsArray))
        using (var cameraMatrix = new MatOfDouble(Mat.Eye(3, 3, MatType.CV_64FC1)))
        using (var distCoeffs = new MatOfDouble())
        {
            var rms = Cv2.FishEye.Calibrate(new[] { objectPoints }, new[] { imagePoints }, image.Size(),
                cameraMatrix, distCoeffs, out var rotationVectors, out var translationVectors,
                FishEyeCalibrationFlags.None);
            var distCoeffValues = distCoeffs.ToArray();
            Assert.Equal(55.15, rms, 2);
            Assert.Contains(distCoeffValues, d => Math.Abs(d) > 1e-20);
            Assert.NotEmpty(rotationVectors);
            Assert.NotEmpty(translationVectors);
        }
    }
}
// vector_vector_Point3f
public static void Mat_to_vector_vector_Point3f(Mat m, List<MatOfPoint3f> pts)
{
    if (m != null)
    {
        m.ThrowIfDisposed();
    }

    if (pts == null)
    {
        throw new CvException("Output List can't be null");
    }
    if (m == null)
    {
        throw new CvException("Input Mat can't be null");
    }

    List<Mat> mats = new List<Mat>(m.rows());
    Mat_to_vector_Mat(m, mats);
    foreach (Mat mi in mats)
    {
        MatOfPoint3f pt = new MatOfPoint3f(mi);
        pts.Add(pt);
        mi.release();
    }
    mats.Clear();
}
public void BuildPatternFromImage(Mat image, Pattern pattern)
{
    // Store original image in pattern structure
    pattern.size = new Size(image.Cols, image.Rows);
    pattern.frame = image.Clone();
    GetGray(image, pattern.grayImg);

    // Build 2D and 3D contours (the 3D contour lies in the XY plane since the pattern is planar)
    List<Point2f> points2dList = new List<Point2f>(4);
    List<Point3f> points3dList = new List<Point3f>(4);

    // Image dimensions
    float w = image.Cols;
    float h = image.Rows;

    // Normalized dimensions:
    points2dList.Add(new Point2f(0, 0));
    points2dList.Add(new Point2f(w, 0));
    points2dList.Add(new Point2f(w, h));
    points2dList.Add(new Point2f(0, h));
    pattern.points2d = MatOfPoint2f.FromArray(points2dList);

    points3dList.Add(new Point3f(-0.5f, -0.5f, 0));
    points3dList.Add(new Point3f(+0.5f, -0.5f, 0));
    points3dList.Add(new Point3f(+0.5f, +0.5f, 0));
    points3dList.Add(new Point3f(-0.5f, +0.5f, 0));
    pattern.points3d = MatOfPoint3f.FromArray(points3dList);

    ExtractFeatures(pattern.grayImg, ref pattern.keypoints, pattern.descriptors);
    Train(pattern);
}
// Use this for initialization
public override void Setup()
{
    //set 3d face object points.
    objectPoints68 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 120), //nose (Nose top)
        new Point3(-26, 15, 83),  //l mouth (Mouth breadth)
        new Point3(26, 15, 83),   //r mouth (Mouth breadth)
        new Point3(-79, 90, 0.0), //l ear (Bitragion breadth)
        new Point3(79, 90, 0.0)   //r ear (Bitragion breadth)
    );
    objectPoints5 = new MatOfPoint3f(
        new Point3(-23, 90, 83),  //l eye (Inner corner of the eye)
        new Point3(23, 90, 83),   //r eye (Inner corner of the eye)
        new Point3(-50, 90, 80),  //l eye (Tail of the eye)
        new Point3(50, 90, 80),   //r eye (Tail of the eye)
        new Point3(0.0, 50, 120)  //nose (Nose top)
    );
    imagePoints = new MatOfPoint2f();

    float width = 640;
    float height = 480;

    //set camera parameters
    int max_d = (int)Mathf.Max(width, height);
    double fx = max_d;
    double fy = max_d;
    double cx = width / 2.0f;
    double cy = height / 2.0f;
    camMatrix = new Mat(3, 3, CvType.CV_64FC1);
    camMatrix.put(0, 0, fx);
    camMatrix.put(0, 1, 0);
    camMatrix.put(0, 2, cx);
    camMatrix.put(1, 0, 0);
    camMatrix.put(1, 1, fy);
    camMatrix.put(1, 2, cy);
    camMatrix.put(2, 0, 0);
    camMatrix.put(2, 1, 0);
    camMatrix.put(2, 2, 1.0f);
    Debug.Log("camMatrix " + camMatrix.dump());

    distCoeffs = new MatOfDouble(0, 0, 0, 0);
    Debug.Log("distCoeffs " + distCoeffs.dump());

    invertYM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, -1, 1));
    Debug.Log("invertYM " + invertYM.ToString());
    invertZM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, 1, -1));
    Debug.Log("invertZM " + invertZM.ToString());

    didUpdateHeadRotation = false;
}
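The fields initialized above are typically consumed per frame by a PnP solve. The following is a hedged sketch of that step (not the original project's update loop); landmarkPoints is a hypothetical Point[] holding detected 2D landmarks in the same order as objectPoints68:

// Hypothetical per-frame head-pose solve using the fields set up in Setup().
void SolveHeadPose(Point[] landmarkPoints)
{
    imagePoints.fromArray(landmarkPoints);
    Mat rvec = new Mat();
    Mat tvec = new Mat();
    Calib3d.solvePnP(objectPoints68, imagePoints, camMatrix, distCoeffs, rvec, tvec);
    // rvec/tvec describe the head pose in the camera's right-handed frame;
    // invertYM and invertZM convert that pose into Unity's left-handed space.
}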
/// <summary>
/// Add a pair of real space + image space points.
/// Beware that calibration can fail if the pattern is not rotated to face forward, so that z is zero.
/// Also ensure that the point order in the two point sets is matching.
/// </summary>
/// <param name="patternRealModelSample">Must be measured in millimeters</param>
/// <param name="patternImageSample"></param>
public void AddSample(MatOfPoint3f patternRealModelSample, MatOfPoint2f patternImageSample)
{
    //Debug.Log( "patternRealModelSample\n" + patternRealModelSample.dump() );
    //Debug.Log( "patternImageSample\n" + patternImageSample.dump() );
    _patternRealSamples.Add(patternRealModelSample.clone());
    _patternImageSamples.Add(patternImageSample.clone());
}
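A hedged sketch of collecting one sample per captured frame; calibrator (an instance of the class owning AddSample), modelCorners, and imageCorners are illustrative names:

// modelCorners: MatOfPoint3f in millimeters, pattern facing forward (z == 0).
// imageCorners: MatOfPoint2f from a chessboard finder, same point order as modelCorners.
calibrator.AddSample(modelCorners, imageCorners);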
//
// C++: vector_Point3f CharucoBoard::chessboardCorners
//
public MatOfPoint3f get_chessboardCorners()
{
    ThrowIfDisposed();

    return MatOfPoint3f.fromNativeAddr(aruco_CharucoBoard_get_1chessboardCorners_10(nativeObj));
}
private static MatOfPoint3f Subtract(MatOfPoint3f orig, MatOfPoint3f aCentroidMatrix)
{
    MatOfPoint3f answer = new MatOfPoint3f();
    for (int i = 0; i < orig.Rows; i++)
    {
        answer.Add(orig.ElementAt(i) - aCentroidMatrix.ElementAt(i));
    }
    return answer;
}
/// <summary>
/// Initializes a new instance of the <see cref="Pattern"/> class.
/// </summary>
public Pattern()
{
    size = new Size();
    frame = new Mat();
    grayImg = new Mat();
    keypoints = new MatOfKeyPoint();
    descriptors = new Mat();
    points2d = new MatOfPoint2f();
    points3d = new MatOfPoint3f();
}
private static List<Vector3> CreateVector3Array(MatOfPoint3f origPoint3f)
{
    var list = new List<Vector3>();
    foreach (var p in origPoint3f)
    {
        list.Add(new Vector3(p.X, p.Y, p.Z));
    }
    return list;
}
private void Run()
{
    if (string.IsNullOrEmpty(dlibShapePredictorFilePath))
    {
        Debug.LogError("shape predictor file does not exist. Please copy from “DlibFaceLandmarkDetector/StreamingAssets/” to “Assets/StreamingAssets/” folder.");
    }

    //set 3d face object points.
    objectPoints68 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 117), //nose (Tip)
        new Point3(0.0, 32, 97),  //nose (Subnasale)
        new Point3(-79, 90, 10),  //l ear (Bitragion breadth)
        new Point3(79, 90, 10)    //r ear (Bitragion breadth)
    );
    objectPoints17 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 117), //nose (Tip)
        new Point3(0.0, 32, 97),  //nose (Subnasale)
        new Point3(-79, 90, 10),  //l ear (Bitragion breadth)
        new Point3(79, 90, 10)    //r ear (Bitragion breadth)
    );
    objectPoints6 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 117), //nose (Tip)
        new Point3(0.0, 32, 97)   //nose (Subnasale)
    );
    objectPoints5 = new MatOfPoint3f(
        new Point3(-23, 90, 83),  //l eye (Inner corner of the eye)
        new Point3(23, 90, 83),   //r eye (Inner corner of the eye)
        new Point3(-50, 90, 80),  //l eye (Tail of the eye)
        new Point3(50, 90, 80),   //r eye (Tail of the eye)
        new Point3(0.0, 32, 97)   //nose (Subnasale)
    );
    imagePoints = new MatOfPoint2f();

    faceLandmarkDetector = new FaceLandmarkDetector(dlibShapePredictorFilePath);

#if UNITY_ANDROID && !UNITY_EDITOR
    // Avoids the front camera low light issue that occurs in only some Android devices (e.g. Google Pixel, Pixel2).
    webCamTextureToMatHelper.avoidAndroidFrontCameraLowLightIssue = true;
#endif
    webCamTextureToMatHelper.Initialize();
}
private static double[,] ConvertToDoubleArray(MatOfPoint3f aCentroidMatrix)
{
    double[,] array = new double[aCentroidMatrix.Rows, 3];
    for (int i = 0; i < aCentroidMatrix.Rows; i++)
    {
        array[i, 0] = aCentroidMatrix.ElementAt(i).X;
        array[i, 1] = aCentroidMatrix.ElementAt(i).Y;
        array[i, 2] = aCentroidMatrix.ElementAt(i).Z;
    }
    return array;
}
/// <summary>
/// Initializes a new instance of the <see cref="Pattern"/> class.
/// </summary>
public Pattern()
{
    bfMatcher = new BFMatcher();
    size = new Size();
    frame = new Mat();
    grayImg = new Mat();
    keypoints = new KeyPoint[] { };
    descriptors = new Mat();
    points2d = new MatOfPoint2f();
    points3d = new MatOfPoint3f();
}
//
// C++: vector_Point3f CharucoBoard::chessboardCorners
//

//javadoc: CharucoBoard::get_chessboardCorners()
public MatOfPoint3f get_chessboardCorners()
{
    ThrowIfDisposed();
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    MatOfPoint3f retVal = MatOfPoint3f.fromNativeAddr(aruco_CharucoBoard_get_1chessboardCorners_10(nativeObj));
    return retVal;
#else
    return null;
#endif
}
private void Run()
{
    if (string.IsNullOrEmpty(dlibShapePredictorFilePath))
    {
        Debug.LogError("shape predictor file does not exist. Please copy from “DlibFaceLandmarkDetector/StreamingAssets/” to “Assets/StreamingAssets/” folder.");
    }

    //set 3d face object points. (right-handed coordinates system)
    objectPoints68 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 117), //nose (Tip)
        new Point3(0.0, 32, 97),  //nose (Subnasale)
        new Point3(-79, 90, 10),  //l ear (Bitragion breadth)
        new Point3(79, 90, 10)    //r ear (Bitragion breadth)
    );
    objectPoints17 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 117), //nose (Tip)
        new Point3(0.0, 32, 97),  //nose (Subnasale)
        new Point3(-79, 90, 10),  //l ear (Bitragion breadth)
        new Point3(79, 90, 10)    //r ear (Bitragion breadth)
    );
    objectPoints6 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 117), //nose (Tip)
        new Point3(0.0, 32, 97)   //nose (Subnasale)
    );
    objectPoints5 = new MatOfPoint3f(
        new Point3(-23, 90, 83),  //l eye (Inner corner of the eye)
        new Point3(23, 90, 83),   //r eye (Inner corner of the eye)
        new Point3(-50, 90, 80),  //l eye (Tail of the eye)
        new Point3(50, 90, 80),   //r eye (Tail of the eye)
        new Point3(0.0, 32, 97)   //nose (Subnasale)
    );
    imagePoints = new MatOfPoint2f();

    faceLandmarkDetector = new FaceLandmarkDetector(dlibShapePredictorFilePath);

    if (string.IsNullOrEmpty(sourceToMatHelper.requestedVideoFilePath))
    {
        sourceToMatHelper.requestedVideoFilePath = VIDEO_FILENAME;
    }
    sourceToMatHelper.outputColorFormat = VideoCaptureToMatHelper.ColorFormat.RGB;
    sourceToMatHelper.Initialize();
}
void Awake()
{
    _invertYM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, -1, 1));
    _invertZM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, 1, -1));

    // Set the initial 3D positions of the face landmarks.
    _objectPoints = new MatOfPoint3f(
        new Point3(-31, 72, 86),  // left eye
        new Point3(31, 72, 86),   // right eye
        new Point3(0, 40, 114),   // nose
        new Point3(-20, 15, 90),  // left mouth corner
        new Point3(20, 15, 90),   // right mouth corner
        new Point3(-69, 76, -2),  // left ear
        new Point3(69, 76, -2)    // right ear
    );
    _imagePoints = new MatOfPoint2f();
    _rotM = new Mat(3, 3, CvType.CV_64FC1);

    // Camera intrinsic parameters
    float maxD = Mathf.Max(normHeight, normWidth);
    float fx = maxD;
    float fy = maxD;
    float cx = normWidth / 2.0f;
    float cy = normHeight / 2.0f;
    _camMatrix = new Mat(3, 3, CvType.CV_64FC1);
    _camMatrix.put(0, 0, fx);
    _camMatrix.put(0, 1, 0);
    _camMatrix.put(0, 2, cx);
    _camMatrix.put(1, 0, 0);
    _camMatrix.put(1, 1, fy);
    _camMatrix.put(1, 2, cy);
    _camMatrix.put(2, 0, 0);
    _camMatrix.put(2, 1, 0);
    _camMatrix.put(2, 2, 1.0f);
    _distCoeffs = new MatOfDouble(0, 0, 0, 0);

    // Camera calibration
    Matrix4x4 P = ARUtils.CalculateProjectionMatrixFromCameraMatrixValues((float)fx, (float)fy, (float)cx, (float)cy, normWidth, normHeight, 0.3f, 2000f);
    Matrix4x4 V = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, 1, -1));
    _VP = P * V;

    _normPoints = new List<Vector2>(68);
    for (int i = 0; i < 68; i++)
    {
        _normPoints.Add(new Vector2(0, 0));
    }
}
private static Mat Reshape(MatOfPoint3f points)
{
    Mat answer = new Mat(points.Rows, 3, MatType.CV_32FC1);
    for (int i = 0; i < points.Rows; i++)
    {
        var point = points.ElementAt(i);
        answer.Set<float>(i, 0, point.X);
        answer.Set<float>(i, 1, point.Y);
        answer.Set<float>(i, 2, point.Z);
    }
    return answer;
}
void Awake()
{
    Application.targetFrameRate = 30;

    _chessPatternPointCount = _chessPatternSize.x * _chessPatternSize.y;

    // Prepare OpenCV.
    _extrinsicsCalibrator = new CameraExtrinsicsCalibrator();
    _prevChessCorners = new Vector2[_chessPatternPointCount];
    _chessCornersRealModelMat = TrackingToolsHelper.CreateRealModelPatternPoints(_chessPatternSize, _chessTileSize, TrackingToolsHelper.PatternType.Chessboard);

    // Create objects.
    _chessPatternTransform = GameObject.CreatePrimitive(PrimitiveType.Quad).transform;
    _chessPatternTransform.name = "Chessboard";
    _chessPatternTransform.localScale = new Vector3((_chessPatternSize.x - 1) * _chessTileSize * 0.001f, (_chessPatternSize.y - 1) * _chessTileSize * 0.001f, 0);

    // Prepare world points.
    TrackingToolsHelper.UpdateWorldSpacePatternPoints(_chessPatternSize, _chessPatternTransform.localToWorldMatrix, TrackingToolsHelper.PatternType.Chessboard, Vector2.zero, ref _chessCornersWorldMat);

    // Prepare UI.
    TrackingToolsHelper.RenderPattern(_chessPatternSize, TrackingToolsHelper.PatternType.Chessboard, 1024, ref _chessPatternTexture, ref _patternRenderMaterial);

    _aspectFitter = _processedCameraImage.GetComponent<AspectRatioFitter>();
    if (!_aspectFitter)
    {
        _aspectFitter = _processedCameraImage.gameObject.AddComponent<AspectRatioFitter>();
    }
    _aspectFitter.aspectMode = AspectRatioFitter.AspectMode.FitInParent;

    Shader shader = Shader.Find(TrackingToolsConstants.previewShaderName);
    _previewMaterial = new Material(shader);
    _processedCameraImage.material = _previewMaterial;
    _processedCameraImage.color = Color.white;

    _arImage = new GameObject("ARImage").AddComponent<RawImage>();
    _arImage.transform.SetParent(_processedCameraImage.transform);
    _arImage.rectTransform.FitParent();
    _arImage.gameObject.SetActive(false);

    Shader unlitTextureShader = Shader.Find("Unlit/Texture");
    Material chessboardMaterial = new Material(unlitTextureShader);
    chessboardMaterial.mainTexture = _chessPatternTexture;
    _chessPatternTransform.GetComponent<Renderer>().material = chessboardMaterial;

    if (_sampleCountMeterFillImage)
    {
        _sampleCountMeterFillImage.fillAmount = 0;
    }

    _previewFlasher = new MaterialPropFlasher(_previewMaterial, "_Whiteout", TrackingToolsConstants.flashDuration);

    // Setup camera.
    _mainCamera.backgroundColor = Color.clear;
    _mainCamera.gameObject.SetActive(false);
}
public static Vector3 ReadVector3(this MatOfPoint3f vectorArrayMat, int index)
{
    switch (vectorArrayMat.depth())
    {
        case CvType.CV_64F:
            vectorArrayMat.get(index, 0, _temp3d);
            return new Vector3((float)_temp3d[0], (float)_temp3d[1], (float)_temp3d[2]);
        case CvType.CV_32F:
            vectorArrayMat.get(index, 0, _temp3f);
            return new Vector3(_temp3f[0], _temp3f[1], _temp3f[2]);
    }
    return Vector3.zero;
}
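An illustrative use of the extension above, copying a filled MatOfPoint3f (for example, the output of UpdateWorldSpacePatternPoints earlier in this listing) into Unity vectors:

// 'pointsWorldSpace' is assumed to be a filled MatOfPoint3f.
public static Vector3[] ToVector3Array(MatOfPoint3f pointsWorldSpace)
{
    Vector3[] unityPoints = new Vector3[pointsWorldSpace.rows()];
    for (int i = 0; i < unityPoints.Length; i++)
    {
        unityPoints[i] = pointsWorldSpace.ReadVector3(i);
    }
    return unityPoints;
}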
private void CalcChessboardCorners(Size patternSize, float squareSize, MatOfPoint3f corners, int markerType)
{
    if ((int)(patternSize.width * patternSize.height) != corners.rows())
    {
        Debug.Log("Invalid corners size.");
        corners.create((int)(patternSize.width * patternSize.height), 1, CvType.CV_32FC3);
    }

    const int cn = 3;
    float[] cornersArr = new float[corners.rows() * cn];
    int width = (int)patternSize.width;
    int height = (int)patternSize.height;

    switch (markerType)
    {
        case (int)MarkerType.ChessBoard:
        case (int)MarkerType.CirclesGlid:
            for (int i = 0; i < height; ++i)
            {
                for (int j = 0; j < width; ++j)
                {
                    cornersArr[(i * width * cn) + (j * cn)] = j * squareSize;
                    cornersArr[(i * width * cn) + (j * cn) + 1] = i * squareSize;
                    cornersArr[(i * width * cn) + (j * cn) + 2] = 0;
                }
            }
            corners.put(0, 0, cornersArr);
            break;
        case (int)MarkerType.AsymmetricCirclesGlid:
            for (int i = 0; i < height; ++i)
            {
                for (int j = 0; j < width; ++j)
                {
                    cornersArr[(i * width * cn) + (j * cn)] = (2 * j + i % 2) * squareSize;
                    cornersArr[(i * width * cn) + (j * cn) + 1] = i * squareSize;
                    cornersArr[(i * width * cn) + (j * cn) + 2] = 0;
                }
            }
            corners.put(0, 0, cornersArr);
            break;
        default:
            Debug.Log("Unknown marker type.");
            break;
    }
}
// modified from https://github.com/Itseez/opencv/blob/master/samples/cpp/calibration.cpp
void calcChessboardCorners(Size boardSize, float squareSize, MatOfPoint3f corners)
{
    List<Point3> lp = new List<Point3>();
    for (int i = 0; i < boardSize.height; i++)
    {
        for (int j = 0; j < boardSize.width; j++)
        {
            lp.Add(new Point3(j * squareSize, i * squareSize, 0));
        }
    }
    corners.fromList(lp);
}
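A minimal sketch of how the generated corners feed OpenCV's camera calibration (OpenCV for Unity API); imagePointsList and imageSize are assumed to come from chessboard detections over several captured views, and the 9x6 / 25 mm values are illustrative:

// 'imagePointsList' holds one MatOfPoint2f of detected corners per captured view.
double CalibrateFromViews(List<Mat> imagePointsList, Size imageSize)
{
    MatOfPoint3f corners = new MatOfPoint3f();
    calcChessboardCorners(new Size(9, 6), 25f, corners); // 9x6 inner corners, 25 mm squares

    // The same object-point mat is shared across all views.
    List<Mat> objectPointsList = new List<Mat>();
    for (int i = 0; i < imagePointsList.Count; i++)
    {
        objectPointsList.Add(corners);
    }

    Mat cameraMatrix = new Mat(3, 3, CvType.CV_64FC1);
    Mat distCoeffs = new Mat();
    List<Mat> rvecs = new List<Mat>();
    List<Mat> tvecs = new List<Mat>();
    return Calib3d.calibrateCamera(objectPointsList, imagePointsList, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
}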
private static Mat ConvertToFC1(MatOfPoint3f orig)
{
    Mat m = new Mat(orig.Rows, 3, MatType.CV_32FC1);
    for (int i = 0; i < orig.Rows; i++)
    {
        // Write each point's components into its own row of the single-channel mat.
        var pt = orig.ElementAt(i);
        m.Set<float>(i, 0, pt.X);
        m.Set<float>(i, 1, pt.Y);
        m.Set<float>(i, 2, pt.Z);
    }
    return m;
}
public override void Setup()
{
    NullCheck(matSourceGetterInterface, "matSourceGetter");
    NullCheck(faceLandmarkGetterInterface, "faceLandmarkGetter");

    //set 3d face object points.
    objectPoints68 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 117), //nose (Tip)
        new Point3(0.0, 32, 97),  //nose (Subnasale)
        new Point3(-79, 90, 10),  //l ear (Bitragion breadth)
        new Point3(79, 90, 10)    //r ear (Bitragion breadth)
    );
    objectPoints17 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 117), //nose (Tip)
        new Point3(0.0, 32, 97),  //nose (Subnasale)
        new Point3(-79, 90, 10),  //l ear (Bitragion breadth)
        new Point3(79, 90, 10)    //r ear (Bitragion breadth)
    );
    objectPoints6 = new MatOfPoint3f(
        new Point3(-34, 90, 83),  //l eye (Interpupillary breadth)
        new Point3(34, 90, 83),   //r eye (Interpupillary breadth)
        new Point3(0.0, 50, 117), //nose (Tip)
        new Point3(0.0, 32, 97)   //nose (Subnasale)
    );
    imagePoints = new MatOfPoint2f();

    camMatrix = new Mat(3, 3, CvType.CV_64FC1);
    //Debug.Log ("camMatrix " + camMatrix.dump ());
    distCoeffs = new MatOfDouble(0, 0, 0, 0);
    //Debug.Log ("distCoeffs " + distCoeffs.dump ());

    invertYM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, -1, 1));
    //Debug.Log ("invertYM " + invertYM.ToString ());
    invertZM = Matrix4x4.TRS(Vector3.zero, Quaternion.identity, new Vector3(1, 1, -1));
    //Debug.Log ("invertZM " + invertZM.ToString ());

    didUpdateHeadPositionAndRotation = false;
}
public static MatOfPoint3f GetCentroid(MatOfPoint3f orig)
{
    int i = 0;
    var centroid = new Point3f();
    foreach (var p in orig)
    {
        centroid += p;
        i++;
    }
    var point = new Point3f(centroid.X / i, centroid.Y / i, centroid.Z / i);
    var mat = new MatOfPoint3f();
    mat.Add(point);
    return mat;
}
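A hedged sketch tying GetCentroid to the Subtract helper earlier in this listing; because GetCentroid returns a single-row mat, a repeated-centroid mat is built first so the two Subtract inputs have matching row counts (the method name is illustrative):

// 'points' is an assumed MatOfPoint3f of sample positions.
private static MatOfPoint3f CenterPoints(MatOfPoint3f points)
{
    MatOfPoint3f centroid = GetCentroid(points);
    MatOfPoint3f centroidRepeated = new MatOfPoint3f();
    for (int i = 0; i < points.Rows; i++)
    {
        centroidRepeated.Add(centroid.ElementAt(0));
    }
    return Subtract(points, centroidRepeated);
}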
/// <summary>
/// performs perspective transformation of each element of multi-channel input matrix
/// </summary>
/// <param name="src">The source two-channel or three-channel floating-point array;
/// each element is a 2D/3D vector to be transformed</param>
/// <param name="m">3x3 or 4x4 transformation matrix</param>
/// <returns>The destination array; it will have the same size and same type as src</returns>
public static Point3f[] PerspectiveTransform(IEnumerable<Point3f> src, Mat m)
{
    if (src == null)
        throw new ArgumentNullException("src");
    if (m == null)
        throw new ArgumentNullException("m");

    using (var srcMat = MatOfPoint3f.FromArray(src))
    using (var dstMat = new MatOfPoint3f())
    {
        NativeMethods.core_perspectiveTransform_Mat(srcMat.CvPtr, dstMat.CvPtr, m.CvPtr);
        return dstMat.ToArray();
    }
}
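An illustrative call of the wrapper above (OpenCvSharp): transforming a few points by a 4x4 identity, which should return them unchanged:

public static void PerspectiveTransformIdentityExample()
{
    var pts = new[] { new Point3f(0, 0, 0), new Point3f(1, 0, 0), new Point3f(0, 1, 1) };
    using (var m = Mat.Eye(4, 4, MatType.CV_64FC1).ToMat())
    {
        Point3f[] transformed = PerspectiveTransform(pts, m);
        // With the identity matrix, 'transformed' equals 'pts' (w stays 1 for each point).
    }
}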