public void DrawDetectedMarker()
{
    // If you want to save the markers image, change the following path.
    const string path = "C:\\detected_markers_6x6_250.png";

    using (var image = Image("markers_6x6_250.png", ImreadModes.Grayscale))
    using (var outputImage = image.CvtColor(ColorConversionCodes.GRAY2RGB))
    using (var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250))
    using (var param = DetectorParameters.Create())
    {
        Point2f[][] corners;
        int[] ids;
        Point2f[][] rejectedImgPoints;
        CvAruco.DetectMarkers(image, dict, out corners, out ids, param, out rejectedImgPoints);

        // Accepted markers in blue, rejected candidates in red.
        CvAruco.DrawDetectedMarkers(outputImage, corners, ids, new Scalar(255, 0, 0));
        CvAruco.DrawDetectedMarkers(outputImage, rejectedImgPoints, null, new Scalar(0, 0, 255));

        if (Debugger.IsAttached)
        {
            Cv2.ImWrite(path, outputImage);
            Process.Start(path);
        }
    }
}
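// A minimal sketch of producing a marker image like the "markers_6x6_250.png"
// input used above. CvAruco.DrawMarker renders a single marker from the
// dictionary (newer OpenCvSharp releases may expose this as GenerateImageMarker);
// the id, pixel size, and output path here are illustrative assumptions, not
// values from the original test.
public void GenerateMarker()
{
    using (var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250))
    using (var marker = new Mat())
    {
        // Render marker id 0 as a 200x200-pixel image with the default border.
        CvAruco.DrawMarker(dict, 0, 200, marker);
        Cv2.ImWrite("marker_6x6_250_id0.png", marker);
    }
}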
public void Run(cv.Mat src)
{
    // The ids of the four markers in the image at FilePath.Image.Aruco.
    const int upperLeftMarkerId = 160;
    const int upperRightMarkerId = 268;
    const int lowerRightMarkerId = 176;
    const int lowerLeftMarkerId = 168;

    var detectorParameters = DetectorParameters.Create();
    detectorParameters.CornerRefinementMethod = CornerRefineMethod.Subpix;
    detectorParameters.CornerRefinementWinSize = 9;

    var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict4X4_1000);
    CvAruco.DetectMarkers(src, dictionary, out var corners, out var ids, detectorParameters, out var rejectedPoints);

    detectedMarkers = src.Clone();
    CvAruco.DrawDetectedMarkers(detectedMarkers, corners, ids, cv.Scalar.White);

    // Find the index of each of the four markers in the ids array. The same
    // index into the corners array gives the corners of that marker.
    var upperLeftCornerIndex = Array.FindIndex(ids, id => id == upperLeftMarkerId);
    var upperRightCornerIndex = Array.FindIndex(ids, id => id == upperRightMarkerId);
    var lowerRightCornerIndex = Array.FindIndex(ids, id => id == lowerRightMarkerId);
    var lowerLeftCornerIndex = Array.FindIndex(ids, id => id == lowerLeftMarkerId);

    // Make sure we found all four markers.
    if (upperLeftCornerIndex < 0 || upperRightCornerIndex < 0 ||
        lowerRightCornerIndex < 0 || lowerLeftCornerIndex < 0)
    {
        return;
    }

    // Marker corners are stored clockwise beginning with the upper-left corner.
    // Get the first (upper-left) corner of the upper-left marker.
    var upperLeftPixel = corners[upperLeftCornerIndex][0];
    // Get the second (upper-right) corner of the upper-right marker.
    var upperRightPixel = corners[upperRightCornerIndex][1];
    // Get the third (lower-right) corner of the lower-right marker.
    var lowerRightPixel = corners[lowerRightCornerIndex][2];
    // Get the fourth (lower-left) corner of the lower-left marker.
    var lowerLeftPixel = corners[lowerLeftCornerIndex][3];

    // Create coordinates for passing to GetPerspectiveTransform.
    var sourceCoordinates = new List<cv.Point2f>
    {
        upperLeftPixel, upperRightPixel, lowerRightPixel, lowerLeftPixel
    };
    var destinationCoordinates = new List<cv.Point2f>
    {
        new cv.Point2f(0, 0),
        new cv.Point2f(1024, 0),
        new cv.Point2f(1024, 1024),
        new cv.Point2f(0, 1024),
    };

    var transform = cv.Cv2.GetPerspectiveTransform(sourceCoordinates, destinationCoordinates);
    normalizedImage = new cv.Mat();
    cv.Cv2.WarpPerspective(src, normalizedImage, transform, new cv.Size(1024, 1024));
}
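// A minimal sketch of reusing the homography computed above: instead of warping
// the whole image, individual pixels can be mapped into the normalized
// 1024x1024 space with Cv2.PerspectiveTransform. The transform argument is the
// Mat returned by GetPerspectiveTransform in Run(); the helper name is ours.
public static cv.Point2f NormalizePixel(cv.Point2f pixel, cv.Mat transform)
{
    // PerspectiveTransform applies the 3x3 homography to each input point.
    var mapped = cv.Cv2.PerspectiveTransform(new[] { pixel }, transform);
    return mapped[0];
}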
void Start()
{
    // Create default parameters for detection
    DetectorParameters detectorParameters = DetectorParameters.Create();

    // Dictionary holds the set of all available markers
    Dictionary dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);

    // Variables to hold results
    Point2f[][] corners;
    int[] ids;
    Point2f[][] rejectedImgPoints;

    // Create an OpenCV image from the Unity texture
    Mat mat = Unity.TextureToMat(this.texture);

    // Convert image to grayscale
    Mat grayMat = new Mat();
    Cv2.CvtColor(mat, grayMat, ColorConversionCodes.BGR2GRAY);

    // Detect and draw markers
    CvAruco.DetectMarkers(grayMat, dictionary, out corners, out ids, detectorParameters, out rejectedImgPoints);
    CvAruco.DrawDetectedMarkers(mat, corners, ids);

    // Create a Unity output texture with the detected markers
    Texture2D outputTexture = Unity.MatToTexture(mat);

    // Set the texture to see the result
    RawImage rawImage = gameObject.GetComponent<RawImage>();
    rawImage.texture = outputTexture;
}
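// The Start() above runs detection once on a static texture. A minimal sketch
// of running the same pipeline every frame from a live camera instead; the
// webCamTexture, dictionary, detectorParameters, and rawImage fields, and a
// TextureToMat overload accepting WebCamTexture, are assumptions about the
// surrounding component, not part of the original snippet.
void Update()
{
    if (!webCamTexture.didUpdateThisFrame) return;

    using (Mat mat = Unity.TextureToMat(webCamTexture))
    using (Mat grayMat = mat.CvtColor(ColorConversionCodes.BGR2GRAY))
    {
        CvAruco.DetectMarkers(grayMat, dictionary, out var corners, out var ids, detectorParameters, out _);
        CvAruco.DrawDetectedMarkers(mat, corners, ids);

        // Replace rather than leak the previous frame's texture.
        Destroy(rawImage.texture);
        rawImage.texture = Unity.MatToTexture(mat);
    }
}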
public void DetectMarkers()
{
    using (var image = Image("markers_6x6_250.png", ImreadModes.Grayscale))
    using (var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250))
    using (var param = DetectorParameters.Create())
    {
        CvAruco.DetectMarkers(image, dict, out _, out _, param, out _);
    }
}
public void DetectMarkers()
{
    using (var image = Image("markers_6x6_250.png", ImreadModes.Grayscale))
    using (var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250))
    using (var param = DetectorParameters.Create())
    {
        Point2f[][] corners;
        int[] ids;
        Point2f[][] rejectedImgPoints;
        CvAruco.DetectMarkers(image, dict, out corners, out ids, param, out rejectedImgPoints);
    }
}
public void EstimatePoseSingleMarkers()
{
    using var image = Image("markers_6x6_250.png", ImreadModes.Grayscale);
    using var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);
    var param = DetectorParameters.Create();
    CvAruco.DetectMarkers(image, dict, out var corners, out _, param, out _);

    using var cameraMatrix = Mat.Eye(3, 3, MatType.CV_64FC1);
    using var distCoeffs = Mat.Zeros(4, 1, MatType.CV_64FC1);
    using var rvec = new Mat();
    using var tvec = new Mat();
    using var objPoints = new Mat();
    CvAruco.EstimatePoseSingleMarkers(corners, 6, cameraMatrix, distCoeffs, rvec, tvec, objPoints);

    Assert.Equal(20, rvec.Total());
    Assert.Equal(20, tvec.Total());
    Assert.Equal(4, objPoints.Total());
}
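// A minimal sketch of visualizing the poses returned above: each rvec/tvec row
// can be rendered as a coordinate axis on a color copy of the image. This
// assumes an OpenCvSharp build that exposes Cv2.DrawFrameAxes from calib3d
// (older bindings offered CvAruco.DrawAxis instead); the axis length matching
// the marker size is our choice.
static void DrawMarkerAxes(Mat colorImage, Mat cameraMatrix, Mat distCoeffs, Mat rvecs, Mat tvecs)
{
    for (int i = 0; i < rvecs.Rows; i++)
    {
        // Each row is a 1x1 3-channel Mat holding one rotation/translation vector.
        using (var r = rvecs.Row(i))
        using (var t = tvecs.Row(i))
        {
            Cv2.DrawFrameAxes(colorImage, cameraMatrix, distCoeffs, r, t, 6f);
        }
    }
}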
private void DetectMarkers()
{
    while (true)
    {
        // On Android, if there isn't a Debug.Log (or something similar) here,
        // the thread won't start for some reason.
#if UNITY_ANDROID
        Debug.Log("Updating...");
#endif
        if (!updateThread)
        {
            // Skip updating when not needed; this also avoids memory exceptions
            // when the MonoBehaviour is disabled or the main thread hasn't
            // updated yet.
            continue;
        }

        if (threadCounter > 0)
        {
            // Convert the image to grayscale
            Cv2.CvtColor(img, grayedImg, ColorConversionCodes.BGR2GRAY);

            // Detect the markers and fill the corner and id arrays
            CvAruco.DetectMarkers(grayedImg, dictionary, out corners, out ids, detectorParameters, out rejectedImgPoints);

            // Fire the callbacks
            if (throwMarkerCallbacks)
            {
                CheckIfLostMarkers();
                CheckIfDetectedMarkers();
            }

            // Let the main thread know the frame has been processed
            outputImage = true;
            Interlocked.Exchange(ref threadCounter, 0);
        }
    }
}
/// <summary>
/// Finds ArUco markers in an image and returns them sorted by area, largest first.
/// </summary>
/// <param name="colorCv">The color image to search, in BGR or BGRA format.</param>
/// <returns>The detected markers, ordered by descending area.</returns>
public static List<Marker> FindAruco(Mat colorCv)
{
    Mat m = new Mat();
    colorCv.CopyTo(m);
    if (colorCv.Channels() == 4)
    {
        m = m.CvtColor(ColorConversionCodes.BGRA2BGR);
    }

    // The Kinect color image is flipped; correct it before ArUco detection.
    var flipped = m.Flip(FlipMode.Y);

    List<Marker> markers = new List<Marker>();
    var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);
    Point2f[][] points;
    Point2f[][] rejPoints;
    int[] ids;
    var parms = DetectorParameters.Create();
    CvAruco.DetectMarkers(flipped, dictionary, out points, out ids, parms, out rejPoints);

    for (int i = 0; i < ids.Length; i++)
    {
        var id = ids[i];
        var arucoMarkerCorners = points[i];

        // Mirror the corner x coordinates back into the unflipped image space.
        for (int j = 0; j < arucoMarkerCorners.Length; j++)
        {
            arucoMarkerCorners[j].X = KinectSettings.COLOR_WIDTH - arucoMarkerCorners[j].X - 1;
        }

        var area = Cv2.ContourArea(arucoMarkerCorners);
        markers.Add(new Marker()
        {
            Id = id,
            Points = arucoMarkerCorners,
            Area = area
        });
    }

    return markers.OrderByDescending(ma => ma.Area).ToList();
}
// Update is called once per frame
void Update()
{
    Point2f[][] maker_corners;   // Corner coordinates of the AR markers
    int[] maker_ids;             // Ids of the detected AR markers
    Point2f[][] reject_points;

    // Grab the next frame
    cam.Read(cam_frame);

    // Detect the AR markers
    CvAruco.DetectMarkers(cam_frame, ar_dict, out maker_corners, out maker_ids, detect_param, out reject_points);

    // Set the calibration data. The arrays hold doubles, so the Mats must be
    // created as CV_64FC1 (the original used CV_32FC1, which mismatches the data).
    double[,] array3_3 = new double[3, 3]
    {
        { 548.8, 0.0, 332.1 },
        { 0.0, 549.8, 236.2 },
        { 0.0, 0.0, 1.0 }
    };
    Mat cameraMatrix = new Mat(3, 3, MatType.CV_64FC1, array3_3);
    double[,] array1_5 = new double[1, 5] { { -0.0886, 0.4802, -0.0026, 0.0019, -0.8238 } };
    Mat distortionCoefficients = new Mat(1, 5, MatType.CV_64FC1, array1_5);

    Mat rvec = new Mat();
    Mat tvec = new Mat();

    if (maker_ids.Length != 0)
    {
        // Draw the detected marker information
        CvAruco.DrawDetectedMarkers(cam_frame, maker_corners, maker_ids, new Scalar(0, 255, 0));

        // Estimate each marker's pose (marker side length: 1.8 units)
        CvAruco.EstimatePoseSingleMarkers(maker_corners, 1.8f, cameraMatrix, distortionCoefficients, rvec, tvec);

        // Distance to the first marker: tvec holds one Vec3d per marker, and
        // its z component is the distance along the camera axis.
        Vec3d vec3D = tvec.At<Vec3d>(0, 0);
        double z = vec3D[2];
        Debug.Log(z);
    }

    cam_Texture.LoadImage(cam_frame.ImEncode());
}
public void DetectTouchByTwoCamera(Action action)
{
    using (var video0 = new VideoCapture(0))
    using (var video1 = new VideoCapture(1))
    using (var window0 = new Window("capture0"))
    using (var window1 = new Window("capture1"))
    {
        var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);
        var parameters = DetectorParameters.Create();
        var frames = new List<Mat> { new Mat(), new Mat() };
        var videos = new List<VideoCapture> { video0, video1 };
        var windows = new List<Window> { window0, window1 };
        Point2f[][] corners;
        int[] ids;
        Point2f[][] rejectedImgPoints;

        var wasFoundList = new List<bool> { false, false };
        var isTouchedList = new List<bool> { false, false };
        var wasTouched = false;

        while (true)
        {
            for (int i = 0; i < 2; i++)
            {
                videos[i].Read(frames[i]);
                CvAruco.DetectMarkers(frames[i], dictionary, out corners, out ids, parameters, out rejectedImgPoints);

                // A camera counts as "touched" when its marker was visible on
                // the previous frame but is no longer detected.
                isTouchedList[i] = wasFoundList[i] && !(ids.Length > 0);
                if (ids.Length > 0)
                {
                    wasFoundList[i] = true;
                    CvAruco.DrawDetectedMarkers(frames[i], corners, ids);
                }
                else
                {
                    wasFoundList[i] = false;
                    isTouchedList[i] = true;
                }
                windows[i].ShowImage(frames[i]);
            }

            // Both cameras report a touch at the same time.
            if (!isTouchedList.Contains(false))
            {
                if (!wasTouched)
                {
                    Console.WriteLine("Hello world!");
                    for (int i = 0; i < isTouchedList.Count; i++)
                    {
                        isTouchedList[i] = false;
                    }
                }
                wasTouched = true;
            }
            else
            {
                wasTouched = false;
            }

            var key = Cv2.WaitKey(1);
            if (key == 'q')
            {
                break;
            }
        }
    }
}
static void Main(string[] args)
{
    // Display a loaded image and its edge (difference) image
    #region
    //Mat src = new Mat(@"C:\Users\tyani\Downloads\lenna.png", ImreadModes.Grayscale);
    //// Mat src = Cv2.ImRead("lenna.png", ImreadModes.Grayscale);
    //Mat dst = new Mat();
    //Cv2.Canny(src, dst, 50, 200);
    //using (new Window("src image", src))
    //using (new Window("dst image", dst))
    //{
    //    Cv2.WaitKey();
    //}
    #endregion

    // Detect faces in the captured video and mark their positions with circles
    #region
    //using (var win = new Window("capture"))
    //using (var video = new VideoCapture(0))
    //{
    //    // Destination matrix
    //    var frame = new Mat();
    //    // Classifier
    //    var haarcascade = new CascadeClassifier(@"C:\Users\tyani\Downloads\opencv-master\data\haarcascades\haarcascade_frontalface_default.xml");
    //    // Grayscale image
    //    var gray = new Mat();
    //    while (true)
    //    {
    //        video.Read(frame);
    //        // Assign the grayscale conversion of frame to gray
    //        Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY);
    //        // Detect
    //        var faces = haarcascade.DetectMultiScale(gray);
    //        // Draw an ellipse at each detection
    //        foreach (OpenCvSharp.Rect face in faces)
    //        {
    //            var center = new Point
    //            {
    //                X = (int)(face.X + face.Width * 0.5),
    //                Y = (int)(face.Y + face.Height * 0.5)
    //            };
    //            var axes = new Size
    //            {
    //                Width = (int)(face.Width * 0.5),
    //                Height = (int)(face.Height * 0.5)
    //            };
    //            Cv2.Ellipse(frame, center, axes, 0, 0, 360, Scalar.Red, 4);
    //            Console.WriteLine("Found!");
    //        }
    //        win.ShowImage(frame);
    //        if (Cv2.WaitKey(30) >= 0) { break; }
    //    }
    //}
    #endregion

    // Perform camera calibration
    #region
    //// Number of frames in which chessboard corners were captured
    //int imgInd = 0;
    //// Definition of the chessboard to look for
    //const int BOARD_W = 7;
    //const int BOARD_H = 7;
    //Size BOARD_SIZE = new Size(BOARD_W, BOARD_H);
    //const int SCALE = 26;
    //// List of coarse corner coordinates
    //var imageCorners = new Mat<Point2f>();
    //// Array of refined corner coordinates
    //Point2f[] imageCorners2;
    //// Corner coordinates detected in each captured frame
    //var imagePoints = new List<Mat<Point2f>>();
    //// Current frame image
    //var frame = new Mat();
    //// Grayscale of the frame image
    //var gray = new Mat();
    //// Frames in which a chessboard was recognized
    //var chessboard = new List<Mat>();
    //// Termination criteria for corner refinement
    //var criteria = new TermCriteria(CriteriaType.Count | CriteriaType.Eps, 30, 0.001);
    //// Create windows and open the camera
    //using (var capWindow = new Window("capture"))
    //using (var chessWindow = new Window("DrawChessboardCorner"))
    //using (var video = new VideoCapture(0))
    //{
    //    while (true)
    //    {
    //        video.Read(frame);
    //        var key = Cv2.WaitKey(1);
    //        if (key == 'c')
    //        {
    //            Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY);
    //            // ret is true if the chessboard corners were found
    //            var ret = Cv2.FindChessboardCorners(frame, BOARD_SIZE, imageCorners);
    //            if (ret)
    //            {
    //                Console.WriteLine("find chess board corners.");
    //                chessboard.Add(frame);
    //                // Store the refined corner coordinates in imageCorners2
    //                imageCorners2 = Cv2.CornerSubPix(gray, imageCorners, new Size(9, 9), new Size(-1, -1), criteria);
    //                Cv2.DrawChessboardCorners(frame, BOARD_SIZE, imageCorners2, ret);
    //                imageCorners.SetArray<Point2f>(imageCorners2);
    //                imagePoints.Add(imageCorners);
    //                chessWindow.ShowImage(frame);
    //                Cv2.WaitKey(500);
    //                imgInd++;
    //            }
    //        }
    //        else if (key == 'q')
    //        {
    //            break;
    //        }
    //        else
    //        {
    //            Cv2.PutText(frame, "Number of captures: " + imgInd.ToString(), new Point(30, 20), HersheyFonts.HersheyPlain, 1, new Scalar(0, 255, 0));
    //            Cv2.PutText(frame, "c: Capture the image", new Point(30, 40), HersheyFonts.HersheyPlain, 1, new Scalar(0, 255, 0));
    //            Cv2.PutText(frame, "q: Finish capturing and calculate the camera matrix and distortion", new Point(30, 60), HersheyFonts.HersheyPlain, 1, new Scalar(0, 255, 0));
    //            capWindow.ShowImage(frame);
    //        }
    //    }
    //    // Build the object-space corner coordinates for each captured view
    //    // (original note: "not quite sure about this part")
    //    var objectPoints = new List<Mat<Point3f>>();
    //    var objectConrers = new Mat<Point3f>();
    //    for (int j = 0; j < BOARD_H; j++)
    //    {
    //        for (int i = 0; i < BOARD_W; i++)
    //        {
    //            objectConrers.PushBack(new Point3f(i * SCALE, j * SCALE, 0f));
    //        }
    //    }
    //    for (int i = 0; i < imgInd; i++)
    //        objectPoints.Add(objectConrers);
    //    var cameraMatrix = new Mat();
    //    var distCoeffs = new Mat();
    //    Mat[] rvecs;
    //    Mat[] tvecs;
    //    var rms = Cv2.CalibrateCamera(objectPoints, imagePoints, frame.Size(), cameraMatrix, distCoeffs, out rvecs, out tvecs);
    //    Console.WriteLine("Re-projection Error(unit: pixel) : " + rms);
    //    Console.WriteLine("cameraMatrix(unit: pixel) : " + cameraMatrix);
    //    Console.WriteLine("distCoeffs : " + distCoeffs);
    //    var dst = new Mat();
    //    Cv2.Undistort(chessboard[0], dst, cameraMatrix, distCoeffs);
    //    using (new Window("Undistorted image", WindowMode.AutoSize, dst))
    //    {
    //        while (true)
    //            if (Cv2.WaitKey() == 'q')
    //                break;
    //    }
    //}
    #endregion

    // SIFT matching
    #region
    //var leftFrame = new Mat();
    //var rightFrame = new Mat();
    //var view = new Mat();
    //var leftGray = new Mat();
    //var rightGray = new Mat();
    //var sift = SIFT.Create();
    //KeyPoint[] keypoints1, keypoints2;
    //var descriptors1 = new Mat();
    //var descriptors2 = new Mat();
    //using (var leftCapWindow = new Window("left capture"))
    //using (var rightCapWindow = new Window("right capture"))
    //using (var leftVideo = new VideoCapture(0))
    //using (var rightVideo = new VideoCapture(1))
    //{
    //    while (true)
    //    {
    //        leftVideo.Read(leftFrame);
    //        rightVideo.Read(rightFrame);
    //        Cv2.PutText(leftFrame, "d: detect and compute SIFT", new Point(30, 40), HersheyFonts.HersheyPlain, 1, new Scalar(0, 255, 0));
    //        Cv2.PutText(rightFrame, "d: detect and compute SIFT", new Point(30, 40), HersheyFonts.HersheyPlain, 1, new Scalar(0, 255, 0));
    //        leftCapWindow.ShowImage(leftFrame);
    //        rightCapWindow.ShowImage(rightFrame);
    //        var key = Cv2.WaitKey(1);
    //        if (key == 'd')
    //        {
    //            Cv2.CvtColor(leftFrame, leftGray, ColorConversionCodes.BGR2GRAY);
    //            Cv2.CvtColor(rightFrame, rightGray, ColorConversionCodes.BGR2GRAY);
    //            sift.DetectAndCompute(leftGray, null, out keypoints1, descriptors1);
    //            sift.DetectAndCompute(rightGray, null, out keypoints2, descriptors2);
    //            BFMatcher matcher = new BFMatcher(NormTypes.L2, false);
    //            DMatch[] matches = matcher.Match(descriptors1, descriptors2);
    //            Cv2.DrawMatches(leftGray, keypoints1, rightGray, keypoints2, matches, view);
    //            using (new Window("SIFT matching", WindowMode.AutoSize, view))
    //            {
    //                Cv2.WaitKey();
    //            }
    //        }
    //    }
    //}
    #endregion

    // Disparity map generation
    #region
    //var leftFrame = new Mat();
    //var rightFrame = new Mat();
    //var disparity = new Mat();
    //var leftGray = new Mat();
    //var rightGray = new Mat();
    //var sift = SIFT.Create();
    //KeyPoint[] keypoints1, keypoints2;
    //var descriptors1 = new Mat();
    //var descriptors2 = new Mat();
    //using (var leftCapWindow = new Window("left capture"))
    //using (var rightCapWindow = new Window("right capture"))
    //using (var leftVideo = new VideoCapture(1))
    //using (var rightVideo = new VideoCapture(0))
    //{
    //    while (true)
    //    {
    //        leftVideo.Read(leftFrame);
    //        rightVideo.Read(rightFrame);
    //        leftCapWindow.ShowImage(leftFrame);
    //        rightCapWindow.ShowImage(rightFrame);
    //        Cv2.CvtColor(leftFrame, leftGray, ColorConversionCodes.BGR2GRAY);
    //        Cv2.CvtColor(rightFrame, rightGray, ColorConversionCodes.BGR2GRAY);
    //        var stereo = StereoSGBM.Create(0, 16 * 10, 11, mode: StereoSGBMMode.HH);
    //        stereo.Compute(leftGray, rightGray, disparity);
    //        disparity.Normalize(alpha: 0, beta: 255, normType: NormTypes.MinMax, dtype: MatType.CV_8U);
    //        using (new Window("disparity map", WindowMode.AutoSize, disparity))
    //        {
    //            var key = Cv2.WaitKey(1);
    //            if (key == 'q')
    //                break;
    //        }
    //    }
    //}
    #endregion

    // The simplest stereo matching tutorial
    #region
    //var imgL = new Mat(@"C:\Users\tyani\Downloads\scene1.row3.col1.png", ImreadModes.Grayscale);
    //var imgR = new Mat(@"C:\Users\tyani\Downloads\scene1.row3.col3.png", ImreadModes.Grayscale);
    //var disparity = new Mat();
    //var stereo = StereoBM.Create();
    //stereo.Compute(imgL, imgR, disparity);
    //using (new Window(disparity))
    //{
    //    Cv2.WaitKey(0);
    //}
    #endregion

    // AR markers
    #region
    using (var video0 = new VideoCapture(0))
    using (var video1 = new VideoCapture(1))
    using (var window0 = new Window("capture0"))
    using (var window1 = new Window("capture1"))
    {
        var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);
        var parameters = DetectorParameters.Create();
        var frames = new List<Mat> { new Mat(), new Mat() };
        var videos = new List<VideoCapture> { video0, video1 };
        var windows = new List<Window> { window0, window1 };
        Point2f[][] corners;
        int[] ids;
        Point2f[][] rejectedImgPoints;

        var wasFoundList = new List<bool> { false, false };
        var isTouchedList = new List<bool> { false, false };
        var wasTouched = false;

        while (true)
        {
            for (int i = 0; i < 2; i++)
            {
                videos[i].Read(frames[i]);
                CvAruco.DetectMarkers(frames[i], dictionary, out corners, out ids, parameters, out rejectedImgPoints);

                // A camera counts as "touched" when its marker was visible on
                // the previous frame but is no longer detected.
                isTouchedList[i] = wasFoundList[i] && !(ids.Length > 0);
                if (ids.Length > 0)
                {
                    wasFoundList[i] = true;
                    CvAruco.DrawDetectedMarkers(frames[i], corners, ids);
                }
                else
                {
                    wasFoundList[i] = false;
                    isTouchedList[i] = true;
                }
                windows[i].ShowImage(frames[i]);
            }

            if (!isTouchedList.Contains(false))
            {
                if (!wasTouched)
                {
                    Console.WriteLine("Hello world!");
                    for (int i = 0; i < isTouchedList.Count; i++)
                    {
                        isTouchedList[i] = false;
                    }
                }
                wasTouched = true;
            }
            else
            {
                wasTouched = false;
            }

            var key = Cv2.WaitKey(1);
            if (key == 'q')
            {
                break;
            }
        }
    }
    #endregion

    // AR markers (earlier single-camera attempt)
    #region
    //var numCamera = 1;
    //var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict7X7_50);
    //var parameters = DetectorParameters.Create();
    //var videoCaps = new List<VideoCapture>(numCamera);
    //var windows = new List<Window>(numCamera);
    //var capFrames = new List<Mat>(numCamera);
    ////var markerCornersList = new List<Point2f[][]>(numCamera);
    ////var markerIdsList = new List<int[]>(numCamera);
    ////var rejectedImgPointsList = new List<Point2f[][]>(numCamera);
    //var isFoundList = new List<bool>(numCamera);
    //var isTouchedList = new List<bool>(numCamera);
    //try
    //{
    //    for (int i = 0; i < numCamera; i++)
    //    {
    //        videoCaps.Add(new VideoCapture(i));
    //        windows.Add(new Window("VideoCapture" + i.ToString()));
    //    }
    //    while (true)
    //    {
    //        for (int i = 0; i < numCamera; i++)
    //        {
    //            var capFrame = new Mat();
    //            Point2f[][] markerCorners;
    //            int[] markerIds;
    //            Point2f[][] rejectedImgPoints;
    //            videoCaps[i].Read(capFrame);
    //            CvAruco.DetectMarkers(capFrame, dictionary, out markerCorners, out markerIds, parameters, out rejectedImgPoints);
    //            if (!isFoundList[i] && markerIds.Length > 0)
    //            {
    //                isFoundList[i] = true;
    //            }
    //            if (isFoundList[i] && !(markerIds[i] > 0))
    //            {
    //                isTouchedList[i] = true;
    //            }
    //            if (markerIds.Length > 0)
    //            {
    //                CvAruco.DrawDetectedMarkers(capFrame, markerCorners, markerIds);
    //            }
    //            windows[i].ShowImage(capFrame);
    //        }
    //        if (!isTouchedList.Contains(false))
    //        {
    //            Console.WriteLine("Hello world!");
    //        }
    //        var key = Cv2.WaitKey(1);
    //        if (key == 'q')
    //        {
    //            break;
    //        }
    //    }
    //}
    //catch
    //{
    //    Console.WriteLine("Exception: could not acquire the camera.");
    //}
    //finally
    //{
    //    for (int i = 0; i < numCamera; i++)
    //    {
    //        if (videoCaps[i] != null)
    //            videoCaps[i].Dispose();
    //        if (windows[i] != null)
    //        {
    //            windows[i].Dispose();
    //        }
    //    }
    //}
    #endregion
}
void Update()
{
    // Get the camera images from the Leap Motion
    readLeftRightFrame();

    Point2f[][] maker_corners;   // Corner coordinates of the AR markers
    int[] maker_ids;             // Ids of the detected AR markers
    Point2f[][] reject_points;

    // Detect the AR markers
    CvAruco.DetectMarkers(leftRight_frame[0], ar_dict, out maker_corners, out maker_ids, detect_param, out reject_points);

    GameObject[] animals = GameObject.FindGameObjectsWithTag("animal");

    // If no markers are detected, destroy all animal objects
    if (maker_ids.Length == 0)
    {
        foreach (GameObject animal in animals)
        {
            animalMaster animal_script = animal.GetComponent<animalMaster>();
            animal_script.destoryThis();
            Debug.Log("destroy");
        }
    }
    // Otherwise, destroy the animal objects whose markers were not detected
    else
    {
        foreach (GameObject animal in animals)
        {
            // If no detected id matches this game object
            if (DetObj.CheckSameID(animal, maker_ids) == false)
            {
                animalMaster animal_script = animal.GetComponent<animalMaster>();
                animal_script.destoryThis();
            }
        }
    }

    if (maker_ids.Length > 0)
    {
        // Draw the detected marker information
        CvAruco.DrawDetectedMarkers(leftRight_frame[0], maker_corners, maker_ids, new Scalar(0, 255, 0));

        // Estimate the marker poses
        OutputArray tvec = new Mat(), rvec = new Mat();
        CvAruco.EstimatePoseSingleMarkers(maker_corners, 0.6f, cameraMatrix, distCoefficients, rvec, tvec, null);

        // Handle each detected id
        for (int i = 0; i < maker_ids.Length; i++)
        {
            // If the detected marker is in the id dictionary
            if (DetObj.CheckTargetMarker(maker_ids[i], id_dict) == true)
            {
                if (animals != null)
                {
                    // If an object for this marker already exists, move it
                    if (DetObj.CheckTargetObject(maker_ids[i], animals) == true)
                    {
                        int animal_index = DetObj.GetAnimalIndex(maker_ids[i], animals);
                        animalMaster animal_script = animals[animal_index].GetComponent<animalMaster>();
                        animal_script.moveModel(i, tvec, rvec);
                    }
                    // Otherwise spawn a new one
                    else
                    {
                        GameObject newAnimal = Instantiate(animal);
                        animalMaster animal_script = newAnimal.GetComponent<animalMaster>();
                        int animal_index = DetObj.GetAnimalIndex(maker_ids[i], animals);
                        animal_script.setParam(maker_ids[i], i, tvec, rvec);
                    }
                }
                else
                {
                    GameObject newAnimal = Instantiate(animal);
                    animalMaster animal_script = newAnimal.GetComponent<animalMaster>();
                    animal_script.setParam(maker_ids[i], 0, tvec, rvec);
                }
            }
        }
    }
}
/// <summary>
/// Detect markers and estimate their poses.
/// </summary>
/// <param name="mat">
/// The image in which to detect markers.
/// For example, you can get pixels from a web camera: https://docs.unity3d.com/ScriptReference/WebCamTexture.GetPixels32.html
/// </param>
/// <param name="width">Width of the image in pixels.</param>
/// <param name="height">Height of the image in pixels.</param>
public List<int> Detect(Mat mat, int width, int height)
{
    List<int> result = new List<int>();
    markerTransforms.Clear();

    // Create default parameters for detection
    DetectorParameters detectorParameters = DetectorParameters.Create();

    // Dictionary holds the set of all available markers
    Dictionary dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);

    // Variables to hold results
    Point2f[][] corners;
    int[] ids;
    Point2f[][] rejectedImgPoints;

    // Convert image to grayscale
    Mat grayMat = new Mat();
    Cv2.CvtColor(mat, grayMat, ColorConversionCodes.BGR2GRAY);

    // Detect markers
    CvAruco.DetectMarkers(grayMat, dictionary, out corners, out ids, detectorParameters, out rejectedImgPoints);
    // CvAruco.DrawDetectedMarkers(mat, corners, ids);

    // Object-space corners of a marker of the given size, centered at the origin
    float markerSizeInMeters = 1f;
    Point3f[] markerPoints = new Point3f[]
    {
        new Point3f(-markerSizeInMeters / 2f, markerSizeInMeters / 2f, 0f),
        new Point3f(markerSizeInMeters / 2f, markerSizeInMeters / 2f, 0f),
        new Point3f(markerSizeInMeters / 2f, -markerSizeInMeters / 2f, 0f),
        new Point3f(-markerSizeInMeters / 2f, -markerSizeInMeters / 2f, 0f)
    };

    // Approximate pinhole intrinsics: focal length ~ max(width, height),
    // principal point at the image center.
    double max_wh = (double)Math.Max(width, height);
    double fx = max_wh;
    double fy = max_wh;
    double cx = width / 2.0d;
    double cy = height / 2.0d;
    double[,] cameraMatrix = new double[3, 3] { { fx, 0d, cx }, { 0d, fy, cy }, { 0d, 0d, 1d } };
    double[] distCoeffs = new double[4] { 0d, 0d, 0d, 0d };

    double[] rvec = new double[3] { 0d, 0d, 0d };
    double[] tvec = new double[3] { 0d, 0d, 0d };
    double[,] rotMat = new double[3, 3] { { 0d, 0d, 0d }, { 0d, 0d, 0d }, { 0d, 0d, 0d } };

    for (int i = 0; i < ids.Length; i++)
    {
        // Solve for the pose of each marker from its four image corners
        Cv2.SolvePnP(markerPoints, corners[i], cameraMatrix, distCoeffs, out rvec, out tvec, false, SolvePnPFlags.Iterative);
        // CvAruco.DrawAxis(mat, cameraMatrix, distCoeffs, rvec, tvec, 1.0f);

        // Convert the rotation vector into a 3x3 rotation matrix and pack it,
        // together with the translation, into a 4x4 transform.
        Cv2.Rodrigues(rvec, out rotMat);
        Matrix4x4 matrix = new Matrix4x4();
        matrix.SetRow(0, new Vector4((float)rotMat[0, 0], (float)rotMat[0, 1], (float)rotMat[0, 2], (float)tvec[0]));
        matrix.SetRow(1, new Vector4((float)rotMat[1, 0], (float)rotMat[1, 1], (float)rotMat[1, 2], (float)tvec[1]));
        matrix.SetRow(2, new Vector4((float)rotMat[2, 0], (float)rotMat[2, 1], (float)rotMat[2, 2], (float)tvec[2]));
        matrix.SetRow(3, new Vector4(0f, 0f, 0f, 1f));

        result.Add(ids[i]);
        markerTransforms.Add(matrix);
    }

    return result;
}
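// A minimal sketch of turning one of the matrices produced above into a Unity
// pose. The y-flip below is a common OpenCV-to-Unity handedness adjustment and
// is an assumption; the exact convention depends on how the camera texture is
// oriented in the scene, so treat the signs as a starting point.
public static void ApplyMarkerPose(Transform target, Matrix4x4 m)
{
    // Translation sits in the fourth column; OpenCV's y axis points down,
    // Unity's points up, hence the sign flip on y.
    Vector3 position = new Vector3(m.m03, -m.m13, m.m23);

    // Build the rotation from the matrix's forward (z) and up (y) columns,
    // with the same y-flip applied to both vectors.
    Quaternion rotation = Quaternion.LookRotation(
        new Vector3(m.m02, -m.m12, m.m22),
        new Vector3(-m.m01, m.m11, -m.m21));

    target.localPosition = position;
    target.localRotation = rotation;
}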