/// <summary>
/// Detects ArUco markers (Dict6X6_250) in this.texture, draws them, and shows
/// the annotated result on the attached RawImage.
/// </summary>
void Start()
{
    // Create default parameters for detection.
    DetectorParameters detectorParameters = DetectorParameters.Create();

    // Dictionary holds the set of all available markers.
    Dictionary dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);

    // Variables to hold results.
    Point2f[][] corners;
    int[] ids;
    Point2f[][] rejectedImgPoints;

    // Fix: Mats wrap native memory; dispose them instead of leaking
    // (the original never released mat or grayMat).
    using (Mat mat = Unity.TextureToMat(this.texture))
    using (Mat grayMat = new Mat())
    {
        // Convert image to grayscale for detection.
        Cv2.CvtColor(mat, grayMat, ColorConversionCodes.BGR2GRAY);

        // Detect and draw markers.
        CvAruco.DetectMarkers(grayMat, dictionary, out corners, out ids, detectorParameters, out rejectedImgPoints);
        CvAruco.DrawDetectedMarkers(mat, corners, ids);

        // Create Unity output texture with detected markers.
        Texture2D outputTexture = Unity.MatToTexture(mat);

        // Set texture to see the result.
        RawImage rawImage = gameObject.GetComponent<RawImage>();
        rawImage.texture = outputTexture;
    }
}
/// <summary>
/// Detects markers in the 6x6_250 sample image and draws accepted markers in
/// red and rejected candidates in blue on an RGB copy of it.
/// </summary>
public void DrawDetectedMarker()
{
    // If you want to save markers image, you must change the following values.
    const string path = "C:\\detected_markers_6x6_250.png";

    using (var image = Image("markers_6x6_250.png", ImreadModes.GrayScale))
    using (var outputImage = image.CvtColor(ColorConversionCodes.GRAY2RGB))
    using (var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250))
    using (var param = DetectorParameters.Create())
    {
        CvAruco.DetectMarkers(image, dict, out var corners, out var ids, param, out var rejectedImgPoints);

        // Accepted markers in red; rejected candidates in blue.
        CvAruco.DrawDetectedMarkers(outputImage, corners, ids, new Scalar(255, 0, 0));
        CvAruco.DrawDetectedMarkers(outputImage, rejectedImgPoints, null, new Scalar(0, 0, 255));

        if (Debugger.IsAttached)
        {
            Cv2.ImWrite(path, outputImage);
            Process.Start(path);
        }
    }
}
/// <summary>
/// Locates four known ArUco markers in <paramref name="src"/>, draws the
/// detections into detectedMarkers, and warps the quadrilateral spanned by
/// their outer corners into a 1024x1024 normalizedImage.
/// </summary>
public void Run(cv.Mat src)
{
    // The locations of the markers in the image at FilePath.Image.Aruco.
    const int upperLeftMarkerId = 160;
    const int upperRightMarkerId = 268;
    const int lowerRightMarkerId = 176;
    const int lowerLeftMarkerId = 168;

    var detectorParameters = DetectorParameters.Create();
    detectorParameters.CornerRefinementMethod = CornerRefineMethod.Subpix;
    detectorParameters.CornerRefinementWinSize = 9;

    var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict4X4_1000);
    CvAruco.DetectMarkers(src, dictionary, out var corners, out var ids, detectorParameters, out var rejectedPoints);

    detectedMarkers = src.Clone();
    CvAruco.DrawDetectedMarkers(detectedMarkers, corners, ids, cv.Scalar.White);

    // Find the index of each of the four markers in the ids array; the same
    // index into the corners array yields that marker's corner points.
    var indexOfUpperLeft = Array.FindIndex(ids, id => id == upperLeftMarkerId);
    var indexOfUpperRight = Array.FindIndex(ids, id => id == upperRightMarkerId);
    var indexOfLowerRight = Array.FindIndex(ids, id => id == lowerRightMarkerId);
    var indexOfLowerLeft = Array.FindIndex(ids, id => id == lowerLeftMarkerId);

    // Bail out unless all four markers were found.
    if (indexOfUpperLeft < 0 || indexOfUpperRight < 0 || indexOfLowerRight < 0 || indexOfLowerLeft < 0)
    {
        return;
    }

    // Marker corners are stored clockwise beginning with the upper-left
    // corner, so pick the outermost corner of each of the four markers.
    var sourceCoordinates = new List <cv.Point2f>
    {
        corners[indexOfUpperLeft][0],   // upper-left corner of the upper-left marker
        corners[indexOfUpperRight][1],  // upper-right corner of the upper-right marker
        corners[indexOfLowerRight][2],  // lower-right corner of the lower-right marker
        corners[indexOfLowerLeft][3],   // lower-left corner of the lower-left marker
    };
    var destinationCoordinates = new List <cv.Point2f>
    {
        new cv.Point2f(0, 0),
        new cv.Point2f(1024, 0),
        new cv.Point2f(1024, 1024),
        new cv.Point2f(0, 1024),
    };

    var transform = cv.Cv2.GetPerspectiveTransform(sourceCoordinates, destinationCoordinates);
    normalizedImage = new cv.Mat();
    cv.Cv2.WarpPerspective(src, normalizedImage, transform, new cv.Size(1024, 1024));
}
/// <summary>
/// Smoke test: every predefined dictionary name can be resolved to a
/// dictionary instance and released again.
/// </summary>
public void GetPredefinedDictionary()
{
    foreach (PredefinedDictionaryName val in Enum.GetValues(typeof(PredefinedDictionaryName)))
    {
        // The using statement disposes the dictionary, same as the explicit
        // Dispose() call it replaces.
        using (CvAruco.GetPredefinedDictionary(val))
        {
        }
    }
}
/// <summary>
/// Smoke test: DetectMarkers runs on the 6x6_250 sample image without throwing.
/// </summary>
public void DetectMarkers()
{
    using var image = Image("markers_6x6_250.png", ImreadModes.Grayscale);
    using var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);
    using var param = DetectorParameters.Create();

    // The detection results themselves are not inspected here.
    CvAruco.DetectMarkers(image, dict, out _, out _, param, out _);
}
/// <summary>
/// Smoke test: every predefined dictionary name can be resolved to a
/// dictionary instance and released again.
/// </summary>
public void GetPredefinedDictionary()
{
#pragma warning disable CS8605
    foreach (PredefinedDictionaryName val in Enum.GetValues(typeof(PredefinedDictionaryName)))
#pragma warning restore CS8605
    {
        // Disposed at the end of each iteration, same as the explicit
        // Dispose() call it replaces.
        using var dict = CvAruco.GetPredefinedDictionary(val);
    }
}
/// <summary>
/// Smoke test: DetectMarkers runs on the 6x6_250 sample image without throwing.
/// </summary>
public void DetectMarkers()
{
    using (var image = Image("markers_6x6_250.png", ImreadModes.GrayScale))
    using (var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250))
    using (var param = DetectorParameters.Create())
    {
        // Results are not inspected; this only verifies detection completes.
        CvAruco.DetectMarkers(image, dict, out var corners, out var ids, param, out var rejectedImgPoints);
    }
}
//private detectObject Detect
/// <summary>
/// Initializes the camera capture, the stereo frame buffers, the camera
/// parameters, and the ArUco dictionary / detector parameters.
/// </summary>
void Start()
{
    cam = new VideoCapture(source);
    cam.Set(CaptureProperty.ConvertRgb, 0); // This is important!! (sets ConvertRgb to 0)
    cam_frame = new Mat();
    leftRight_frame = new Mat[2];
    setCamParameters(); // Configure the camera parameters.
    ar_dict = CvAruco.GetPredefinedDictionary(dictName);
    detect_param = DetectorParameters.Create();
}
/// <summary>
/// Verifies the stored properties of the predefined 6x6_250 dictionary and
/// that MarkerSize / MaxCorrectionBits are writable.
/// </summary>
public void DictionaryProperties()
{
    var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);

    // 250 markers, each row packing 6*6 marker bits into (6*6 + 7)/8 = 5 bytes.
    Assert.Equal(250, dict.BytesList.Rows);
    Assert.Equal(5, dict.BytesList.Cols); // (6*6 + 7)/8
    Assert.Equal(6, dict.MarkerSize);
    Assert.Equal(5, dict.MaxCorrectionBits);

    // The properties are settable and round-trip their values.
    dict.MarkerSize = 4;
    dict.MaxCorrectionBits = 50;
    Assert.Equal(4, dict.MarkerSize);
    Assert.Equal(50, dict.MaxCorrectionBits);
}
// Use this for initialization
/// <summary>
/// Opens the camera, prepares the ArUco dictionary and detector parameters,
/// and creates the texture that the attached Renderer displays.
/// </summary>
void Start()
{
    cam = new VideoCapture(source);
    cam.Set(CaptureProperty.ConvertRgb, 3);
    cam_frame = new Mat();
    ar_dict = CvAruco.GetPredefinedDictionary(dictName);
    detect_param = DetectorParameters.Create();
    cam_Texture = new Texture2D(
        cam.FrameWidth,       // camera width
        cam.FrameHeight,      // camera height
        TextureFormat.RGB24,  // pixel format
        false                 // mipmap setting
        );
    this.GetComponent <Renderer>().material.mainTexture = cam_Texture;
}
/// <summary>
/// Initializes the detector: target frame rate, OpenCV detector parameters,
/// the marker dictionary, and starts the asynchronous detection loop.
/// </summary>
protected virtual void Init()
{
    // For mobile devices, which by default have their target frame rate set to 30.
    Application.targetFrameRate = 60;

    // Create OpenCV detector parameters.
    detectorParameters = DetectorParameters.Create();
    detectorParameters.DoCornerRefinement = doCornerRefinement;

    // Apply different detection parameters.
    AssignDetectorParameterStructValues();

    dictionary = CvAruco.GetPredefinedDictionary(markerDictionaryType);
    timeCount = markerDetectorPauseTime;

    // Start the marker thread.
    DetectMarkerAsync();
}
/// <summary>
/// Renders the first 20 markers of the 6x6_250 dictionary into a 4x5 grid
/// image and, when a debugger is attached, saves and opens it.
/// </summary>
public void DrawMarker()
{
    const int markerSidePixels = 128;
    const int columns = 4;
    const int rows = 5;
    const int margin = 20;

    // If you want to save markers image, you must change the following values.
    const string path = "C:\\markers_6x6_250.png";

    int width = columns * markerSidePixels + margin * (columns + 1);
    int height = rows * markerSidePixels + margin * (rows + 1);
    var id = 0;
    Rect roi = new Rect(0, 0, markerSidePixels, markerSidePixels);

    // Fix: the dictionary is loop-invariant; create (and dispose) it once
    // instead of once per marker cell as the original did.
    using (var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250))
    using (var outputImage = new Mat(new Size(width, height), MatType.CV_8UC1, Scalar.White))
    {
        for (var y = 0; y < rows; y++)
        {
            roi.Top = y * markerSidePixels + margin * (y + 1);
            for (var x = 0; x < columns; x++)
            {
                roi.Left = x * markerSidePixels + margin * (x + 1);
                using (var roiMat = new Mat(outputImage, roi))
                using (var markerImage = new Mat())
                {
                    CvAruco.DrawMarker(dict, id++, markerSidePixels, markerImage, 1);
                    markerImage.CopyTo(roiMat);
                }
            }
        }

        if (Debugger.IsAttached)
        {
            Cv2.ImWrite(path, outputImage);
            Process.Start(path);
        }
    }
}
/// <summary>
/// Detects markers in the sample image and estimates a pose for each with an
/// identity camera matrix and zero distortion, then checks the output sizes
/// (20 values for rvec/tvec, 4 object points).
/// </summary>
public void EstimatePoseSingleMarkers()
{
    using var image = Image("markers_6x6_250.png", ImreadModes.Grayscale);
    using var dict = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);
    // Fix: the detector parameters are disposable and were previously leaked;
    // sibling tests in this file dispose them.
    using var param = DetectorParameters.Create();

    CvAruco.DetectMarkers(image, dict, out var corners, out _, param, out _);

    using var cameraMatrix = Mat.Eye(3, 3, MatType.CV_64FC1);
    using var distCoeffs = Mat.Zeros(4, 1, MatType.CV_64FC1);
    using var rvec = new Mat();
    using var tvec = new Mat();
    using var objPoints = new Mat();
    CvAruco.EstimatePoseSingleMarkers(corners, 6, cameraMatrix, distCoeffs, rvec, tvec, objPoints);

    Assert.Equal(20, rvec.Total());
    Assert.Equal(20, tvec.Total());
    Assert.Equal(4, objPoints.Total());
}
/// <summary>
/// Finds ARUCO markers (Dict6X6_250) in an image and returns them ordered by
/// contour area, largest first, with corner X coordinates mirrored back into
/// the un-flipped Kinect color-image coordinate space.
/// </summary>
/// <param name="colorCv">Color image, 3 or 4 channels; not modified.</param>
/// <returns>Detected markers sorted by descending area.</returns>
public static List <Marker> FindAruco(Mat colorCv)
{
    Mat m = new Mat();
    colorCv.CopyTo(m);
    if (colorCv.Channels() == 4)
    {
        // Fix: dispose the BGRA copy before re-assigning m — the original
        // leaked the previous Mat here.
        Mat bgr = m.CvtColor(ColorConversionCodes.BGRA2BGR);
        m.Dispose();
        m = bgr;
    }

    List <Marker> markers = new List <Marker>();

    // Kinect color image is flipped, correct before ARUCO detection.
    // Fix: all intermediate Mats and the dictionary/parameters are disposed
    // (the original leaked them all).
    using (m)
    using (var flipped = m.Flip(FlipMode.Y))
    using (var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250))
    using (var parms = DetectorParameters.Create())
    {
        Point2f[][] points;
        Point2f[][] rejPoints;
        int[] ids;
        CvAruco.DetectMarkers(flipped, dictionary, out points, out ids, parms, out rejPoints);

        for (int i = 0; i < ids.Length; i++)
        {
            var id = ids[i];
            var arucoMarkerCorners = points[i];

            // Mirror X back into the original (un-flipped) image coordinates.
            for (int j = 0; j < arucoMarkerCorners.Length; j++)
            {
                arucoMarkerCorners[j].X = KinectSettings.COLOR_WIDTH - arucoMarkerCorners[j].X - 1;
            }

            var area = Cv2.ContourArea(arucoMarkerCorners);
            markers.Add(new Marker() { Id = id, Points = arucoMarkerCorners, Area = area });
        }
    }

    return markers.OrderByDescending(ma => ma.Area).ToList();
}
/// <summary>
/// Detects ArUco markers (Dict6X6_250) in a BGR image, estimates a pose for
/// each detected marker against an approximated pinhole camera, and stores
/// the resulting 4x4 transforms in markerTransforms.
/// </summary>
/// <param name="mat">The BGR image in which to detect markers.</param>
/// <param name="width">Image width in pixels, used to approximate the camera intrinsics.</param>
/// <param name="height">Image height in pixels, used to approximate the camera intrinsics.</param>
/// <returns>The ids of the detected markers, in detection order.</returns>
public List <int> Detect(Mat mat, int width, int height)
{
    List <int> result = new List <int> ();
    markerTransforms.Clear();

    // Create default parameters for detection.
    DetectorParameters detectorParameters = DetectorParameters.Create();

    // Dictionary holds the set of all available markers.
    Dictionary dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);

    // Variables to hold results.
    Point2f[][] corners;
    int[] ids;
    Point2f[][] rejectedImgPoints;

    // Convert image to grayscale.
    Mat grayMat = new Mat();
    Cv2.CvtColor(mat, grayMat, ColorConversionCodes.BGR2GRAY);

    // Detect markers.
    CvAruco.DetectMarkers(grayMat, dictionary, out corners, out ids, detectorParameters, out rejectedImgPoints);
    // CvAruco.DrawDetectedMarkers (mat, corners, ids);

    // Model each marker as a unit square centered on the origin, listed in
    // the same order the detector reports corners.
    float markerSizeInMeters = 1f;
    Point3f[] markerPoints = new Point3f[]
    {
        new Point3f(-markerSizeInMeters / 2f, markerSizeInMeters / 2f, 0f),
        new Point3f(markerSizeInMeters / 2f, markerSizeInMeters / 2f, 0f),
        new Point3f(markerSizeInMeters / 2f, -markerSizeInMeters / 2f, 0f),
        new Point3f(-markerSizeInMeters / 2f, -markerSizeInMeters / 2f, 0f)
    };

    // Approximate pinhole intrinsics: focal length = max(width, height),
    // principal point at the image center.
    double max_wh = (double)Math.Max(width, height);
    double fx = max_wh;
    double fy = max_wh;
    double cx = width / 2.0d;
    double cy = height / 2.0d;
    // Fix: this declaration appeared commented out in the source even though
    // SolvePnP below requires it; restored.
    double[,] cameraMatrix = new double[3, 3] { { fx, 0d, cx }, { 0d, fy, cy }, { 0d, 0d, 1d } };
    double[] distCoeffs = new double[4] { 0d, 0d, 0d, 0d };
    double[] rvec = new double[3] { 0d, 0d, 0d };
    double[] tvec = new double[3] { 0d, 0d, 0d };
    double[,] rotMat = new double[3, 3] { { 0d, 0d, 0d }, { 0d, 0d, 0d }, { 0d, 0d, 0d } };

    for (int i = 0; i < ids.Length; i++)
    {
        Cv2.SolvePnP(markerPoints, corners[i], cameraMatrix, distCoeffs, out rvec, out tvec, false, SolvePnPFlags.Iterative);
        // CvAruco.DrawAxis(mat, cameraMatrix, distCoeffs, rvec, tvec, 1.0f);
        Cv2.Rodrigues(rvec, out rotMat);

        // Compose a 4x4 [R|t] transform for this marker.
        Matrix4x4 matrix = new Matrix4x4();
        matrix.SetRow(0, new Vector4((float)rotMat[0, 0], (float)rotMat[0, 1], (float)rotMat[0, 2], (float)tvec[0]));
        matrix.SetRow(1, new Vector4((float)rotMat[1, 0], (float)rotMat[1, 1], (float)rotMat[1, 2], (float)tvec[1]));
        matrix.SetRow(2, new Vector4((float)rotMat[2, 0], (float)rotMat[2, 1], (float)rotMat[2, 2], (float)tvec[2]));
        matrix.SetRow(3, new Vector4(0f, 0f, 0f, 1f));

        result.Add(ids[i]);
        markerTransforms.Add(matrix);
    }

    return result;
}
/// <summary>
/// Watches two cameras for ArUco markers (Dict6X6_250); when markers that
/// were visible disappear from both cameras at the same time, prints
/// "Hello world!" once per touch. Press 'q' in a window to quit.
/// </summary>
static void Main(string[] args)
{
    // NOTE: several large experiments that existed only as commented-out code
    // (Canny edge demo, face detection, camera calibration, SIFT matching,
    // stereo disparity) were removed; recover them from version control if
    // they are ever needed again.
    using (var video0 = new VideoCapture(0))
    using (var video1 = new VideoCapture(1))
    using (var window0 = new Window("capture0"))
    using (var window1 = new Window("capture1"))
    {
        var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);
        var parameters = DetectorParameters.Create();
        var frames = new List <Mat> { new Mat(), new Mat() };
        var videos = new List <VideoCapture> { video0, video1 };
        var windows = new List <Window> { window0, window1 };
        Point2f[][] corners;
        int[] ids;
        Point2f[][] rejectedImgPoints;

        // Per-camera state: whether a marker was visible on the previous
        // frame, and whether a "touch" (visible -> hidden) was observed.
        var wasFoundList = new List <bool> { false, false };
        var isTouchedList = new List <bool> { false, false };
        var wasTouched = false;

        while (true)
        {
            for (int i = 0; i < 2; i++)
            {
                videos[i].Read(frames[i]);
                CvAruco.DetectMarkers(frames[i], dictionary, out corners, out ids, parameters, out rejectedImgPoints);

                // Touched = marker was visible last frame and is hidden now.
                isTouchedList[i] = wasFoundList[i] && !(ids.Length > 0);
                if (ids.Length > 0)
                {
                    wasFoundList[i] = true;
                    CvAruco.DrawDetectedMarkers(frames[i], corners, ids);
                }
                else
                {
                    wasFoundList[i] = false;
                    isTouchedList[i] = true;
                }
                windows[i].ShowImage(frames[i]);
            }

            // Fire once when both cameras report "touched" simultaneously.
            if (!isTouchedList.Contains(false))
            {
                if (!wasTouched)
                {
                    Console.WriteLine("Hello world!");
                    for (int i = 0; i < isTouchedList.Count; i++)
                    {
                        isTouchedList[i] = false;
                    }
                }
                wasTouched = true;
            }
            else
            {
                wasTouched = false;
            }

            var key = Cv2.WaitKey(1);
            if (key == 'q')
            {
                break;
            }
        }
    }
}
/// <summary>
/// Watches two cameras for ArUco markers (Dict6X6_250); when the "touched"
/// state is reported by both cameras at once, prints "Hello world!" once per
/// touch. Press 'q' in a window to quit.
/// </summary>
/// <param name="action">
/// NOTE(review): never invoked — the touch handler prints to the console
/// instead. Presumably action() was meant to be called on touch; confirm
/// against callers.
/// </param>
public void DetectTouchByTwoCamera(Action action)
{
    using (var video0 = new VideoCapture(0))
    using (var video1 = new VideoCapture(1))
    using (var window0 = new Window("capture0"))
    using (var window1 = new Window("capture1"))
    {
        var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250);
        var parameters = DetectorParameters.Create();
        var frames = new List <Mat> { new Mat(), new Mat() };
        var videos = new List <VideoCapture>();
        var windows = new List <Window>();
        Point2f[][] corners;
        int[] ids;
        int[] previousIds; // NOTE(review): declared but never used.
        Point2f[][] rejectedImgPoints;
        videos.Add(video0);
        videos.Add(video1);
        windows.Add(window0);
        windows.Add(window1);
        // Per-camera state: whether a marker was visible on the previous
        // frame, and whether a "touch" was observed this frame.
        var wasFoundList = new List <bool> { false, false };
        var isTouchedList = new List <bool> { false, false };
        var wasTouched = false;
        while (true)
        {
            for (int i = 0; i < 2; i++)
            {
                videos[i].Read(frames[i]);
                CvAruco.DetectMarkers(frames[i], dictionary, out corners, out ids, parameters, out rejectedImgPoints);
                // Touched = marker was visible last frame and is hidden now.
                isTouchedList[i] = wasFoundList[i] && !(ids.Length > 0);
                if (ids.Length > 0)
                {
                    wasFoundList[i] = true;
                    CvAruco.DrawDetectedMarkers(frames[i], corners, ids);
                }
                else
                {
                    wasFoundList[i] = false;
                    // NOTE(review): this overwrites the transition check above and
                    // marks "touched" whenever no marker is visible, even if one
                    // was never seen — confirm whether that is intended.
                    isTouchedList[i] = true;
                }
                windows[i].ShowImage(frames[i]);
            }
            // Fire once when both cameras report "touched" simultaneously.
            if (!isTouchedList.Contains(false))
            {
                if (!wasTouched)
                {
                    Console.WriteLine("Hello world!");
                    for (int i = 0; i < isTouchedList.Count; i++)
                    {
                        isTouchedList[i] = false;
                    }
                }
                wasTouched = true;
            }
            else
            {
                wasTouched = false;
            }
            var key = Cv2.WaitKey(1);
            if (key == 'q')
            {
                break;
            }
        }
    }
}
/// <summary>
/// Generates six printable registration pages ("Side1".."Side6"), each with a
/// 2x2 grid of 6x6_250 ArUco markers with consecutive ids, a 1 cm center
/// cross-hair, and a side label, written as PNGs into the directory of
/// <paramref name="path"/>.
/// </summary>
/// <param name="path">A file path whose directory receives the output PNGs.</param>
/// <param name="cmWidth">Physical page width (and height) in centimeters.</param>
public static void BuildRegistrationCube(string path, double cmWidth = 10.4)
{
    var definition = CoordinateDefinition.Cube(11 * 2.5, cmWidth);
    var dir = Path.GetDirectoryName(path);
    var sides = new Dictionary <string, int>() //side, starting index id
    {
        { "Side1", 0 },
        { "Side2", 4 },
        { "Side3", 8 },
        { "Side4", 12 },
        { "Side5", 16 },
        { "Side6", 20 },
    };
    var pixelsPerCM = 300 / 2.54; // print resolution: 300 dpi
    // 2 squares wide, 2 squares high; margins computed from the page size.
    var paperPxWidth = pixelsPerCM * cmWidth;
    var paperPxHeight = pixelsPerCM * cmWidth;
    var markerPxWidth = (int)(pixelsPerCM * 4.5 / 10.4 * cmWidth);
    var marginWidth = (int)((paperPxWidth - 2.0 * markerPxWidth) / 4);
    var marginHeight = marginWidth;
    var rows = 2;
    var columns = 2;
    var id = 0;

    // Fix: the dictionary is loop-invariant; create (and dispose) it once
    // instead of once per marker cell as the original did.
    using (var dict2 = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict6X6_250))
    {
        foreach (var side in sides)
        {
            Rect roi = new Rect(0, 0, markerPxWidth, markerPxWidth);
            using (var outputImage = new Mat(new Size(paperPxWidth, paperPxHeight), MatType.CV_8UC1, Scalar.White))
            {
                for (var y = 0; y < rows; y++)
                {
                    roi.Top = y * markerPxWidth + marginHeight * (y * 2 + 1);
                    for (var x = 0; x < columns; x++)
                    {
                        roi.Left = x * markerPxWidth + marginWidth * (x * 2 + 1);
                        using (var roiMat = new Mat(outputImage, roi))
                        using (var markerImage = new Mat())
                        {
                            CvAruco.DrawMarker(dict2, id++, markerPxWidth, markerImage, 1);
                            markerImage.CopyTo(roiMat);
                        }
                    }
                }

                // Draw a 1 cm cross-hair at the page center.
                var crossHairWidth = 1 * pixelsPerCM; // 1 cm
                var crossHairColor = new Scalar(25, 25, 25);
                Cv2.Line(outputImage, new Point((float)paperPxWidth / 2, (float)paperPxHeight / 2), new Point((float)paperPxWidth / 2, (float)(paperPxHeight / 2 - crossHairWidth)), crossHairColor);
                Cv2.Line(outputImage, new Point((float)paperPxWidth / 2, (float)paperPxHeight / 2), new Point((float)paperPxWidth / 2, (float)(paperPxHeight / 2 + crossHairWidth)), crossHairColor);
                Cv2.Line(outputImage, new Point((float)paperPxWidth / 2, (float)paperPxHeight / 2), new Point((float)(paperPxWidth / 2 - crossHairWidth), (float)(paperPxHeight / 2)), crossHairColor);
                Cv2.Line(outputImage, new Point((float)paperPxWidth / 2, (float)paperPxHeight / 2), new Point((float)(paperPxWidth / 2 + crossHairWidth), (float)(paperPxHeight / 2)), crossHairColor);

                // Label the page with the side name.
                Cv2.PutText(outputImage, side.Key, new Point((float)paperPxWidth / 2 - marginWidth / 1.3, (float)paperPxHeight), HersheyFonts.HersheyPlain, 1.5 / 10.4 * cmWidth, new Scalar(25, 25, 25), 1);

                path = Path.Combine(dir, side.Key + ".png");
                Cv2.ImWrite(path, outputImage);
            }
        }
    }
}