/// <summary>
/// Saves the image to the specified file. The image format is chosen based on the filename extension (see cvLoadImage).
/// Only 8-bit single-channel or 3-channel (with 'BGR' channel order) images can be saved using this function.
/// If the format, depth or channel order is different, use cvCvtScale and cvCvtColor to convert the image before saving,
/// or use the universal cvSave to save the image to XML or YAML format.
/// </summary>
/// <param name="filename">The name of the file to be saved to</param>
/// <param name="image">The image to be saved</param>
/// <param name="parameters">The optional encoding parameters, given as flag/value integer pairs</param>
/// <returns>true if success</returns>
public static bool Imwrite(String filename, IInputArray image, params int[] parameters)
{
   using (Util.VectorOfInt vec = new Util.VectorOfInt())
   {
      if (parameters.Length > 0)
         vec.Push(parameters);
      using (CvString s = new CvString(filename))
      using (InputArray iaImage = image.GetInputArray())
      {
#if !(__IOS__ || __ANDROID__ || NETFX_CORE)
         bool containsUnicode = (s.Length != filename.Length);
         if (containsUnicode &&
             (Emgu.Util.Platform.OperationSystem != OS.MacOSX) &&
             (Emgu.Util.Platform.OperationSystem != OS.Linux))
         {
            //Handle unicode file names on the Windows platform.
            //Workaround for the Open CV tickets:
            //https://github.com/Itseez/opencv/issues/4292
            //https://github.com/Itseez/opencv/issues/4866
            System.IO.FileInfo fi = new System.IO.FileInfo(filename);
            using (VectorOfByte vb = new VectorOfByte())
            {
               CvInvoke.Imencode(fi.Extension, image, vb, parameters);
               byte[] arr = vb.ToArray();
               System.IO.File.WriteAllBytes(filename, arr);
               return true;
            }
         }
         else
#endif
            return cveImwrite(s, iaImage, vec);
      }
   }
}
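// A minimal usage sketch for Imwrite above, assuming the Emgu.CV, Emgu.CV.CvEnum and
// Emgu.CV.Structure namespaces are imported. The file name "sample.png" and the
// PngCompression level are illustrative choices, not part of the API above.
public static void ImwriteUsageSketch()
{
   using (Mat img = new Mat(480, 640, DepthType.Cv8U, 3))
   {
      img.SetTo(new MCvScalar(255, 0, 0)); //solid blue, since the channel order is BGR
      //Encoding parameters are passed as flag/value integer pairs
      bool success = Imwrite("sample.png", img, (int)CvEnum.ImwriteFlags.PngCompression, 3);
      if (!success)
         throw new System.IO.IOException("Failed to save sample.png");
   }
}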
/// <summary>
/// Decode an image stored in the buffer
/// </summary>
/// <param name="buf">The buffer</param>
/// <param name="loadType">The image loading type</param>
/// <param name="dst">The output placeholder for the decoded matrix.</param>
public static void Imdecode(byte[] buf, CvEnum.LoadImageType loadType, Mat dst)
{
   using (VectorOfByte vb = new VectorOfByte(buf))
   {
      Imdecode(vb, loadType, dst);
   }
}
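// A minimal usage sketch for Imdecode above: reads an encoded image file into a raw
// byte buffer, then decodes it into a Mat. The path "photo.jpg" is an illustrative
// assumption; assumes the System.IO and Emgu.CV namespaces are imported.
public static Mat ImdecodeUsageSketch()
{
   byte[] buffer = System.IO.File.ReadAllBytes("photo.jpg");
   Mat decoded = new Mat();
   //Decode the buffer as a color image
   Imdecode(buffer, CvEnum.LoadImageType.Color, decoded);
   return decoded;
}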
public void TestMatToFileStorage()
{
   //Create a matrix m with random values
   Mat m = new Mat(120, 240, DepthType.Cv8U, 1);
   using (ScalarArray low = new ScalarArray(0))
   using (ScalarArray high = new ScalarArray(255))
      CvInvoke.Randu(m, low, high);

   //Convert the random matrix m to yml format; useful for matrices that hold values such as calibration or homography data.
   String mStr;
   using (FileStorage fs = new FileStorage(".yml", FileStorage.Mode.Write | FileStorage.Mode.Memory))
   {
      fs.Write(m, "m");
      mStr = fs.ReleaseAndGetString();
   }

   //Treat the Mat as image data and encode it to png format.
   using (VectorOfByte bytes = new VectorOfByte())
   {
      CvInvoke.Imencode(".png", m, bytes);
      byte[] rawData = bytes.ToArray();
   }
}
/// <summary>
/// Computes an optimal affine transformation between two 3D point sets.
/// </summary>
/// <param name="src">First input 3D point set.</param>
/// <param name="dst">Second input 3D point set.</param>
/// <param name="estimate">Output 3D affine transformation matrix.</param>
/// <param name="inliers">Output vector indicating which points are inliers.</param>
/// <param name="ransacThreshold">Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier.</param>
/// <param name="confidence">Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.</param>
/// <returns>The result code of the native affine estimation</returns>
public static int EstimateAffine3D(MCvPoint3D32f[] src, MCvPoint3D32f[] dst, out Matrix<double> estimate, out Byte[] inliers, double ransacThreshold, double confidence)
{
   GCHandle srcHandle = GCHandle.Alloc(src, GCHandleType.Pinned);
   GCHandle dstHandle = GCHandle.Alloc(dst, GCHandleType.Pinned);
   int result;
   estimate = new Matrix<double>(3, 4);

   //The element size must match the 32-bit point type used for src and dst
   int sizeOfPoint3D32f = Toolbox.SizeOf<MCvPoint3D32f>();

   using (Matrix<float> srcMat = new Matrix<float>(1, src.Length, 3, srcHandle.AddrOfPinnedObject(), sizeOfPoint3D32f * src.Length))
   using (Matrix<float> dstMat = new Matrix<float>(1, dst.Length, 3, dstHandle.AddrOfPinnedObject(), sizeOfPoint3D32f * dst.Length))
   using (Util.VectorOfByte vectorOfByte = new Util.VectorOfByte())
   {
      result = EstimateAffine3D(srcMat, dstMat, estimate, vectorOfByte, ransacThreshold, confidence);
      inliers = vectorOfByte.ToArray();
   }

   srcHandle.Free();
   dstHandle.Free();

   return result;
}
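// A minimal usage sketch for EstimateAffine3D above: builds a second point set by
// translating the first, then recovers the transformation. All point values are
// illustrative.
public static void EstimateAffine3DUsageSketch()
{
   MCvPoint3D32f[] src = new MCvPoint3D32f[]
   {
      new MCvPoint3D32f(0, 0, 0),
      new MCvPoint3D32f(1, 0, 0),
      new MCvPoint3D32f(0, 1, 0),
      new MCvPoint3D32f(0, 0, 1),
      new MCvPoint3D32f(1, 1, 1)
   };
   MCvPoint3D32f[] dst = new MCvPoint3D32f[src.Length];
   for (int i = 0; i < src.Length; i++)
      dst[i] = new MCvPoint3D32f(src[i].X + 2, src[i].Y - 1, src[i].Z + 3); //pure translation
   Matrix<double> estimate;
   byte[] inliers;
   int found = EstimateAffine3D(src, dst, out estimate, out inliers, 3.0, 0.99);
   //On success the 3x4 matrix 'estimate' holds the affine transform [A|t];
   //inliers[i] != 0 marks point i as an inlier.
}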
/// <summary>
/// Computes an optimal affine transformation between two 3D point sets.
/// </summary>
/// <param name="src">First input 3D point set.</param>
/// <param name="dst">Second input 3D point set.</param>
/// <param name="estimate">Output 3D affine transformation matrix.</param>
/// <param name="inliers">Output vector indicating which points are inliers.</param>
/// <param name="ransacThreshold">Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier.</param>
/// <param name="confidence">Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.</param>
/// <returns>The result code of the native affine estimation</returns>
public static int CvEstimateAffine3D(MCvPoint3D32f[] src, MCvPoint3D32f[] dst, out Matrix<double> estimate, out Byte[] inliers, double ransacThreshold, double confidence)
{
   GCHandle srcHandle = GCHandle.Alloc(src, GCHandleType.Pinned);
   GCHandle dstHandle = GCHandle.Alloc(dst, GCHandleType.Pinned);
   int result;
   estimate = new Matrix<double>(3, 4);

   using (Util.Mat affineEstimate = new Util.Mat())
   using (Matrix<float> srcMat = new Matrix<float>(1, src.Length, 3, srcHandle.AddrOfPinnedObject(), Marshal.SizeOf(typeof(MCvPoint3D32f)) * src.Length))
   using (Matrix<float> dstMat = new Matrix<float>(1, dst.Length, 3, dstHandle.AddrOfPinnedObject(), Marshal.SizeOf(typeof(MCvPoint3D32f)) * dst.Length))
   using (Util.VectorOfByte vectorOfByte = new Util.VectorOfByte())
   {
      result = _CvEstimateAffine3D(srcMat, dstMat, affineEstimate, vectorOfByte, ransacThreshold, confidence);
      inliers = vectorOfByte.ToArray();
      CvInvoke.cvMatCopyToCvArr(affineEstimate, estimate);
   }

   srcHandle.Free();
   dstHandle.Free();

   return result;
}
/*
#region Kalman Filter
/// <summary>
/// Allocates CvKalman and all its matrices and initializes them.
/// </summary>
/// <param name="dynamParams">dimensionality of the state vector</param>
/// <param name="measureParams">dimensionality of the measurement vector</param>
/// <param name="controlParams">dimensionality of the control vector</param>
/// <returns>Pointer to the created Kalman filter</returns>
[DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
public static extern IntPtr cvCreateKalman(int dynamParams, int measureParams, int controlParams);

/// <summary>
/// Adjusts the stochastic model state on the basis of the given measurement of the model state.
/// The function stores the adjusted state at kalman->state_post and returns it on output.
/// </summary>
/// <param name="kalman">Pointer to the structure to be updated</param>
/// <param name="measurement">Pointer to the structure CvMat containing the measurement vector</param>
/// <returns>The adjusted state, stored at kalman->state_post</returns>
[DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
public static extern IntPtr cvKalmanCorrect(ref MCvKalman kalman, IntPtr measurement);

/// <summary>
/// Estimates the subsequent stochastic model state from its current state and stores it at kalman->state_pre.
/// The function returns the estimated state.
/// </summary>
/// <param name="kalman">Kalman filter state</param>
/// <param name="control">Control vector (uk), should be NULL iff there is no external control (controlParams=0)</param>
/// <returns>The estimated state</returns>
[DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
public static extern IntPtr cvKalmanPredict(ref MCvKalman kalman, IntPtr control);

/// <summary>
/// Releases the structure CvKalman and all underlying matrices
/// </summary>
/// <param name="kalman">reference of the pointer to the Kalman filter structure</param>
[DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
public static extern void cvReleaseKalman(ref IntPtr kalman);
#endregion
*/

#region optical flow
/// <summary>
/// Calculates optical flow for a sparse feature set using the iterative Lucas-Kanade method in pyramids
/// </summary>
/// <param name="prev">First frame, at time t</param>
/// <param name="curr">Second frame, at time t + dt</param>
/// <param name="prevFeatures">Array of points for which the flow needs to be found</param>
/// <param name="winSize">Size of the search window of each pyramid level</param>
/// <param name="level">Maximal pyramid level number. If 0, pyramids are not used (single level); if 1, two levels are used, etc.</param>
/// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped</param>
/// <param name="flags">Flags</param>
/// <param name="currFeatures">Array of 2D points containing the calculated new positions of the input features in the second image</param>
/// <param name="status">Array. Every element of the array is set to 1 if the flow for the corresponding feature has been found, 0 otherwise</param>
/// <param name="trackError">Array of numbers containing the difference between patches around the original and moved points</param>
/// <param name="minEigThreshold">The algorithm calculates the minimum eigenvalue of a 2x2 normal matrix of optical flow equations (this matrix is called a spatial gradient matrix in [Bouguet00]), divided by the number of pixels in a window; if this value is less than minEigThreshold, the corresponding feature is filtered out and its flow is not processed, which allows bad points to be removed and gives a performance boost.</param>
public static void CalcOpticalFlowPyrLK(
   IInputArray prev,
   IInputArray curr,
   PointF[] prevFeatures,
   Size winSize,
   int level,
   MCvTermCriteria criteria,
   out PointF[] currFeatures,
   out Byte[] status,
   out float[] trackError,
   Emgu.CV.CvEnum.LKFlowFlag flags = CvEnum.LKFlowFlag.Default,
   double minEigThreshold = 1.0e-4)
{
   using (Util.VectorOfPointF prevPts = new Util.VectorOfPointF())
   using (Util.VectorOfPointF nextPts = new Util.VectorOfPointF())
   using (Util.VectorOfByte statusVec = new Util.VectorOfByte())
   using (Util.VectorOfFloat errorVec = new Util.VectorOfFloat())
   {
      prevPts.Push(prevFeatures);
      CalcOpticalFlowPyrLK(
         prev, curr, prevPts, nextPts, statusVec, errorVec,
         winSize, level, criteria, flags, minEigThreshold);
      status = statusVec.ToArray();
      trackError = errorVec.ToArray();
      currFeatures = nextPts.ToArray();
   }
}
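// A minimal usage sketch for CalcOpticalFlowPyrLK above: detects good features to
// track in the first frame with GFTTDetector (as used elsewhere in this code base)
// and follows them into the second frame. The window size, pyramid level and
// termination criteria values are illustrative; assumes the Emgu.CV.Features2D
// namespace is imported.
public static void CalcOpticalFlowPyrLKUsageSketch(Mat frame0, Mat frame1)
{
   using (GFTTDetector detector = new GFTTDetector(400))
   {
      MKeyPoint[] keyPoints = detector.Detect(frame0);
      PointF[] prevPts = Array.ConvertAll(keyPoints, kp => kp.Point);
      PointF[] currPts;
      byte[] status;
      float[] trackError;
      CalcOpticalFlowPyrLK(
         frame0, frame1, prevPts,
         new Size(21, 21),              //search window per pyramid level
         3,                             //use pyramid levels 0..3
         new MCvTermCriteria(30, 0.01), //stop after 30 iterations or epsilon 0.01
         out currPts, out status, out trackError);
      //status[i] == 1 means feature i was tracked; currPts[i] is its new position
   }
}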
/// <summary>
/// Get all the text in the image
/// </summary>
/// <returns>All the text in the image</returns>
public string GetText()
{
   using (Util.VectorOfByte bytes = new Util.VectorOfByte())
   {
      OcrInvoke.TessBaseAPIGetUTF8Text(_ptr, bytes);
      return _utf8.GetString(bytes.ToArray()).Replace("\n", Environment.NewLine);
   }
}
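// A minimal usage sketch for GetText above, assuming a Tesseract instance from
// Emgu.CV.OCR. The tessdata path, language and image file name are illustrative
// assumptions, and the exact constructor and OcrEngineMode value may differ
// between Emgu CV versions.
public static void GetTextUsageSketch()
{
   using (Tesseract ocr = new Tesseract("tessdata", "eng", OcrEngineMode.Default))
   using (Mat img = CvInvoke.Imread("page.png", CvEnum.LoadImageType.Color))
   {
      ocr.SetImage(img);
      ocr.Recognize();
      Console.WriteLine(ocr.GetText());
   }
}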
public void SetImage(Emgu.CV.Mat image)
{
   if (image == null)
   {
      this.DisplayImage.Source = null;
      return;
   }

   using (VectorOfByte vb = new VectorOfByte())
   {
      CvInvoke.Imencode(".jpg", image, vb);
      byte[] rawData = vb.ToArray();
      this.DisplayImage.Source = ImageSource.FromStream(() => new MemoryStream(rawData));
   }
}
/// <summary>
/// Encode an image and store the result as a byte vector.
/// </summary>
/// <param name="ext">The image format, given as a file extension (e.g. ".jpg")</param>
/// <param name="image">The image</param>
/// <param name="buf">Output buffer resized to fit the compressed image.</param>
/// <param name="parameters">The encoding parameters, given as ImwriteFlags/value pairs</param>
public static void Imencode(String ext, IInputArray image, VectorOfByte buf, params KeyValuePair<CvEnum.ImwriteFlags, int>[] parameters)
{
   using (CvString extStr = new CvString(ext))
   using (VectorOfInt p = new VectorOfInt())
   {
      PushParameters(p, parameters);
      using (InputArray iaImage = image.GetInputArray())
         cveImencode(extStr, iaImage, buf, p);
   }
}
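// A minimal usage sketch for the Imencode overload above: compresses a Mat to JPEG
// bytes with an explicit quality setting. The quality value of 90 is illustrative;
// assumes the System.Collections.Generic namespace is imported.
public static byte[] ImencodeUsageSketch(Mat image)
{
   using (VectorOfByte buf = new VectorOfByte())
   {
      Imencode(".jpg", image, buf,
         new KeyValuePair<CvEnum.ImwriteFlags, int>(CvEnum.ImwriteFlags.JpegQuality, 90));
      return buf.ToArray(); //the compressed image, e.g. for streaming or storage
   }
}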
/// <summary>
/// Computes an optimal affine transformation between two 3D point sets.
/// </summary>
/// <param name="src">First input 3D point set.</param>
/// <param name="dst">Second input 3D point set.</param>
/// <param name="estimate">Output 3D affine transformation matrix.</param>
/// <param name="inliers">Output vector indicating which points are inliers.</param>
/// <param name="ransacThreshold">Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier.</param>
/// <param name="confidence">Confidence level, between 0 and 1, for the estimated transformation. Anything between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.</param>
/// <returns>The result code of the native affine estimation</returns>
public static int EstimateAffine3D(MCvPoint3D32f[] src, MCvPoint3D32f[] dst, out Matrix<double> estimate, out Byte[] inliers, double ransacThreshold, double confidence)
{
   GCHandle srcHandle = GCHandle.Alloc(src, GCHandleType.Pinned);
   GCHandle dstHandle = GCHandle.Alloc(dst, GCHandleType.Pinned);
   int result;
   estimate = new Matrix<double>(3, 4);

#if NETFX_CORE
   int sizeOfPoint3D32f = Marshal.SizeOf<MCvPoint3D32f>();
#else
   int sizeOfPoint3D32f = Marshal.SizeOf(typeof(MCvPoint3D32f));
#endif

   using (Matrix<float> srcMat = new Matrix<float>(1, src.Length, 3, srcHandle.AddrOfPinnedObject(), sizeOfPoint3D32f * src.Length))
   using (Matrix<float> dstMat = new Matrix<float>(1, dst.Length, 3, dstHandle.AddrOfPinnedObject(), sizeOfPoint3D32f * dst.Length))
   using (Util.VectorOfByte vectorOfByte = new Util.VectorOfByte())
   {
      result = EstimateAffine3D(srcMat, dstMat, estimate, vectorOfByte, ransacThreshold, confidence);
      inliers = vectorOfByte.ToArray();
   }

   srcHandle.Free();
   dstHandle.Free();

   return result;
}
/// <summary>
/// Get all the text in the image
/// </summary>
/// <returns>All the text in the image</returns>
public string GetText()
{
   using (Util.VectorOfByte bytes = new Util.VectorOfByte())
   {
      OcrInvoke.TessBaseAPIGetUTF8Text(_ptr, bytes);
      return UtfByteVectorToString(bytes);
   }
}
/// <summary>
/// Create a debugger proxy for the given vector
/// </summary>
/// <param name="v">The vector of bytes to be visualized in the debugger</param>
public DebuggerProxy(VectorOfByte v)
{
   _v = v;
}
/// <summary>
/// Detect all the characters in the image.
/// </summary>
/// <returns>All the characters in the image</returns>
public Character[] GetCharacters()
{
   using (VectorOfByte textSeq = new VectorOfByte())
   using (VectorOfTesseractResult results = new VectorOfTesseractResult())
   {
      OcrInvoke.TessBaseAPIExtractResult(_ptr, textSeq, results);

      byte[] bytes = textSeq.ToArray();
      TesseractResult[] trs = results.ToArray();

      Character[] res = new Character[trs.Length];
      int idx = 0;
      for (int i = 0; i < trs.Length; i++)
      {
         TesseractResult tr = trs[i];
         res[i].Text = _utf8.GetString(bytes, idx, tr.Length).Replace("\n", Environment.NewLine);
         idx += tr.Length;
         res[i].Cost = tr.Cost;
         //A zero cost marks an entry without a valid bounding region
         res[i].Region = tr.Cost == 0 ? Rectangle.Empty : tr.Region;
      }
      return res;
   }
}
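// A minimal usage sketch for GetCharacters above: prints each recognized character
// with its cost and bounding region. Assumes 'ocr' has already had an image set and
// recognized, as in the GetText sketch earlier.
public static void GetCharactersUsageSketch(Tesseract ocr)
{
   foreach (Tesseract.Character c in ocr.GetCharacters())
   {
      Console.WriteLine("'{0}' cost={1} region={2}", c.Text, c.Cost, c.Region);
   }
}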
private String UtfByteVectorToString(VectorOfByte bytes)
{
#if NETFX_CORE
   byte[] bArr = bytes.ToArray();
   return _utf8.GetString(bArr, 0, bArr.Length).Replace("\n", Environment.NewLine);
#else
   return _utf8.GetString(bytes.ToArray()).Replace("\n", Environment.NewLine);
#endif
}
/// <summary>
/// Make an HTML-formatted string with hOCR markup from the internal data structures.
/// </summary>
/// <param name="pageNumber">The page number; 0-based, but it will appear in the output as 1-based.</param>
/// <returns>An HTML-formatted string with hOCR markup from the internal data structures.</returns>
public String GetHOCRText(int pageNumber = 0)
{
   using (Util.VectorOfByte bytes = new Util.VectorOfByte())
   {
      OcrInvoke.TessBaseAPIGetHOCRText(_ptr, pageNumber, bytes);
      return UtfByteVectorToString(bytes);
   }
}
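// A minimal usage sketch for GetHOCRText above: saves the hOCR markup of the first
// page to disk. The output file name is an illustrative assumption.
public static void GetHOCRTextUsageSketch(Tesseract ocr)
{
   string hocr = ocr.GetHOCRText(0); //pageNumber 0 appears as page 1 in the markup
   System.IO.File.WriteAllText("page.hocr.html", hocr);
}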
/// <summary>
/// Push multiple values from the other vector into this vector
/// </summary>
/// <param name="other">The other vector, from which the values will be pushed to the current vector</param>
public void Push(VectorOfByte other)
{
   VectorOfBytePushVector(_ptr, other);
}
public void ParseTestVideo(string testFile)
{
   //Capture images from the test video
   if (string.IsNullOrWhiteSpace(OutputPath))
      OutputPath = _defaultTestVideoPath;
   List<string> grayImgList = CatchImages(testFile, 0, OutputPath);

   //Compute the Lucas-Kanade optical flow of the SURF features
   Image<Gray, Byte> mask = new Image<Gray, Byte>(grayImgList.First());
   Image<Gray, Byte> grayImage1 = new Image<Gray, Byte>(grayImgList.First());
   Image<Gray, Byte> grayImage2 = new Image<Gray, Byte>(grayImgList.Last());

   EmguType features1 = SURFFeatureDetect(grayImage1, mask);
   VectorOfPointF vp1 = new VectorOfPointF(features1.KeyPoints.ToArray().Select(x => x.Point).ToArray());
   VectorOfPointF vp2 = new VectorOfPointF(vp1.Size);
   VectorOfByte vstatus = new VectorOfByte(vp1.Size);
   VectorOfFloat verr = new VectorOfFloat(vp1.Size);
   Size winsize = new Size(grayImage1.Width, grayImage1.Height);
   int maxLevel = 1; //if 0, winsize is not used
   MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

   try
   {
      //GFTTDetector gd = new GFTTDetector();
      //MKeyPoint[] gdkp = gd.Detect(grayImage1);
      //VectorOfPointF gdvp1 = new VectorOfPointF(gdkp.Select(x => x.Point).ToArray());

      CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, vp1, vp2, vstatus, verr, winsize, maxLevel, criteria);

      Utils.WriteJsonFile(vp1, grayImgList.First() + "p.dat");
      Utils.WriteJsonFile(vp2, grayImgList.Last() + "p.dat");
   }
   catch (Exception e)
   {
      _log.Debug("error: " + e.Message);
   }

   /*
   //Get the SIFT features
   foreach (string grayImgPath in grayImgList)
   {
      //Image<Gray, float> grayImage = new Image<Gray, float>(grayImgPath);
      //List<Feature> features = SiftFeatureDetect(grayImage);
      Image<Gray, Byte> grayImage = new Image<Gray, Byte>(grayImgPath);
      //List<SiftFeature> features = SiftFeatureDetect(image: grayImage, showDetail: true);

      //Write the features to file
      EmguType features = SURFFeatureDetect(grayImage);
      Utils.WriteJsonFile(features, grayImgPath + ".dat");
   }
   */

   _parseSuccess = true;
}
/// <summary>
/// Encode an image and store the result as a byte vector.
/// </summary>
/// <param name="ext">The image format, given as a file extension (e.g. ".jpg")</param>
/// <param name="image">The image</param>
/// <param name="buf">Output buffer resized to fit the compressed image.</param>
/// <param name="parameters">The optional encoding parameters, given as flag/value integer pairs; leave empty for defaults</param>
public static void Imencode(String ext, IInputArray image, VectorOfByte buf, params int[] parameters)
{
   using (CvString extStr = new CvString(ext))
   using (VectorOfInt p = new VectorOfInt())
   {
      if (parameters.Length > 0)
         p.Push(parameters);
      using (InputArray iaImage = image.GetInputArray())
         cveImencode(extStr, iaImage, buf, p);
   }
}
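// A minimal usage sketch for the int[] Imencode overload above: the same JPEG
// encode as the earlier sketch, but passing the flag/value pair as raw integers.
public static byte[] ImencodeRawParametersSketch(Mat image)
{
   using (VectorOfByte buf = new VectorOfByte())
   {
      Imencode(".jpg", image, buf, (int)CvEnum.ImwriteFlags.JpegQuality, 90);
      return buf.ToArray();
   }
}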
/// <summary>
/// Push a value into the standard vector
/// </summary>
/// <param name="value">The value to be pushed to the vector</param>
public void Push(VectorOfByte value)
{
   VectorOfVectorOfBytePush(_ptr, value.Ptr);
}
public void InitOriginalVideo(string initFile)
{
   //Capture images from the original video
   OutputPath = _defaultInitVideoPath;
   List<string> grayImgList = CatchImages(initFile, 0, OutputPath);
   if (grayImgList.Count < 3)
   {
      return;
   }

   //Compute the Lucas-Kanade optical flow of the detected features
   Image<Gray, Byte> mask = new Image<Gray, Byte>(grayImgList.First());
   Image<Gray, Byte> grayImage1 = null;
   Image<Gray, Byte> grayImage2 = null;

   for (int i = 1; i < grayImgList.Count - 1; i++)
   {
      grayImage1 = new Image<Gray, Byte>(grayImgList[i]);
      grayImage2 = new Image<Gray, Byte>(grayImgList[i + 1]);

      EmguType features1 = SURFFeatureDetect(grayImage1, mask);
      Utils.WriteJsonFile(features1, grayImgList[i] + ".dat");

      //VectorOfPointF vp1 = new VectorOfPointF(features1.KeyPoints.ToArray().Select(x => x.Point).ToArray());
      //VectorOfPointF vp2 = new VectorOfPointF(vp1.Size);
      //VectorOfByte vstatus = new VectorOfByte(vp1.Size);
      //VectorOfFloat verr = new VectorOfFloat(vp1.Size);

      Size winsize = new Size(grayImage1.Width, grayImage1.Height);
      int maxLevel = 1; //if 0, winsize is not used
      MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

      try
      {
         if (i % Constants.DETECTIVE_GROUP_COUNT == 1)
         {
            //At the start of each group, detect fresh features to track
            GFTTDetector gd = new GFTTDetector();
            MKeyPoint[] gdkp = gd.Detect(grayImage1, mask);
            VectorOfPointF gdvp1 = new VectorOfPointF(gdkp.Select(x => x.Point).ToArray());
            VectorOfPointF gdvp2 = new VectorOfPointF(gdvp1.Size);
            VectorOfByte vstatus = new VectorOfByte(gdvp1.Size);
            VectorOfFloat verr = new VectorOfFloat(gdvp1.Size);

            CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, gdvp1, gdvp2, vstatus, verr, winsize, maxLevel, criteria);
            Utils.WriteJsonFile(gdvp2, grayImgList[i] + "pp.dat");
         }
         else
         {
            //Otherwise, continue tracking the points carried over from the previous frame
            VectorOfPointF gdvp1 = Utils.ReadJsonFile<VectorOfPointF>(grayImgList[i - 1] + "pp.dat");
            VectorOfPointF gdvp2 = new VectorOfPointF(gdvp1.Size);
            VectorOfByte vstatus = new VectorOfByte(gdvp1.Size);
            VectorOfFloat verr = new VectorOfFloat(gdvp1.Size);

            CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, gdvp1, gdvp2, vstatus, verr, winsize, maxLevel, criteria);
            Utils.WriteJsonFile(gdvp2, grayImgList[i] + "pp.dat");
         }
      }
      catch (Exception e)
      {
         _log.Debug("error: " + e.Message);
      }
   }

   /*
   //Get the SIFT features
   foreach (string grayImgPath in grayImgList)
   {
      Image<Gray, Byte> grayImage = new Image<Gray, Byte>(grayImgPath);
      //List<SiftFeature> features = SiftFeatureDetect(image: grayImage, showDetail: true);
      //Image<Gray, float> grayImage = new Image<Gray, float>(grayImgPath);
      //List<Feature> features = SiftFeatureDetect(grayImage);
      EmguType features = SURFFeatureDetect(grayImage);
      Utils.WriteJsonFile(features, grayImgPath + ".dat");
   }
   */

   _initSuccess = true;
   OutputPath = string.Empty;
}