/// <summary>
/// Compute the descriptors given the image and the keypoint locations
/// </summary>
/// <param name="image">The image from which the descriptors will be computed</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <param name="keyPoints">The keypoints where the descriptors will be computed. The order of the keypoints might be changed unless the GPU_SURF detector is UP-RIGHT.</param>
/// <returns>The image features found at the keypoint locations</returns>
public OclMat<float> ComputeDescriptorsRaw(OclImage<Gray, Byte> image, OclImage<Gray, byte> mask, OclMat<float> keyPoints)
{
   OclMat<float> descriptors = new OclMat<float>(keyPoints.Size.Height, DescriptorSize, 1);
   OclInvoke.oclSURFDetectorCompute(_ptr, image, mask, keyPoints, descriptors, true);
   return descriptors;
}
/// <summary>
/// Detect keypoints in the OclImage
/// </summary>
/// <param name="img">The image where keypoints will be detected from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>
/// An OclMat of keypoints with a single row.
/// keypoints.at&lt;float[6]&gt;(1, i) contains the i'th keypoint,
/// format: (x, y, size, response, angle, octave)
/// </returns>
public OclMat<float> DetectKeyPointsRaw(OclImage<Gray, Byte> img, OclImage<Gray, Byte> mask)
{
   OclMat<float> result = new OclMat<float>();
   OclInvoke.oclSURFDetectorDetectKeyPoints(_ptr, img, mask, result);
   return result;
}
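A minimal usage sketch of the raw detect/compute pair follows. It assumes the Emgu.CV.OpenCL namespace and a parameterless OclSURFDetector constructor; only DetectKeyPointsRaw and ComputeDescriptorsRaw come from the code above, everything else (file name, construction) is illustrative.

using System;
using System.Drawing;
using Emgu.CV;
using Emgu.CV.OpenCL; // assumed namespace for the Ocl* types
using Emgu.CV.Structure;

// inside a method:
using (Image<Gray, Byte> cpuImg = new Image<Gray, Byte>("scene.png"))
using (OclImage<Gray, Byte> oclImg = new OclImage<Gray, Byte>(cpuImg))
using (OclSURFDetector surf = new OclSURFDetector()) // construction assumed
using (OclMat<float> keyPoints = surf.DetectKeyPointsRaw(oclImg, null))
using (OclMat<float> descriptors = surf.ComputeDescriptorsRaw(oclImg, null, keyPoints))
{
   // descriptors holds one row per keypoint, DescriptorSize floats each
   Console.WriteLine("Descriptor matrix size: " + descriptors.Size);
}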
/// <summary>
/// Create a clone of this OclImage
/// </summary>
/// <returns>A clone of this OclImage</returns>
public OclImage<TColor, TDepth> Clone()
{
   OclImage<TColor, TDepth> result = new OclImage<TColor, TDepth>(Size);
   OclInvoke.Copy(_ptr, result, IntPtr.Zero);
   return result;
}
/// <summary>
/// Resize the OclImage. The calling OclMat must be an OclMat&lt;Byte&gt;.
/// </summary>
/// <param name="size">The new size</param>
/// <param name="interpolationType">The interpolation type</param>
/// <returns>An OclImage of the new size</returns>
public OclImage<TColor, TDepth> Resize(Size size, CvEnum.INTER interpolationType)
{
   OclImage<TColor, TDepth> result = new OclImage<TColor, TDepth>(size);
   OclInvoke.Resize(_ptr, result, 0, 0, interpolationType);
   return result;
}
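A short sketch combining Clone and Resize (same using directives as the first sketch; the file name is illustrative):

// inside a method:
using (OclImage<Bgr, Byte> src = new OclImage<Bgr, Byte>(new Image<Bgr, Byte>("scene.png")))
using (OclImage<Bgr, Byte> copy = src.Clone())
using (OclImage<Bgr, Byte> half = src.Resize(
   new Size(src.Size.Width / 2, src.Size.Height / 2),
   CvEnum.INTER.CV_INTER_LINEAR))
{
   // copy is an independent device-side duplicate;
   // half is a bilinear half-scale version of src
}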
///<summary>
///Performs a convolution using the specified <paramref name="kernel"/>
///</summary>
///<param name="kernel">The convolution kernel</param>
///<returns>The result of the convolution</returns>
public OclImage<TColor, Byte> Convolution(ConvolutionKernelF kernel)
{
   OclImage<TColor, Byte> result = new OclImage<TColor, Byte>(Size);
   OclInvoke.Filter2D(_ptr, result, kernel, kernel.Center, CvEnum.BORDER_TYPE.REFLECT101);
   return result;
}
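The kernel below is a plain 3x3 Laplacian-style mask; a sketch, assuming ConvolutionKernelF accepts a float[,] as it does for the CPU Image type (same using directives as the first sketch):

// inside a method:
float[,] laplacian = new float[,]
{
   {  0, -1,  0 },
   { -1,  4, -1 },
   {  0, -1,  0 }
};
using (ConvolutionKernelF kernel = new ConvolutionKernelF(laplacian))
using (OclImage<Bgr, Byte> src = new OclImage<Bgr, Byte>(new Image<Bgr, Byte>("scene.png")))
using (OclImage<Bgr, Byte> edges = src.Convolution(kernel))
{
   // edges holds the filtered result; borders are handled with REFLECT101
}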
private static void ConvertColor(IntPtr src, IntPtr dest, Type srcColor, Type destColor, Size size)
{
   try
   {
      // if a direct conversion exists, apply the conversion
      OclInvoke.CvtColor(src, dest, CvToolbox.GetColorCvtCode(srcColor, destColor));
   }
   catch
   {
      try
      {
         //if a direct conversion doesn't exist, apply a two step conversion via Bgr;
         //in this case, we need to wait for the completion of the stream because a temporary
         //local image buffer is used: we don't want the tmp image to be released before the
         //operation is completed.
         using (OclImage<Bgr, TDepth> tmp = new OclImage<Bgr, TDepth>(size))
         {
            OclInvoke.CvtColor(src, tmp.Ptr, CvToolbox.GetColorCvtCode(srcColor, typeof(Bgr)));
            OclInvoke.CvtColor(tmp.Ptr, dest, CvToolbox.GetColorCvtCode(typeof(Bgr), destColor));
         }
      }
      catch
      {
         throw new NotSupportedException(String.Format(
            "Conversion from OclImage<{0}, {1}> to OclImage<{2}, {3}> is not supported by OpenCV",
            srcColor.ToString(), typeof(TDepth).ToString(),
            destColor.ToString(), typeof(TDepth).ToString()));
      }
   }
}
/// <summary>
/// Calculate an optical flow for a sparse feature set.
/// </summary>
/// <param name="frame0">First 8-bit input image (supports both grayscale and color images).</param>
/// <param name="frame1">Second input image of the same size and the same type as <paramref name="frame0"/></param>
/// <param name="points0">
/// Vector of 2D points for which the flow needs to be found. It must be a one-row
/// matrix with 2 channels.
/// </param>
/// <param name="points1">
/// Output vector of 2D points (with single-precision, two-channel floating-point coordinates)
/// containing the calculated new positions of the input features in the second image.
/// </param>
/// <param name="status">
/// Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the
/// flow for the corresponding feature has been found. Otherwise, it is set to 0.
/// </param>
/// <param name="err">
/// Output vector (CV_32FC1 type) that contains the difference between patches around
/// the original and moved points, or the min eigen value if getMinEigenVals is set. It can be
/// null, if not needed.
/// </param>
public void Sparse(OclImage<Gray, byte> frame0, OclImage<Gray, byte> frame1, OclMat<float> points0, out OclMat<float> points1, out OclMat<Byte> status, out OclMat<float> err)
{
   points1 = new OclMat<float>();
   status = new OclMat<byte>();
   err = new OclMat<float>();
   OclInvoke.oclPyrLKOpticalFlowSparse(_ptr, frame0, frame1, points0, points1, status, err);
}
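A sketch of tracking two hand-picked points between frames. The OclPyrLKOpticalFlow construction, the Matrix-based preparation of points0, and an OclMat.Upload method (mirroring the GPU API) are assumptions; the out parameters and the status convention come from the doc comment above.

// inside a method, same using directives as the first sketch:
using (OclImage<Gray, Byte> frame0 = new OclImage<Gray, Byte>(new Image<Gray, Byte>("frame0.png")))
using (OclImage<Gray, Byte> frame1 = new OclImage<Gray, Byte>(new Image<Gray, Byte>("frame1.png")))
using (Matrix<float> cpuPts = new Matrix<float>(1, 2, 2)) // one row, two points, two channels (x, y)
using (OclMat<float> points0 = new OclMat<float>(1, 2, 2))
using (OclPyrLKOpticalFlow flow = new OclPyrLKOpticalFlow()) // construction assumed
{
   cpuPts.Data[0, 0] = 10f; cpuPts.Data[0, 1] = 20f; // point 0
   cpuPts.Data[0, 2] = 30f; cpuPts.Data[0, 3] = 40f; // point 1
   points0.Upload(cpuPts); // Upload assumed to mirror GpuMat.Upload

   OclMat<float> points1;
   OclMat<Byte> status;
   OclMat<float> err;
   flow.Sparse(frame0, frame1, points0, out points1, out status, out err);
   // points1[i] is valid only where the corresponding status element is 1
   points1.Dispose(); status.Dispose(); err.Dispose();
}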
/// <summary>
/// Detect keypoints in the OclImage
/// </summary>
/// <param name="img">The image where keypoints will be detected from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>An array of keypoints</returns>
public MKeyPoint[] DetectKeyPoints(OclImage<Gray, Byte> img, OclImage<Gray, Byte> mask)
{
   using (OclMat<float> tmp = DetectKeyPointsRaw(img, mask))
   using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
   {
      DownloadKeypoints(tmp, kpts);
      return kpts.ToArray();
   }
}
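Continuing the first sketch, the managed variant returns the keypoints as an MKeyPoint array directly:

// surf and oclImg as in the first sketch:
MKeyPoint[] kps = surf.DetectKeyPoints(oclImg, null);
Console.WriteLine("Found " + kps.Length + " keypoints");
if (kps.Length > 0)
   Console.WriteLine("First keypoint at " + kps[0].Point + ", size " + kps[0].Size);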
///<summary> Convert the current OclImage to the specific color and depth </summary>
///<typeparam name="TOtherColor"> The type of color to be converted to </typeparam>
///<typeparam name="TOtherDepth"> The type of pixel depth to be converted to </typeparam>
///<returns>An OclImage of the specific color and depth </returns>
public OclImage<TOtherColor, TOtherDepth> Convert<TOtherColor, TOtherDepth>()
   where TOtherColor : struct, IColor
   where TOtherDepth : new()
{
   OclImage<TOtherColor, TOtherDepth> res = new OclImage<TOtherColor, TOtherDepth>(Size);
   res.ConvertFrom(this);
   return res;
}
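A sketch of the generic conversion; the second call changes both color space and depth in one step (same using directives as the first sketch):

// inside a method:
using (OclImage<Bgr, Byte> bgr = new OclImage<Bgr, Byte>(new Image<Bgr, Byte>("scene.png")))
using (OclImage<Gray, Byte> gray = bgr.Convert<Gray, Byte>())
using (OclImage<Gray, float> grayF = bgr.Convert<Gray, float>())
{
   // gray: color conversion only; grayF: color conversion plus depth change
}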
/// <summary>
/// Performs object detection with an increasing detection window.
/// </summary>
/// <param name="image">The OclImage to search in</param>
/// <param name="hitThreshold">The threshold for the distance between features and the classifying plane.</param>
/// <param name="winStride">Window stride. Must be a multiple of the block stride.</param>
/// <param name="padding">Mock parameter to keep CPU interface compatibility. Must be (0,0).</param>
/// <param name="scale">Coefficient of the detection window increase.</param>
/// <param name="groupThreshold">After detection, some objects could be covered by many rectangles. This coefficient regulates the similarity threshold. 0 means don't perform grouping.</param>
/// <returns>The regions where positives are found</returns>
public Rectangle[] DetectMultiScale(
   OclImage<Gray, Byte> image,
   double hitThreshold,
   Size winStride,
   Size padding,
   double scale,
   int groupThreshold)
{
   return DetectMultiScale(image.Ptr, hitThreshold, winStride, padding, scale, groupThreshold);
}
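A sketch of people detection, assuming the OclHOGDescriptor type mirrors the GPU HOG API (the parameterless constructor, GetDefaultPeopleDetector and SetSVMDetector are assumptions):

// inside a method, same using directives as the first sketch:
using (OclHOGDescriptor hog = new OclHOGDescriptor()) // construction assumed
using (OclImage<Gray, Byte> img = new OclImage<Gray, Byte>(new Image<Gray, Byte>("pedestrians.png")))
{
   hog.SetSVMDetector(OclHOGDescriptor.GetDefaultPeopleDetector()); // assumed helpers
   Rectangle[] hits = hog.DetectMultiScale(img, 0, new Size(8, 8), new Size(0, 0), 1.05, 2);
   // winStride (8, 8) must be a multiple of the block stride; padding must stay (0, 0)
}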
///<summary>
///Split the current image into an array of gray scale images where each element
///in the array represents a single color channel of the original image
///</summary>
///<returns>
///An array of gray scale images where each element
///in the array represents a single color channel of the original image
///</returns>
public new OclImage<Gray, TDepth>[] Split()
{
   OclImage<Gray, TDepth>[] result = new OclImage<Gray, TDepth>[NumberOfChannels];
   Size size = Size;
   for (int i = 0; i < result.Length; i++)
   {
      result[i] = new OclImage<Gray, TDepth>(size);
   }
   SplitInto(result);
   return result;
}
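A sketch of Split; note the caller owns (and must dispose) the returned channel images:

// inside a method, same using directives as the first sketch:
using (OclImage<Bgr, Byte> src = new OclImage<Bgr, Byte>(new Image<Bgr, Byte>("scene.png")))
{
   OclImage<Gray, Byte>[] channels = src.Split();
   try
   {
      // channels[0], channels[1], channels[2] are the blue, green and red planes
   }
   finally
   {
      foreach (OclImage<Gray, Byte> c in channels)
         c.Dispose();
   }
}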
/// <summary>
/// Computes disparity map for the input rectified stereo pair.
/// </summary>
/// <param name="left">The left single-channel, 8-bit image</param>
/// <param name="right">The right image of the same size and the same type</param>
/// <param name="disparity">The disparity map</param>
public void FindStereoCorrespondence(OclImage<Gray, Byte> left, OclImage<Gray, Byte> right, OclImage<Gray, Byte> disparity)
{
   OclInvoke.oclStereoConstantSpaceBPFindStereoCorrespondence(_ptr, left, right, disparity);
}
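A sketch for the stereo matcher; the OclStereoConstantSpaceBP construction is an assumption, and the input pair must already be rectified:

// inside a method, same using directives as the first sketch:
using (OclStereoConstantSpaceBP stereo = new OclStereoConstantSpaceBP()) // construction assumed
using (OclImage<Gray, Byte> left = new OclImage<Gray, Byte>(new Image<Gray, Byte>("left.png")))
using (OclImage<Gray, Byte> right = new OclImage<Gray, Byte>(new Image<Gray, Byte>("right.png")))
using (OclImage<Gray, Byte> disparity = new OclImage<Gray, Byte>(left.Size))
{
   stereo.FindStereoCorrespondence(left, right, disparity);
   // disparity now holds a per-pixel disparity value for the rectified pair
}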
/// <summary>
/// Compute the dense optical flow.
/// </summary>
/// <param name="frame0">Source frame</param>
/// <param name="frame1">Frame to track (with the same size as <paramref name="frame0"/>)</param>
/// <param name="u">Flow horizontal component (along x axis)</param>
/// <param name="v">Flow vertical component (along y axis)</param>
public void Dense(OclImage<Gray, byte> frame0, OclImage<Gray, byte> frame1, OclImage<Gray, float> u, OclImage<Gray, float> v)
{
   OclInvoke.oclOpticalFlowDualTVL1Compute(_ptr, frame0, frame1, u, v);
}
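A sketch for the TV-L1 flow, with u and v allocated at the frame size; the OclOpticalFlowDualTVL1 construction is an assumption:

// inside a method, same using directives as the first sketch:
using (OclOpticalFlowDualTVL1 tvl1 = new OclOpticalFlowDualTVL1()) // construction assumed
using (OclImage<Gray, Byte> f0 = new OclImage<Gray, Byte>(new Image<Gray, Byte>("frame0.png")))
using (OclImage<Gray, Byte> f1 = new OclImage<Gray, Byte>(new Image<Gray, Byte>("frame1.png")))
using (OclImage<Gray, float> u = new OclImage<Gray, float>(f0.Size))
using (OclImage<Gray, float> v = new OclImage<Gray, float>(f0.Size))
{
   tvl1.Dense(f0, f1, u, v);
   // the flow vector at pixel (x, y) is (u[y, x], v[y, x])
}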
/// <summary>
/// Performs object detection with an increasing detection window.
/// </summary>
/// <param name="image">The OclImage to search in</param>
/// <returns>The regions where positives are found</returns>
public Rectangle[] DetectMultiScale(OclImage<Gray, Byte> image)
{
   return DetectMultiScale(image, 0, new Size(0, 0), new Size(0, 0), 1.05, 2);
}
/// <summary>
/// Convert the source image to the current image. If the sizes are different, the current image will be a resized version of the srcImage.
/// </summary>
/// <typeparam name="TSrcColor">The color type of the source image</typeparam>
/// <typeparam name="TSrcDepth">The color depth of the source image</typeparam>
/// <param name="srcImage">The source image</param>
public void ConvertFrom<TSrcColor, TSrcDepth>(OclImage<TSrcColor, TSrcDepth> srcImage)
   where TSrcColor : struct, IColor
   where TSrcDepth : new()
{
   if (!Size.Equals(srcImage.Size))
   {  //if the size of the source image does not match the size of the current image, resize first
      using (OclImage<TSrcColor, TSrcDepth> tmp = srcImage.Resize(Size, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR))
      {
         ConvertFrom(tmp);
         return;
      }
   }

   if (typeof(TColor) == typeof(TSrcColor))
   {
      #region same color
      if (typeof(TDepth) == typeof(TSrcDepth)) //same depth
      {
         OclInvoke.Copy(srcImage.Ptr, Ptr, IntPtr.Zero);
      }
      else //different depth
      {
         if (typeof(TDepth) == typeof(Byte) && typeof(TSrcDepth) != typeof(Byte))
         {
            double[] minVal, maxVal;
            Point[] minLoc, maxLoc;
            srcImage.MinMax(out minVal, out maxVal, out minLoc, out maxLoc);
            double min = minVal[0];
            double max = maxVal[0];
            for (int i = 1; i < minVal.Length; i++)
            {
               min = Math.Min(min, minVal[i]);
               max = Math.Max(max, maxVal[i]);
            }
            double scale = 1.0, shift = 0.0;
            if (max > 255.0 || min < 0)
            {
               scale = (max == min) ? 0.0 : 255.0 / (max - min);
               shift = (scale == 0) ? min : -min * scale;
            }
            OclInvoke.ConvertTo(srcImage.Ptr, Ptr, scale, shift);
         }
         else
         {
            OclInvoke.ConvertTo(srcImage.Ptr, Ptr, 1.0, 0.0);
         }
      }
      #endregion
   }
   else
   {
      #region different color
      if (typeof(TDepth) == typeof(TSrcDepth))
      {  //same depth
         ConvertColor(srcImage.Ptr, Ptr, typeof(TSrcColor), typeof(TColor), Size);
      }
      else
      {  //different depth
         using (OclImage<TSrcColor, TDepth> tmp = srcImage.Convert<TSrcColor, TDepth>()) //convert depth
            ConvertColor(tmp.Ptr, Ptr, typeof(TSrcColor), typeof(TColor), Size);
      }
      #endregion
   }
}
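Because ConvertFrom resizes, rescales depth, and converts color as needed, a single call can adapt an arbitrary source to the target image; a sketch:

// inside a method, same using directives as the first sketch:
using (OclImage<Gray, float> srcF = new OclImage<Gray, float>(new Size(640, 480)))
using (OclImage<Bgr, Byte> dst = new OclImage<Bgr, Byte>(new Size(320, 240)))
{
   // resizes to 320x240, scales the float range into bytes, then converts Gray -> Bgr
   dst.ConvertFrom(srcF);
}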
/// <summary>
/// Compute the dense optical flow.
/// </summary>
/// <param name="frame0">Source frame</param>
/// <param name="frame1">Frame to track (with the same size as <paramref name="frame0"/>)</param>
/// <param name="u">Flow horizontal component (along x axis)</param>
/// <param name="v">Flow vertical component (along y axis)</param>
public void Dense(OclImage<Gray, byte> frame0, OclImage<Gray, byte> frame1, OclImage<Gray, float> u, OclImage<Gray, float> v)
{
   OclInvoke.oclPyrLKOpticalFlowDense(_ptr, frame0, frame1, u, v, IntPtr.Zero);
}
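The dense PyrLK variant is called the same way as the TV-L1 version above. Reusing f0, f1, u and v from that sketch and the flow object from the sparse sketch, the result can also be pulled back to the host; Download is assumed to mirror the GPU API:

// continuing the earlier sketches:
flow.Dense(f0, f1, u, v);
using (Image<Gray, float> uCpu = new Image<Gray, float>(f0.Size))
{
   u.Download(uCpu); // copy the horizontal flow component to host memory
}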