/// <summary>
/// Copy the src GpuMat to the dst GpuMat asynchronously
/// </summary>
/// <typeparam name="TDepth">The type of depth for the GpuMat</typeparam>
/// <param name="src">The source matrix</param>
/// <param name="dst">The destination matrix. Must be the same size and have the same number of channels</param>
public void Copy<TDepth>(GpuMat<TDepth> src, GpuMat<TDepth> dst) where TDepth : new()
{
   GpuInvoke.streamEnqueueCopy(_ptr, src, dst);
}
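// Usage sketch for the asynchronous copy above. The parameterless Stream constructor and
// WaitForCompletion() are assumptions about the surrounding Stream wrapper; only Copy()
// is taken from the code above, and the GpuMat constructor arguments (rows, cols, channels)
// follow the pattern used elsewhere in this file.
Stream stream = new Stream();
GpuMat<Byte> srcGpu = new GpuMat<Byte>(480, 640, 1);
GpuMat<Byte> dstGpu = new GpuMat<Byte>(480, 640, 1);   // same size, same number of channels
stream.Copy(srcGpu, dstGpu);       // enqueued on the stream, returns without waiting
stream.WaitForCompletion();        // block until the queued copy has finished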
/// <summary>
/// Copies a scalar value to every selected element of the destination GpuMat:
/// GpuMat(I)=value if mask(I)!=0
/// </summary>
/// <param name="value">Fill value</param>
/// <param name="mask">Operation mask, 8-bit single channel GpuMat; specifies elements of the destination array to be changed. Can be null if not used.</param>
/// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
public void SetTo(MCvScalar value, GpuMat<Byte> mask, Stream stream)
{
   GpuInvoke.GpuMatSetTo(_ptr, value, mask, stream);
}
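// Usage sketch for SetTo. Only pixels where the 8-bit mask is non-zero are overwritten,
// and a null Stream makes the call blocking, as documented above. The GpuMat constructor
// arguments (rows, cols, channels) follow the pattern used elsewhere in this file.
GpuMat<Byte> img = new GpuMat<Byte>(480, 640, 1);
GpuMat<Byte> mask = new GpuMat<Byte>(480, 640, 1);
img.SetTo(new MCvScalar(255), mask, null);   // img(I) = 255 wherever mask(I) != 0
img.SetTo(new MCvScalar(0), null, null);     // no mask: fill the whole matrix with 0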
/// <summary>
/// Obtain a GpuMat from the keypoints array
/// </summary>
/// <param name="src">The keypoints array</param>
/// <param name="dst">A GpuMat that represents the keypoints</param>
public void UploadKeypoints(VectorOfKeyPoint src, GpuMat<float> dst)
{
   gpuUploadKeypoints(_ptr, src, dst);
}
/// <summary>
/// Compute the descriptors given the image and the keypoint locations
/// </summary>
/// <param name="image">The image where the descriptors will be computed from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <param name="keyPoints">The keypoints where the descriptors will be computed from. The order of the keypoints might be changed unless the GPU_SURF detector is UP-RIGHT.</param>
/// <returns>The image features found at the keypoint locations</returns>
public GpuMat<float> ComputeDescriptorsRaw(GpuImage<Gray, Byte> image, GpuImage<Gray, byte> mask, GpuMat<float> keyPoints)
{
   GpuMat<float> descriptors = new GpuMat<float>(keyPoints.Size.Height, DescriptorSize, 1);
   gpuSURFDetectorCompute(_ptr, image, mask, keyPoints, descriptors, true);
   return descriptors;
}
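// Workflow sketch: detect keypoints on the GPU, compute their descriptors, then download
// the keypoints. The detector type name (GPU_SURFDetector), its constructor and the
// DetectKeyPointsRaw signature are assumptions inferred from the documentation above;
// ComputeDescriptorsRaw and DownloadKeypoints are the methods shown in this file.
GPU_SURFDetector surf = new GPU_SURFDetector();
GpuImage<Gray, Byte> gpuImg = new GpuImage<Gray, Byte>(new Image<Gray, Byte>("scene.png"));
GpuMat<float> keyPointsGpu = surf.DetectKeyPointsRaw(gpuImg, null);   // mask can be null
GpuMat<float> descriptors = surf.ComputeDescriptorsRaw(gpuImg, null, keyPointsGpu);
VectorOfKeyPoint keyPoints = new VectorOfKeyPoint();
surf.DownloadKeypoints(keyPointsGpu, keyPoints);   // bring the keypoints back to the CPU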
/// <summary>
/// Obtain the keypoints array from GpuMat
/// </summary>
/// <param name="src">The keypoints obtained from DetectKeyPointsRaw</param>
/// <param name="dst">The vector of keypoints</param>
public void DownloadKeypoints(GpuMat<float> src, VectorOfKeyPoint dst)
{
   gpuDownloadKeypoints(_ptr, src, dst);
}
/*
 * /// <summary>
 * /// Add the model descriptors
 * /// </summary>
 * /// <param name="modelDescriptors">The model descriptors</param>
 * public void Add(Matrix<Byte> modelDescriptors)
 * {
 *    if (!(_distanceType == DistanceType.HammingDist))
 *       throw new ArgumentException("Hamming distance type requires model descriptor to be Matrix<Byte>");
 *    gpuBruteForceMatcherAdd(_ptr, modelDescriptors);
 * }
 *
 * /// <summary>
 * /// Add the model descriptors
 * /// </summary>
 * /// <param name="modelDescriptors">The model descriptors</param>
 * public void Add(Matrix<float> modelDescriptors)
 * {
 *    if (!(_distanceType == DistanceType.L2 || _distanceType == DistanceType.L1))
 *       throw new ArgumentException("L1 / L2 distance type requires model descriptor to be Matrix<float>");
 *    gpuBruteForceMatcherAdd(_ptr, modelDescriptors);
 * }*/

/// <summary>
/// For L1 and L2 distance type, find the k nearest neighbours using the brute force matcher.
/// </summary>
/// <param name="queryDescriptors">The query descriptors</param>
/// <param name="modelDescriptors">The model descriptors</param>
/// <param name="modelIdx">The model index. A n x <paramref name="k"/> matrix where n = <paramref name="queryDescriptors"/>.Size.Height</param>
/// <param name="distance">The matrix where the distance values are stored. A n x <paramref name="k"/> matrix where n = <paramref name="queryDescriptors"/>.Size.Height</param>
/// <param name="k">The number of nearest neighbours to be searched</param>
/// <param name="mask">The mask</param>
public void KnnMatch(GpuMat<float> queryDescriptors, GpuMat<float> modelDescriptors, GpuMat<int> modelIdx, GpuMat<float> distance, int k, GpuMat<Byte> mask)
{
   gpuBruteForceMatcherKnnMatch(_ptr, queryDescriptors, modelDescriptors, modelIdx, distance, k, mask);
}
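// Usage sketch for KnnMatch with k = 2 (the usual setup for Lowe's ratio test). The
// matcher, queryDescriptors and modelDescriptors variables are assumed to exist already;
// the result matrices are allocated n x k with n = queryDescriptors.Size.Height, as
// documented above, and a null mask is assumed to mean "no mask".
int k = 2;
GpuMat<int> modelIdx = new GpuMat<int>(queryDescriptors.Size.Height, k, 1);
GpuMat<float> distance = new GpuMat<float>(queryDescriptors.Size.Height, k, 1);
matcher.KnnMatch(queryDescriptors, modelDescriptors, modelIdx, distance, k, null);
// modelIdx and distance can then be downloaded to the CPU to apply the ratio test.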
/// <summary>
/// Create a GpuMat from the specific region of <paramref name="mat"/>. The data is shared between the two GpuMats.
/// </summary>
/// <param name="mat">The matrix where the region is extracted from</param>
/// <param name="rowRange">The row range. Use MCvSlice.WholeSeq for all rows.</param>
/// <param name="colRange">The column range. Use MCvSlice.WholeSeq for all columns.</param>
public GpuMat(GpuMat<TDepth> mat, MCvSlice rowRange, MCvSlice colRange)
{
   _ptr = GpuInvoke.GpuMatGetRegion(mat, rowRange, colRange);
}
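// Usage sketch for the region constructor: take a view on the top half of an existing
// GpuMat without copying. The MCvSlice(start, end) constructor used here is an assumption;
// the region shares its data with the parent, so writes through either are visible in both.
GpuMat<Byte> full = new GpuMat<Byte>(480, 640, 1);
GpuMat<Byte> topHalf = new GpuMat<Byte>(full, new MCvSlice(0, 240), MCvSlice.WholeSeq);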
/// <summary>
/// Obtain the keypoints array from GpuMat
/// </summary>
/// <param name="src">The keypoints obtained from DetectKeyPointsRaw</param>
/// <param name="dst">The vector of keypoints</param>
public void DownloadKeypoints(GpuMat<float> src, VectorOfKeyPoint dst)
{
   GpuInvoke.gpuSURFDownloadKeypoints(_ptr, src, dst);
}
/// <summary>
/// Obtain a GpuMat from the keypoints array
/// </summary>
/// <param name="src">The keypoints array</param>
/// <param name="dst">A GpuMat that represents the keypoints</param>
public void UploadKeypoints(VectorOfKeyPoint src, GpuMat<float> dst)
{
   GpuInvoke.gpuSURFUploadKeypoints(_ptr, src, dst);
}
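// Usage sketch: re-upload previously computed keypoints (e.g. from an earlier detection)
// and download them again using the pair of methods above. The cpuKeyPoints and surf
// variables and the empty GpuMat<float> constructor are assumptions; the native upload
// call is expected to allocate the destination matrix itself.
GpuMat<float> keyPointsGpu = new GpuMat<float>();
surf.UploadKeypoints(cpuKeyPoints, keyPointsGpu);
VectorOfKeyPoint downloaded = new VectorOfKeyPoint();
surf.DownloadKeypoints(keyPointsGpu, downloaded);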