Example #1
 public DebuggerProxy(GpuMat v)
 {
     _v = v;
 }
Example #2
 /// <summary>
 /// Get the GpuMat from the input array
 /// </summary>
 /// <returns>The GpuMat</returns>
 public Cuda.GpuMat GetGpuMat()
 {
     Cuda.GpuMat m = new Cuda.GpuMat();
     CvInvoke.cveInputArrayGetGpuMat(Ptr, m);
     return m;
 }
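A minimal, hedged usage sketch of the round trip: it assumes Emgu CV's IInputArray pattern, where calling GetInputArray() on a GpuMat yields the InputArray wrapper that exposes the GetGpuMat() shown above; the variable names are illustrative only.
 // Sketch only: names are placeholders, not part of the original snippet.
 using (GpuMat device = new GpuMat())
 using (InputArray ia = device.GetInputArray())   // GpuMat implements IInputArray
 using (GpuMat unwrapped = ia.GetGpuMat())
 {
     // unwrapped refers to the same device memory that the InputArray wraps
 }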
Example #3
 /// <summary>
 /// Grabs, decodes and returns the next video frame.
 /// </summary>
 /// <param name="frame">The frame</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 /// <returns>If no frame has been grabbed (there are no more frames in the video file), the method returns false.</returns>
 public bool NextFrame(GpuMat frame, Stream stream = null)
 {
     return CudaInvoke.cudaVideoReaderNextFrame(_ptr, frame, stream);
 }
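A minimal decode loop built on NextFrame; the CudaVideoReader constructor taking a file path and the "video.mp4" name are assumptions for illustration:
 // Sketch only: the file name is a placeholder.
 using (CudaVideoReader reader = new CudaVideoReader("video.mp4"))
 using (GpuMat frame = new GpuMat())
 {
     // stream defaults to null, so each call blocks until a frame is decoded
     while (reader.NextFrame(frame))
     {
         // process the decoded GPU frame here
     }
 }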
Example #4
 /// <summary>
 /// Push a value into the standard vector
 /// </summary>
 /// <param name="value">The value to be pushed to the vector</param>
 public void Push(GpuMat value)
 {
     VectorOfGpuMatPush(_ptr, value.Ptr);
 }
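A minimal sketch of pushing into the wrapper; it assumes this Push belongs to the VectorOfGpuMat class that wraps a native std::vector of GpuMat:
 using (VectorOfGpuMat vec = new VectorOfGpuMat())
 using (GpuMat m = new GpuMat())
 {
     vec.Push(m);   // appends the GpuMat to the underlying native vector
 }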
Example #5
 /// <summary>
 /// Compute the dense optical flow.
 /// </summary>
 /// <param name="frame0">Source frame</param>
 /// <param name="frame1">Frame to track (with the same size as <paramref name="frame0"/>)</param>
 /// <param name="u">Flow horizontal component (along x axis)</param>
 /// <param name="v">Flow vertical component (along y axis)</param>
 /// <param name="error">Optional output error GpuMat; can be null if not needed</param>
 public void Dense(GpuMat frame0, GpuMat frame1, GpuMat u, GpuMat v, GpuMat error = null)
 {
     CudaInvoke.cudaPyrLKOpticalFlowDense(_ptr, frame0, frame1, u, v, error);
 }
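A minimal sketch of a dense call. The flow object (the pyramidal Lucas-Kanade wrapper exposing this Dense method) and the two equally sized, already uploaded frames prev and next are assumptions here:
 // Sketch only: flow, prev and next are assumed to be created elsewhere.
 using (GpuMat u = new GpuMat())
 using (GpuMat v = new GpuMat())
 {
     flow.Dense(prev, next, u, v);   // error output left at its null default
     // u and v now hold the per-pixel horizontal and vertical flow components
 }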
Example #6
 /// <summary>
 /// Calculate an optical flow for a sparse feature set.
 /// </summary>
 /// <param name="frame0">First 8-bit input image (supports both grayscale and color images).</param>
 /// <param name="frame1">Second input image of the same size and the same type as <paramref name="frame0"/></param>
 /// <param name="points0">
 /// Vector of 2D points for which the flow needs to be found. It must be a one-row
 /// matrix with 2 channels.
 /// </param>
 /// <param name="points1">
 /// Output vector of 2D points (with single-precision two channel floating-point coordinates)
 /// containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">
 /// Output status vector (CV_8UC1 type). Each element of the vector is set to 1 if the
 /// flow for the corresponding features has been found. Otherwise, it is set to 0.
 /// </param>
 /// <param name="err">
 /// Output vector (CV_32FC1 type) that contains the difference between patches around
 /// the original and moved points or min eigen value if getMinEigenVals is checked. It can be
 /// null, if not needed.
 /// </param>
 public void Sparse(GpuMat frame0, GpuMat frame1, GpuMat points0, GpuMat points1, GpuMat status, GpuMat err = null)
 {
     CudaInvoke.cudaPyrLKOpticalFlowSparse(_ptr, frame0, frame1, points0, points1, status, err);
 }
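A minimal sketch of tracking a one-row, two-channel GpuMat of points between two frames; flow, frame0, frame1 and prevPts are assumed to exist already:
 // Sketch only: flow, frame0, frame1 and prevPts are assumed to be created elsewhere.
 using (GpuMat nextPts = new GpuMat())
 using (GpuMat status = new GpuMat())
 {
     flow.Sparse(frame0, frame1, prevPts, nextPts, status);   // err left at its null default
     // status is CV_8UC1: 1 where the flow for the corresponding point was found, 0 otherwise
 }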
Example #7
 /// <summary>
 /// Compute the optical flow.
 /// </summary>
 /// <param name="frame0">Source frame</param>
 /// <param name="frame1">Frame to track (with the same size as <paramref name="frame0"/>)</param>
 /// <param name="u">Flow horizontal component (along x axis)</param>
 /// <param name="v">Flow vertical component (along y axis)</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Compute(GpuMat frame0, GpuMat frame1, GpuMat u, GpuMat v, Stream stream = null)
 {
     CudaInvoke.cudaBroxOpticalFlowCompute(_ptr, frame0, frame1, u, v, stream);
 }
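A minimal sketch of an asynchronous call through a CUDA Stream; broxFlow, frame0 and frame1 (single-channel floating-point frames of equal size) are assumptions, as is the parameterless Stream constructor:
 // Sketch only: broxFlow, frame0 and frame1 are assumed to be created elsewhere.
 using (GpuMat u = new GpuMat())
 using (GpuMat v = new GpuMat())
 using (Stream stream = new Stream())
 {
     broxFlow.Compute(frame0, frame1, u, v, stream);   // returns immediately; work is queued on the stream
     stream.WaitForCompletion();                       // block until u and v are ready
 }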
Example #8
 /// <summary>
 /// Compute the optical flow.
 /// </summary>
 /// <param name="frame0">Source frame</param>
 /// <param name="frame1">Frame to track (with the same size as <paramref name="frame0"/>)</param>
 /// <param name="u">Flow horizontal component (along x axis)</param>
 /// <param name="v">Flow vertical component (along y axis)</param>
 public void Compute(GpuMat frame0, GpuMat frame1, GpuMat u, GpuMat v)
 {
     CudaInvoke.cudaOpticalFlowDualTvl1Compute(_ptr, frame0, frame1, u, v);
 }
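A minimal sketch, assuming tvl1 is the Dual TV-L1 optical-flow wrapper that exposes this Compute overload and that frame0 and frame1 are grayscale GpuMat frames of the same size:
 // Sketch only: tvl1, frame0 and frame1 are assumed to be created elsewhere.
 using (GpuMat u = new GpuMat())
 using (GpuMat v = new GpuMat())
 {
     tvl1.Compute(frame0, frame1, u, v);   // synchronous: this overload takes no stream
 }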
Example #9
 /// <summary>
 /// Find the good features to track
 /// </summary>
 /// <param name="image">The image in which to look for corners</param>
 /// <param name="corners">The GpuMat that will receive the detected corner positions</param>
 /// <param name="mask">The optional mask; can be null if not needed</param>
 public void Detect(GpuMat image, GpuMat corners, GpuMat mask = null)
 {
     CudaInvoke.cudaCornersDetectorDetect(_ptr, image, corners, mask);
 }
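A minimal sketch, assuming detector is the CUDA corner detector exposing this Detect method and image is a single-channel GpuMat:
 // Sketch only: detector and image are assumed to be created elsewhere.
 using (GpuMat corners = new GpuMat())
 {
     detector.Detect(image, corners);   // mask omitted: search the whole image
     // corners typically comes back as a single-row, two-channel GpuMat of (x, y) positions
 }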
Example #10
 /// <summary>
 /// Obtain the keypoints array from GpuMat
 /// </summary>
 /// <param name="src">The keypoints obtained from DetectKeyPointsRaw</param>
 /// <param name="dst">The vector of keypoints</param>
 public void DownloadKeypoints(GpuMat src, VectorOfKeyPoint dst)
 {
     CudaInvoke.cudaFASTDownloadKeypoints(_ptr, src, dst);
 }
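A minimal sketch converting the device-side keypoints into a managed vector; detector and rawKeypoints (the GpuMat filled by DetectKeyPointsRaw) are assumed to exist, and VectorOfKeyPoint comes from Emgu.CV.Util:
 // Sketch only: detector and rawKeypoints are assumed to be created elsewhere.
 using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
 {
     detector.DownloadKeypoints(rawKeypoints, keypoints);
     MKeyPoint[] hostKeypoints = keypoints.ToArray();   // keypoints are now usable on the CPU side
 }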
Example #11
 /// <summary>
 /// Detect keypoints in the CudaImage
 /// </summary>
 /// <param name="img">The image where keypoints will be detected from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keypoints">
 /// The keypoints GpuMat that will have 1 row.
 /// keypoints.at&lt;float[6]&gt;(1, i) contains i'th keypoint
 /// format: (x, y, size, response, angle, octave)
 /// </param>
 public void DetectKeyPointsRaw(GpuMat img, GpuMat mask, GpuMat keypoints)
 {
     CudaInvoke.cudaFASTDetectorDetectKeyPoints(_ptr, img, mask, keypoints);
 }
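A minimal end-to-end sketch chaining this method with DownloadKeypoints from the previous example; fast (the detector instance) and the grayscale GpuMat img are assumed to exist:
 // Sketch only: fast and img are assumed to be created elsewhere.
 using (GpuMat rawKeypoints = new GpuMat())
 using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
 {
     fast.DetectKeyPointsRaw(img, null, rawKeypoints);   // null mask: detect over the whole image
     fast.DownloadKeypoints(rawKeypoints, keypoints);    // copy results back to the host
 }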