Example #1
 /// <summary>
 /// Create a CudaImage from the specific region of <paramref name="image"/>. The data is shared between the two CudaImages.
 /// </summary>
 /// <param name="image">The CudaImage from which the region is extracted</param>
 /// <param name="rowRange">The row range.</param>
 /// <param name="colRange">The column range.</param>
 public CudaImage(CudaImage<TColor, TDepth> image, Emgu.CV.Structure.Range rowRange, Emgu.CV.Structure.Range colRange)
     : this(CudaInvoke.GetRegion(image, ref rowRange, ref colRange), true)
 {
 }
Example #2
 /// <summary>
 /// Allocate GpuMat data for the given number of rows and columns, depth type and number of channels.
 /// </summary>
 public void Create(int rows, int cols, DepthType depthType, int channels)
 {
     CudaInvoke.gpuMatCreate(Ptr, rows, cols, CvInvoke.MakeType(depthType, channels));
 }
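A minimal usage sketch of Create, assuming a CUDA-enabled Emgu CV build and only the types shown in this listing: allocate an empty GpuMat (see Example #25), then reserve storage for a 480x640 single-channel 8-bit image. Later sketches assume the same using directives.
 using System.Drawing;
 using Emgu.CV;
 using Emgu.CV.Cuda;
 using Emgu.CV.CvEnum;
 using Emgu.CV.Structure;

 GpuMat gpuMat = new GpuMat();                  // empty GpuMat (Example #25)
 gpuMat.Create(480, 640, DepthType.Cv8U, 1);    // 480 rows x 640 cols, 8-bit, 1 channel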
Example #3
 /// <summary>
 /// Create a BoxMax filter.
 /// </summary>
 /// <param name="ksize">Size of the kernel</param>
 /// <param name="anchor">The center of the kernel. User (-1, -1) for the default kernel center.</param>
 /// <param name="borderType">The border type.</param>
 /// <param name="borderValue">The border value.</param>
 /// <param name="srcDepth">The depth type of the source image</param>
 /// <param name="srcChannels">The number of channels of the source image</param>
 public CudaBoxMaxFilter(DepthType srcDepth, int srcChannels, Size ksize, Point anchor, CvEnum.BorderType borderType = BorderType.Default, MCvScalar borderValue = new MCvScalar())
 {
     _ptr = CudaInvoke.cudaCreateBoxMaxFilter(CvInvoke.MakeType(srcDepth, srcChannels), ref ksize, ref anchor, borderType, ref borderValue, ref _sharedPtr);
 }
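A hedged usage sketch (same using directives as the sketch under Example #2): construct the filter for an 8-bit single-channel source and run it on a GpuMat. The Apply(src, dst) call is an assumption based on the common base class of the CUDA filters in this wrapper; verify it against your Emgu CV version.
 CudaBoxMaxFilter boxMax = new CudaBoxMaxFilter(DepthType.Cv8U, 1, new Size(5, 5), new Point(-1, -1));

 GpuMat src = new GpuMat();
 src.Create(480, 640, DepthType.Cv8U, 1);
 GpuMat dst = new GpuMat();
 boxMax.Apply(src, dst);    // assumed CudaFilter.Apply(src, dst) from the filter base class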
Example #4
 /// <summary>
 /// Copies scalar value to every selected element of the destination GpuMat:
 /// arr(I)=value if mask(I)!=0
 /// </summary>
 /// <param name="value">Fill value</param>
 /// <param name="mask">Operation mask, 8-bit single channel GpuMat; specifies elements of destination GpuMat to be changed. Can be IntPtr.Zero if not used</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or IntPtr.Zero to call the function synchronously (blocking).</param>
 public void SetTo(MCvScalar value, IInputArray mask = null, Stream stream = null)
 {
     using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
         CudaInvoke.gpuMatSetTo(Ptr, ref value, iaMask, stream);
 }
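A short sketch of SetTo with and without a mask, using only the GpuMat members shown in these examples (Create from Example #2, SetTo above) and the same using directives as the sketch under Example #2.
 GpuMat image = new GpuMat();
 image.Create(240, 320, DepthType.Cv8U, 3);

 GpuMat mask = new GpuMat();
 mask.Create(240, 320, DepthType.Cv8U, 1);
 mask.SetTo(new MCvScalar(255));                  // no mask: select every pixel

 image.SetTo(new MCvScalar(255, 0, 0), mask);     // masked fill: write blue (BGR) where mask != 0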
Example #5
 /// <summary>
 /// This function has several different purposes and thus has several synonyms. It copies one GpuMat to another with optional scaling, which is performed first, and/or optional type conversion, performed after:
 /// dst(I)=src(I)*scale + (shift,shift,...)
 /// All the channels of multi-channel GpuMats are processed independently.
 /// The type conversion is done with rounding and saturation: if the result of scaling + conversion cannot be represented exactly by a value of the destination GpuMat element type, it is set to the nearest representable value on the real axis.
 /// In case of scale=1, shift=0 no prescaling is done. This is a specially optimized case and it has the appropriate convertTo synonym.
 /// </summary>
 /// <param name="dst">Destination GpuMat</param>
 /// <param name="scale">Scale factor</param>
 /// <param name="shift">Value added to the scaled source GpuMat elements</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or IntPtr.Zero to call the function synchronously (blocking).</param>
 public void ConvertTo(IOutputArray dst, CvEnum.DepthType rtype, double scale = 1.0, double shift = 0, Stream stream = null)
 {
     using (OutputArray oaDst = dst.GetOutputArray())
         CudaInvoke.gpuMatConvertTo(Ptr, oaDst, rtype, scale, shift, stream);
 }
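A minimal sketch of the scale-then-convert behaviour described above (same using directives as the sketch under Example #2): an 8-bit GpuMat is mapped onto the 0..1 range while being converted to 32-bit float.
 GpuMat src = new GpuMat();
 src.Create(480, 640, DepthType.Cv8U, 1);
 src.SetTo(new MCvScalar(128));

 GpuMat dst = new GpuMat();
 src.ConvertTo(dst, DepthType.Cv32F, 1.0 / 255.0);   // dst(I) = src(I) * (1/255), then saturate to Cv32F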
Example #6
 /// <summary>
 /// Get an OutputArray that references this GpuMat
 /// </summary>
 public OutputArray GetOutputArray()
 {
     return new OutputArray(CudaInvoke.cveOutputArrayFromGpuMat(_ptr));
 }
Example #7
 /// <summary>
 /// Performs a blocking upload of data to the GpuMat
 /// </summary>
 /// <param name="arr">The data to be uploaded to the GpuMat</param>
 public void Upload(IInputArray arr)
 {
     using (InputArray iaArr = arr.GetInputArray())
         CudaInvoke.gpuMatUpload(_ptr, iaArr);
 }
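A round-trip sketch combining this blocking Upload with the blocking Download from Example #21; only types shown elsewhere in this listing are assumed (same using directives as the sketch under Example #2).
 Mat host = new Mat(480, 640, DepthType.Cv8U, 3);   // host-side image
 GpuMat device = new GpuMat();
 device.Upload(host);                               // blocking copy host -> device

 Mat roundTrip = new Mat();
 device.Download(roundTrip);                        // blocking copy device -> host (Example #21)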
Example #8
 /// <summary>
 /// Create a median filter
 /// </summary>
 /// <param name="srcDepth">Type of of source image. Only 8U images are supported for now.</param>
 /// <param name="srcChannels">Type of of source image. Only single channel images are supported for now.</param>
 /// <param name="windowSize">Size of the kernerl used for the filtering. Uses a (windowSize x windowSize) filter.</param>
 /// <param name="partition">Specifies the parallel granularity of the workload. This parameter should be used GPU experts when optimizing performance.</param>
 public MedianFilter(DepthType srcDepth, int srcChannels, int windowSize, int partition = 128)
 {
     _ptr = CudaInvoke.cudaCreateMedianFilter(CvInvoke.MakeType(srcDepth, srcChannels), windowSize, partition, ref _sharedPtr);
 }
Example #9
 /// <summary>
 /// Create the Contrast Limited Adaptive Histogram Equalization
 /// </summary>
 /// <param name="clipLimit">Threshold for contrast limiting. Use 40.0 for default</param>
 /// <param name="tileGridSize">Size of grid for histogram equalization. Input image will be divided into equally sized rectangular tiles. This parameter defines the number of tiles in row and column. Use (8, 8) for default</param>
 public CudaClahe(double clipLimit, Size tileGridSize)
 {
     _ptr = CudaInvoke.cudaCLAHECreate(clipLimit, ref tileGridSize);
 }
Example #10
 /// <summary>
 /// Create a CudaBruteForceMatcher using the specific distance type
 /// </summary>
 /// <param name="distanceType">The distance type</param>
 public CudaBFMatcher(DistanceType distanceType)
 {
     _ptr = CudaInvoke.cveCudaDescriptorMatcherCreateBFMatcher(distanceType, ref _algorithmPtr, ref _sharedPtr);
 }
Example #11
 /// <summary>
 /// Create a Morphology filter.
 /// </summary>
 /// <param name="op">Type of morphological operation</param>
 /// <param name="kernel">2D 8-bit structuring element for the morphological operation.</param>
 /// <param name="anchor">Anchor position within the structuring element. Negative values mean that the anchor is at the center.</param>
 /// <param name="iterations">Number of times erosion and dilation to be applied.</param>
 /// <param name="srcDepth">The depth type of the source image</param>
 /// <param name="srcChannels">The number of channels in the source image</param>
 public CudaMorphologyFilter(CvEnum.MorphOp op, DepthType srcDepth, int srcChannels, IInputArray kernel, Point anchor, int iterations)
 {
     using (InputArray iaKernel = kernel.GetInputArray())
         _ptr = CudaInvoke.cudaCreateMorphologyFilter(op, CvInvoke.MakeType(srcDepth, srcChannels), iaKernel, ref anchor, iterations);
 }
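A construction sketch (same using directives as the sketch under Example #2), assuming the standard CvInvoke.GetStructuringElement helper to build the 8-bit structuring element on the host before wrapping it in an erosion filter.
 Mat kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1));
 CudaMorphologyFilter erode = new CudaMorphologyFilter(MorphOp.Erode, DepthType.Cv8U, 1, kernel, new Point(-1, -1), 1);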
Example #12
 /// <summary>
 /// Trains a descriptor matcher.
 /// </summary>
 public void Train()
 {
     CudaInvoke.cveCudaDescriptorMatcherTrain(_ptr);
 }
Example #13
 /// <summary>
 /// Clear the matcher
 /// </summary>
 public void Clear()
 {
     CudaInvoke.cveCudaDescriptorMatcherClear(_ptr);
 }
Example #14
 /// <summary>
 /// Add the model descriptors
 /// </summary>
 /// <param name="modelDescriptors">The model descriptors</param>
 public void Add(IInputArray modelDescriptors)
 {
     using (InputArray iaModelDescriptors = modelDescriptors.GetInputArray())
         CudaInvoke.cveCudaDescriptorMatcherAdd(_ptr, iaModelDescriptors);
 }
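A lifecycle sketch for the matcher using only the members shown above (Add, Train, Clear) plus the constructor from Example #10; it assumes Emgu.CV.Features2D for DistanceType and that modelDescriptors has been filled by a CUDA feature extractor beforehand.
 CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2);

 GpuMat modelDescriptors = new GpuMat();   // assumed to be filled by a CUDA feature extractor
 matcher.Add(modelDescriptors);            // register the model descriptors
 matcher.Train();                          // build the internal search structure
 matcher.Clear();                          // drop everything that was added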
Example #15
 /// <summary>
 /// Create a Background/Foreground Segmentation model
 /// </summary>
 /// <param name="initializationFrames">Number of frames used to initialize the background model.</param>
 /// <param name="decisionThreshold">Threshold value, above which a pixel is marked as foreground.</param>
 public CudaBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8)
 {
     _ptr = CudaInvoke.cudaBackgroundSubtractorGMGCreate(initializationFrames, decisionThreshold, ref _sharedPtr);
 }
Example #16
 /// <summary>
 /// Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
 /// </summary>
 /// <param name="source">Source image</param>
 /// <param name="dst">Destination image</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray source, IOutputArray dst, Stream stream = null)
 {
     using (InputArray iaSource = source.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             CudaInvoke.cudaCLAHEApply(_ptr, iaSource, oaDst, stream);
 }
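A sketch that combines the constructor from Example #9 with this Apply, keeping the image on the GPU throughout (same using directives as the sketch under Example #2).
 CudaClahe clahe = new CudaClahe(40.0, new Size(8, 8));   // values suggested as defaults in Example #9

 GpuMat gray = new GpuMat();
 gray.Create(480, 640, DepthType.Cv8U, 1);                // CLAHE expects a single-channel image
 GpuMat equalized = new GpuMat();
 clahe.Apply(gray, equalized);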
Example #17
 /// <summary>
 /// Updates the background model
 /// </summary>
 /// <param name="frame">Next video frame.</param>
 /// <param name="learningRate">The learning rate, use -1.0f for default value.</param>
 /// <param name="foregroundMask">The output foreground mask as an 8-bit binary image.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public void Apply(IInputArray frame, IOutputArray foregroundMask, double learningRate = -1, Stream stream = null)
 {
     using (InputArray iaFrame = frame.GetInputArray())
         using (OutputArray oaForegroundMask = foregroundMask.GetOutputArray())
             CudaInvoke.cudaBackgroundSubtractorGMGApply(_ptr, iaFrame, oaForegroundMask, learningRate, stream);
 }
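A sketch of the typical per-frame loop body (same using directives as the sketch under Example #2): upload the next host frame (Example #7), then update the model and read the foreground mask. In a real application this runs once per captured frame.
 CudaBackgroundSubtractorGMG gmg = new CudaBackgroundSubtractorGMG(120, 0.8);

 Mat hostFrame = new Mat(480, 640, DepthType.Cv8U, 3);    // stand-in for a captured frame
 GpuMat frame = new GpuMat();
 frame.Upload(hostFrame);

 GpuMat foreground = new GpuMat();
 gmg.Apply(frame, foreground);                            // learningRate defaults to -1 (automatic)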
Example #18
 /// <summary>
 /// Create a CudaVideoReader that decodes the specified video file on the GPU.
 /// </summary>
 /// <param name="fileName">Name of the video file</param>
 public CudaVideoReader(String fileName)
 {
     using (CvString s = new CvString(fileName))
         _ptr = CudaInvoke.cudaVideoReaderCreate(s, ref _sharedPtr);
 }
Example #19
 /// <summary>
 /// Get an InputOutputArray that references this GpuMat
 /// </summary>
 public InputOutputArray GetInputOutputArray()
 {
     return new InputOutputArray(CudaInvoke.cveInputOutputArrayFromGpuMat(_ptr));
 }
Example #20
 /// <summary>
 /// Indicates if the device has the specific feature
 /// </summary>
 /// <param name="feature">The device feature</param>
 /// <returns>True if the feature is supported</returns>
 public bool Supports(GpuFeature feature)
 {
     return CudaInvoke.cudaDeviceInfoSupports(_ptr, feature);
 }
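A sketch of a capability check before relying on double-precision kernels. The enumeration member used here (GpuFeature.NativeDouble, mirroring OpenCV's FEATURE_SET_NATIVE_DOUBLE) and its nesting under CudaDeviceInfo are assumptions to verify against your Emgu CV version.
 using (CudaDeviceInfo info = new CudaDeviceInfo())
 {
     // NativeDouble is assumed to mirror OpenCV's FEATURE_SET_NATIVE_DOUBLE
     if (info.Supports(CudaDeviceInfo.GpuFeature.NativeDouble))
     {
         // safe to run kernels that rely on native double support
     }
 }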
Example #21
 /// <summary>
 /// Downloads data from device to host memory. This is a blocking call.
 /// </summary>
 /// <param name="arr">The destination array where the GpuMat data will be downloaded to.</param>
 public void Download(IOutputArray arr)
 {
     //Debug.Assert(arr.Size.Equals(Size), "Destination CvArray size does not match source GpuMat size");
     using (OutputArray oaArr = arr.GetOutputArray())
         CudaInvoke.gpuMatDownload(_ptr, oaArr);
 }
Example #22
 /// <summary>
 /// Release the unmanaged resources associated with this CudaDeviceInfo
 /// </summary>
 protected override void DisposeObject()
 {
     CudaInvoke.cudaDeviceInfoRelease(ref _ptr);
 }
Example #23
 /// <summary>
 /// Copy the source GpuMat to destination GpuMat, using an optional mask.
 /// </summary>
 /// <param name="dst">The output array to be copied to</param>
 /// <param name="mask">The optional mask, use IntPtr.Zero if not needed.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or IntPtr.Zero to call the function synchronously (blocking).</param>
 public void CopyTo(IOutputArray dst, IInputArray mask = null, Stream stream = null)
 {
     using (OutputArray oaDst = dst.GetOutputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             CudaInvoke.gpuMatCopyTo(Ptr, oaDst, iaMask, stream);
 }
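A masked-copy sketch mirroring the SetTo sketch under Example #4 (same using directives as the sketch under Example #2): only the pixels selected by the 8-bit mask are copied into the destination.
 GpuMat srcMat = new GpuMat();
 srcMat.Create(240, 320, DepthType.Cv8U, 3);

 GpuMat copyMask = new GpuMat();
 copyMask.Create(240, 320, DepthType.Cv8U, 1);
 copyMask.SetTo(new MCvScalar(255));       // select every pixel

 GpuMat dstMat = new GpuMat();
 srcMat.CopyTo(dstMat, copyMask);          // masked device-to-device copy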
Example #24
 /// <summary>
 /// Query the information of the gpu device that is currently in use.
 /// </summary>
 public CudaDeviceInfo()
     : this(CudaInvoke.GetDevice())
 {
 }
Example #25
 /// <summary>
 /// Create an empty GpuMat
 /// </summary>
 public GpuMat()
     : this(CudaInvoke.gpuMatCreateDefault(), true)
 {
 }
Example #26
 /// <summary>
 /// Query the information of the cuda device with the specific id.
 /// </summary>
 /// <param name="deviceId">The device id</param>
 public CudaDeviceInfo(int deviceId)
 {
     _ptr      = CudaInvoke.cudaDeviceInfoCreate(ref deviceId);
     _deviceID = deviceId;
 }
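An enumeration sketch; CudaInvoke.GetCudaEnabledDeviceCount is assumed to be the wrapper for OpenCV's getCudaEnabledDeviceCount, so verify the name against your Emgu CV version.
 int deviceCount = CudaInvoke.GetCudaEnabledDeviceCount();   // assumed wrapper for getCudaEnabledDeviceCount
 for (int i = 0; i < deviceCount; i++)
 {
     using (CudaDeviceInfo info = new CudaDeviceInfo(i))
     {
         // inspect the device here, e.g. whether it supports a given GpuFeature (Example #20)
     }
 }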
Example #27
 /// <summary>
 /// Create a GpuMat from the specific region of <paramref name="mat"/>. The data is shared between the two GpuMats.
 /// </summary>
 /// <param name="mat">The matrix where the region is extracted from</param>
 /// <param name="rowRange">The row range. Use MCvSlice.WholeSeq for all rows.</param>
 /// <param name="colRange">The column range. Use MCvSlice.WholeSeq for all columns.</param>
 public GpuMat(GpuMat mat, MCvSlice rowRange, MCvSlice colRange)
     : this(CudaInvoke.GetRegion(mat, ref rowRange, ref colRange), true)
 {
 }
Example #28
 /// <summary>
 /// Updates the background model
 /// </summary>
 /// <param name="frame">Next video frame.</param>
 /// <param name="learningRate">The learning rate, use -1.0f for default value.</param>
 /// <param name="forgroundMask">Output the current forground mask</param>
 public void Apply(IInputArray frame, IOutputArray forgroundMask, double learningRate = -1.0)
 {
     using (InputArray iaFrame = frame.GetInputArray())
         using (OutputArray oaForgroundMask = forgroundMask.GetOutputArray())
             CudaInvoke.cudaBackgroundSubtractorFGDApply(_ptr, iaFrame, oaForgroundMask, learningRate);
 }
Example #29
 /// <summary>
 /// Create a new Cuda Stream
 /// </summary>
 public Stream()
 {
     _ptr = CudaInvoke.streamCreate();
 }
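An asynchronous sketch (same using directives as the sketch under Example #2): the ConvertTo from Example #5 is queued on a Stream and the host waits for completion afterwards. Stream.WaitForCompletion is an assumption based on the usual Emgu CV stream API; verify it against your version.
 using (Stream stream = new Stream())
 {
     GpuMat src = new GpuMat();
     src.Create(480, 640, DepthType.Cv8U, 1);
     GpuMat dst = new GpuMat();

     src.ConvertTo(dst, DepthType.Cv32F, 1.0 / 255.0, 0, stream);   // queued on the stream, returns immediately
     stream.WaitForCompletion();                                    // assumed blocking wait for the queued work
 }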
Example #30
 /// <summary>
 /// Returns a CudaImage corresponding to a specified rectangle of the current CudaImage. The data is shared with the current matrix. In other words, it allows the user to treat a rectangular part of the input array as a stand-alone array.
 /// </summary>
 /// <param name="region">Zero-based coordinates of the rectangle of interest.</param>
 /// <returns>A CudaImage that represents the region of the current CudaImage.</returns>
 /// <remarks>The parent CudaImage should never be released before the returned CudaImage that represents the subregion</remarks>
 public new CudaImage<TColor, TDepth> GetSubRect(Rectangle region)
 {
     return new CudaImage<TColor, TDepth>(CudaInvoke.GetSubRect(this, ref region), true);
 }
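A sketch of working on a shared sub-region (same using directives as the sketch under Example #2); the CudaImage(int rows, int cols) constructor is an assumption, the rest uses only members shown in this listing.
 CudaImage<Bgr, byte> full = new CudaImage<Bgr, byte>(480, 640);               // assumed (rows, cols) constructor
 CudaImage<Bgr, byte> roi = full.GetSubRect(new Rectangle(10, 10, 100, 100));  // shares data with 'full'
 roi.SetTo(new MCvScalar(0, 0, 255), null);                                    // writes into the parent's shared data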