예제 #1
0
파일: Net.cs 프로젝트: neutmute/emgucv
 /// <summary>
 /// Retrieve the output blob of a layer by its descriptor.
 /// </summary>
 /// <param name="outputName">the descriptor of the returning layer output blob.</param>
 /// <returns>The layer output blob.</returns>
 public Blob GetBlob(String outputName)
 {
    // Marshal the managed string to a native CvString for the interop call.
    using (CvString csOutputName = new CvString(outputName))
       return new Blob(ContribInvoke.cveDnnNetGetBlob(_ptr, csOutputName));
 }
예제 #2
0
 /// <summary>
 /// Create a GPU-accelerated video writer that encodes frames to the given file.
 /// </summary>
 /// <param name="fileName">The name of the output video file.</param>
 /// <param name="frameSize">The size of the video frames.</param>
 /// <param name="fps">Frames per second of the output video.</param>
 /// <param name="format">The surface format of the input frames.</param>
 public CudaVideoWriter(String fileName, Size frameSize, double fps, SurfaceFormat format = SurfaceFormat.BGR)
 {
     // The native layer copies the string; CvString only needs to live for the call.
     using (CvString csFileName = new CvString(fileName))
         _ptr = CudaInvoke.cudaVideoWriterCreate(csFileName, ref frameSize, fps, format);
 }
예제 #3
0
      /// <summary>
      /// Builds a custom OpenCL "shift" kernel from source, runs it on a grayscale
      /// float image, and saves the shifted result to "tmp.jpg".
      /// Skipped entirely when OpenCL is unavailable or disabled.
      /// </summary>
      public void TestOclKernel()
      {
         if (CvInvoke.HaveOpenCL && CvInvoke.UseOpenCL)
         {

            Ocl.Device defaultDevice = Ocl.Device.Default;

            // Load the test image and convert it to a single-channel 32-bit float
            // image scaled to [0, 1].
            Mat img = EmguAssert.LoadMat("lena.jpg");
            Mat imgGray = new Mat();
            CvInvoke.CvtColor(img, imgGray, ColorConversion.Bgr2Gray);
            Mat imgFloat = new Mat();
            imgGray.ConvertTo(imgFloat, DepthType.Cv32F, 1.0/255);
            // Source stays on the device for reading; destination is allocated to match.
            UMat umat = imgFloat.GetUMat(AccessType.Read, UMat.Usage.AllocateDeviceMemory);
            UMat umatDst = new UMat();
            umatDst.Create(umat.Rows, umat.Cols, DepthType.Cv32F, umat.NumberOfChannels, UMat.Usage.AllocateDeviceMemory);
            
            // dstT is substituted into the kernel source at build time.
            String buildOpts = String.Format("-D dstT={0}", Ocl.OclInvoke.TypeToString(umat.Depth));
    
            // OpenCL C source: samples the source image with linear filtering at a
            // shifted coordinate and writes the result into the destination buffer.
            String sourceStr = @"
__constant sampler_t samplerLN = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;
__kernel void shift(const image2d_t src, float shift_x, float shift_y, __global uchar* dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)
{
   int x = get_global_id(0);
   int y = get_global_id(1);
   if (x >= dst_cols) return;
   int dst_index = mad24(y, dst_step, mad24(x, (int)sizeof(dstT), dst_offset));
   __global dstT *dstf = (__global dstT *)(dst + dst_index);
   float2 coord = (float2)((float)x+0.5f+shift_x, (float)y+0.5f+shift_y);
   dstf[0] = (dstT)read_imagef(src, samplerLN, coord).x;
}";

            using (CvString errorMsg = new CvString())
            using (Ocl.ProgramSource ps = new Ocl.ProgramSource(sourceStr))
            using (Ocl.Kernel kernel = new Ocl.Kernel())
            using (Ocl.Image2D image2d = new Ocl.Image2D(umat))
            using (Ocl.KernelArg ka = new Ocl.KernelArg(Ocl.KernelArg.Flags.ReadWrite, umatDst))
            {
               float shiftX = 100.5f;
               float shiftY = -50.0f;

               // Compile the kernel; errorMsg receives the build log on failure.
               bool success = kernel.Create("shift", ps, buildOpts, errorMsg);
               EmguAssert.IsTrue(success, errorMsg.ToString());
               // Bind arguments in the order declared by the kernel signature.
               int idx = 0;
               idx = kernel.Set(idx, image2d);
               idx = kernel.Set(idx, ref shiftX);
               idx = kernel.Set(idx, ref shiftY);
               idx = kernel.Set(idx, ka);
               // One work item per pixel; blocking run (last arg true).
               IntPtr[] globalThreads = new IntPtr[] {new IntPtr(umat.Cols), new IntPtr(umat.Rows), new IntPtr(1) };
               success = kernel.Run(globalThreads, null, true);
               EmguAssert.IsTrue(success, "Failed to run the kernel");
               // Download the result, rescale back to 8-bit, and save for inspection.
               using (Mat matDst = umatDst.GetMat(AccessType.Read))
               using (Mat saveMat = new Mat())
               {
                  matDst.ConvertTo(saveMat, DepthType.Cv8U, 255.0);
                  saveMat.Save("tmp.jpg");
               }
            }
         }
      }
예제 #4
0
파일: Importer.cs 프로젝트: neutmute/emgucv
 /// <summary>
 /// Create an importer for a Caffe framework network.
 /// </summary>
 /// <param name="prototxt">path to the .prototxt file with text description of the network architecture.</param>
 /// <param name="caffeModel">path to the .caffemodel file with learned network.</param>
 /// <returns>The created importer, NULL in failure cases.</returns>
 public static Importer CreateCaffeImporter(String prototxt, String caffeModel)
 {
    using (CvString prototxtStr = new CvString(prototxt))
    using (CvString caffeModelStr = new CvString(caffeModel))
    {
       IntPtr importerPtr = ContribInvoke.cveDnnCreateCaffeImporter(prototxtStr, caffeModelStr);
       // The native call returns a null pointer on failure; surface that as a null object.
       if (importerPtr == IntPtr.Zero)
          return null;
       return new Importer(importerPtr);
    }
 }
예제 #5
0
        /// <summary>
        /// Creates a blob from an ONNX tensor file.
        /// </summary>
        /// <param name="path">Path to the ONNX file with the input tensor.</param>
        /// <returns>The blob read from the file.</returns>
        public static Mat ReadTensorFromONNX(String path)
        {
            Mat m = new Mat();

            using (CvString csPath = new CvString(path))
            {
                // The native function fills the pre-allocated Mat in place.
                cveReadTensorFromONNX(csPath, m);
            }
            return(m);
        }
예제 #6
0
 /// <summary>
 /// Create a classification model from the given network files.
 /// </summary>
 /// <param name="model">Binary file contains trained weights.</param>
 /// <param name="config">Text file contains network configuration.</param>
 public ClassificationModel(String model, String config = null)
 {
     // A null config is marshalled as an empty native string by CvString.
     using (CvString csModel = new CvString(model))
     using (CvString csConfig = new CvString(config))
     {
         _ptr = DnnInvoke.cveDnnClassificationModelCreate1(csModel, csConfig, ref _model);
     }
 }
예제 #7
0
 /// <summary>
 /// Create Text Recognition model from deep learning network
 /// </summary>
 /// <param name="model">Binary file contains trained weights</param>
 /// <param name="config">Text file contains network configuration</param>
 /// <remarks>Set DecodeType and Vocabulary after constructor to initialize the decoding method.</remarks>
 public TextRecognitionModel(String model, String config = null)
 {
     using (CvString csModel = new CvString(model))
     using (CvString csConfig = new CvString(config))
     {
         // The native constructor also returns the base model pointer via _model.
         _ptr = DnnInvoke.cveDnnTextRecognitionModelCreate1(csModel, csConfig, ref _model);
     }
 }
예제 #8
0
 /// <summary>
 /// Create a new keypoints model
 /// </summary>
 /// <param name="model">Binary file contains trained weights.</param>
 /// <param name="config">Text file contains network configuration.</param>
 public KeypointsModel(String model, String config = null)
 {
     using (CvString csModel = new CvString(model))
     using (CvString csConfig = new CvString(config))
     {
         // _model receives the pointer to the underlying dnn::Model base object.
         _ptr = DnnInvoke.cveDnnKeypointsModelCreate1(csModel, csConfig, ref _model);
     }
 }
예제 #9
0
파일: ERFilter.cs 프로젝트: vinchu/emgucv
 /// <summary>
 /// Create an Extremal Region Filter for the 1st stage classifier of N&amp;M algorithm
 /// </summary>
 /// <param name="classifierFileName">The file name of the classifier</param>
 /// <param name="thresholdDelta">Threshold step in subsequent thresholds when extracting the component tree.</param>
 /// <param name="minArea">The minimum area (% of image size) allowed for retrieved ER's.</param>
 /// <param name="maxArea">The maximum area (% of image size) allowed for retrieved ER's.</param>
 /// <param name="minProbability">The minimum probability P(er|character) allowed for retrieved ER's.</param>
 /// <param name="nonMaxSuppression">Whether non-maximum suppression is done over the branch probabilities.</param>
 /// <param name="minProbabilityDiff">The minimum probability difference between local maxima and local minima ERs.</param>
 public ERFilterNM1(
     String classifierFileName,
     int thresholdDelta       = 1,
     float minArea            = 0.00025f,
     float maxArea            = 0.13f,
     float minProbability     = 0.4f,
     bool nonMaxSuppression   = true,
     float minProbabilityDiff = 0.1f)
 {
     using (CvString csClassifierFileName = new CvString(classifierFileName))
     {
         _ptr = ContribInvoke.CvERFilterNM1Create(
             csClassifierFileName,
             thresholdDelta,
             minArea,
             maxArea,
             minProbability,
             nonMaxSuppression,
             minProbabilityDiff);
     }
 }
예제 #10
0
 /// <summary>
 /// Create text detection model from network represented in one of the supported formats.
 /// </summary>
 /// <param name="model">Binary file contains trained weights.</param>
 /// <param name="config">Text file contains network configuration.</param>
 public TextDetectionModel_DB(String model, String config = null)
 {
     using (CvString csModel = new CvString(model))
     using (CvString csConfig = new CvString(config))
     {
         // Also receives the base TextDetectionModel and Model pointers by ref.
         _ptr = DnnInvoke.cveDnnTextDetectionModelDbCreate1(
             csModel,
             csConfig,
             ref _textDetectionModel,
             ref _model);
     }
 }
예제 #11
0
 /// <summary>
 /// Create the standard vector of CvString
 /// </summary>
 /// <param name="values">The jagged array whose rows are pushed into this vector one at a time.</param>
 public VectorOfCvString(MCvERStat[][] values)
     : this()
 {
     // NOTE(review): the parameter type MCvERStat[][] looks inconsistent with a
     // vector of CvString — pushing MCvERStat[] into a CvString is unusual. This
     // may be an artifact of code generation; verify against VectorOfVectorOfERStat.
     using (CvString v = new CvString())
     {
         for (int i = 0; i < values.Length; i++)
         {
             // Reuse one temporary container: fill, push a copy, then clear.
             v.Push(values[i]);
             Push(v);
             v.Clear();
         }
     }
 }
예제 #12
0
 /// <summary>
 /// Create an object which calculates quality.
 /// </summary>
 /// <param name="modelFilePath">Contains a path to the BRISQUE model data. If empty, attempts to load from ${OPENCV_DIR}/testdata/contrib/quality/brisque_model_live.yml</param>
 /// <param name="rangeFilePath">contains a path to the BRISQUE range data. If empty, attempts to load from ${OPENCV_DIR}/testdata/contrib/quality/brisque_range_live.yml</param>
 public QualityBRISQUE(
     String modelFilePath = "",
     String rangeFilePath = "")
 {
     using (CvString csModelFilePath = new CvString(modelFilePath))
     using (CvString csRangeFilePath = new CvString(rangeFilePath))
     {
         // The native factory also hands back base-class and shared pointers.
         _ptr = QualityInvoke.cveQualityBRISQUECreate(
             csModelFilePath,
             csRangeFilePath,
             ref _qualityBasePtr,
             ref _algorithmPtr,
             ref _sharedPtr);
     }
 }
예제 #13
0
        /// <summary>
        /// Compute the bounding size of the given text when rendered with this font.
        /// </summary>
        /// <param name="text">The text string to measure.</param>
        /// <param name="fontHeight">Drawing font size by pixel unit.</param>
        /// <param name="thickness">Thickness of the lines used to draw the text.</param>
        /// <param name="baseLine">Receives the y-coordinate of the baseline relative to the bottom-most text point.</param>
        /// <returns>The size of the rendered text.</returns>
        public Size GetTextSize(
            String text,
            int fontHeight, int thickness,
            ref int baseLine)
        {
            Size textSize = new Size();
            using (CvString csText = new CvString(text))
                FreetypeInvoke.cveFreeType2GetTextSize(_ptr, csText, fontHeight, thickness, ref baseLine, ref textSize);
            return textSize;
        }
예제 #14
0
        /// <summary>
        /// Initialize the OCR engine using the specific dataPath and language name.
        /// </summary>
        /// <param name="dataPath">
        /// The datapath must be the name of the parent directory of tessdata and
        /// must end in / . Any name after the last / will be stripped.
        /// </param>
        /// <param name="language">
        /// The language is (usually) an ISO 639-3 string or NULL will default to eng.
        /// It is entirely safe (and eventually will be efficient too) to call
        /// Init multiple times on the same instance to change language, or just
        /// to reset the classifier.
        /// The language may be a string of the form [~]%lt;lang&gt;[+[~]&lt;lang&gt;]* indicating
        /// that multiple languages are to be loaded. Eg hin+eng will load Hindi and
        /// English. Languages may specify internally that they want to be loaded
        /// with one or more other languages, so the ~ sign is available to override
        /// that. Eg if hin were set to load eng by default, then hin+~eng would force
        /// loading only hin. The number of loaded languages is limited only by
        /// memory, with the caveat that loading additional languages will impact
        /// both speed and accuracy, as there is more work to do to decide on the
        /// applicable language, and there is more chance of hallucinating incorrect
        /// words.
        /// </param>
        /// <param name="mode">OCR engine mode</param>
        /// <exception cref="ArgumentException">Thrown when the native engine fails to initialize with the given path, language and mode.</exception>
        public void Init(String dataPath, String language, OcrEngineMode mode)
        {
            using (CvString csDataPath = new CvString(dataPath))
                using (CvString csLanguage = new CvString(language))
                {
                    // Non-zero result indicates the native TessBaseAPI::Init failed.
                    int initResult = OcrInvoke.TessBaseAPIInit(_ptr, csDataPath, csLanguage, mode);
                    if (initResult != 0)
                    {
#if !NETFX_CORE
                        // Resolve an empty path to the current directory so the
                        // error message shows where tesseract actually looked.
                        if (dataPath.Equals(String.Empty))
                        {
                            dataPath = Path.GetFullPath(".");
                        }
#endif
                        throw new ArgumentException(
                                  String.Format("Unable to create ocr model using Path '{0}', language '{1}' and OcrEngineMode '{2}'.", dataPath,
                                                language, mode));
                    }
                }
        }
예제 #15
0
        /// <summary>
        /// Convert the standard vector to arrays of int
        /// </summary>
        /// <returns>Arrays of int</returns>
        public MCvERStat[][] ToArrayOfArray()
        {
            int count = Size;
            MCvERStat[][] result = new MCvERStat[count][];
            for (int idx = 0; idx < count; idx++)
            {
                // Each element returned by the indexer is owned here and must be disposed.
                using (CvString element = this[idx])
                    result[idx] = element.ToArray();
            }
            return result;
        }
예제 #16
0
파일: Detector.cs 프로젝트: zanker99/emgucv
 /// <summary>
 /// Add new object template.
 /// </summary>
 /// <param name="sources">Source images, one for each modality.</param>
 /// <param name="classId">Object class ID.</param>
 /// <param name="objectMask">Mask separating object from background.</param>
 /// <param name="boundingBox">Return bounding box of the extracted features.</param>
 /// <returns>Template ID, or -1 if failed to extract a valid template.</returns>
 public int AddTemplate(
     VectorOfMat sources,
     String classId,
     Mat objectMask,
     ref Rectangle boundingBox)
 {
     using (CvString csClassId = new CvString(classId))
     {
         int templateId = LinemodInvoke.cveLinemodDetectorAddTemplate(
             _ptr,
             sources,
             csClassId,
             objectMask,
             ref boundingBox);
         return templateId;
     }
 }
예제 #17
0
      /// <summary>
      /// Compiles a simple copy kernel from OpenCL source and checks it can be
      /// created. Skipped when OpenCL is unavailable or disabled.
      /// </summary>
      public void TestOclKernel()
      {
         if (CvInvoke.HaveOpenCL && CvInvoke.UseOpenCL)
         {

            OclDevice defaultDevice = OclDevice.Default;

            // 256x256 single-channel 8-bit image filled with a constant value.
            UMat umat = new UMat(256, 256, DepthType.Cv8U, 1);
            umat.SetTo(new MCvScalar(8));

            int rowsPerWI = 1;
            int cn = 1;
            
            // These -D defines are substituted into the kernel source at build time.
            String buildOpts = String.Format("-D rowsPerWI={0} -D cn={1} -D srcT1_C1=uchar -DdstT_C1=uchar", rowsPerWI, cn);

            // OpenCL C source: copies each source pixel to the destination buffer.
            String sourceStr = @"
__kernel void mytest(__global const uchar * srcptr1, int srcstep1, int srcoffset1, 
                 __global uchar *dstptr, int dststep, int dstoffset,
                 int rows, int cols )
{
               int x = get_global_id(0);
               int y0 = get_global_id(1) * rowsPerWI;

               if (x < cols)
               {
                  int src1_index = mad24(y0, srcstep1, mad24(x, (int)sizeof(srcT1_C1) * cn, srcoffset1));
                  int dst_index = mad24(y0, dststep, mad24(x, (int)sizeof(dstT_C1) * cn, dstoffset));

                  for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, src1_index += srcstep1, dst_index += dststep)
                  {
                     *(__global uchar*) (dstptr + dst_index)= *(srcptr1 + src1_index);
                  }
               }
            }";

            

            // errorMsg receives the build log when kernel creation fails.
            using (CvString errorMsg = new CvString())
            using (OclProgramSource ps = new OclProgramSource(sourceStr))
            using (OclKernel kernel = new OclKernel())
            {
               bool success = kernel.Create("mytest", ps, buildOpts, errorMsg);
               bool empty = kernel.Empty;
            }
         }
      }
예제 #18
0
 /// <summary>
 /// Renders the specified text string in the image. Symbols that cannot be rendered using the specified font are replaced by "Tofu" or non-drawn.
 /// </summary>
 /// <param name="img">Image.</param>
 /// <param name="text">Text string to be drawn.</param>
 /// <param name="org">Bottom-left/Top-left corner of the text string in the image.</param>
 /// <param name="fontHeight">Drawing font size by pixel unit.</param>
 /// <param name="color">Text color.</param>
 /// <param name="thickness">Thickness of the lines used to draw a text when negative, the glyph is filled. Otherwise, the glyph is drawn with this thickness.</param>
 /// <param name="lineType">Line type</param>
 /// <param name="bottomLeftOrigin">When true, the image data origin is at the bottom-left corner. Otherwise, it is at the top-left corner.</param>
 public void PutText(
     IInputOutputArray img,
     String text,
     Point org,
     int fontHeight,
     MCvScalar color,
     int thickness,
     LineType lineType,
     bool bottomLeftOrigin
     )
 {
     using (InputOutputArray ioaImg = img.GetInputOutputArray())
     using (CvString csText = new CvString(text))
     {
         FreetypeInvoke.cveFreeType2PutText(_ptr, ioaImg, csText, ref org, fontHeight, ref color, thickness, lineType, bottomLeftOrigin);
     }
 }
예제 #19
0
        /// <summary>
        /// Compiles a simple copy kernel from OpenCL source and checks that it
        /// builds. Skipped when OpenCL is unavailable or disabled.
        /// </summary>
        public void TestOclKernel()
        {
            if (CvInvoke.HaveOpenCL && CvInvoke.UseOpenCL)
            {
                OclDevice defaultDevice = OclDevice.Default;

                // 256x256 single-channel 8-bit image filled with a constant value.
                UMat umat = new UMat(256, 256, DepthType.Cv8U, 1);
                umat.SetTo(new MCvScalar(8));

                int rowsPerWI = 1;
                int cn        = 1;

                // These -D defines are substituted into the kernel source at build time.
                String buildOpts = String.Format("-D rowsPerWI={0} -D cn={1} -D srcT1_C1=uchar -DdstT_C1=uchar", rowsPerWI, cn);

                // OpenCL C source: copies each source pixel to the destination buffer.
                String sourceStr = @"
__kernel void mytest(__global const uchar * srcptr1, int srcstep1, int srcoffset1, 
                 __global uchar *dstptr, int dststep, int dstoffset,
                 int rows, int cols )
{
               int x = get_global_id(0);
               int y0 = get_global_id(1) * rowsPerWI;

               if (x < cols)
               {
                  int src1_index = mad24(y0, srcstep1, mad24(x, (int)sizeof(srcT1_C1) * cn, srcoffset1));
                  int dst_index = mad24(y0, dststep, mad24(x, (int)sizeof(dstT_C1) * cn, dstoffset));

                  for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, src1_index += srcstep1, dst_index += dststep)
                  {
                     *(__global uchar*) (dstptr + dst_index)= *(srcptr1 + src1_index);
                  }
               }
            }";



                // errorMsg receives the build log when kernel creation fails.
                using (CvString errorMsg = new CvString())
                    using (OclProgramSource ps = new OclProgramSource(sourceStr))
                        using (OclKernel kernel = new OclKernel())
                        {
                            bool success = kernel.Create("mytest", ps, buildOpts, errorMsg);
                            bool empty   = kernel.Empty;
                        }
            }
        }
예제 #20
0
 /// <summary>
 /// Creates an instance of the TextDetectorCNN class using the provided parameters.
 /// </summary>
 /// <param name="modelArchFilename">The relative or absolute path to the prototxt file describing the classifiers architecture.</param>
 /// <param name="modelWeightsFilename">The relative or absolute path to the file containing the pretrained weights of the model in caffe-binary form.</param>
 /// <param name="detectionSizes">A list of sizes for multi-scale detection. The values[(300,300),(700,500),(700,300),(700,700),(1600,1600)] are recommended to achieve the best quality.</param>
 public TextDetectorCNN(String modelArchFilename, String modelWeightsFilename, Size[] detectionSizes = null)
 {
     using (CvString csModelArchFilename = new CvString(modelArchFilename))
     using (CvString csModelWeightsFilename = new CvString(modelWeightsFilename))
     {
         if (detectionSizes != null)
         {
             // Use the overload that accepts explicit multi-scale detection sizes.
             using (VectorOfSize vs = new VectorOfSize(detectionSizes))
                 _ptr = TextInvoke.cveTextDetectorCNNCreate2(csModelArchFilename, csModelWeightsFilename, vs,
                                                             ref _sharedPtr);
         }
         else
         {
             _ptr = TextInvoke.cveTextDetectorCNNCreate(csModelArchFilename, csModelWeightsFilename,
                                                        ref _sharedPtr);
         }
     }
 }
예제 #21
0
        /// <summary>
        /// Create video frame source from video file
        /// </summary>
        /// <param name="fileName">The name of the file</param>
        /// <param name="tryUseGpu">If true, it will try to create video frame source using gpu</param>
        public FrameSource(String fileName, bool tryUseGpu)
        {
            using (CvString csFileName = new CvString(fileName))
            {
                if (tryUseGpu)
                {
                    try
                    {
                        _ptr = SuperresInvoke.cvSuperresCreateFrameSourceVideo(csFileName, true);
                    }
                    catch
                    {
                        // GPU frame source creation failed; fall back to the CPU path.
                        _ptr = SuperresInvoke.cvSuperresCreateFrameSourceVideo(csFileName, false);
                    }
                }
                else
                {
                    _ptr = SuperresInvoke.cvSuperresCreateFrameSourceVideo(csFileName, false);
                }
            }

            _frameSourcePtr = _ptr;
        }
예제 #22
0
 /// <summary>
 /// Turn a single image into symbolic text.
 /// </summary>
 /// <param name="pix">The pix is the image processed.</param>
 /// <param name="pageIndex">Metadata used by side-effect processes, such as reading a box file or formatting as hOCR.</param>
 /// <param name="filename">Metadata used by side-effect processes, such as reading a box file or formatting as hOCR.</param>
 /// <param name="retryConfig">retryConfig is useful for debugging. If not NULL, you can fall back to an alternate configuration if a page fails for some reason.</param>
 /// <param name="timeoutMillisec">terminates processing if any single page takes too long. Set to 0 for unlimited time.</param>
 /// <param name="renderer">Responsible for creating the output. For example, use the TessTextRenderer if you want plaintext output, or the TessPDFRender to produce searchable PDF.</param>
 /// <returns>Returns true if successful, false on error.</returns>
 public bool ProcessPage(
     Pix pix,
     int pageIndex,
     String filename,
     String retryConfig,
     int timeoutMillisec,
     ITessResultRenderer renderer)
 {
     using (CvString csFileName = new CvString(filename))
     using (CvString csRetryConfig = new CvString(retryConfig))
     {
         bool processed = OcrInvoke.TessBaseAPIProcessPage(
             _ptr,
             pix,
             pageIndex,
             csFileName,
             csRetryConfig,
             timeoutMillisec,
             renderer.TessResultRendererPtr);
         return processed;
     }
 }
예제 #23
0
        /// <summary>
        /// Create a new dpm detector with the specified files and classes
        /// </summary>
        /// <param name="files">A set of file names storing the trained detectors (models). Each file contains one model.</param>
        /// <param name="classes">A set of trained models names. If it's empty then the name of each model will be constructed from the name of file containing the model. E.g. the model stored in "/home/user/cat.xml" will get the name "cat".</param>
        public DpmDetector(string[] files, string[] classes = null)
        {
            // Marshal every file name into a native CvString.
            CvString[] nativeFiles = new CvString[files.Length];
            for (int idx = 0; idx < files.Length; idx++)
                nativeFiles[idx] = new CvString(files[idx]);

            // A null class list is treated as empty: names are derived from file names natively.
            CvString[] nativeClasses;
            if (classes != null)
            {
                nativeClasses = new CvString[classes.Length];
                for (int idx = 0; idx < classes.Length; idx++)
                    nativeClasses[idx] = new CvString(classes[idx]);
            }
            else
            {
                nativeClasses = new CvString[0];
            }

            try
            {
                using (var vfiles = new Util.VectorOfCvString(nativeFiles))
                using (var vclasses = new Util.VectorOfCvString(nativeClasses))
                {
                    _ptr = DpmInvoke.cveDPMDetectorCreate(vfiles, vclasses, ref _sharedPtr);
                }
            }
            finally
            {
                // The vectors copy the strings; the temporaries must be released here.
                foreach (var nativeString in nativeFiles)
                    nativeString.Dispose();
                foreach (var nativeString in nativeClasses)
                    nativeString.Dispose();
            }
        }
예제 #24
0
        /// <summary>
        /// Compiles a magnitude-filter kernel into an OpenCL program via the
        /// default context and retrieves its compiled binary. Skipped when
        /// OpenCL is unavailable or disabled.
        /// </summary>
        public void TestOclProgramCompile()
        {
            if (CvInvoke.HaveOpenCL && CvInvoke.UseOpenCL)
            {
                // OpenCL C source: simple gradient-magnitude filter over an 8-bit image.
                String      sourceStr = @"
__kernel void magnitude_filter_8u(
       __global const uchar* src, int src_step, int src_offset,
       __global uchar* dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,
       float scale)
{
   int x = get_global_id(0);
   int y = get_global_id(1);
   if (x < dst_cols && y < dst_rows)
   {
       int dst_idx = y * dst_step + x + dst_offset;
       if (x > 0 && x < dst_cols - 1 && y > 0 && y < dst_rows - 2)
       {
           int src_idx = y * src_step + x + src_offset;
           int dx = (int)src[src_idx]*2 - src[src_idx - 1]          - src[src_idx + 1];
           int dy = (int)src[src_idx]*2 - src[src_idx - 1*src_step] - src[src_idx + 1*src_step];
           dst[dst_idx] = convert_uchar_sat(sqrt((float)(dx*dx + dy*dy)) * scale);
       }
       else
       {
           dst[dst_idx] = 0;
       }
   }
}";
                Ocl.Context context   = Context.Default;
                // dstT is substituted into the program at build time.
                String      buildOpts = String.Format("-D dstT={0}", Ocl.OclInvoke.TypeToString(DepthType.Cv8U));
                using (Ocl.ProgramSource ps = new Ocl.ProgramSource(sourceStr))
                    using (CvString errorMsg = new CvString())
                        using (Program p = context.GetProgram(ps, buildOpts, errorMsg))
                        {
                            // Retrieving the binary forces the program to be fully built.
                            byte[] binary = p.Binary;
                        }
            }
        }
예제 #25
0
 /// <summary>
 /// Add a new object to be tracked
 /// </summary>
 /// <param name="trackerType">The name of the tracker algorithm to be used</param>
 /// <param name="image">Input image</param>
 /// <param name="boundingBox">S rectangle represents ROI of the tracked object</param>
 /// <returns>True if successfully added</returns>
 public bool Add(String trackerType, Mat image, Rectangle boundingBox)
 {
    using (CvString csTrackerType = new CvString(trackerType))
    {
       return ContribInvoke.cveMultiTrackerAddType(_ptr, csTrackerType, image, ref boundingBox);
    }
 }
예제 #26
0
 /// <summary>
 /// Convert all weights of Caffe network to half precision floating point
 /// </summary>
 /// <param name="src">Path to origin model from Caffe framework contains single precision floating point weights (usually has .caffemodel extension).</param>
 /// <param name="dst">Path to destination model with updated weights.</param>
 public static void ShrinkCaffeModel(String src, String dst)
 {
     using (CvString csSrc = new CvString(src))
     using (CvString csDst = new CvString(dst))
     {
         cveDnnShrinkCaffeModel(csSrc, csDst);
     }
 }
예제 #27
0
 /// <summary>
 /// Save the statistic model to file
 /// </summary>
 /// <param name="model">The statistic model to persist.</param>
 /// <param name="fileName">The file name where this StatModel will be saved</param>
 public static void Save(this IStatModel model, String fileName)
 {
    using (CvString csFileName = new CvString(fileName))
    {
       MlInvoke.StatModelSave(model.StatModelPtr, csFileName);
    }
 }
예제 #28
0
 /// <summary>
 /// Sets the new value for the layer output blob.
 /// </summary>
 /// <param name="blob">Input blob</param>
 /// <param name="name">Descriptor of the updating layer output blob.</param>
 public void SetInput(Mat blob, String name)
 {
     using (CvString csName = new CvString(name))
     {
         DnnInvoke.cveDnnNetSetInput(_ptr, blob, csName);
     }
 }
예제 #29
0
 /// <summary>
 /// Create a GPU-accelerated video reader for the given video file.
 /// </summary>
 /// <param name="fileName">The name of the video file to read.</param>
 public CudaVideoReader(String fileName)
 {
    using (CvString csFileName = new CvString(fileName))
    {
       _ptr = CudaInvoke.cudaVideoReaderCreate(csFileName);
    }
 }
예제 #30
0
 /// <summary>
 /// Create a text representation for a binary network stored in protocol buffer format.
 /// </summary>
 /// <param name="model">A path to binary network.</param>
 /// <param name="output">A path to output text file to be created.</param>
 public static void WriteTextGraph(String model, String output)
 {
     using (CvString csModel = new CvString(model))
     using (CvString csOutput = new CvString(output))
     {
         cveDnnWriteTextGraph(csModel, csOutput);
     }
 }
 /// <summary>
 /// Create a GPU-accelerated video reader for the given video file.
 /// </summary>
 /// <param name="fileName">The name of the video file to read.</param>
 public CudaVideoReader(String fileName)
 {
     // Marshal the file name for the native call; the native side copies it.
     using (CvString s = new CvString(fileName))
         _ptr = CudaInvoke.cudaVideoReaderCreate(s);
 }
예제 #32
0
 /// <summary>
 /// Read the model from the given path.
 /// </summary>
 /// <param name="path">Path to the model file.</param>
 public void ReadModel(String path)
 {
     using (CvString csPath = new CvString(path))
     {
         DnnSuperresInvoke.cveDnnSuperResImplReadModel1(_ptr, csPath);
     }
 }
예제 #33
0
 /// <summary>
 /// Load the FaceRecognizer from the file
 /// </summary>
 /// <param name="fileName">The file where the FaceRecognizer will be loaded from</param>
 public void Load(String fileName)
 {
     using (CvString csFileName = new CvString(fileName))
     {
         ContribInvoke.CvFaceRecognizerLoad(_ptr, csFileName);
     }
 }
예제 #34
0
파일: ERFilter.cs 프로젝트: Delaley/emgucv
 /// <summary>
 /// Create an Extremal Region Filter for the 1st stage classifier of N&amp;M algorithm
 /// </summary>
 /// <param name="classifierFileName">The file name of the classifier</param>
 /// <param name="thresholdDelta">Threshold step in subsequent thresholds when extracting the component tree.</param>
 /// <param name="minArea">The minimum area (% of image size) allowed for retrieved ER's.</param>
 /// <param name="maxArea">The maximum area (% of image size) allowed for retrieved ER's.</param>
 /// <param name="minProbability">The minimum probability P(er|character) allowed for retrieved ER's.</param>
 /// <param name="nonMaxSuppression">Whether non-maximum suppression is done over the branch probabilities.</param>
 /// <param name="minProbabilityDiff">The minimum probability difference between local maxima and local minima ERs.</param>
 public ERFilterNM1(
    String classifierFileName,
    int thresholdDelta = 1,
    float minArea = 0.00025f,
    float maxArea = 0.13f,
    float minProbability = 0.4f,
    bool nonMaxSuppression = true,
    float minProbabilityDiff = 0.1f)
 {
    using (CvString csClassifierFileName = new CvString(classifierFileName))
    {
       _ptr = CvERFilterNM1Create(csClassifierFileName, thresholdDelta, minArea, maxArea, minProbability, nonMaxSuppression, minProbabilityDiff);
    }
 }
예제 #35
0
 /// <summary>
 /// Open or create an HDF5 file.
 /// </summary>
 /// <param name="fileName">The HDF5 file name.</param>
 public HDF5(String fileName)
 {
     using (CvString nativeFileName = new CvString(fileName))
     {
         _ptr = HdfInvoke.cveHDF5Create(nativeFileName, ref _sharedPtr);
     }
 }
예제 #36
0
 /// <summary>
 /// Write an attribute inside the root group.
 /// </summary>
 /// <param name="value">The attribute value to write.</param>
 /// <param name="atLabel">The attribute name.</param>
 public void AtWrite(IInputArray value, String atLabel)
 {
     using (InputArray iaVal = value.GetInputArray())
     using (CvString label = new CvString(atLabel))
     {
         HdfInvoke.cveHDF5AtWriteArray(_ptr, iaVal, label);
     }
 }
예제 #37
0
File: Net.cs Project: neutmute/emgucv
 /// <summary>
 /// Set a new value for the layer output blob.
 /// </summary>
 /// <param name="outputName">Descriptor of the layer output blob to update.</param>
 /// <param name="blob">The new blob value.</param>
 public void SetBlob(String outputName, Blob blob)
 {
    using (CvString nameStr = new CvString(outputName))
    {
       ContribInvoke.cveDnnNetSetBlob(_ptr, nameStr, blob);
    }
 }
예제 #38
0
File: ERFilter.cs Project: Delaley/emgucv
 /// <summary>
 /// Create an Extremal Region Filter for the 2nd stage classifier of the N&amp;M algorithm.
 /// </summary>
 /// <param name="classifierFileName">The file name of the classifier.</param>
 /// <param name="minProbability">The minimum probability P(er|character) allowed for retrieved ERs.</param>
 public ERFilterNM2(String classifierFileName, float minProbability = 0.3f)
 {
    using (CvString classifierName = new CvString(classifierFileName))
    {
       _ptr = CvERFilterNM2Create(classifierName, minProbability);
    }
 }
예제 #39
0
 /// <summary>
 /// Push a value into the standard vector.
 /// </summary>
 /// <param name="value">The value to be pushed to the vector.</param>
 /// <exception cref="ArgumentNullException">Thrown when <paramref name="value"/> is null.</exception>
 public void Push(CvString value)
 {
     // Fail fast with a descriptive exception instead of the
     // NullReferenceException that dereferencing value.Ptr would raise.
     if (value == null)
         throw new ArgumentNullException(nameof(value));
     VectorOfCvStringPush(_ptr, value.Ptr);
 }
예제 #40
0
 /// <summary>
 /// Constructor which immediately sets the desired model.
 /// </summary>
 /// <param name="algorithm">One of the supported model names: "edsr", "espcn", "fsrcnn", "lapsrn".</param>
 /// <param name="scale">Integer specifying the upscale factor.</param>
 public DnnSuperResImpl(String algorithm, int scale)
 {
     using (CvString algorithmStr = new CvString(algorithm))
     {
         _ptr = DnnSuperresInvoke.cveDnnSuperResImplCreate2(algorithmStr, scale);
     }
 }
예제 #41
0
 /// <summary>
 /// Creates a StructuredEdgeDetection, loading the model from the given file.
 /// </summary>
 /// <param name="model">Name of the file where the model is stored.</param>
 /// <param name="howToGetFeatures">Optional object inheriting from RFFeatureGetter. You need it only if you would like to train your own forest; pass NULL otherwise.</param>
 public StructuredEdgeDetection(String model, RFFeatureGetter howToGetFeatures)
 {
    using (CvString sModel = new CvString(model))
       _ptr = XimgprocInvoke.cveStructuredEdgeDetectionCreate(sModel, howToGetFeatures);
 }
예제 #42
0
 /// <summary>
 /// Set the desired model.
 /// </summary>
 /// <param name="algorithm">One of the supported model names: "edsr", "espcn", "fsrcnn", "lapsrn".</param>
 /// <param name="scale">Integer specifying the upscale factor.</param>
 public void SetModel(String algorithm, int scale)
 {
     using (CvString algorithmStr = new CvString(algorithm))
     {
         DnnSuperresInvoke.cveDnnSuperResImplSetModel(_ptr, algorithmStr, scale);
     }
 }
예제 #43
0
File: Tracker.cs Project: neutmute/emgucv
 /// <summary>
 /// Creates a tracker by its name.
 /// </summary>
 /// <param name="trackerType">Tracker type. The following detector types are supported: "MIL" – TrackerMIL; "BOOSTING" – TrackerBoosting</param>
 public Tracker(String trackerType)
 {
    using (CvString typeStr = new CvString(trackerType))
    {
       _ptr = ContribInvoke.cveTrackerCreate(typeStr);
    }
 }