/// <summary>
/// Given the input frame, create input blob, run the net and return the top-1 prediction.
/// </summary>
/// <param name="frame">The input image.</param>
/// <param name="classId">The index of the predicted class.</param>
/// <param name="conf">The confidence of the predicted class.</param>
public void Classify(IInputArray frame, out int classId, out float conf)
{
    classId = -1;
    conf = 0;
    using (InputArray iaFrame = frame.GetInputArray())
    {
        DnnInvoke.cveDnnClassificationModelClassify(_ptr, iaFrame, ref classId, ref conf);
    }
}
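// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the wrapper source). Shows how the
// Classify call above might be used. Assumptions: the method belongs to a
// ClassificationModel with a Net-based constructor analogous to the
// DetectionModel constructor below, the file paths are placeholders, and
// DnnInvoke.ReadNetFromONNX is the usual Emgu reader.
// ---------------------------------------------------------------------------
using System;
using Emgu.CV;
using Emgu.CV.Dnn;

Net net = DnnInvoke.ReadNetFromONNX("model.onnx");               // placeholder path
ClassificationModel classifier = new ClassificationModel(net);   // assumed Net-based constructor
using (Mat image = CvInvoke.Imread("input.jpg"))                 // placeholder path
{
    classifier.Classify(image, out int classId, out float confidence);
    Console.WriteLine($"Top-1 class: {classId}, confidence: {confidence:F3}");
}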
/// <summary>
/// Given the input frame, create input blob, run net and return result detections.
/// </summary>
/// <param name="frame">The input image.</param>
/// <param name="classIds">Class indexes in result detection.</param>
/// <param name="confidences">A set of corresponding confidences.</param>
/// <param name="boxes">A set of bounding boxes.</param>
/// <param name="confThreshold">A threshold used to filter boxes by confidence.</param>
/// <param name="nmsThreshold">A threshold used in non-maximum suppression.</param>
public void Detect(
    IInputArray frame,
    VectorOfInt classIds,
    VectorOfFloat confidences,
    VectorOfRect boxes,
    float confThreshold = 0.5f,
    float nmsThreshold = 0.5f)
{
    using (InputArray iaFrame = frame.GetInputArray())
    {
        DnnInvoke.cveDnnDetectionModelDetect(
            _ptr, iaFrame, classIds, confidences, boxes, confThreshold, nmsThreshold);
    }
}
/// <summary>
/// Create model from deep learning network.
/// </summary>
/// <param name="net">DNN Network</param>
public DetectionModel(Net net)
{
    _ptr = DnnInvoke.cveDnnDetectionModelCreate2(net, ref _model);
}
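// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the wrapper source). Combines the
// DetectionModel constructor above with Detect. Assumptions: the model/config
// and image paths are placeholders, DnnInvoke.ReadNetFromCaffe is the usual
// Emgu reader, and the VectorOf* containers come from Emgu.CV.Util.
// ---------------------------------------------------------------------------
using System;
using Emgu.CV;
using Emgu.CV.Dnn;
using Emgu.CV.Util;

Net net = DnnInvoke.ReadNetFromCaffe("deploy.prototxt", "weights.caffemodel");
DetectionModel detector = new DetectionModel(net);

using (Mat frame = CvInvoke.Imread("street.jpg"))
using (VectorOfInt classIds = new VectorOfInt())
using (VectorOfFloat confidences = new VectorOfFloat())
using (VectorOfRect boxes = new VectorOfRect())
{
    detector.Detect(frame, classIds, confidences, boxes, 0.5f, 0.4f);
    for (int i = 0; i < boxes.Size; i++)
        Console.WriteLine($"class {classIds[i]}, conf {confidences[i]:F2}, box {boxes[i]}");
}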
/// <summary>
/// Create Text Recognition model from deep learning network.
/// </summary>
/// <param name="net">Dnn network</param>
/// <remarks>Set DecodeType and Vocabulary after constructor to initialize the decoding method.</remarks>
public TextRecognitionModel(Net net)
{
    _ptr = DnnInvoke.cveDnnTextRecognitionModelCreate2(net, ref _model);
}
/// <summary>
/// Default constructor.
/// </summary>
public Net()
{
    _ptr = DnnInvoke.cveDnnNetCreate();
}
/// <summary>
/// Ask the network to make computations on a specific target device.
/// </summary>
/// <param name="value">The target device.</param>
public void SetPreferableTarget(Target value)
{
    DnnInvoke.cveNetSetPreferableTarget(_ptr, value);
}
/// <summary>
/// Constructs 4-dimensional blob (so-called batch) from image or array of images.
/// </summary>
/// <param name="image">2-dimensional multi-channel or 3-dimensional single-channel image (or array of images)</param>
public Blob(IInputArray image)
{
    using (InputArray iaImage = image.GetInputArray())
        _ptr = DnnInvoke.cveDnnBlobCreateFromInputArray(iaImage);
}
/// <summary>
/// Ask the network to use a specific computation backend where it is supported.
/// </summary>
/// <param name="value">The backend.</param>
public void SetPreferableBackend(Backend value)
{
    DnnInvoke.cveModelSetPreferableBackend(_model, value);
}
/// <summary>
/// Set the crop flag for frame.
/// </summary>
/// <param name="crop">Flag which indicates whether the image will be cropped after resize or not.</param>
public void SetInputCrop(bool crop)
{
    DnnInvoke.cveModelSetInputCrop(_model, crop);
}
/// <summary>
/// Dump net structure, hyperparameters, backend, target and fusion to a dot file.
/// </summary>
/// <param name="path">Path to output file with .dot extension</param>
public void DumpToFile(String path)
{
    using (CvString p = new CvString(path))
        DnnInvoke.cveDnnNetDumpToFile(_ptr, p);
}
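// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the wrapper source). Dumps a loaded
// network to Graphviz dot format. The paths are placeholders; OpenCV's own
// documentation suggests calling dump after an input has been set, and after a
// forward pass if the chosen backend/target and fusion should be reflected.
// ---------------------------------------------------------------------------
using Emgu.CV.Dnn;

using (Net net = DnnInvoke.ReadNetFromCaffe("deploy.prototxt", "weights.caffemodel"))
{
    net.DumpToFile("network.dot");   // render later with: dot -Tpng network.dot -o network.png
}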
/// <summary>
/// Adds loaded layers into the <paramref name="net"/> and sets connections between them.
/// </summary>
/// <param name="net">The net model</param>
public void PopulateNet(Net net)
{
    DnnInvoke.cveDnnImporterPopulateNet(_ptr, net);
}
/// <summary>
/// Runs forward pass for the whole network.
/// </summary>
public void Forward()
{
    DnnInvoke.cveDnnNetForward(_ptr);
}
/// <summary>
/// Sets the new value for the layer output blob.
/// </summary>
/// <param name="outputName">Descriptor of the updating layer output blob.</param>
/// <param name="blob">New blob</param>
public void SetBlob(String outputName, Blob blob)
{
    using (CvString outputNameStr = new CvString(outputName))
        DnnInvoke.cveDnnNetSetBlob(_ptr, outputNameStr, blob);
}
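// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the wrapper source). Ties the
// image-based Blob constructor above to SetBlob and Forward. Assumptions: the
// Net passed in has already been populated with layers (for example via the
// importer's PopulateNet shown earlier), and "data" as the input layer name is
// a common but model-specific convention.
// ---------------------------------------------------------------------------
using Emgu.CV;
using Emgu.CV.Dnn;

internal static class BlobExample
{
    public static void RunWithBlob(Net net, Mat image)
    {
        using (Blob inputBlob = new Blob(image))  // 4-D batch built from the image
        {
            net.SetBlob("data", inputBlob);       // bind the blob to the assumed "data" input layer
            net.Forward();                        // run the whole network
        }
    }
}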
/// <summary>
/// Create model from deep learning network.
/// </summary>
/// <param name="net">DNN Network</param>
public KeypointsModel(Net net)
{
    _ptr = DnnInvoke.cveDnnKeypointsModelCreate2(net, ref _model);
}
/// <summary>
/// Create model from deep learning network.
/// </summary>
/// <param name="network">DNN Network</param>
public Model(Net network)
{
    _ptr = DnnInvoke.cveModelCreateFromNet(network);
    _model = _ptr;
}
/// <summary>
/// Set mean value for frame.
/// </summary>
/// <param name="mean">Scalar with mean values which are subtracted from channels.</param>
public void SetInputMean(MCvScalar mean)
{
    DnnInvoke.cveModelSetInputMean(_model, ref mean);
}
/// <summary>
/// Set input size for frame.
/// </summary>
/// <param name="size">New input size.</param>
/// <remarks>If a dimension of the new size is less than or equal to zero, the frame size is not changed.</remarks>
public void SetInputSize(Size size)
{
    DnnInvoke.cveModelSetInputSize(_model, ref size);
}
/// <summary>
/// Sets the new input value for the network.
/// </summary>
/// <param name="blob">The new input blob.</param>
/// <param name="name">A name of the input layer.</param>
/// <param name="scaleFactor">An optional normalization scale.</param>
/// <param name="mean">Optional mean subtraction values.</param>
public void SetInput(IInputArray blob, String name = "", double scaleFactor = 1.0, MCvScalar mean = new MCvScalar())
{
    using (CvString nameStr = new CvString(name))
    using (InputArray iaBlob = blob.GetInputArray())
        DnnInvoke.cveDnnNetSetInput(_ptr, iaBlob, nameStr, scaleFactor, ref mean);
}
/// <summary>
/// Set the swapRB flag for frame.
/// </summary>
/// <param name="swapRB">Flag which indicates whether to swap the first and last channels.</param>
public void SetInputSwapRB(bool swapRB)
{
    DnnInvoke.cveModelSetInputSwapRB(_model, swapRB);
}
/// <summary>
/// Runs forward pass to compute outputs of layers listed in <paramref name="outBlobNames"/>.
/// </summary>
/// <param name="outputBlobs">Contains blobs for first outputs of specified layers.</param>
/// <param name="outBlobNames">Names of layers whose outputs are needed.</param>
public void Forward(IOutputArrayOfArrays outputBlobs, String[] outBlobNames)
{
    using (OutputArray oaOutputBlobs = outputBlobs.GetOutputArray())
    using (VectorOfCvString vcs = new VectorOfCvString(outBlobNames))
        DnnInvoke.cveDnnNetForward3(_ptr, oaOutputBlobs, vcs);
}
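// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the wrapper source). Shows the
// low-level Net workflow: build an input blob, feed it with SetInput, and
// collect a named output with the Forward overload above. Assumptions: paths,
// blob parameters and the "detection_out" layer name are placeholders;
// DnnInvoke.BlobFromImage is the usual Emgu helper; VectorOfMat is assumed to
// satisfy IOutputArrayOfArrays.
// ---------------------------------------------------------------------------
using System.Drawing;
using Emgu.CV;
using Emgu.CV.Dnn;
using Emgu.CV.Structure;
using Emgu.CV.Util;

using (Net net = DnnInvoke.ReadNetFromCaffe("deploy.prototxt", "weights.caffemodel"))
using (Mat frame = CvInvoke.Imread("input.jpg"))
using (Mat blob = DnnInvoke.BlobFromImage(
           frame, 1.0, new Size(300, 300), new MCvScalar(104, 117, 123), false, false))
using (VectorOfMat outputs = new VectorOfMat())
{
    net.SetInput(blob);                               // bind the blob to the default input
    net.Forward(outputs, new[] { "detection_out" });  // fetch the named output layer
    Mat result = outputs[0];                          // first requested output blob
}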
/// <summary>
/// Ask the network to make computations on a specific target device.
/// </summary>
/// <param name="value">The target device.</param>
public void SetPreferableTarget(Target value)
{
    DnnInvoke.cveModelSetPreferableTarget(_model, value);
}
/// <summary>
/// Create model from deep learning network.
/// </summary>
/// <param name="net">DNN Network</param>
public SegmentationModel(Net net)
{
    _ptr = DnnInvoke.cveDnnSegmentationModelCreate2(net, ref _model);
}
/// <summary>
/// Ask the network to use a specific computation backend where it is supported.
/// </summary>
/// <param name="value">The backend.</param>
public void SetPreferableBackend(Backend value)
{
    DnnInvoke.cveNetSetPreferableBackend(_ptr, value);
}
/// <summary>
/// Constructs a 4-dimensional blob (so-called batch) from image or array of images and assigns it to this blob.
/// </summary>
/// <param name="image">2-dimensional multi-channel or 3-dimensional single-channel image (or array of images)</param>
/// <param name="dstCn">Specifies the size of the second axis (channels) of the output blob.</param>
public void BatchFromImages(IInputArray image, int dstCn = -1)
{
    using (InputArray iaImage = image.GetInputArray())
        DnnInvoke.cveDnnBlobBatchFromImages(_ptr, iaImage, dstCn);
}
/// <summary>
/// Enables or disables layer fusion in the network.
/// </summary>
/// <param name="value">True to enable the fusion, false to disable it.</param>
public void EnableFusion(bool value)
{
    DnnInvoke.cveNetEnableFusion(_ptr, value);
}
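// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the wrapper source). Configures a
// Net before inference using the preferable backend/target and fusion calls in
// this section. Assumptions: the model path is a placeholder and
// Backend.Default / Target.Cpu are the standard Emgu.CV.Dnn enum members.
// ---------------------------------------------------------------------------
using Emgu.CV.Dnn;

using (Net net = DnnInvoke.ReadNetFromONNX("model.onnx"))
{
    net.SetPreferableBackend(Backend.Default);  // let OpenCV pick the implementation
    net.SetPreferableTarget(Target.Cpu);        // run on the CPU
    net.EnableFusion(true);                     // allow layer fusion optimizations
}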
/// <summary>
/// Returns a pointer to the blob element at the specified position.
/// </summary>
/// <param name="n">The batch (image) index.</param>
/// <param name="cn">The channel index.</param>
/// <param name="row">The row index.</param>
/// <param name="col">The column index.</param>
/// <returns>Pointer to the blob element at the specified position.</returns>
public IntPtr GetPtr(int n = 0, int cn = 0, int row = 0, int col = 0)
{
    return DnnInvoke.cveDnnBlobGetPtr(_ptr, n, cn, row, col);
}
/// <summary>
/// Sets the new input value for the network.
/// </summary>
/// <param name="blob">The new input blob.</param>
/// <param name="name">A name of the input layer.</param>
public void SetInput(Mat blob, String name)
{
    using (CvString nameStr = new CvString(name))
        DnnInvoke.cveDnnNetSetInput(_ptr, blob, nameStr);
}
/// <summary>
/// Default constructor. Creates an empty blob.
/// </summary>
public Blob()
{
    _ptr = DnnInvoke.cveDnnBlobCreate();
}
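// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the wrapper source). Combines the
// default Blob constructor with BatchFromImages and GetPtr from this section.
// Assumptions: the image path is a placeholder, and reading the returned
// IntPtr as a 32-bit float is an assumption about the blob's element type.
// ---------------------------------------------------------------------------
using System;
using System.Runtime.InteropServices;
using Emgu.CV;
using Emgu.CV.Dnn;

using (Mat image = CvInvoke.Imread("input.jpg"))
using (Blob blob = new Blob())
{
    blob.BatchFromImages(image);                      // build a 4-D batch from the single image
    IntPtr p = blob.GetPtr(0, 0, 0, 0);               // element at batch 0, channel 0, row 0, col 0
    float first = Marshal.PtrToStructure<float>(p);   // read one element (assumed float data)
    Console.WriteLine(first);
}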
/// <summary>
/// Set the decoding method options for "CTC-prefix-beam-search" decode usage.
/// </summary>
/// <param name="beamSize">Beam size for search</param>
/// <param name="vocPruneSize">Parameter to optimize big vocabulary search: only the top <paramref name="vocPruneSize"/> tokens are taken in each search step. A value less than or equal to 0 disables this pruning.</param>
public void SetDecodeOptsCTCPrefixBeamSearch(int beamSize, int vocPruneSize)
{
    DnnInvoke.cveDnnTextRecognitionModelSetDecodeOptsCTCPrefixBeamSearch(_ptr, beamSize, vocPruneSize);
}
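// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the wrapper source). Shows the text
// recognition flow suggested by the constructor's remark earlier in this
// section. Assumptions: the model path and vocabulary are placeholders;
// DecodeType and Vocabulary follow that remark; the Recognize call is assumed
// to mirror OpenCV's TextRecognitionModel::recognize.
// ---------------------------------------------------------------------------
using System;
using Emgu.CV;
using Emgu.CV.Dnn;

Net net = DnnInvoke.ReadNetFromONNX("crnn.onnx");
TextRecognitionModel recognizer = new TextRecognitionModel(net);
recognizer.DecodeType = "CTC-prefix-beam-search";
recognizer.Vocabulary = new[] { "0", "1", "2", "a", "b", "c" };   // placeholder vocabulary
recognizer.SetDecodeOptsCTCPrefixBeamSearch(10, 0);               // beam size 10, pruning disabled

using (Mat cropped = CvInvoke.Imread("word.png"))
{
    string text = recognizer.Recognize(cropped);   // assumed recognize wrapper
    Console.WriteLine(text);
}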
/// <summary>
/// Set scalefactor value for frame.
/// </summary>
/// <param name="scale">Multiplier for frame values.</param>
public void SetInputScale(double scale)
{
    DnnInvoke.cveModelSetInputScale(_model, scale);
}
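// ---------------------------------------------------------------------------
// Usage sketch (illustrative; not part of the wrapper source). Configures the
// per-frame preprocessing on a Model-based class using the setters in this
// section. Assumptions: the model path and the concrete size/mean/scale values
// are placeholders that depend on how the network was trained.
// ---------------------------------------------------------------------------
using System.Drawing;
using Emgu.CV.Dnn;
using Emgu.CV.Structure;

Net net = DnnInvoke.ReadNetFromONNX("model.onnx");
Model model = new Model(net);
model.SetInputSize(new Size(224, 224));            // resize frames to the network input size
model.SetInputMean(new MCvScalar(104, 117, 123));  // per-channel mean subtraction
model.SetInputScale(1.0 / 255);                    // multiply pixel values by 1/255
model.SetInputSwapRB(true);                        // swap first and last channels (BGR <-> RGB)
model.SetInputCrop(false);                         // do not crop after resize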