//javadoc: CascadeClassifier::detectMultiScale3(image, objects, rejectLevels, levelWeights)
public void detectMultiScale3(Mat image, MatOfRect objects, MatOfInt rejectLevels, MatOfDouble levelWeights)
{
    ThrowIfDisposed();
    if (image != null) { image.ThrowIfDisposed(); }
    if (objects != null) { objects.ThrowIfDisposed(); }
    if (rejectLevels != null) { rejectLevels.ThrowIfDisposed(); }
    if (levelWeights != null) { levelWeights.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    objdetect_CascadeClassifier_detectMultiScale3_16(nativeObj, image.nativeObj, objects.nativeObj, rejectLevels.nativeObj, levelWeights.nativeObj);
#endif
}
//javadoc: CascadeClassifier::detectMultiScale2(image, objects, numDetections)
public void detectMultiScale2(Mat image, MatOfRect objects, MatOfInt numDetections)
{
    ThrowIfDisposed();
    if (image != null) { image.ThrowIfDisposed(); }
    if (objects != null) { objects.ThrowIfDisposed(); }
    if (numDetections != null) { numDetections.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    objdetect_CascadeClassifier_detectMultiScale2_15(nativeObj, image.nativeObj, objects.nativeObj, numDetections.nativeObj);
#endif
}
//javadoc: detectRegions(image, er_filter1, er_filter2, groups_rects)
public static void detectRegions(Mat image, ERFilter er_filter1, ERFilter er_filter2, MatOfRect groups_rects)
{
    if (image != null) { image.ThrowIfDisposed(); }
    if (er_filter1 != null) { er_filter1.ThrowIfDisposed(); }
    if (er_filter2 != null) { er_filter2.ThrowIfDisposed(); }
    if (groups_rects != null) { groups_rects.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    text_Text_detectRegions_13(image.nativeObj, er_filter1.getNativeObjAddr(), er_filter2.getNativeObjAddr(), groups_rects.nativeObj);
#endif
}
/**
 * Given the {code input} frame, creates the input blob, runs the net, and returns the resulting detections.
 * param classIds Class indexes in result detection.
 * param confidences A set of corresponding confidences.
 * param boxes A set of bounding boxes.
 * param frame automatically generated
 */
public void detect(Mat frame, MatOfInt classIds, MatOfFloat confidences, MatOfRect boxes)
{
    ThrowIfDisposed();
    if (frame != null) { frame.ThrowIfDisposed(); }
    if (classIds != null) { classIds.ThrowIfDisposed(); }
    if (confidences != null) { confidences.ThrowIfDisposed(); }
    if (boxes != null) { boxes.ThrowIfDisposed(); }

    dnn_DetectionModel_detect_12(nativeObj, frame.nativeObj, classIds.nativeObj, confidences.nativeObj, boxes.nativeObj);
}
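/*
 * Usage sketch (illustrative, not part of the generated bindings): a minimal call sequence for
 * detect(), assuming a DetectionModel built from placeholder model/config file names and
 * configured via setInputParams; tune the input parameters to the actual network.
 * <code>
 * DetectionModel model = new DetectionModel("yolov4.weights", "yolov4.cfg");
 * model.setInputParams(1 / 255.0, new Size(416, 416), new Scalar(0), true, false);
 * MatOfInt classIds = new MatOfInt();
 * MatOfFloat confidences = new MatOfFloat();
 * MatOfRect boxes = new MatOfRect();
 * model.detect(frame, classIds, confidences, boxes);
 * </code>
 */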
//javadoc: erGrouping(image, channel, regions, groups_rects)
public static void erGrouping(Mat image, Mat channel, List<MatOfPoint> regions, MatOfRect groups_rects)
{
    if (image != null) { image.ThrowIfDisposed(); }
    if (channel != null) { channel.ThrowIfDisposed(); }
    if (groups_rects != null) { groups_rects.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    List<Mat> regions_tmplm = new List<Mat>((regions != null) ? regions.Count : 0);
    Mat regions_mat = Converters.vector_vector_Point_to_Mat(regions, regions_tmplm);
    text_Text_erGrouping_13(image.nativeObj, channel.nativeObj, regions_mat.nativeObj, groups_rects.nativeObj);
#endif
}
//
// C++: void cv::text::TextDetector::detect(Mat inputImage, vector_Rect& Bbox, vector_float& confidence)
//

//javadoc: TextDetector::detect(inputImage, Bbox, confidence)
public virtual void detect(Mat inputImage, MatOfRect Bbox, MatOfFloat confidence)
{
    ThrowIfDisposed();
    if (inputImage != null) { inputImage.ThrowIfDisposed(); }
    if (Bbox != null) { Bbox.ThrowIfDisposed(); }
    if (confidence != null) { confidence.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    text_TextDetector_detect_10(nativeObj, inputImage.nativeObj, Bbox.nativeObj, confidence.nativeObj);
#endif
}
//javadoc: NMSBoxes(bboxes, scores, score_threshold, nms_threshold, indices)
public static void NMSBoxes(MatOfRect bboxes, MatOfFloat scores, float score_threshold, float nms_threshold, MatOfInt indices)
{
    if (bboxes != null) { bboxes.ThrowIfDisposed(); }
    if (scores != null) { scores.ThrowIfDisposed(); }
    if (indices != null) { indices.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    dnn_Dnn_NMSBoxes_12(bboxes.nativeObj, scores.nativeObj, score_threshold, nms_threshold, indices.nativeObj);
#endif
}
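/*
 * Usage sketch (illustrative): non-maximum suppression over raw detections; the 0.5f/0.4f
 * thresholds are example values, and bboxes/scores are assumed to be filled by a detector.
 * <code>
 * MatOfInt indices = new MatOfInt();
 * Dnn.NMSBoxes(bboxes, scores, 0.5f, 0.4f, indices);
 * Rect[] all = bboxes.toArray();
 * foreach (int idx in indices.toArray())
 * {
 *     Rect kept = all[idx]; // detection surviving suppression
 * }
 * </code>
 */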
//javadoc: CascadeClassifier::detectMultiScale3(image, objects, rejectLevels, levelWeights, scaleFactor, minNeighbors, flags, minSize)
public void detectMultiScale3(Mat image, MatOfRect objects, MatOfInt rejectLevels, MatOfDouble levelWeights, double scaleFactor, int minNeighbors, int flags, Size minSize)
{
    ThrowIfDisposed();
    if (image != null) { image.ThrowIfDisposed(); }
    if (objects != null) { objects.ThrowIfDisposed(); }
    if (rejectLevels != null) { rejectLevels.ThrowIfDisposed(); }
    if (levelWeights != null) { levelWeights.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    objdetect_CascadeClassifier_detectMultiScale3_12(nativeObj, image.nativeObj, objects.nativeObj, rejectLevels.nativeObj, levelWeights.nativeObj, scaleFactor, minNeighbors, flags, minSize.width, minSize.height);
#endif
}
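/*
 * Usage sketch (illustrative): face detection with per-detection reject levels and weights;
 * the cascade file name is a placeholder and the tuning values are typical, not prescribed.
 * <code>
 * CascadeClassifier cascade = new CascadeClassifier("haarcascade_frontalface_alt.xml");
 * MatOfRect faces = new MatOfRect();
 * MatOfInt rejectLevels = new MatOfInt();
 * MatOfDouble levelWeights = new MatOfDouble();
 * cascade.detectMultiScale3(gray, faces, rejectLevels, levelWeights, 1.1, 3, 0, new Size(30, 30));
 * </code>
 */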
//javadoc: CascadeClassifier::detectMultiScale2(image, objects, numDetections, scaleFactor, minNeighbors, flags, minSize)
public void detectMultiScale2(Mat image, MatOfRect objects, MatOfInt numDetections, double scaleFactor, int minNeighbors, int flags, Size minSize)
{
    ThrowIfDisposed();
    if (image != null) { image.ThrowIfDisposed(); }
    if (objects != null) { objects.ThrowIfDisposed(); }
    if (numDetections != null) { numDetections.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    objdetect_CascadeClassifier_detectMultiScale2_11(nativeObj, image.nativeObj, objects.nativeObj, numDetections.nativeObj, scaleFactor, minNeighbors, flags, minSize.width, minSize.height);
#endif
}
//
// C++: void cv::ximgproc::segmentation::SelectiveSearchSegmentation::process(vector_Rect& rects)
//

/**
 * Based on all images, graph segmentations and strategies, computes all possible rects and returns them.
 * param rects The list of rects. The first ones are more relevant than the last ones.
 */
public void process(MatOfRect rects)
{
    ThrowIfDisposed();
    if (rects != null) { rects.ThrowIfDisposed(); }

    ximgproc_SelectiveSearchSegmentation_process_10(nativeObj, rects.nativeObj);
}
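/*
 * Usage sketch (illustrative): generating region proposals, assuming the instance comes from
 * Ximgproc.createSelectiveSearchSegmentation() and image is a color Mat.
 * <code>
 * SelectiveSearchSegmentation ss = Ximgproc.createSelectiveSearchSegmentation();
 * ss.setBaseImage(image);
 * ss.switchToSelectiveSearchFast();
 * MatOfRect proposals = new MatOfRect();
 * ss.process(proposals);
 * </code>
 */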
/**
 * Applies the Stroke Width Transform operator followed by filtering of connected components of similar Stroke Widths to return letter candidates. It also chains them by proximity and size, saving the result in chainBBs.
 * param input the input image with 3 channels.
 * param result a vector of resulting bounding boxes where the probability of finding text is high.
 * param dark_on_light a boolean value signifying whether the text is darker or lighter than the background; it reverses the gradient obtained from the Scharr operator and significantly affects the result.
 */
public static void detectTextSWT(Mat input, MatOfRect result, bool dark_on_light)
{
    if (input != null) { input.ThrowIfDisposed(); }
    if (result != null) { result.ThrowIfDisposed(); }

    text_Text_detectTextSWT_12(input.nativeObj, result.nativeObj, dark_on_light);
}
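/*
 * Usage sketch (illustrative): letter-candidate detection on a 3-channel image, with
 * dark_on_light set for dark text on a light background.
 * <code>
 * MatOfRect letters = new MatOfRect();
 * Text.detectTextSWT(bgrImage, letters, true);
 * </code>
 */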
public static void groupRectangles(MatOfRect rectList, MatOfInt weights, int groupThreshold)
{
    if (rectList != null) { rectList.ThrowIfDisposed(); }
    if (weights != null) { weights.ThrowIfDisposed(); }

    objdetect_Objdetect_groupRectangles_11(rectList.nativeObj, weights.nativeObj, groupThreshold);
}
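/*
 * Usage sketch (illustrative): merging overlapping candidate rectangles; rectList is modified
 * in place, and weights receives the per-cluster rectangle counts.
 * <code>
 * MatOfInt weights = new MatOfInt();
 * Objdetect.groupRectangles(candidates, weights, 2);
 * </code>
 */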
//
// C++: void cv::ximgproc::segmentation::SelectiveSearchSegmentation::process(vector_Rect& rects)
//

//javadoc: SelectiveSearchSegmentation::process(rects)
public void process(MatOfRect rects)
{
    ThrowIfDisposed();
    if (rects != null) { rects.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    ximgproc_SelectiveSearchSegmentation_process_10(nativeObj, rects.nativeObj);
#endif
}
//
// C++: void cv::MSER::detectRegions(Mat image, vector_vector_Point& msers, vector_Rect& bboxes)
//

/**
 * Detect MSER regions
 *
 * param image input image (8UC1, 8UC3 or 8UC4, must be at least 3x3)
 * param msers resulting list of point sets
 * param bboxes resulting bounding boxes
 */
public void detectRegions(Mat image, List<MatOfPoint> msers, MatOfRect bboxes)
{
    ThrowIfDisposed();
    if (image != null) { image.ThrowIfDisposed(); }
    if (bboxes != null) { bboxes.ThrowIfDisposed(); }

    Mat msers_mat = new Mat();
    features2d_MSER_detectRegions_10(nativeObj, image.nativeObj, msers_mat.nativeObj, bboxes.nativeObj);
    Converters.Mat_to_vector_vector_Point(msers_mat, msers);
    msers_mat.release();
}
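/*
 * Usage sketch (illustrative): extracting MSER point sets and bounding boxes from a grayscale
 * image via the features2d factory method MSER.create().
 * <code>
 * MSER mser = MSER.create();
 * List<MatOfPoint> msers = new List<MatOfPoint>();
 * MatOfRect bboxes = new MatOfRect();
 * mser.detectRegions(gray, msers, bboxes);
 * </code>
 */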
/**
 * Returns array containing proposal boxes.
 *
 * param edge_map edge image.
 * param orientation_map orientation map.
 * param boxes proposal boxes.
 */
public void getBoundingBoxes(Mat edge_map, Mat orientation_map, MatOfRect boxes)
{
    ThrowIfDisposed();
    if (edge_map != null) { edge_map.ThrowIfDisposed(); }
    if (orientation_map != null) { orientation_map.ThrowIfDisposed(); }
    if (boxes != null) { boxes.ThrowIfDisposed(); }

    ximgproc_EdgeBoxes_getBoundingBoxes_11(nativeObj, edge_map.nativeObj, orientation_map.nativeObj, boxes.nativeObj);
}
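/*
 * Usage sketch (illustrative): the edge and orientation maps are typically produced by a
 * StructuredEdgeDetection model (the model file name below is a placeholder; the source image
 * is assumed to be CV_32FC3 with values in [0, 1]).
 * <code>
 * StructuredEdgeDetection sed = Ximgproc.createStructuredEdgeDetection("model.yml");
 * Mat edges = new Mat();
 * Mat orientation = new Mat();
 * sed.detectEdges(srcFloat, edges);
 * sed.computeOrientation(edges, orientation);
 * EdgeBoxes edgeBoxes = Ximgproc.createEdgeBoxes();
 * MatOfRect boxes = new MatOfRect();
 * edgeBoxes.getBoundingBoxes(edges, orientation, boxes);
 * </code>
 */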
//
// C++: bool cv::face::Facemark::fit(Mat image, vector_Rect faces, vector_vector_Point2f& landmarks)
//

/**
 * Detect facial landmarks from an image.
 * param image Input image.
 * param faces Regions of interest of the detected faces, each stored in a cv::Rect container.
 * param landmarks The detected landmark points for each face.
 *
 * <B>Example of usage</B>
 * <code>
 * Mat image = imread("image.jpg");
 * std::vector<Rect> faces;
 * std::vector<std::vector<Point2f> > landmarks;
 * facemark->fit(image, faces, landmarks);
 * </code>
 * return automatically generated
 */
public bool fit(Mat image, MatOfRect faces, List<MatOfPoint2f> landmarks)
{
    ThrowIfDisposed();
    if (image != null) { image.ThrowIfDisposed(); }
    if (faces != null) { faces.ThrowIfDisposed(); }

    Mat landmarks_mat = new Mat();
    bool retVal = face_Facemark_fit_10(nativeObj, image.nativeObj, faces.nativeObj, landmarks_mat.nativeObj);
    Converters.Mat_to_vector_vector_Point2f(landmarks_mat, landmarks);
    landmarks_mat.release();
    return retVal;
}
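/*
 * C# usage sketch (illustrative): the OpenCVForUnity equivalent of the C++ example above,
 * assuming an LBF facemark model file (the file name is a placeholder) and faces detected
 * beforehand, e.g. with a CascadeClassifier.
 * <code>
 * Facemark facemark = Face.createFacemarkLBF();
 * facemark.loadModel("lbfmodel.yaml");
 * List<MatOfPoint2f> landmarks = new List<MatOfPoint2f>();
 * facemark.fit(image, faces, landmarks);
 * </code>
 */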
/**
 * Find groups of Extremal Regions that are organized as text blocks.
 *
 * param regions Vector of ER's retrieved from the ERFilter algorithm from each channel.
 * param groups_rects The output of the algorithm, stored in this parameter as a list of rectangles.
 * param image automatically generated
 * param channel automatically generated
 */
public static void erGrouping(Mat image, Mat channel, List<MatOfPoint> regions, MatOfRect groups_rects)
{
    if (image != null) { image.ThrowIfDisposed(); }
    if (channel != null) { channel.ThrowIfDisposed(); }
    if (groups_rects != null) { groups_rects.ThrowIfDisposed(); }

    List<Mat> regions_tmplm = new List<Mat>((regions != null) ? regions.Count : 0);
    Mat regions_mat = Converters.vector_vector_Point_to_Mat(regions, regions_tmplm);
    text_Text_erGrouping_13(image.nativeObj, channel.nativeObj, regions_mat.nativeObj, groups_rects.nativeObj);
}
//javadoc: groupRectangles(rectList, weights, groupThreshold)
public static void groupRectangles(MatOfRect rectList, MatOfInt weights, int groupThreshold)
{
    if (rectList != null) { rectList.ThrowIfDisposed(); }
    if (weights != null) { weights.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    objdetect_Objdetect_groupRectangles_11(rectList.nativeObj, weights.nativeObj, groupThreshold);
#endif
}
//
// C++: void cv::text::TextDetector::detect(Mat inputImage, vector_Rect& Bbox, vector_float& confidence)
//

/**
 * Method that provides a quick and simple interface to detect text inside an image.
 *
 * param inputImage an image to process
 * param Bbox a vector of Rect that will store the detected word bounding box
 * param confidence a vector of float that will be updated with the confidence the classifier has for the selected bounding box
 */
public virtual void detect(Mat inputImage, MatOfRect Bbox, MatOfFloat confidence)
{
    ThrowIfDisposed();
    if (inputImage != null) { inputImage.ThrowIfDisposed(); }
    if (Bbox != null) { Bbox.ThrowIfDisposed(); }
    if (confidence != null) { confidence.ThrowIfDisposed(); }

    text_TextDetector_detect_10(nativeObj, inputImage.nativeObj, Bbox.nativeObj, confidence.nativeObj);
}
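/*
 * Usage sketch (illustrative): the concrete implementation is typically TextDetectorCNN; the
 * model architecture/weights file names below are placeholders.
 * <code>
 * TextDetectorCNN detector = TextDetectorCNN.create("textbox.prototxt", "TextBoxes_icdar13.caffemodel");
 * MatOfRect bbox = new MatOfRect();
 * MatOfFloat confidence = new MatOfFloat();
 * detector.detect(image, bbox, confidence);
 * </code>
 */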
//javadoc: CascadeClassifier::detectMultiScale(image, objects, scaleFactor)
public void detectMultiScale(Mat image, MatOfRect objects, double scaleFactor)
{
    ThrowIfDisposed();
    if (image != null) { image.ThrowIfDisposed(); }
    if (objects != null) { objects.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    objdetect_CascadeClassifier_detectMultiScale_14(nativeObj, image.nativeObj, objects.nativeObj, scaleFactor);
#endif
}
//
// C++: bool cv::face::Facemark::fit(Mat image, vector_Rect faces, vector_vector_Point2f& landmarks)
//

//javadoc: Facemark::fit(image, faces, landmarks)
public bool fit(Mat image, MatOfRect faces, List<MatOfPoint2f> landmarks)
{
    ThrowIfDisposed();
    if (image != null) { image.ThrowIfDisposed(); }
    if (faces != null) { faces.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    Mat landmarks_mat = new Mat();
    bool retVal = face_Facemark_fit_10(nativeObj, image.nativeObj, faces.nativeObj, landmarks_mat.nativeObj);
    Converters.Mat_to_vector_vector_Point2f(landmarks_mat, landmarks);
    landmarks_mat.release();
    return retVal;
#else
    return false;
#endif
}
//
// C++: void cv::MSER::detectRegions(Mat image, vector_vector_Point& msers, vector_Rect& bboxes)
//

//javadoc: MSER::detectRegions(image, msers, bboxes)
public void detectRegions(Mat image, List<MatOfPoint> msers, MatOfRect bboxes)
{
    ThrowIfDisposed();
    if (image != null) { image.ThrowIfDisposed(); }
    if (bboxes != null) { bboxes.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    Mat msers_mat = new Mat();
    features2d_MSER_detectRegions_10(nativeObj, image.nativeObj, msers_mat.nativeObj, bboxes.nativeObj);
    Converters.Mat_to_vector_vector_Point(msers_mat, msers);
    msers_mat.release();
#endif
}
//
// C++: void cv::text::detectTextSWT(Mat input, vector_Rect& result, bool dark_on_light, Mat& draw = Mat(), Mat& chainBBs = Mat())
//

/**
 * Applies the Stroke Width Transform operator followed by filtering of connected components of similar Stroke Widths to return letter candidates. It also chains them by proximity and size, saving the result in chainBBs.
 * param input the input image with 3 channels.
 * param result a vector of resulting bounding boxes where the probability of finding text is high.
 * param dark_on_light a boolean value signifying whether the text is darker or lighter than the background; it reverses the gradient obtained from the Scharr operator and significantly affects the result.
 * param draw an optional Mat of type CV_8UC3 which visualises the detected letters using bounding boxes.
 * param chainBBs an optional parameter which chains the letter candidates according to heuristics in the paper and returns all possible regions where text is likely to occur.
 */
public static void detectTextSWT(Mat input, MatOfRect result, bool dark_on_light, Mat draw, Mat chainBBs)
{
    if (input != null) { input.ThrowIfDisposed(); }
    if (result != null) { result.ThrowIfDisposed(); }
    if (draw != null) { draw.ThrowIfDisposed(); }
    if (chainBBs != null) { chainBBs.ThrowIfDisposed(); }

    text_Text_detectTextSWT_10(input.nativeObj, result.nativeObj, dark_on_light, draw.nativeObj, chainBBs.nativeObj);
}
/**
 * Extracts text regions from image.
 *
 * param image Source image where text blocks need to be extracted from. Should be CV_8UC3 (color).
 * param er_filter1 Extremal Region Filter for the 1st stage classifier of N&M algorithm CITE: Neumann12
 * param er_filter2 Extremal Region Filter for the 2nd stage classifier of N&M algorithm CITE: Neumann12
 * param groups_rects Output list of rectangle blocks with text
 */
public static void detectRegions(Mat image, ERFilter er_filter1, ERFilter er_filter2, MatOfRect groups_rects)
{
    if (image != null) { image.ThrowIfDisposed(); }
    if (er_filter1 != null) { er_filter1.ThrowIfDisposed(); }
    if (er_filter2 != null) { er_filter2.ThrowIfDisposed(); }
    if (groups_rects != null) { groups_rects.ThrowIfDisposed(); }

    text_Text_detectRegions_13(image.nativeObj, er_filter1.getNativeObjAddr(), er_filter2.getNativeObjAddr(), groups_rects.nativeObj);
}
//
// C++: void cv::text::detectRegions(Mat image, Ptr_ERFilter er_filter1, Ptr_ERFilter er_filter2, vector_Rect& groups_rects, int method = ERGROUPING_ORIENTATION_HORIZ, String filename = String(), float minProbability = (float)0.5)
//

/**
 * Extracts text regions from image.
 *
 * param image Source image where text blocks need to be extracted from. Should be CV_8UC3 (color).
 * param er_filter1 Extremal Region Filter for the 1st stage classifier of N&M algorithm CITE: Neumann12
 * param er_filter2 Extremal Region Filter for the 2nd stage classifier of N&M algorithm CITE: Neumann12
 * param groups_rects Output list of rectangle blocks with text
 * param method Grouping method (see text::erGrouping_Modes). Can be one of ERGROUPING_ORIENTATION_HORIZ, ERGROUPING_ORIENTATION_ANY.
 * param filename The XML or YAML file with the classifier model (e.g. samples/trained_classifier_erGrouping.xml). Only to use when grouping method is ERGROUPING_ORIENTATION_ANY.
 * param minProbability The minimum probability for accepting a group. Only to use when grouping method is ERGROUPING_ORIENTATION_ANY.
 */
public static void detectRegions(Mat image, ERFilter er_filter1, ERFilter er_filter2, MatOfRect groups_rects, int method, string filename, float minProbability)
{
    if (image != null) { image.ThrowIfDisposed(); }
    if (er_filter1 != null) { er_filter1.ThrowIfDisposed(); }
    if (er_filter2 != null) { er_filter2.ThrowIfDisposed(); }
    if (groups_rects != null) { groups_rects.ThrowIfDisposed(); }

    text_Text_detectRegions_10(image.nativeObj, er_filter1.getNativeObjAddr(), er_filter2.getNativeObjAddr(), groups_rects.nativeObj, method, filename, minProbability);
}
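/*
 * Usage sketch (illustrative): a two-stage N&M pipeline; the classifier file names come from
 * the OpenCV text samples and are placeholders here, as is the 0.5f minimum probability.
 * <code>
 * ERFilter er1 = Text.createERFilterNM1(Text.loadClassifierNM1("trained_classifierNM1.xml"));
 * ERFilter er2 = Text.createERFilterNM2(Text.loadClassifierNM2("trained_classifierNM2.xml"));
 * MatOfRect rects = new MatOfRect();
 * Text.detectRegions(image, er1, er2, rects, Text.ERGROUPING_ORIENTATION_HORIZ, "", 0.5f);
 * </code>
 */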
//
// C++: void cv::motempl::segmentMotion(Mat mhi, Mat& segmask, vector_Rect& boundingRects, double timestamp, double segThresh)
//

//javadoc: segmentMotion(mhi, segmask, boundingRects, timestamp, segThresh)
public static void segmentMotion(Mat mhi, Mat segmask, MatOfRect boundingRects, double timestamp, double segThresh)
{
    if (mhi != null) { mhi.ThrowIfDisposed(); }
    if (segmask != null) { segmask.ThrowIfDisposed(); }
    if (boundingRects != null) { boundingRects.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    optflow_Optflow_segmentMotion_10(mhi.nativeObj, segmask.nativeObj, boundingRects.nativeObj, timestamp, segThresh);
#endif
}
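/*
 * Usage sketch (illustrative): splitting a motion history image (MHI) into per-object motion
 * segments; mhi and timestamp are assumed to come from a motion-templates pipeline, and the
 * 0.5 segmentation threshold is an example value.
 * <code>
 * Mat segmask = new Mat();
 * MatOfRect boundingRects = new MatOfRect();
 * Optflow.segmentMotion(mhi, segmask, boundingRects, timestamp, 0.5);
 * </code>
 */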
//
// C++: void cv::ximgproc::EdgeBoxes::getBoundingBoxes(Mat edge_map, Mat orientation_map, vector_Rect& boxes)
//

//javadoc: EdgeBoxes::getBoundingBoxes(edge_map, orientation_map, boxes)
public void getBoundingBoxes(Mat edge_map, Mat orientation_map, MatOfRect boxes)
{
    ThrowIfDisposed();
    if (edge_map != null) { edge_map.ThrowIfDisposed(); }
    if (orientation_map != null) { orientation_map.ThrowIfDisposed(); }
    if (boxes != null) { boxes.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    ximgproc_EdgeBoxes_getBoundingBoxes_10(nativeObj, edge_map.nativeObj, orientation_map.nativeObj, boxes.nativeObj);
#endif
}