//javadoc: CascadeClassifier::detectMultiScale3(image, objects, rejectLevels, levelWeights)

// Runs the cascade detector on |image|, filling |objects| with candidate
// rectangles and |rejectLevels|/|levelWeights| with per-candidate stage
// indices and confidence weights (presumably — semantics come from the native
// objdetect wrapper; confirm against the OpenCV C++ docs).
// On platforms without the native plugin this is a no-op.
public void detectMultiScale3(Mat image, MatOfRect objects, MatOfInt rejectLevels, MatOfDouble levelWeights)
{
    ThrowIfDisposed();

    // Fail fast if any Mat argument has already been disposed.
    if (image != null) { image.ThrowIfDisposed(); }
    if (objects != null) { objects.ThrowIfDisposed(); }
    if (rejectLevels != null) { rejectLevels.ThrowIfDisposed(); }
    if (levelWeights != null) { levelWeights.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // The MatOf* wrappers are Mats; pass their native handles straight through.
    Mat objectsAsMat = objects;
    Mat rejectLevelsAsMat = rejectLevels;
    Mat levelWeightsAsMat = levelWeights;
    objdetect_CascadeClassifier_detectMultiScale3_16(nativeObj, image.nativeObj, objectsAsMat.nativeObj, rejectLevelsAsMat.nativeObj, levelWeightsAsMat.nativeObj);
    return;
#else
    return;
#endif
}
//javadoc: CascadeClassifier::detectMultiScale3(image, objects, rejectLevels, levelWeights, scaleFactor, minNeighbors, flags, minSize)

// Overload of detectMultiScale3 that also forwards the pyramid scale factor,
// the minimum-neighbors threshold, the flags word, and the minimum object
// size (passed to the native layer as width/height doubles).
// On platforms without the native plugin this is a no-op.
public void detectMultiScale3(Mat image, MatOfRect objects, MatOfInt rejectLevels, MatOfDouble levelWeights, double scaleFactor, int minNeighbors, int flags, Size minSize)
{
    ThrowIfDisposed();

    // Fail fast if any Mat argument has already been disposed.
    if (image != null) { image.ThrowIfDisposed(); }
    if (objects != null) { objects.ThrowIfDisposed(); }
    if (rejectLevels != null) { rejectLevels.ThrowIfDisposed(); }
    if (levelWeights != null) { levelWeights.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // The MatOf* wrappers are Mats; pass their native handles straight through.
    Mat objectsAsMat = objects;
    Mat rejectLevelsAsMat = rejectLevels;
    Mat levelWeightsAsMat = levelWeights;
    objdetect_CascadeClassifier_detectMultiScale3_12(nativeObj, image.nativeObj, objectsAsMat.nativeObj, rejectLevelsAsMat.nativeObj, levelWeightsAsMat.nativeObj, scaleFactor, minNeighbors, flags, minSize.width, minSize.height);
    return;
#else
    return;
#endif
}
//
// C++: int64 cv::dnn::Net::getPerfProfile(vector_double& timings)
//

/**
 * Returns overall time for inference and timings (in ticks) for layers.
 * Indexes in the returned vector correspond to layer ids. Some layers can be
 * fused with others, in which case a zero tick count is returned for the
 * skipped layers.
 * param timings vector receiving tick timings for all layers.
 * return overall ticks for model inference.
 */
public long getPerfProfile(MatOfDouble timings)
{
    ThrowIfDisposed();
    // Guard against a disposed output Mat before crossing into native code.
    if (timings != null) { timings.ThrowIfDisposed(); }

    Mat timingsAsMat = timings;
    return dnn_Net_getPerfProfile_10(nativeObj, timingsAsMat.nativeObj);
}
//
// C++: int64 cv::dnn::Net::getPerfProfile(vector_double& timings)
//

//javadoc: Net::getPerfProfile(timings)

// Returns the overall inference time in ticks, writing per-layer tick timings
// into |timings| via the native dnn wrapper. On platforms where the native
// plugin is unavailable, returns -1 and leaves |timings| untouched.
public long getPerfProfile(MatOfDouble timings)
{
    ThrowIfDisposed();
    // Guard against a disposed output Mat before crossing into native code.
    if (timings != null) { timings.ThrowIfDisposed(); }

#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    Mat timingsAsMat = timings;
    long ticks = dnn_Net_getPerfProfile_10(nativeObj, timingsAsMat.nativeObj);
    return ticks;
#else
    return -1;
#endif
}