/// <summary>
/// Raises the scan face mask button click event.
/// </summary>
public void OnScanFaceMaskButtonClick()
{
    RemoveFaceMask();

    // Capture a webcam frame.
    if (webCamTextureToMatHelper.IsPlaying())
    {
        Mat rgbaMat = webCamTextureToMatHelper.GetMat();

        faceRectInMask = DetectFace(rgbaMat);
        if (faceRectInMask.width == 0 && faceRectInMask.height == 0)
        {
            Debug.Log("A face could not be detected from the input image.");
            return;
        }

        // Expand the face rect by a fifth of its size, then clamp it to the image bounds.
        Rect rect = new Rect((int)faceRectInMask.x, (int)faceRectInMask.y, (int)faceRectInMask.width, (int)faceRectInMask.height);
        rect.inflate(rect.width / 5, rect.height / 5);
        rect = rect.intersect(new Rect(0, 0, rgbaMat.width(), rgbaMat.height()));

        faceMaskTexture = new Texture2D(rect.width, rect.height, TextureFormat.RGBA32, false);
        faceMaskMat = new Mat(rgbaMat, rect).clone();
        OpenCVForUnity.UnityUtils.Utils.matToTexture2D(faceMaskMat, faceMaskTexture);
        Debug.Log("faceMaskMat: " + faceMaskMat);

        // Re-detect the face and its landmarks inside the cropped mask image.
        faceRectInMask = DetectFace(faceMaskMat);
        if (faceRectInMask.width == 0 && faceRectInMask.height == 0)
        {
            RemoveFaceMask();
            Debug.Log("A face could not be detected from the input image.");
            return;
        }
        faceLandmarkPointsInMask = DetectFaceLandmarkPoints(faceMaskMat, faceRectInMask);
    }
}
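// A minimal sketch of the "inflate, then clamp to the image bounds" pattern used
// above, factored into a hypothetical helper (InflateAndClamp is not part of the
// original sample; it relies on OpenCVForUnity's Rect.inflate/intersect extensions):
private static Rect InflateAndClamp(Rect rect, int dx, int dy, Mat img)
{
    rect.inflate(dx, dy);                                     // grow in place
    return rect.intersect(new Rect(0, 0, img.width(), img.height())); // clamp
}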
private void DetectInRegion(Mat img, Rect region, List<Rect> detectedObjectsInRegions, CascadeClassifier cascade)
{
    Rect r0 = new Rect(new Point(), img.size());
    Rect r1 = new Rect(region.x, region.y, region.width, region.height);
    Rect.inflate(r1, (int)((r1.width * coeffTrackingWindowSize) - r1.width) / 2,
        (int)((r1.height * coeffTrackingWindowSize) - r1.height) / 2);
    r1 = Rect.intersect(r0, r1);

    if ((r1.width <= 0) || (r1.height <= 0))
    {
        Debug.Log("detectInRegion: Empty intersection");
        return;
    }

    int d = Math.Min(region.width, region.height);
    d = (int)Math.Round(d * coeffObjectSizeToTrack);

    using (MatOfRect tmpobjects = new MatOfRect())
    using (Mat img1 = new Mat(img, r1)) // sub-image for the search window, without copying data
    {
        cascade.detectMultiScale(img1, tmpobjects, 1.1, 2,
            0 | Objdetect.CASCADE_DO_CANNY_PRUNING | Objdetect.CASCADE_SCALE_IMAGE | Objdetect.CASCADE_FIND_BIGGEST_OBJECT,
            new Size(d, d), new Size());

        Rect[] tmpobjectsArray = tmpobjects.toArray();
        int len = tmpobjectsArray.Length;
        for (int i = 0; i < len; i++)
        {
            Rect tmp = tmpobjectsArray[i];
            // Shift detections from search-window coordinates back to image coordinates.
            Rect r = new Rect(new Point(tmp.x + r1.x, tmp.y + r1.y), tmp.size());
            detectedObjectsInRegions.Add(r);
        }
    }
}
private void DetectInRegion(Mat img, Rect region, List<Rect> detectedObjectsInRegions, FaceLandmarkDetector landmarkDetector)
{
    Rect r0 = new Rect(new Point(), img.size());
    Rect r1 = new Rect(region.x, region.y, region.width, region.height);
    Rect.inflate(r1, (int)((r1.width * coeffTrackingWindowSize) - r1.width) / 2,
        (int)((r1.height * coeffTrackingWindowSize) - r1.height) / 2);
    r1 = Rect.intersect(r0, r1);

    if ((r1.width <= 0) || (r1.height <= 0))
    {
        Debug.Log("detectInRegion: Empty intersection");
        return;
    }

    using (Mat img1_roi = new Mat(img, r1))
    using (Mat img1 = new Mat(r1.size(), img.type()))
    {
        // Unlike the cascade overload, the ROI is copied into a fresh Mat here: a submat
        // shares (and strides over) the parent's buffer, while the landmark detector is
        // handed a continuous image.
        img1_roi.copyTo(img1);

        OpenCVForUnityUtils.SetImage(landmarkDetector, img1);

        List<UnityEngine.Rect> detectResult = landmarkDetector.Detect();
        int len = detectResult.Count;
        for (int i = 0; i < len; i++)
        {
            UnityEngine.Rect tmp = detectResult[i];
            // Shift detections from search-window coordinates back to image coordinates.
            Rect r = new Rect((int)(tmp.x + r1.x), (int)(tmp.y + r1.y), (int)tmp.width, (int)tmp.height);
            detectedObjectsInRegions.Add(r);
        }
    }
}
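// Hypothetical usage of the two DetectInRegion overloads above. All names here are
// illustrative: a configured CascadeClassifier ("cascade"), a dlib FaceLandmarkDetector
// ("faceLandmarkDetector"), a grayscale frame ("grayMat"), a detector toggle
// ("useDlibFaceDetector"), and the tracker's current regions ("trackedRects"):
List<Rect> detectedObjectsInRegions = new List<Rect>();
foreach (Rect region in trackedRects)
{
    if (useDlibFaceDetector)
        DetectInRegion(grayMat, region, detectedObjectsInRegions, faceLandmarkDetector);
    else
        DetectInRegion(grayMat, region, detectedObjectsInRegions, cascade);
}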
public virtual Texture2D UpdateLUTTex(int id, Mat src, Mat dst, List<Vector2> src_landmarkPoints, List<Vector2> dst_landmarkPoints)
{
    // Recreate the masks if the input sizes changed.
    if (src_mask != null && (src.width() != src_mask.width() || src.height() != src_mask.height()))
    {
        src_mask.Dispose();
        src_mask = null;
    }
    src_mask = src_mask ?? new Mat(src.rows(), src.cols(), CvType.CV_8UC1, Scalar.all(0));

    if (dst_mask != null && (dst.width() != dst_mask.width() || dst.height() != dst_mask.height()))
    {
        dst_mask.Dispose();
        dst_mask = null;
    }
    dst_mask = dst_mask ?? new Mat(dst.rows(), dst.cols(), CvType.CV_8UC1, Scalar.all(0));

    // Get the facial contour points.
    GetFacialContourPoints(src_landmarkPoints, src_facialContourPoints);
    GetFacialContourPoints(dst_landmarkPoints, dst_facialContourPoints);

    // Get the facial contour rects, clamped to the image bounds.
    Rect src_facialContourRect = Imgproc.boundingRect(new MatOfPoint(src_facialContourPoints));
    Rect dst_facialContourRect = Imgproc.boundingRect(new MatOfPoint(dst_facialContourPoints));
    src_facialContourRect = src_facialContourRect.intersect(new Rect(0, 0, src.width(), src.height()));
    dst_facialContourRect = dst_facialContourRect.intersect(new Rect(0, 0, dst.width(), dst.height()));

    using (Mat src_ROI = new Mat(src, src_facialContourRect))
    using (Mat dst_ROI = new Mat(dst, dst_facialContourRect))
    using (Mat src_mask_ROI = new Mat(src_mask, src_facialContourRect))
    using (Mat dst_mask_ROI = new Mat(dst_mask, dst_facialContourRect))
    {
        GetPointsInFrame(src_mask_ROI, src_facialContourPoints, src_facialContourPoints);
        GetPointsInFrame(dst_mask_ROI, dst_facialContourPoints, dst_facialContourPoints);

        src_mask_ROI.setTo(new Scalar(0));
        dst_mask_ROI.setTo(new Scalar(0));
        Imgproc.fillConvexPoly(src_mask_ROI, new MatOfPoint(src_facialContourPoints), new Scalar(255));
        Imgproc.fillConvexPoly(dst_mask_ROI, new MatOfPoint(dst_facialContourPoints), new Scalar(255));

        Texture2D LUTTex;
        if (LUTTexDict.ContainsKey(id))
        {
            LUTTex = LUTTexDict[id];
        }
        else
        {
            LUTTex = new Texture2D(256, 1, TextureFormat.RGB24, false);
            LUTTexDict.Add(id, LUTTex);
        }

        FaceMaskShaderUtils.CalculateLUT(src_ROI, dst_ROI, src_mask_ROI, dst_mask_ROI, LUTTex);

        return LUTTex;
    }
}
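// Hypothetical usage: refresh the color-correction LUT for a tracked face and pass
// it to the face-mask material (the "_LUTTex" property name and all variable names
// here are illustrative, not taken from the original sample):
Texture2D lut = colorCorrector.UpdateLUTTex(trackedId, srcMat, dstMat, srcLandmarkPoints, dstLandmarkPoints);
faceMaskMaterial.SetTexture("_LUTTex", lut);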
void DoProcess()
{
    if (!(owner.Value is OpenCVForUnityPlayMakerActions.Rect))
    {
        LogError("owner is not initialized. Add Action \"newRect\".");
        return;
    }
    OpenCVForUnity.CoreModule.Rect wrapped_owner = OpenCVForUnityPlayMakerActionsUtils.GetWrappedObject<OpenCVForUnityPlayMakerActions.Rect, OpenCVForUnity.CoreModule.Rect>(owner);

    if (!(storeResult.Value is OpenCVForUnityPlayMakerActions.Rect))
        storeResult.Value = new OpenCVForUnityPlayMakerActions.Rect();

    ((OpenCVForUnityPlayMakerActions.Rect)storeResult.Value).wrappedObject =
        wrapped_owner.intersect(new OpenCVForUnity.CoreModule.Rect((int)rect_x.Value, (int)rect_y.Value, (int)rect_width.Value, (int)rect_height.Value));
}
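// For reference, the plain OpenCVForUnity equivalent of this PlayMaker action
// (a sketch with illustrative values):
OpenCVForUnity.CoreModule.Rect a = new OpenCVForUnity.CoreModule.Rect(0, 0, 100, 100);
OpenCVForUnity.CoreModule.Rect b = new OpenCVForUnity.CoreModule.Rect(50, 50, 100, 100);
OpenCVForUnity.CoreModule.Rect overlap = a.intersect(b); // (x=50, y=50, 50 x 50)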
private void UpdateTrackedObjects(List<Rect> detectedObjects)
{
    int N1 = trackedObjects.Count;
    int N2 = detectedObjects.Count;

    for (int i = 0; i < N1; i++)
    {
        trackedObjects[i].numDetectedFrames++;
    }

    // correspondence[j] holds either the index of the tracked object that detection j
    // was matched to, or one of the negative TrackedState markers
    // (NEW_RECTANGLE / INTERSECTED_RECTANGLE).
    int[] correspondence = new int[N2];
    for (int i = 0; i < N2; i++)
    {
        correspondence[i] = (int)TrackedState.NEW_RECTANGLE;
    }

    // First cycle: match each tracked object to the detection that overlaps its last
    // known position with the largest intersection area.
    for (int i = 0; i < N1; i++)
    {
        TrackedObject curObject = trackedObjects[i];

        int bestIndex = -1;
        int bestArea = -1;

        int numpositions = curObject.lastPositions.Count;
        Rect prevRect = curObject.lastPositions[numpositions - 1];

        for (int j = 0; j < N2; j++)
        {
            if (correspondence[j] >= 0)
            {
                // Detection j already corresponds to another tracked object.
                continue;
            }
            if (correspondence[j] != (int)TrackedState.NEW_RECTANGLE)
            {
                // Detection j already intersected another rectangle.
                continue;
            }

            Rect r = Rect.intersect(prevRect, detectedObjects[j]);
            if (r != null && (r.width > 0) && (r.height > 0))
            {
                correspondence[j] = (int)TrackedState.INTERSECTED_RECTANGLE;
                if (r.area() > bestArea)
                {
                    bestIndex = j;
                    bestArea = (int)r.area();
                }
            }
        }

        if (bestIndex >= 0)
        {
            correspondence[bestIndex] = i;

            // Mark every remaining detection that overlaps the chosen one as an
            // auxiliary intersection so it neither matches nor spawns a new track.
            for (int j = 0; j < N2; j++)
            {
                if (correspondence[j] >= 0)
                    continue;

                Rect r = Rect.intersect(detectedObjects[j], detectedObjects[bestIndex]);
                if (r != null && (r.width > 0) && (r.height > 0))
                {
                    correspondence[j] = (int)TrackedState.INTERSECTED_RECTANGLE;
                }
            }
        }
        else
        {
            curObject.numFramesNotDetected++;
        }
    }

    // Second cycle: apply the computed correspondences.
    for (int j = 0; j < N2; j++)
    {
        int i = correspondence[j];
        if (i >= 0)
        {
            // Add the new position to the matched tracked object.
            trackedObjects[i].lastPositions.Add(detectedObjects[j]);
            while (trackedObjects[i].lastPositions.Count > (int)innerParameters.numLastPositionsToTrack)
            {
                trackedObjects[i].lastPositions.RemoveAt(0);
            }
            trackedObjects[i].numFramesNotDetected = 0;
        }
        else if (i == (int)TrackedState.NEW_RECTANGLE)
        {
            // Start tracking a new object.
            trackedObjects.Add(new TrackedObject(detectedObjects[j]));
        }
        // else: the detection was an auxiliary intersection; ignore it.
    }

    // Drop stale objects: either not seen for too long, or never shown and not
    // re-detected quickly enough.
    int t = 0;
    while (t < trackedObjects.Count)
    {
        TrackedObject it = trackedObjects[t];
        if ((it.numFramesNotDetected > parameters.maxTrackLifetime)
            || ((it.numDetectedFrames <= innerParameters.numStepsToWaitBeforeFirstShow)
                && (it.numFramesNotDetected > innerParameters.numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown)))
        {
            trackedObjects.RemoveAt(t);
        }
        else
        {
            t++;
        }
    }
}
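// A minimal per-frame driver for the tracker above (a sketch; DetectFaces stands in
// for whatever detector produces the candidate rectangles and is not part of the
// original code):
public void ProcessFrame(Mat frame)
{
    List<Rect> detections = DetectFaces(frame);
    UpdateTrackedObjects(detections); // match, extend, create, and expire tracks
}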
/// <summary>
/// Raises the web cam texture to mat helper initialized event.
/// </summary>
public void OnWebCamTextureToMatHelperInitialized()
{
    Debug.Log("OnWebCamTextureToMatHelperInitialized");

    Mat webCamTextureMat = webCamTextureToMatHelper.GetMat();

#if WINDOWS_UWP && !DISABLE_HOLOLENSCAMSTREAM_API
    // HololensCameraStream always returns image data in BGRA format.
    texture = new Texture2D(webCamTextureMat.cols(), webCamTextureMat.rows(), TextureFormat.BGRA32, false);
#else
    texture = new Texture2D(webCamTextureMat.cols(), webCamTextureMat.rows(), TextureFormat.RGBA32, false);
#endif
    texture.wrapMode = TextureWrapMode.Clamp;

    Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

    processingAreaRect = new Rect(
        (int)(webCamTextureMat.cols() * (outsideClippingRatio.x - clippingOffset.x)),
        (int)(webCamTextureMat.rows() * (outsideClippingRatio.y + clippingOffset.y)),
        (int)(webCamTextureMat.cols() * (1f - outsideClippingRatio.x * 2)),
        (int)(webCamTextureMat.rows() * (1f - outsideClippingRatio.y * 2)));
    processingAreaRect = processingAreaRect.intersect(new Rect(0, 0, webCamTextureMat.cols(), webCamTextureMat.rows()));

    dstMat = new Mat(webCamTextureMat.rows(), webCamTextureMat.cols(), CvType.CV_8UC1);
    dstMatClippingROI = new Mat(dstMat, processingAreaRect);

    // Fill all black.
    //Imgproc.rectangle (dstMat, new Point (0, 0), new Point (dstMat.width (), dstMat.height ()), new Scalar (0, 0, 0, 0), -1);

    grayMat = new Mat(dstMatClippingROI.rows(), dstMatClippingROI.cols(), CvType.CV_8UC1);
    lineMat = new Mat(dstMatClippingROI.rows(), dstMatClippingROI.cols(), CvType.CV_8UC1);
    maskMat = new Mat(dstMatClippingROI.rows(), dstMatClippingROI.cols(), CvType.CV_8UC1);

    // Create a striped background.
    bgMat = new Mat(dstMatClippingROI.rows(), dstMatClippingROI.cols(), CvType.CV_8UC1, new Scalar(255));
    for (int i = 0; i < bgMat.rows() * 2.5f; i += 4)
    {
        Imgproc.line(bgMat, new Point(0, i), new Point(bgMat.cols(), -bgMat.cols() + i), new Scalar(0), 1);
    }

    grayPixels = new byte[grayMat.cols() * grayMat.rows() * grayMat.channels()];
    maskPixels = new byte[maskMat.cols() * maskMat.rows() * maskMat.channels()];

    quad_renderer = gameObject.GetComponent<Renderer>();
    quad_renderer.sharedMaterial.SetTexture("_MainTex", texture);
    quad_renderer.sharedMaterial.SetVector("_VignetteOffset", new Vector4(clippingOffset.x, clippingOffset.y));

    Matrix4x4 projectionMatrix;
#if WINDOWS_UWP && !DISABLE_HOLOLENSCAMSTREAM_API
    projectionMatrix = webCamTextureToMatHelper.GetProjectionMatrix();
    quad_renderer.sharedMaterial.SetMatrix("_CameraProjectionMatrix", projectionMatrix);
#else
    // This hard-coded matrix was obtained from PhotoCapture's TryGetProjectionMatrix()
    // method; whether that approach is ideal is an open question. See the discussion at
    // https://forums.hololens.com/discussion/782/live-stream-of-locatable-camera-webcam-in-unity
    projectionMatrix = Matrix4x4.identity;
    projectionMatrix.m00 = 2.31029f;
    projectionMatrix.m01 = 0.00000f;
    projectionMatrix.m02 = 0.09614f;
    projectionMatrix.m03 = 0.00000f;
    projectionMatrix.m10 = 0.00000f;
    projectionMatrix.m11 = 4.10427f;
    projectionMatrix.m12 = -0.06231f;
    projectionMatrix.m13 = 0.00000f;
    projectionMatrix.m20 = 0.00000f;
    projectionMatrix.m21 = 0.00000f;
    projectionMatrix.m22 = -1.00000f;
    projectionMatrix.m23 = 0.00000f;
    projectionMatrix.m30 = 0.00000f;
    projectionMatrix.m31 = 0.00000f;
    projectionMatrix.m32 = -1.00000f;
    projectionMatrix.m33 = 0.00000f;
    quad_renderer.sharedMaterial.SetMatrix("_CameraProjectionMatrix", projectionMatrix);
#endif
    quad_renderer.sharedMaterial.SetFloat("_VignetteScale", vignetteScale);

    float halfOfVerticalFov = Mathf.Atan(1.0f / projectionMatrix.m11);
    float aspectRatio = (1.0f / Mathf.Tan(halfOfVerticalFov)) / projectionMatrix.m00;
    Debug.Log("halfOfVerticalFov " + halfOfVerticalFov);
    Debug.Log("aspectRatio " + aspectRatio);

    //Imgproc.rectangle (dstMat, new Point (0, 0), new Point (webCamTextureMat.width (), webCamTextureMat.height ()), new Scalar (126, 126, 126, 255), -1);
}
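// Sanity check of the FOV math above, using the hard-coded HoloLens projection
// matrix (m00 = 2.31029, m11 = 4.10427); a worked example for verification only:
//   halfOfVerticalFov = atan(1 / m11) = atan(1 / 4.10427) ≈ 0.239 rad (≈ 13.7°)
//   aspectRatio       = (1 / tan(halfOfVerticalFov)) / m00 = m11 / m00 ≈ 1.776 (≈ 16:9)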