/// <summary>
        /// Recognizes the markers.
        /// </summary>
        /// <param name="grayscale">Grayscale.</param>
        /// <param name="detectedMarkers">Detected markers.</param>
        void recognizeMarkers(Mat grayscale, List <Marker> detectedMarkers)
        {
            List <Marker> goodMarkers = new List <Marker>();

            // Identify the markers
            for (int i = 0; i < detectedMarkers.Count; i++)
            {
                Marker marker = detectedMarkers[i];

                // Find the perspective transformation that brings the current marker to rectangular form
                Mat markerTransform = Imgproc.getPerspectiveTransform(new MatOfPoint2f(marker.points.toArray()), m_markerCorners2d);

                // Transform image to get a canonical marker image
                Imgproc.warpPerspective(grayscale, canonicalMarkerImage, markerTransform, markerSize);

                for (int p = 0; p < m_markerDesigns.Count; p++)
                {
                    MatOfInt nRotations = new MatOfInt(0);
                    int      id         = Marker.getMarkerId(canonicalMarkerImage, nRotations, m_markerDesigns[p]);
                    if (id != -1)
                    {
                        marker.id = id;

                        // Sort the points so that they are always in the same order no matter the camera orientation
                        List <Point> MarkerPointsList = marker.points.toList();

                        //std::rotate(marker.points.begin(), marker.points.begin() + 4 - nRotations, marker.points.end());
                        MarkerPointsList = MarkerPointsList.Skip(4 - nRotations.toArray()[0]).Concat(MarkerPointsList.Take(4 - nRotations.toArray()[0])).ToList();

                        marker.points.fromList(MarkerPointsList);

                        goodMarkers.Add(marker);
                    }
                    nRotations.Dispose();
                }
            }

            // Refine marker corners using sub-pixel accuracy
            if (goodMarkers.Count > 0)
            {
                List <Point> preciseCornersPoint = new List <Point>(4 * goodMarkers.Count);
                for (int i = 0; i < preciseCornersPoint.Capacity; i++)
                {
                    preciseCornersPoint.Add(new Point(0, 0));
                }

                for (int i = 0; i < goodMarkers.Count; i++)
                {
                    Marker marker = goodMarkers[i];

                    List <Point> markerPointsList = marker.points.toList();

                    for (int c = 0; c < 4; c++)
                    {
                        preciseCornersPoint[i * 4 + c] = markerPointsList[c];
                    }
                }

                MatOfPoint2f preciseCorners = new MatOfPoint2f(preciseCornersPoint.ToArray());

                TermCriteria termCriteria = new TermCriteria(TermCriteria.MAX_ITER | TermCriteria.EPS, 30, 0.01);
                Imgproc.cornerSubPix(grayscale, preciseCorners, new Size(5, 5), new Size(-1, -1), termCriteria);

                preciseCornersPoint = preciseCorners.toList();

                // Copy refined corners position back to markers
                for (int i = 0; i < goodMarkers.Count; i++)
                {
                    Marker marker = goodMarkers[i];

                    List <Point> markerPointsList = marker.points.toList();

                    for (int c = 0; c < 4; c++)
                    {
                        markerPointsList[c] = preciseCornersPoint[i * 4 + c];
                    }

                    // Write the refined corners back into the marker; toList() returns a copy,
                    // so without this call the refined positions would be discarded.
                    marker.points.fromList(markerPointsList);
                }
                preciseCorners.Dispose();
            }

            detectedMarkers.Clear();
            detectedMarkers.AddRange(goodMarkers);
        }
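
The Skip/Take/Concat expression above is the C# equivalent of the commented-out std::rotate call: it rotates the four corner points left by 4 - nRotations positions so the corner order is canonical regardless of camera orientation. A minimal standalone sketch of that rotation (the RotateLeft helper is illustrative and not part of the example):

        // Illustrative helper (requires System.Linq and System.Collections.Generic):
        // rotates a list left by `shift` positions, like std::rotate(begin, begin + shift, end).
        static List<T> RotateLeft<T>(List<T> source, int shift)
        {
            shift = ((shift % source.Count) + source.Count) % source.Count;
            return source.Skip(shift).Concat(source.Take(shift)).ToList();
        }

        // Usage corresponding to the rotation above:
        // markerPointsList = RotateLeft(markerPointsList, 4 - nRotations.toArray()[0]);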
		/// <summary>
		/// Recognizes the markers.
		/// </summary>
		/// <param name="grayscale">Grayscale.</param>
		/// <param name="detectedMarkers">Detected markers.</param>
		void recognizeMarkers (Mat grayscale, List<Marker> detectedMarkers)
		{
			List<Marker> goodMarkers = new List<Marker> ();

			// Identify the markers
			for (int i = 0; i < detectedMarkers.Count; i++) {
				Marker marker = detectedMarkers [i];

				// Find the perspective transformation that brings the current marker to rectangular form
				Mat markerTransform = Imgproc.getPerspectiveTransform (new MatOfPoint2f (marker.points.toArray ()), m_markerCorners2d);

				// Transform the image to get a canonical marker image
				Imgproc.warpPerspective (grayscale, canonicalMarkerImage, markerTransform, markerSize);

				MatOfInt nRotations = new MatOfInt (0);
				int id = Marker.getMarkerId (canonicalMarkerImage, nRotations, m_markerDesign);
				if (id != -1) {
					marker.id = id;
					//Debug.Log ("id " + id);

					// Sort the points so that they are always in the same order no matter the camera orientation
					// (equivalent to std::rotate(marker.points.begin(), marker.points.begin() + 4 - nRotations, marker.points.end()))
					List<Point> markerPointsList = marker.points.toList ();
					int shift = 4 - nRotations.toArray () [0];
					markerPointsList = markerPointsList.Skip (shift).Concat (markerPointsList.Take (shift)).ToList ();
					marker.points.fromList (markerPointsList);

					goodMarkers.Add (marker);
				}
				nRotations.Dispose ();
			}

			//Debug.Log ("goodMarkers " + goodMarkers.Count);

			// Refine marker corners using sub-pixel accuracy
			if (goodMarkers.Count > 0) {
				List<Point> preciseCornersPoint = new List<Point> (4 * goodMarkers.Count);
				for (int i = 0; i < preciseCornersPoint.Capacity; i++) {
					preciseCornersPoint.Add (new Point (0, 0));
				}

				for (int i = 0; i < goodMarkers.Count; i++) {
					Marker marker = goodMarkers [i];
					List<Point> markerPointsList = marker.points.toList ();

					for (int c = 0; c < 4; c++) {
						preciseCornersPoint [i * 4 + c] = markerPointsList [c];
					}
				}

				MatOfPoint2f preciseCorners = new MatOfPoint2f (preciseCornersPoint.ToArray ());

				TermCriteria termCriteria = new TermCriteria (TermCriteria.MAX_ITER | TermCriteria.EPS, 30, 0.01);
				Imgproc.cornerSubPix (grayscale, preciseCorners, new Size (5, 5), new Size (-1, -1), termCriteria);

				preciseCornersPoint = preciseCorners.toList ();

				// Copy the refined corner positions back to the markers
				for (int i = 0; i < goodMarkers.Count; i++) {
					Marker marker = goodMarkers [i];
					List<Point> markerPointsList = marker.points.toList ();

					for (int c = 0; c < 4; c++) {
						markerPointsList [c] = preciseCornersPoint [i * 4 + c];
					}
					// Write the refined corners back into the marker; toList() returns a copy,
					// so without this call the refined positions would be discarded.
					marker.points.fromList (markerPointsList);
				}
				preciseCorners.Dispose ();
			}

			detectedMarkers.Clear ();
			detectedMarkers.AddRange (goodMarkers);
		}
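
Both versions warp each candidate quad onto m_markerCorners2d, the corner coordinates of the canonical (axis-aligned) marker image. A typical initialization for those destination corners, in the spirit of the marker-detection tutorial this code follows (the exact corner order is an assumption and must match the order produced by the contour-detection step):

        // Illustrative setup (assumed, not copied from this project): canonical marker
        // corners inside the markerSize destination image used by warpPerspective.
        MatOfPoint2f m_markerCorners2d = new MatOfPoint2f(
            new Point(0, 0),
            new Point(markerSize.width - 1, 0),
            new Point(markerSize.width - 1, markerSize.height - 1),
            new Point(0, markerSize.height - 1));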
        protected override void postprocess(Mat frame, List <Mat> outs, Net net)
        {
            List <int>     classIdsList    = new List <int>();
            List <float>   confidencesList = new List <float>();
            List <Rect2d>  boxesList       = new List <Rect2d>();
            List <Point[]> pointsList      = new List <Point[]>();

            if (outs.Count == 2)
            {
                // reshape mat : outs[0]:[1, x, 4] to [x, 4], outs[1]:[1, x, 2] to [x, 2]
                Mat boxes_m  = outs[0].reshape(1, new int[] { outs[0].size(1), outs[0].size(2) });
                Mat scores_m = outs[1].reshape(1, new int[] { outs[1].size(1), outs[1].size(2) });

                //Debug.Log("boxes_m: " + boxes_m);
                //Debug.Log("scores_m: " + scores_m);
                //Debug.Log("priors: " + priors);

                convertLocationsToBoxes(boxes_m, priors, 0.1f, 0.2f);
                centerFormToCornerForm(boxes_m);

                Mat     boxes_0_4 = new Mat(boxes_m, new Range(0, boxes_m.rows()), new Range(0, 4));
                float[] boxes_arr = new float[boxes_0_4.rows() * boxes_0_4.cols()];
                MatUtils.copyFromMat(boxes_0_4, boxes_arr);

                Mat     scores_1_2      = new Mat(scores_m, new Range(0, scores_m.rows()), new Range(1, 2));
                float[] confidences_arr = new float[scores_1_2.rows()];
                MatUtils.copyFromMat(scores_1_2, confidences_arr);

                for (int i = 0; i < boxes_m.rows(); i++)
                {
                    float confidence = confidences_arr[i];

                    if (confidence > confThreshold)
                    {
                        int boxes_index = i * 4;

                        float left   = boxes_arr[boxes_index] * frame.cols();
                        float top    = boxes_arr[boxes_index + 1] * frame.rows();
                        float right  = boxes_arr[boxes_index + 2] * frame.cols();
                        float bottom = boxes_arr[boxes_index + 3] * frame.rows();
                        float width  = right - left + 1f;
                        float height = bottom - top + 1f;

                        classIdsList.Add(0);
                        confidencesList.Add(confidence);
                        boxesList.Add(new Rect2d(left, top, width, height));
                    }
                }

                if (boxes_m.cols() > 4 && boxes_m.cols() % 2 == 0)
                {
                    Mat     points     = new Mat(boxes_m, new Range(0, boxes_m.rows()), new Range(4, boxes_m.cols()));
                    float[] points_arr = new float[points.rows() * points.cols()];
                    MatUtils.copyFromMat(points, points_arr);

                    for (int i = 0; i < boxes_m.rows(); i++)
                    {
                        float confidence = confidences_arr[i];

                        if (confidence > confThreshold)
                        {
                            int points_index = i * points.cols();

                            Point[] p_arr = new Point[points.cols() / 2];
                            for (int index = 0; index < points.cols() / 2; index++)
                            {
                                float x = points_arr[points_index + index * 2] * frame.cols();
                                float y = points_arr[points_index + index * 2 + 1] * frame.rows();
                                p_arr[index] = new Point(x, y);
                            }
                            pointsList.Add(p_arr);
                        }
                    }
                }
            }

            MatOfRect2d boxes = new MatOfRect2d();

            boxes.fromList(boxesList);

            MatOfFloat confidences = new MatOfFloat();

            confidences.fromList(confidencesList);

            MatOfInt indices = new MatOfInt();

            Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

            //Debug.Log("indices.dump () " + indices.dump());
            //Debug.Log ("indices.ToString () "+indices.ToString());

            for (int i = 0; i < indices.total(); ++i)
            {
                int    idx = (int)indices.get(i, 0)[0];
                Rect2d box = boxesList[idx];
                drawPred(classIdsList[idx], confidencesList[idx], box.x, box.y,
                         box.x + box.width, box.y + box.height, frame);

                if (pointsList.Count > 0)
                {
                    drawPredPoints(pointsList[idx], frame);
                }
            }

            indices.Dispose();
            boxes.Dispose();
            confidences.Dispose();
        }
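
convertLocationsToBoxes and centerFormToCornerForm are helpers defined elsewhere in this example; conceptually they apply the usual SSD decoding against the priors and then switch each box from center form to corner form. A per-box sketch of that last step, for illustration only (the project's actual implementation operates on whole Mats):

        // Illustrative: convert one box from center form [cx, cy, w, h] to corner form
        // [x1, y1, x2, y2]; values stay normalized until scaled by frame.cols()/frame.rows().
        static float[] CenterToCorner(float cx, float cy, float w, float h)
        {
            return new float[] { cx - w / 2f, cy - h / 2f, cx + w / 2f, cy + h / 2f };
        }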
    /// <summary>
    /// Postprocess the specified frame, outs and net.
    /// </summary>
    /// <param name="frame">Frame.</param>
    /// <param name="outs">Outs.</param>
    /// <param name="net">Net.</param>
    private void postprocess(Mat frame, List <Mat> outs, Net net)
    {
        string outLayerType = outBlobTypes[0];

        List <int>   classIdsList    = new List <int>();
        List <float> confidencesList = new List <float>();
        List <OpenCVForUnity.CoreModule.Rect> boxesList = new List <OpenCVForUnity.CoreModule.Rect>();

        if (net.getLayer(new DictValue(0)).outputNameToIndex("im_info") != -1)
        {
            // Faster-RCNN or R-FCN
            // The network produces an output blob with shape 1x1xNx7, where N is the number of
            // detections and every detection is a vector of values
            // [batchId, classId, confidence, left, top, right, bottom]

            if (outs.Count == 1)
            {
                outs[0] = outs[0].reshape(1, (int)outs[0].total() / 7);

                //Debug.Log ("outs[i].ToString() " + outs [0].ToString ());

                float[] data = new float[7];

                for (int i = 0; i < outs[0].rows(); i++)
                {
                    outs[0].get(i, 0, data);

                    float confidence = data[2];

                    if (confidence > confThreshold)
                    {
                        int class_id = (int)(data[1]);

                        int left   = (int)(data[3] * frame.cols());
                        int top    = (int)(data[4] * frame.rows());
                        int right  = (int)(data[5] * frame.cols());
                        int bottom = (int)(data[6] * frame.rows());
                        int width  = right - left + 1;
                        int height = bottom - top + 1;

                        classIdsList.Add((int)(class_id) - 0);
                        confidencesList.Add((float)confidence);
                        boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                    }
                }
            }
        }
        else if (outLayerType == "DetectionOutput")
        {
            // The network produces an output blob with shape 1x1xNx7, where N is the number of
            // detections and every detection is a vector of values
            // [batchId, classId, confidence, left, top, right, bottom]

            if (outs.Count == 1)
            {
                outs[0] = outs[0].reshape(1, (int)outs[0].total() / 7);

                //Debug.Log ("outs[i].ToString() " + outs [0].ToString ());

                float[] data = new float[7];

                for (int i = 0; i < outs[0].rows(); i++)
                {
                    outs[0].get(i, 0, data);

                    float confidence = data[2];

                    if (confidence > confThreshold)
                    {
                        int class_id = (int)(data[1]);

                        int left   = (int)(data[3] * frame.cols());
                        int top    = (int)(data[4] * frame.rows());
                        int right  = (int)(data[5] * frame.cols());
                        int bottom = (int)(data[6] * frame.rows());
                        int width  = right - left + 1;
                        int height = bottom - top + 1;

                        classIdsList.Add((int)(class_id) - 0);
                        confidencesList.Add((float)confidence);
                        boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                    }
                }
            }
        }
        else if (outLayerType == "Region")
        {
            for (int i = 0; i < outs.Count; ++i)
            {
                // The network produces an output blob with shape NxC, where N is the number of
                // detected objects and C is the number of classes + 4; the first 4
                // numbers are [center_x, center_y, width, height]

                //Debug.Log ("outs[i].ToString() "+outs[i].ToString());

                float[] positionData   = new float[5];
                float[] confidenceData = new float[outs[i].cols() - 5];

                for (int p = 0; p < outs[i].rows(); p++)
                {
                    outs[i].get(p, 0, positionData);

                    outs[i].get(p, 5, confidenceData);

                    int   maxIdx     = confidenceData.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V > working.V) ? max : working).I;
                    float confidence = confidenceData[maxIdx];

                    if (confidence > confThreshold)
                    {
                        int centerX = (int)(positionData[0] * frame.cols());
                        int centerY = (int)(positionData[1] * frame.rows());
                        int width   = (int)(positionData[2] * frame.cols());
                        int height  = (int)(positionData[3] * frame.rows());
                        int left    = centerX - width / 2;
                        int top     = centerY - height / 2;

                        classIdsList.Add(maxIdx);
                        confidencesList.Add((float)confidence);
                        boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                    }
                }
            }
        }
        else
        {
            Debug.Log("Unknown output layer type: " + outLayerType);
        }


        MatOfRect boxes = new MatOfRect();

        boxes.fromList(boxesList);
        detectionBoxes.AddRange(boxesList);

        MatOfFloat confidences = new MatOfFloat();

        confidences.fromList(confidencesList);


        MatOfInt indices = new MatOfInt();

        Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

        for (int i = 0; i < indices.total(); ++i)
        {
            int idx = (int)indices.get(i, 0)[0];
            if (classNames != null)
            {
                modelOutput.Add(new KeyValuePair <string, float>(classNames[classIdsList[idx]], confidencesList[idx]));
            }

            if (gameObject.GetComponent <Renderer>() != null)
            {
                OpenCVForUnity.CoreModule.Rect box = boxesList[idx];
                drawPred(classIdsList[idx], confidencesList[idx], box.x, box.y,
                         box.x + box.width, box.y + box.height, frame);
            }
        }


        indices.Dispose();
        boxes.Dispose();
        confidences.Dispose();
    }
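
This method branches on outBlobTypes[0], the type string of the network's first unconnected output layer. A sketch of how that list is typically built in these OpenCVForUnity DNN examples (assumed here; the actual helper is not shown in this snippet):

        // Illustrative helper, assumed to match how outBlobTypes is populated elsewhere.
        protected virtual List<string> getOutputsTypes(Net net)
        {
            List<string> types = new List<string>();
            MatOfInt outLayers = net.getUnconnectedOutLayers();
            for (int i = 0; i < outLayers.total(); ++i)
            {
                types.Add(net.getLayer(new DictValue((int)outLayers.get(i, 0)[0])).get_type());
            }
            outLayers.Dispose();
            return types;
        }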
Example #5
        /// <summary>
        /// Process
        /// </summary>
        /// <returns></returns>
        private async void Process()
        {
            float DOWNSCALE_RATIO = 1.0f;

            while (true)
            {
                // Check TaskCancel
                if (tokenSource.Token.IsCancellationRequested)
                {
                    break;
                }

                rgbaMat = webCamTextureToMatHelper.GetMat();
                // Debug.Log ("rgbaMat.ToString() " + rgbaMat.ToString ());

                Mat downScaleRgbaMat = null;
                DOWNSCALE_RATIO = 1.0f;
                if (enableDownScale)
                {
                    downScaleRgbaMat = imageOptimizationHelper.GetDownScaleMat(rgbaMat);
                    DOWNSCALE_RATIO  = imageOptimizationHelper.downscaleRatio;
                }
                else
                {
                    downScaleRgbaMat = rgbaMat;
                    DOWNSCALE_RATIO  = 1.0f;
                }
                Imgproc.cvtColor(downScaleRgbaMat, bgrMat, Imgproc.COLOR_RGBA2BGR);



                await Task.Run(() =>
                {
                    // detect faces on the downscaled image
                    if (!enableSkipFrame || !imageOptimizationHelper.IsCurrentFrameSkipped())
                    {
                        if (net == null)
                        {
                            Imgproc.putText(rgbaMat, "model file is not loaded.", new Point(5, rgbaMat.rows() - 30), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false);
                            Imgproc.putText(rgbaMat, "Please read console message.", new Point(5, rgbaMat.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255, 255), 2, Imgproc.LINE_AA, false);
                        }
                        else
                        {
                            // Create a 4D blob from a frame.
                            Size inpSize = new Size(inpWidth > 0 ? inpWidth : bgrMat.cols(),
                                                    inpHeight > 0 ? inpHeight : bgrMat.rows());
                            Mat blob = Dnn.blobFromImage(bgrMat, scale, inpSize, mean, swapRB, false);


                            // Run a model.
                            net.setInput(blob);

                            if (net.getLayer(new DictValue(0)).outputNameToIndex("im_info") != -1)
                            {  // Faster-RCNN or R-FCN
                                Imgproc.resize(bgrMat, bgrMat, inpSize);
                                Mat imInfo = new Mat(1, 3, CvType.CV_32FC1);
                                imInfo.put(0, 0, new float[] {
                                    (float)inpSize.height,
                                    (float)inpSize.width,
                                    1.6f
                                });
                                net.setInput(imInfo, "im_info");
                            }


                            TickMeter tm = new TickMeter();
                            tm.start();

                            List <Mat> outs = new List <Mat>();
                            net.forward(outs, outBlobNames);

                            tm.stop();
                            //                    Debug.Log ("Inference time, ms: " + tm.getTimeMilli ());


                            postprocess(bgrMat, outs, net);

                            for (int i = 0; i < outs.Count; i++)
                            {
                                outs[i].Dispose();
                            }
                            blob.Dispose();


                            if (enableDownScale)
                            {
                                for (int i = 0; i < _boxesList.Count; ++i)
                                {
                                    var rect      = _boxesList[i];
                                    _boxesList[i] = new OpenCVForUnity.CoreModule.Rect(
                                        (int)(rect.x * DOWNSCALE_RATIO),
                                        (int)(rect.y * DOWNSCALE_RATIO),
                                        (int)(rect.width * DOWNSCALE_RATIO),
                                        (int)(rect.height * DOWNSCALE_RATIO));
                                }
                            }
                        }


                        //Imgproc.rectangle(rgbaMat, new Point(0, 0), new Point(rgbaMat.width(), rgbaMat.height()), new Scalar(0, 0, 0, 0), -1);


                        MatOfRect boxes = new MatOfRect();
                        boxes.fromList(_boxesList);

                        MatOfFloat confidences = new MatOfFloat();
                        confidences.fromList(_confidencesList);


                        MatOfInt indices = new MatOfInt();
                        Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

                        //            Debug.Log ("indices.dump () "+indices.dump ());
                        //            Debug.Log ("indices.ToString () "+indices.ToString());

                        for (int i = 0; i < indices.total(); ++i)
                        {
                            int idx = (int)indices.get(i, 0)[0];
                            OpenCVForUnity.CoreModule.Rect box = _boxesList[idx];
                            drawPred(_classIdsList[idx], _confidencesList[idx], box.x, box.y,
                                     box.x + box.width, box.y + box.height, rgbaMat);
                        }

                        indices.Dispose();
                        boxes.Dispose();
                        confidences.Dispose();
                    }
                });



                Utils.fastMatToTexture2D(rgbaMat, texture);


                Thread.Sleep(10);
            }
        }
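
Process() loops until tokenSource is cancelled, so the surrounding component is expected to own a CancellationTokenSource. A minimal start/stop sketch under that assumption (the method names are illustrative; requires System.Threading):

        CancellationTokenSource tokenSource;

        void StartProcessing()
        {
            tokenSource = new CancellationTokenSource();
            Process();   // fire-and-forget async loop; it exits once the token is cancelled
        }

        void StopProcessing()
        {
            tokenSource?.Cancel();
        }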
Example #6
        /// <summary>
        /// Postprocess the specified frame, outs and net.
        /// </summary>
        /// <param name="frame">Frame.</param>
        /// <param name="outs">Outs.</param>
        /// <param name="net">Net.</param>
        private void postprocess(Mat frame, List <Mat> outs, Net net)
        {
            string       outLayerType    = outBlobTypes[0];
            List <int>   classIdsList    = new List <int>();
            List <float> confidencesList = new List <float>();
            List <OpenCVForUnity.CoreModule.Rect> boxesList = new List <OpenCVForUnity.CoreModule.Rect>();

            if (outLayerType == "Region")
            {
                for (int i = 0; i < outs.Count; ++i)
                {
                    // The network produces an output blob with shape NxC, where N is the number of
                    // detected objects and C is the number of classes + 4; the first 4
                    // numbers are [center_x, center_y, width, height]

                    //Debug.Log("outs[i].ToString() " + outs[i].ToString());

                    float[] positionData   = new float[5];
                    float[] confidenceData = new float[outs[i].cols() - 5];

                    for (int p = 0; p < outs[i].rows(); p++)
                    {
                        outs[i].get(p, 0, positionData);

                        outs[i].get(p, 5, confidenceData);

                        int   maxIdx     = confidenceData.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V > working.V) ? max : working).I;
                        float confidence = confidenceData[maxIdx];

                        if (confidence > confThreshold)
                        {
                            int centerX = (int)(positionData[0] * frame.cols());
                            int centerY = (int)(positionData[1] * frame.rows());
                            int width   = (int)(positionData[2] * frame.cols());
                            int height  = (int)(positionData[3] * frame.rows());
                            int left    = centerX - width / 2;
                            int top     = centerY - height / 2;

                            classIdsList.Add(maxIdx);
                            confidencesList.Add((float)confidence);
                            boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                        }
                    }
                }
            }
            else
            {
                Debug.Log("Unknown output layer type: " + outLayerType);
            }


            MatOfRect boxes = new MatOfRect();

            boxes.fromList(boxesList);

            MatOfFloat confidences = new MatOfFloat();

            confidences.fromList(confidencesList);


            MatOfInt indices = new MatOfInt();

            Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);
            // Check the selected language
            switch (menuVariables.GetLanguage())
            {
            case "EN":
                vocOffset = 0;
                break;

            case "ES":
                vocOffset = 80;
                break;

            case "FR":
                vocOffset = 160;
                break;

            case "DE":
                vocOffset = 240;
                break;

            case "IT":
                vocOffset = 320;
                break;

            default:
                vocOffset = 0;
                break;
            }
            // Draw the bounding box only if it is in the center of the image (on the cursor)
            for (int i = 0; i < indices.total(); ++i)
            {
                int idx = (int)indices.get(i, 0)[0];
                OpenCVForUnity.CoreModule.Rect box = boxesList[idx];
                if (isOnCursor(box, cursorObject.GetComponent <Cursor>()))
                {
                    if (minigameList[wordFoundCounter] == classIdsList[idx])
                    {
                        drawPred(vocOffset + classIdsList[idx], confidencesList[idx], box.x, box.y,
                                 box.x + box.width, box.y + box.height, frame);
                        //Update the text summarizing the object encountered
                        vocIDList.Add(classIdsList[idx]);
                        //vocLearn.text += classNames[classIdsList[idx]] + "\t" + classNames[240 + classIdsList[idx]] + "\t" + classNames[160 + classIdsList[idx]] + "\t" + classNames[320 + classIdsList[idx]] + "\n";
                        EnglishText.text += "\n" + classNames[classIdsList[idx]];
                        SpanishText.text += "\n" + classNames[80 + classIdsList[idx]];
                        FrenchText.text  += "\n" + classNames[160 + classIdsList[idx]];
                        GermanText.text  += "\n" + classNames[240 + classIdsList[idx]];
                        ItalianText.text += "\n" + classNames[320 + classIdsList[idx]];
                        wordFound         = true;
                        Debug.Log("You found the" + classNames[classIdsList[idx]]);
                    }
                }
            }
            indices.Dispose();
            boxes.Dispose();
            confidences.Dispose();
        }
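
The switch above implies that classNames holds 80 class labels per language, concatenated in the order EN, ES, FR, DE, IT, so the selected language simply picks an offset into that list. The same rule as a small helper (illustrative; the 80-labels-per-language layout is inferred from the offsets, not stated in the snippet):

        // Illustrative: map the selected language to its offset into classNames,
        // assuming 80 labels per language in the order EN, ES, FR, DE, IT.
        static int GetVocOffset(string language)
        {
            switch (language)
            {
            case "ES": return 80;
            case "FR": return 160;
            case "DE": return 240;
            case "IT": return 320;
            default: return 0;   // "EN" and any unrecognized value
            }
        }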
Example #7
        /// <summary>
        /// Scanning the specified frame, outs and net.
        /// </summary>
        /// <param name="frame">Frame.</param>
        /// <param name="outs">Outs.</param>
        /// <param name="net">Net.</param>
        private void postscan(Mat frame, List <Mat> outs, Net net)
        {
            string       outLayerType    = outBlobTypes[0];
            List <int>   classIdsList    = new List <int>();
            List <float> confidencesList = new List <float>();
            List <OpenCVForUnity.CoreModule.Rect> boxesList = new List <OpenCVForUnity.CoreModule.Rect>();

            if (outLayerType == "Region")
            {
                for (int i = 0; i < outs.Count; ++i)
                {
                    // The network produces an output blob with shape NxC, where N is the number of
                    // detected objects and C is the number of classes + 4; the first 4
                    // numbers are [center_x, center_y, width, height]

                    //Debug.Log("outs[i].ToString() " + outs[i].ToString());

                    float[] positionData   = new float[5];
                    float[] confidenceData = new float[outs[i].cols() - 5];

                    for (int p = 0; p < outs[i].rows(); p++)
                    {
                        outs[i].get(p, 0, positionData);

                        outs[i].get(p, 5, confidenceData);

                        int   maxIdx     = confidenceData.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V > working.V) ? max : working).I;
                        float confidence = confidenceData[maxIdx];

                        if (confidence > confThreshold)
                        {
                            int centerX = (int)(positionData[0] * frame.cols());
                            int centerY = (int)(positionData[1] * frame.rows());
                            int width   = (int)(positionData[2] * frame.cols());
                            int height  = (int)(positionData[3] * frame.rows());
                            int left    = centerX - width / 2;
                            int top     = centerY - height / 2;

                            classIdsList.Add(maxIdx);
                            confidencesList.Add((float)confidence);
                            boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                        }
                    }
                }
            }
            else
            {
                Debug.Log("Unknown output layer type: " + outLayerType);
            }


            MatOfRect boxes = new MatOfRect();

            boxes.fromList(boxesList);

            MatOfFloat confidences = new MatOfFloat();

            confidences.fromList(confidencesList);


            MatOfInt indices = new MatOfInt();

            Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

            // For-loop for the mini game: if a new class appears, add it to the minigame list
            for (int i = 0; i < indices.total(); ++i)
            {
                int idx = (int)indices.get(i, 0)[0];

                if (!minigameList.Contains(classIdsList[idx]))
                {
                    Debug.Log(classNames[classIdsList[idx]]);
                    minigameList.Add(classIdsList[idx]);
                    if (minigameList.Count() > 1)
                    {
                        wordDisplay.text = minigameList.Count().ToString() + " words";
                    }
                    else
                    {
                        wordDisplay.text = minigameList.Count().ToString() + " word";
                    }
                }
            }
            indices.Dispose();
            boxes.Dispose();
            confidences.Dispose();
        }