Example #1
    public void takePhoto()
    {
        byte[]            bytes = ReturnTextureAsBytes(webcamTexture);
        Image <Rgb, byte> outputImage;

        activePicture = TakePicture(bytes);
        MemoryStream memoryStream = new MemoryStream(bytes);
        Bitmap       bmp          = new Bitmap(memoryStream);

        outputImage = new Image <Rgb, byte>(bmp);

        // Write the number of BLOBs found to the console
        Debug.Log(activePicture.returnBlobCentres().Count);

        if (activePicture.returnBlobCentres().Count == 3)
        {
            place = new MoustachePlacement(activePicture.returnBlobCentres(), activePicture.returnBlobCentres());
            place.SetMoustacheLocation();
            place.SetXRotationNoReference();

            // Write the computed moustache position and X rotation to the console
            Debug.Log("Position: " + place.GetLocation() + " xRotation: " + place.GetXRotation());
        }

        // Draw a 5x5 dot at each BLOB centre
        foreach (Point center in activePicture.returnBlobCentres())
        {
            for (int i = 0; i < 5; i++)
            {
                for (int j = 0; j < 5; j++)
                {
                    outputImage[center.Y - i, center.X - j] = new Rgb(0, 0, 255);
                }
            }
        }

        // Rotate the canvas so the image is not displayed upside-down and mirrored
        GetComponent <RectTransform>().Rotate(new Vector3(0, 180, 180));

        // Resize the canvas to match the image dimensions.
        GetComponent <RectTransform>().sizeDelta = new Vector2(outputImage.Width, outputImage.Height);

        // Apply the captured image to the canvas.
        Texture2D tex = ReturnAsTexture(outputImage);

        rawImage.texture = tex;
        rawImage.material.mainTexture = tex;
        // Mark the webcam as no longer active.
        webcamActive = false;

        // Count this button press and save the annotated image under the name returned by PhotoName().
        firstPress++;
        outputImage.Save(PhotoName());
    }
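
Example #1 relies on the helper methods ReturnTextureAsBytes, ReturnAsTexture and PhotoName, which are not shown on this page. The following is only a minimal sketch of what they might look like, assuming Unity with Emgu CV 3.x and System.Drawing; the method names and parameter types are taken from the call sites above, while the bodies (and the PNG round-trip they use) are assumptions, not the project's actual code.

    // Additional using directives needed at the top of the file:
    //   using System.IO;
    //   using Emgu.CV;
    //   using Emgu.CV.Structure;
    //   using UnityEngine;

    // Encode the current webcam frame as PNG bytes (assumed implementation).
    private byte[] ReturnTextureAsBytes(WebCamTexture camTexture)
    {
        Texture2D frame = new Texture2D(camTexture.width, camTexture.height);

        frame.SetPixels(camTexture.GetPixels());
        frame.Apply();
        return frame.EncodeToPNG();
    }

    // Convert an Emgu CV image back into a Unity texture (assumed implementation).
    private Texture2D ReturnAsTexture(Image <Rgb, byte> image)
    {
        using (MemoryStream ms = new MemoryStream())
        {
            image.ToBitmap().Save(ms, System.Drawing.Imaging.ImageFormat.Png);

            Texture2D tex = new Texture2D(image.Width, image.Height);

            tex.LoadImage(ms.ToArray()); // LoadImage resizes the texture to the PNG dimensions
            return tex;
        }
    }

    // Build a file name for the saved photo (assumed implementation).
    private string PhotoName()
    {
        return Application.persistentDataPath + "/photo_" + firstPress + ".png";
    }
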
Example #2
    // Take a picture: skin-color segmentation, edge detection and BLOB detection
    public BlobDetection TakePicture(byte[] bytes)
    {
        // Run skin-color segmentation on the raw image bytes
        SkinColorSegmentation scs            = new SkinColorSegmentation(bytes);
        Image <Gray, byte>    segmentedImage = scs.GetSkinRegion();

        segmentedImage = segmentedImage.ThresholdBinaryInv(new Gray(150), new Gray(255));

        // Run edge detection on the segmented image
        EdgeDetection ed = new EdgeDetection(segmentedImage);

        ed.DetectEdges();

        // Run BLOB detection on the detected edges
        BlobDetection blobDetector = new BlobDetection(ed.detectedEdges);

        blobDetector.DetectBlobs();

        return blobDetector;
    }
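
The SkinColorSegmentation, EdgeDetection and BlobDetection classes used here are project-specific and not listed on this page. As a rough illustration of the first step only, GetSkinRegion() could be implemented with Emgu CV's InRange on a YCrCb conversion; the class name and constructor signature are taken from the example above, while the body and the chroma thresholds (a common skin-colour heuristic) are assumptions rather than the project's actual code.

    using System.Drawing;
    using System.IO;
    using Emgu.CV;
    using Emgu.CV.Structure;

    public class SkinColorSegmentation
    {
        private readonly Image <Bgr, byte> input;

        public SkinColorSegmentation(byte[] bytes)
        {
            // Decode the encoded picture bytes into an Emgu CV image.
            using (MemoryStream ms = new MemoryStream(bytes))
            using (Bitmap bmp = new Bitmap(ms))
            {
                input = new Image <Bgr, byte>(bmp);
            }
        }

        public Image <Gray, byte> GetSkinRegion()
        {
            // Convert to YCrCb and keep pixels whose chroma lies in a commonly
            // used skin-colour range; the exact thresholds are a heuristic.
            Image <Ycc, byte> ycc = input.Convert <Ycc, byte>();

            return ycc.InRange(new Ycc(0, 133, 77), new Ycc(255, 173, 127));
        }
    }
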