public CIImage GaussianGradient()
{
    var centerVector = new CIVector(100, 100); // Default is [150 150]
    var color1 = CIColor.FromRgba(1, 0, 1, 1);
    var color0 = CIColor.FromRgba(0, 1, 1, 1);

    var gaussGradient = new CIGaussianGradient()
    {
        Center = centerVector,
        Color0 = color0,
        Color1 = color1,
        Radius = 280f // Default is 300
    };

    return Crop(gaussGradient);
}
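// NOTE: The Crop() helper called above is referenced but not shown in this listing.
// A minimal sketch of what it might look like, assuming it simply clips the
// gradient's infinite extent to a fixed working rectangle (the 300x300 rectangle
// below is an illustrative assumption, not the original code):
CIImage Crop(CIFilter filter)
{
    var crop = new CICrop()
    {
        Image = filter.OutputImage,
        Rectangle = new CIVector(0, 0, 300, 300) // x, y, width, height (assumed)
    };
    return crop.OutputImage;
}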
/// <summary>
/// Gets called by the VideoFrameSamplerDelegate if a new image has been captured. Does the rectangle detection.
/// </summary>
/// <param name="sender">Sender.</param>
/// <param name="e">Event arguments.</param>
void HandleImageCaptured(object sender, ImageCaptureEventArgs e)
{
    // Detect the rectangles in the captured image.
    // Important: cast CGImage to CIImage. There is an implicit cast operator from CGImage to CIImage, but if we
    // passed the CGImage into FeaturesInImage(), many (implicit) CIImage instances would be created because this
    // method is called very often. The garbage collector cannot keep up with that and we run out of memory.
    // By casting manually and wrapping the CIImage in using(), it is disposed immediately, freeing up memory.
    using (CIImage inputCIImage = (CIImage)e.Image)
    {
        // Let the detector do its work on the image.
        var rectangles = detector.FeaturesInImage(inputCIImage);

        // Find the biggest rectangle. Note: in my tests I have never seen more than one rectangle
        // detected, but better be prepared.
        nfloat maxWidth = 0f;
        nfloat maxHeight = 0f;
        CIRectangleFeature biggestRect = rectangles.Length > 0 ? (CIRectangleFeature)rectangles[0] : null;

        Console.WriteLine("Found " + rectangles.Length + " rectangles.");

        foreach (CIRectangleFeature rect in rectangles)
        {
            Console.WriteLine("Found rect: " + rect);
            nfloat minX = (nfloat)Math.Min(rect.TopLeft.X, rect.BottomLeft.X);
            nfloat minY = (nfloat)Math.Min(rect.TopLeft.Y, rect.TopRight.Y);
            nfloat maxX = (nfloat)Math.Max(rect.TopRight.X, rect.BottomRight.X);
            nfloat maxY = (nfloat)Math.Max(rect.BottomLeft.Y, rect.BottomRight.Y);

            if (maxX - minX > maxWidth && maxY - minY > maxHeight)
            {
                maxWidth = maxX - minX;
                maxHeight = maxY - minY;
                biggestRect = rect;
            }
        }

        if (biggestRect == null)
        {
            this.InvokeOnMainThread(() =>
            {
                this.imageViewOverlay.Image = null;
                this.imageViewPerspective.Image = null;
            });
            return;
        }

        Console.WriteLine("Highlighting: top left = " + biggestRect.TopLeft
            + "; top right = " + biggestRect.TopRight
            + "; bottom left = " + biggestRect.BottomLeft
            + "; bottom right = " + biggestRect.BottomRight);

        // We are not on the main thread here.
        this.InvokeOnMainThread(() =>
        {
            // Adjust the overlay image to the corners of the detected rectangle with CIPerspectiveTransformWithExtent.
            using (var dict = new NSMutableDictionary())
            {
                dict.Add(key: new NSString("inputExtent"), value: new CIVector(inputCIImage.Extent));
                dict.Add(key: new NSString("inputTopLeft"), value: new CIVector(biggestRect.TopLeft));
                dict.Add(key: new NSString("inputTopRight"), value: new CIVector(biggestRect.TopRight));
                dict.Add(key: new NSString("inputBottomLeft"), value: new CIVector(biggestRect.BottomLeft));
                dict.Add(key: new NSString("inputBottomRight"), value: new CIVector(biggestRect.BottomRight));

                // Create a semi-transparent CIImage which will show the detected rectangle.
                using (var overlayCIImage = new CIImage(color: CIColor.FromRgba(red: 1.0f, green: 0f, blue: 0f, alpha: 0.5f))
                    // Size it to the source image.
                    .ImageByCroppingToRect(inputCIImage.Extent)
                    // Apply perspective distortion to the overlay rectangle to map it to the current camera picture.
                    .CreateByFiltering("CIPerspectiveTransformWithExtent", dict)
                    // Place the overlay on the image.
                    .CreateByCompositingOverImage(inputCIImage))
                {
                    // Must convert the CIImage into a CGImage and from there into a UIImage.
                    // We could go directly from CIImage to UIImage, but when assigning the result to a UIImageView,
                    // the ContentMode of the image view would be ignored and no proper aspect scaling would take place.
                    using (var ctx = CIContext.FromOptions(null))
                    using (CGImage convertedCGImage = ctx.CreateCGImage(overlayCIImage, overlayCIImage.Extent))
                    // This crashes with Xamarin.iOS:
                    //using (UIImage convertedUIImage = UIImage.FromImage(convertedCGImage, 1f, UIApplication.SharedApplication.StatusBarOrientation == UIInterfaceOrientation.LandscapeLeft ? UIImageOrientation.DownMirrored : UIImageOrientation.UpMirrored))
                    // This works:
                    using (UIImage convertedUIImage = UIImage.FromImage(convertedCGImage))
                    {
                        // Show the converted image in the UI.
                        this.imageViewOverlay.Image = convertedUIImage;
                    }
                }
            }

            // Apply a perspective correction with CIPerspectiveCorrection to the detected rectangle
            // and display it in another UIImageView.
            using (var dict = new NSMutableDictionary())
            {
                dict.Add(key: new NSString("inputTopLeft"), value: new CIVector(biggestRect.TopLeft));
                dict.Add(key: new NSString("inputTopRight"), value: new CIVector(biggestRect.TopRight));
                dict.Add(key: new NSString("inputBottomLeft"), value: new CIVector(biggestRect.BottomLeft));
                dict.Add(key: new NSString("inputBottomRight"), value: new CIVector(biggestRect.BottomRight));

                // Again go CIImage -> CGImage -> UIImage to prevent scaling issues (see above).
                using (var perspectiveCorrectedImage = inputCIImage.CreateByFiltering("CIPerspectiveCorrection", dict))
                using (var ctx = CIContext.FromOptions(null))
                using (CGImage convertedCGImage = ctx.CreateCGImage(perspectiveCorrectedImage, perspectiveCorrectedImage.Extent))
                using (UIImage convertedUIImage = UIImage.FromImage(convertedCGImage))
                {
                    this.imageViewPerspective.Image = convertedUIImage;
                }
            }
        });
    }
    Console.WriteLine("---------------------");
}
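// NOTE: The 'detector' field used in HandleImageCaptured() is not created in this
// listing. A minimal sketch of how such a rectangle detector might be set up,
// assuming CIDetector.CreateRectangleDetector() with a CIDetectorOptions instance;
// the concrete option values below are illustrative assumptions, not the original
// configuration:
CIDetector detector;

void CreateRectangleDetector()
{
    var options = new CIDetectorOptions
    {
        Accuracy = FaceDetectorAccuracy.High, // The accuracy setting also applies to rectangle detection.
        AspectRatio = 1.41f                   // Prefer roughly DIN-A4-shaped rectangles (assumed value).
    };
    // Passing null for the context lets Core Image use a default context.
    detector = CIDetector.CreateRectangleDetector(null, options);
}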