/// <summary>
/// Called automatically whenever an activity finishes. Grabs the camera thumbnail
/// from the result intent, sends it to the Google Vision API for label detection,
/// and displays it in the ImageView.
/// </summary>
/// <param name="requestCode">Identifies which request is returning.</param>
/// <param name="resultCode">Result status reported by the finished activity.</param>
/// <param name="data">Intent carrying the result payload (camera thumbnail in the "data" extra).</param>
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    base.OnActivityResult(requestCode, resultCode, data);

    // Make image available in the gallery.
    /*
     * Intent mediaScanIntent = new Intent(Intent.ActionMediaScannerScanFile);
     * var contentUri = Android.Net.Uri.FromFile(_file);
     * mediaScanIntent.SetData(contentUri);
     * SendBroadcast(mediaScanIntent);
     */

    // Display in ImageView. We will resize the bitmap to fit the display.
    // Loading the full sized image will consume too much memory
    // and cause the application to crash.
    ImageView imageView = FindViewById<ImageView>(Resource.Id.takenPictureImageView);
    int height = Resources.DisplayMetrics.HeightPixels;
    // NOTE(review): width is read from the view's Height — looks like a copy/paste
    // slip, but both values are currently unused because we display the thumbnail as-is.
    int width = imageView.Height;

    // FIX: guard against a cancelled capture; previously data.Extras threw a
    // NullReferenceException when the user backed out of the camera.
    if (data == null || data.Extras == null)
    {
        return;
    }

    // AC: workaround for not passing actual files — the camera returns a small
    // thumbnail in the "data" extra instead of the full-resolution image.
    Android.Graphics.Bitmap bitmap = (Android.Graphics.Bitmap)data.Extras.Get("data");

    // Specify where credentials to load are located (app assets).
    string credPath = "google_api.json";
    Google.Apis.Auth.OAuth2.GoogleCredential credential;

    // Load our cert into the credential.
    using (var stream = Assets.Open(credPath))
    {
        credential = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
    }
    credential = credential.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

    var client = new Google.Apis.Vision.v1.VisionService(
        new BaseClientService.Initializer()
        {
            ApplicationName = "cs480firsttest-195221",
            HttpClientInitializer = credential
        }
    );

    // Tell Google that we want to perform feature analysis.
    var request = new AnnotateImageRequest();
    request.Image = new Image();

    // Convert our bitmap into a byte[] to send to Google.
    using (var stream = new System.IO.MemoryStream())
    {
        // FIX: quality was 0 (maximum compression), which degrades the image so far
        // that label detection becomes unreliable; sibling methods in this file use 100.
        bitmap.Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 100, stream);
        request.Image.Content = System.Convert.ToBase64String(stream.ToArray());
    }

    // Say we want Google to use label detection.
    request.Features = new List<Feature>();
    request.Features.Add(new Feature() { Type = "LABEL_DETECTION" });

    // Add to the list of items to send to Google.
    var batch = new BatchAnnotateImagesRequest();
    batch.Requests = new List<AnnotateImageRequest>();
    batch.Requests.Add(request);

    // Finally, make the call (synchronous; blocks the calling thread).
    var apiResult = client.Images.Annotate(batch).Execute();

    // Collect the returned label descriptions.
    List<string> tags = new List<string>();
    foreach (var item in apiResult.Responses[0].LabelAnnotations)
    {
        tags.Add(item.Description);
    }

    if (bitmap != null)
    {
        imageView.SetImageBitmap(bitmap);
        imageView.Visibility = Android.Views.ViewStates.Visible;
        bitmap = null;
    }

    // NOTE(review): forcing a GC does not dispose the Java-side bitmap; calling
    // bitmap.Dispose() once it is no longer displayed would be more reliable.
    System.GC.Collect();
}
/// <summary>
/// Called automatically whenever an activity finishes. Registers the captured photo
/// file with the media gallery, loads a display-sized bitmap from it, and sends it
/// to the Google Vision API for label detection.
/// </summary>
/// <param name="requestCode">Identifies which request is returning.</param>
/// <param name="resultCode">Result status reported by the finished activity.</param>
/// <param name="data">Intent carrying the activity's result payload.</param>
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    base.OnActivityResult(requestCode, resultCode, data);

    // Make it available in the gallery.
    Intent mediaScanIntent = new Intent(Intent.ActionMediaScannerScanFile);
    var contentUri = Android.Net.Uri.FromFile(_file);
    mediaScanIntent.SetData(contentUri);
    SendBroadcast(mediaScanIntent);

    // Display in ImageView. We will resize the bitmap to fit the display.
    // Loading the full sized image will consume too much memory
    // and cause the application to crash.
    ImageView imageView = FindViewById<ImageView>(Resource.Id.imageView1);
    int height = Resources.DisplayMetrics.HeightPixels;
    // NOTE(review): width is read from the view's Height — looks like a copy/paste slip; confirm.
    int width = imageView.Height;
    _bitmap = _file.Path.LoadAndResizeBitmap(width, height);

    // Encode the bitmap as a base64 JPEG string for the Vision API.
    // SECURITY(review): dead commented-out code that embedded a hard-coded API key
    // was removed here; any key ever committed to this file should be revoked.
    string bitmapString = "";
    using (var stream = new System.IO.MemoryStream())
    {
        // FIX: quality was 0 (maximum compression), which degrades the image so far
        // that label detection becomes unreliable; sibling methods in this file use 100.
        _bitmap.Compress(Bitmap.CompressFormat.Jpeg, 100, stream);
        var bytes = stream.ToArray();
        bitmapString = System.Convert.ToBase64String(bytes);
    }

    // Get active credential. The service-account JSON lives in the app's assets.
    string credPath = "google_api.json";
    Google.Apis.Auth.OAuth2.GoogleCredential cred;
    using (var stream = Assets.Open(credPath))
    {
        cred = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
    }
    cred = cred.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

    // By default, the Google client libraries authenticate using the service account
    // file named by the GOOGLE_APPLICATION_CREDENTIALS environment variable (or
    // automatically on a Google Compute Engine VM). Here we supply the credential
    // explicitly from the bundled JSON file instead.
    var client = new Google.Apis.Vision.v1.VisionService(new Google.Apis.Services.BaseClientService.Initializer()
    {
        ApplicationName = "subtle-isotope-190917",
        HttpClientInitializer = cred
    });

    // Build a single label-detection request for the captured image.
    var request = new Google.Apis.Vision.v1.Data.AnnotateImageRequest();
    request.Image = new Google.Apis.Vision.v1.Data.Image();
    request.Image.Content = bitmapString;
    request.Features = new List<Google.Apis.Vision.v1.Data.Feature>();
    request.Features.Add(new Google.Apis.Vision.v1.Data.Feature() { Type = "LABEL_DETECTION" });

    var batch = new Google.Apis.Vision.v1.Data.BatchAnnotateImagesRequest();
    batch.Requests = new List<Google.Apis.Vision.v1.Data.AnnotateImageRequest>();
    batch.Requests.Add(request);

    // Synchronous call; the result is currently unused beyond confirming the round trip.
    var apiResult = client.Images.Annotate(batch).Execute();

    if (_bitmap != null)
    {
        imageView.SetImageBitmap(_bitmap);
        _bitmap = null;
    }

    // NOTE(review): GC.Collect() does not dispose the Java-side bitmap;
    // prefer Dispose() when the bitmap is no longer needed.
    System.GC.Collect();
}
/// <summary>
/// Called automatically whenever an activity finishes. Registers the captured photo
/// with the media gallery, runs Google Vision label detection on it, and shows the
/// returned labels as toasts.
/// </summary>
/// <param name="requestCode">Identifies which request is returning.</param>
/// <param name="resultCode">Result status reported by the finished activity.</param>
/// <param name="data">Intent carrying the activity's result payload.</param>
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    base.OnActivityResult(requestCode, resultCode, data);

    // Send to image gallery.
    Intent mediaScanIntent = new Intent(Intent.ActionMediaScannerScanFile);
    var contentUri = Android.Net.Uri.FromFile(_file);
    mediaScanIntent.SetData(contentUri);
    SendBroadcast(mediaScanIntent);

    // Display in ImageView. We will resize the bitmap to fit the display.
    // Loading the full sized image will consume too much memory
    // and cause the application to crash.
    ImageView imageView = FindViewById<ImageView>(Resource.Id.takenPictureImageView);
    int height = Resources.DisplayMetrics.HeightPixels;
    // NOTE(review): width is read from the view's Height — looks like a copy/paste slip; confirm.
    int width = imageView.Height;

    // AC: workaround for not passing actual files
    //Android.Graphics.Bitmap bitmap = (Android.Graphics.Bitmap)data.Extras.Get("data");

    // Load picture from file, resized to fit the display.
    Android.Graphics.Bitmap bitmap = _file.Path.LoadAndResizeBitmap(width, height);

    // Convert bitmap into a base64 string to be sent to the Google API.
    string bitmapString = "";
    using (var stream = new System.IO.MemoryStream())
    {
        bitmap.Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 100, stream);
        var bytes = stream.ToArray();
        bitmapString = System.Convert.ToBase64String(bytes);
    }

    // Credential is stored in the "assets" folder.
    string credPath = "google_api.json";
    Google.Apis.Auth.OAuth2.GoogleCredential cred;

    // Load credentials into object form.
    using (var stream = Assets.Open(credPath))
    {
        cred = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
    }
    cred = cred.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

    // By default, the library client authenticates using the service account file
    // (created in the Google Developers Console) named by the
    // GOOGLE_APPLICATION_CREDENTIALS environment variable. We are specifying our
    // own credentials via json file.
    // FIX: the second half of this comment previously sat on a line with no "//"
    // marker, which was a compile error.
    var client = new Google.Apis.Vision.v1.VisionService(new Google.Apis.Services.BaseClientService.Initializer()
    {
        ApplicationName = "subtle-isotope-190917",
        HttpClientInitializer = cred
    });
    // Project ids tried during development:
    // "subtle-isotope-190917",
    // "fabled-orbit-195800",

    // Set up request.
    var request = new Google.Apis.Vision.v1.Data.AnnotateImageRequest();
    request.Image = new Google.Apis.Vision.v1.Data.Image();
    request.Image.Content = bitmapString;

    // Tell Google that we want to perform label detection.
    request.Features = new List<Google.Apis.Vision.v1.Data.Feature>();
    request.Features.Add(new Google.Apis.Vision.v1.Data.Feature() { Type = "LABEL_DETECTION" });

    var batch = new Google.Apis.Vision.v1.Data.BatchAnnotateImagesRequest();
    batch.Requests = new List<Google.Apis.Vision.v1.Data.AnnotateImageRequest>();
    batch.Requests.Add(request);

    // Send request. Note that Execute() blocks the calling thread; you might want
    // to use ExecuteAsync instead.
    var apiResult = client.Images.Annotate(batch).Execute();
    //var apiResult = client.Images.Annotate(batch).ExecuteAsync();

    if (bitmap != null)
    {
        imageView.SetImageBitmap(bitmap);
        imageView.Visibility = Android.Views.ViewStates.Visible;
        bitmap = null;
    }

    if (apiResult != null)
    {
        Toast.MakeText(this.ApplicationContext, apiResult.ToString(), ToastLength.Short).Show();

        // Adapted from zeefree's repo (Zach Freeman).
        List<string> labels = new List<string>();
        foreach (var item in apiResult.Responses[0].LabelAnnotations)
        {
            labels.Add(item.Description);
        }
        foreach (string item in labels)
        {
            Toast.MakeText(this.ApplicationContext, item + "?", ToastLength.Short).Show();
        }
    }

    // NOTE(review): GC.Collect() does not dispose the Java-side bitmap;
    // prefer Dispose() when the bitmap is no longer needed.
    System.GC.Collect();
}
/// <summary>
/// Called automatically whenever an activity finishes. Sends the camera thumbnail to
/// the Google Vision API for label detection, shows the top label, and asks the user
/// to confirm it via the yes/no buttons.
/// </summary>
/// <param name="requestCode">Identifies which request is returning.</param>
/// <param name="resultCode">Result status reported by the finished activity.</param>
/// <param name="data">Intent carrying the result payload (camera thumbnail in the "data" extra).</param>
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    base.OnActivityResult(requestCode, resultCode, data);

    // Display in ImageView. We will resize the bitmap to fit the display.
    // Loading the full sized image will consume too much memory
    // and cause the application to crash.
    ImageView imageView = FindViewById<ImageView>(Resource.Id.takenPictureImageView);
    Button pressYes = FindViewById<Button>(Resource.Id.yes);
    Button pressNo = FindViewById<Button>(Resource.Id.no);
    int height = Resources.DisplayMetrics.HeightPixels;
    // NOTE(review): width is read from the view's Height — looks like a copy/paste slip; confirm.
    int width = imageView.Height;

    // FIX: guard against a cancelled capture; previously data.Extras threw a
    // NullReferenceException when the user backed out of the camera.
    if (data == null || data.Extras == null)
    {
        return;
    }

    // AC: workaround for not passing actual files — the camera returns a thumbnail
    // in the "data" extra instead of the full-resolution image.
    Android.Graphics.Bitmap bitmap = (Android.Graphics.Bitmap)data.Extras.Get("data");

    // Convert bitmap into a base64 string to be sent to the Google API.
    string bitmapString = "";
    using (var stream = new System.IO.MemoryStream())
    {
        // FIX: quality was 0 (maximum compression), which degrades the image so far
        // that label detection becomes unreliable; sibling methods in this file use 100.
        bitmap.Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 100, stream);
        var bytes = stream.ToArray();
        bitmapString = System.Convert.ToBase64String(bytes);
    }

    // Credential is stored in the "assets" folder.
    string credPath = "google_api.json";
    Google.Apis.Auth.OAuth2.GoogleCredential cred;

    // Load credentials into object form.
    using (var stream = Assets.Open(credPath))
    {
        cred = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
    }
    cred = cred.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

    // By default, the library client authenticates using the service account file
    // named by the GOOGLE_APPLICATION_CREDENTIALS environment variable.
    // We are specifying our own credentials via json file.
    var client = new Google.Apis.Vision.v1.VisionService(new Google.Apis.Services.BaseClientService.Initializer()
    {
        ApplicationName = "subtle-isotope-190917",
        HttpClientInitializer = cred
    });

    // Set up request.
    var request = new Google.Apis.Vision.v1.Data.AnnotateImageRequest();
    request.Image = new Google.Apis.Vision.v1.Data.Image();
    request.Image.Content = bitmapString;

    // Tell Google that we want to perform label detection.
    request.Features = new List<Google.Apis.Vision.v1.Data.Feature>();
    request.Features.Add(new Google.Apis.Vision.v1.Data.Feature() { Type = "LABEL_DETECTION" });

    var batch = new Google.Apis.Vision.v1.Data.BatchAnnotateImagesRequest();
    batch.Requests = new List<Google.Apis.Vision.v1.Data.AnnotateImageRequest>();
    batch.Requests.Add(request);

    // Send request. Note that Execute() blocks the calling thread; you might want
    // to use ExecuteAsync instead.
    var apiResult = client.Images.Annotate(batch).Execute();

    // Display the API result on screen.
    var display_text1 = FindViewById<TextView>(Resource.Id.api_response_1);
    var display_text3 = FindViewById<TextView>(Resource.Id.api_response_3);

    // FIX: guard the [0] indexers — Vision may return zero labels, in which case the
    // original code threw ArgumentOutOfRangeException.
    var annotations = apiResult.Responses[0].LabelAnnotations;
    if (annotations != null && annotations.Count > 0)
    {
        display_text1.Text = annotations[0].Description;
    }
    display_text3.Text = "Is this your picture";

    // Buttons become visible from here.
    if (bitmap != null)
    {
        imageView.Visibility = Android.Views.ViewStates.Visible;
        pressYes.Visibility = Android.Views.ViewStates.Visible;
        pressNo.Visibility = Android.Views.ViewStates.Visible;
        imageView.SetImageBitmap(bitmap);
        bitmap = null;
    }

    // Whichever button the user clicks shows a different message and hides both buttons.
    // NOTE(review): these handlers are re-subscribed every time this method runs,
    // so they accumulate across captures; consider attaching them once in OnCreate.
    pressYes.Click += delegate
    {
        display_text3.Text = "We did it";
        pressYes.Visibility = Android.Views.ViewStates.Invisible;
        pressNo.Visibility = Android.Views.ViewStates.Invisible;
    };
    pressNo.Click += delegate
    {
        display_text3.Text = "Damn......Next time";
        pressYes.Visibility = Android.Views.ViewStates.Invisible;
        pressNo.Visibility = Android.Views.ViewStates.Invisible;
    };

    // NOTE(review): GC.Collect() does not dispose the Java-side bitmap;
    // prefer Dispose() when the bitmap is no longer needed.
    System.GC.Collect();
}
/// <summary>
/// Called automatically whenever an activity finishes. Runs Google Vision label
/// detection on the camera thumbnail, records the top labels with their confidence
/// in MainActivity.Items, and asks the user to confirm the guess.
/// </summary>
/// <param name="requestCode">Identifies which request is returning (100 = picture taken).</param>
/// <param name="resultCode">Result status reported by the finished activity.</param>
/// <param name="data">Intent carrying the result payload (camera thumbnail in the "data" extra).</param>
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    base.OnActivityResult(requestCode, resultCode, data);

    if (requestCode == 100)
    {
        SetContentView(Resource.Layout.IsThis);
        //var txtName = FindViewById<TextView>(Resource.Id.isThis);
        //these are the variables for the IsThis layout
        //var yesbtn = FindViewById<Button>(Resource.Id.ybtn);
        //var nobtn = FindViewById<Button>(Resource.Id.nbtn);
    }
    if (resultCode == Result.FirstUser)
    {
        // NOTE(review): the extra's value is read but discarded — either capture it
        // or remove the call; confirm the original intent.
        data.GetStringExtra("newAnswer");
        var myintent = new Intent(this, typeof(InGoogle));
        StartActivity(myintent);
    }

    // Display in ImageView. We will resize the bitmap to fit the display.
    // Loading the full sized image will consume too much memory
    // and cause the application to crash.
    ImageView imageView = FindViewById<ImageView>(Resource.Id.takenPictureImageView);
    int height = Resources.DisplayMetrics.HeightPixels;
    // NOTE(review): width is read from the view's Height — looks like a copy/paste slip; confirm.
    int width = imageView.Height;

    // AC: workaround for not passing actual files — the camera returns a thumbnail
    // in the "data" extra instead of the full-resolution image.
    Android.Graphics.Bitmap bitmap = (Android.Graphics.Bitmap)data.Extras.Get("data");

    // Convert bitmap into a base64 string to be sent to the Google API.
    string bitmapString = "";
    using (var stream = new System.IO.MemoryStream())
    {
        bitmap.Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 100, stream);
        var bytes = stream.ToArray();
        bitmapString = System.Convert.ToBase64String(bytes);
    }

    // Credential is stored in the "assets" folder.
    string credPath = "google_api.json";
    Google.Apis.Auth.OAuth2.GoogleCredential cred;

    // Load credentials into object form.
    using (var stream = Assets.Open(credPath))
    {
        cred = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
    }
    cred = cred.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

    // By default, the library client authenticates using the service account file
    // named by the GOOGLE_APPLICATION_CREDENTIALS environment variable.
    // We are specifying our own credentials via json file.
    var client = new Google.Apis.Vision.v1.VisionService(new Google.Apis.Services.BaseClientService.Initializer()
    {
        ApplicationName = "subtle-isotope-190917",
        HttpClientInitializer = cred
    });

    // Set up request.
    var request = new Google.Apis.Vision.v1.Data.AnnotateImageRequest();
    request.Image = new Google.Apis.Vision.v1.Data.Image();
    request.Image.Content = bitmapString;

    // Tell Google that we want to perform label detection.
    request.Features = new List<Google.Apis.Vision.v1.Data.Feature>();
    request.Features.Add(new Google.Apis.Vision.v1.Data.Feature() { Type = "LABEL_DETECTION" });

    var batch = new Google.Apis.Vision.v1.Data.BatchAnnotateImagesRequest();
    batch.Requests = new List<Google.Apis.Vision.v1.Data.AnnotateImageRequest>();
    batch.Requests.Add(request);

    // Send request. Note that Execute() blocks the calling thread; you might want
    // to use ExecuteAsync instead.
    var apiResult = client.Images.Annotate(batch).Execute();

    var txtName = FindViewById<TextView>(Resource.Id.isThis);
    // These are the variables for the IsThis layout.
    var yesbtn = FindViewById<Button>(Resource.Id.ybtn);
    var nobtn = FindViewById<Button>(Resource.Id.nbtn);

    // Turn each confidence float into a percentage string; this data feeds the
    // GoogleResponse activity if the user wants the full list of labels.
    // FIX: the original loop ran i = 0..3 unconditionally and threw
    // ArgumentOutOfRangeException when Google returned fewer than four labels.
    var annotations = apiResult.Responses[0].LabelAnnotations;
    int labelCount = System.Math.Min(4, annotations.Count);
    for (int i = 0; i < labelCount; i++)
    {
        float percentConfident = (float)annotations[i].Score * 100;
        int confidence = (int)percentConfident;
        string percent = confidence.ToString() + "%";
        string thing = annotations[i].Description;
        MainActivity.Items.Add(new Items(thing, percent));
    }

    // NOTE(review): original carried a "bug here 0 v position" note — verify that
    // `position` indexes the intended item rather than always the first one.
    var isIt = MainActivity.Items[position];
    String whatBe = "Is this a " + isIt.Thing + " ?! I am " + isIt.Percent + " confident.";
    txtName.Text = whatBe;

    // No Intent hand-off needed here: results are shared via a public class
    // (MainActivity.Items) instead of being passed between activities.
    // Button onClick handlers direct to the Succeed or Darn layouts.
    FindViewById<Button>(Resource.Id.nbtn).Click += DarnActivityClick;
    FindViewById<Button>(Resource.Id.ybtn).Click += SuccessActivity;

    if (bitmap != null)
    {
        imageView.SetImageBitmap(bitmap);
        imageView.Visibility = Android.Views.ViewStates.Visible;
        bitmap = null;
    }

    // NOTE(review): GC.Collect() does not dispose the Java-side bitmap;
    // prefer Dispose() when the bitmap is no longer needed.
    System.GC.Collect();
}
/// <summary>
/// Sends the bitmap currently displayed in the main ImageView to the Google Vision
/// API for label detection, then stores the returned label descriptions and scores
/// in the tags/percentage collections and refreshes the UI.
/// </summary>
private void UploadToCloud()
{
    ImageView imageView = FindViewById<ImageView>(Resource.Id.mainImage);
    Android.Graphics.Drawables.BitmapDrawable bd = (Android.Graphics.Drawables.BitmapDrawable)imageView.Drawable;
    Android.Graphics.Bitmap bitmap = bd.Bitmap;

    // Credentials to load are located in the app's Assets folder.
    string credPath = "google_api.json";
    Google.Apis.Auth.OAuth2.GoogleCredential credential;

    // Load our cert into the credential.
    using (var stream = Assets.Open(credPath))
    {
        credential = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
    }
    credential = credential.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

    var client = new Google.Apis.Vision.v1.VisionService(
        new BaseClientService.Initializer()
        {
            ApplicationName = "cs480firsttest-195221",
            HttpClientInitializer = credential
        }
    );

    // Tell Google that we want to perform feature analysis.
    var request = new AnnotateImageRequest();
    request.Image = new Image();

    // Convert our bitmap into a byte[] to send to Google.
    using (var stream = new System.IO.MemoryStream())
    {
        // FIX: quality was 0 (maximum compression), which degrades the image so far
        // that label detection becomes unreliable; sibling methods in this file use 100.
        bitmap.Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 100, stream);
        request.Image.Content = System.Convert.ToBase64String(stream.ToArray());
    }

    // Say we want Google to use label detection.
    request.Features = new List<Feature>();
    request.Features.Add(new Feature() { Type = "LABEL_DETECTION" });

    // Add to the list of items to send to Google.
    var batch = new BatchAnnotateImagesRequest();
    batch.Requests = new List<AnnotateImageRequest>();
    batch.Requests.Add(request);

    // Finally, make the call (synchronous; blocks the calling thread).
    var apiResult = client.Images.Annotate(batch).Execute();

    // Record each label's description and confidence in the shared collections.
    foreach (var item in apiResult.Responses[0].LabelAnnotations)
    {
        tags.Add(item.Description);
        percentage.Add((float)(item.Score));
    }
    AdjustTextView();
    SwitchState();
}
/// <summary>
/// Called automatically whenever an activity finishes. On a successful capture,
/// scans the photo file into the gallery, loads a display-sized bitmap, runs Google
/// Vision label detection on it, and appends the top label to the on-screen text;
/// otherwise returns to the main layout so the user can retry.
/// </summary>
/// <param name="requestCode">Identifies which request is returning.</param>
/// <param name="resultCode">Result status reported by the finished activity.</param>
/// <param name="data">Intent carrying the activity's result payload.</param>
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    Android.Graphics.Bitmap bitmap = null;
    ImageView imageView = null;
    base.OnActivityResult(requestCode, resultCode, data);
    SetContentView(Resource.Layout.TakenPic);

    // Display in ImageView. We will resize the bitmap to fit the display.
    // Loading the full sized image will consume too much memory
    // and cause the application to crash.
    // NOTE(review): `success` is a field set elsewhere — confirm it reflects the
    // outcome of THIS capture and not a stale value.
    if (success == true)
    {
        Intent mediaScanIntent = new Intent(Intent.ActionMediaScannerScanFile);
        var contentUri = Android.Net.Uri.FromFile(_file);
        mediaScanIntent.SetData(contentUri);
        SendBroadcast(mediaScanIntent);

        // FIX: removed an exact duplicate of this FindViewById call.
        imageView = FindViewById<ImageView>(Resource.Id.takenPicture);
        int height = Resources.DisplayMetrics.HeightPixels;
        int width = 1024;
        bitmap = _file.Path.LoadAndResizeBitmap(width, height);
    }

    if (bitmap != null)
    {
        imageView.SetImageBitmap(bitmap);
        imageView.Visibility = Android.Views.ViewStates.Visible;
    }
    else
    {
        // Capture failed — fall back to the main layout and re-wire the camera button.
        SetContentView(Resource.Layout.Main);
        if (IsThereAnAppToTakePictures() == true)
        {
            CreateDirectoryForPictures();
            // NOTE(review): this re-subscribes TakePicture every time a capture
            // fails, so handlers accumulate; consider attaching once in OnCreate.
            FindViewById<Button>(Resource.Id.launchCameraButton).Click += TakePicture;
        }
    }

    if (bitmap != null)
    {
        // Convert bitmap into a base64 string to be sent to the Google API.
        string bitmapString = "";
        using (var stream = new System.IO.MemoryStream())
        {
            // FIX: quality was 0 (maximum compression), which degrades the image so
            // far that label detection becomes unreliable; siblings use 100.
            bitmap.Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 100, stream);
            var bytes = stream.ToArray();
            bitmapString = System.Convert.ToBase64String(bytes);
        }

        // Credential is stored in the "assets" folder; load it into object form.
        string credPath = "google_api.json";
        Google.Apis.Auth.OAuth2.GoogleCredential cred;
        using (var stream = Assets.Open(credPath))
        {
            cred = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
        }
        cred = cred.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

        // By default, the library client authenticates using the service account file
        // named by the GOOGLE_APPLICATION_CREDENTIALS environment variable.
        // We are specifying our own credentials via json file.
        var client = new Google.Apis.Vision.v1.VisionService(new Google.Apis.Services.BaseClientService.Initializer()
        {
            ApplicationName = "GoogleApiExample",
            HttpClientInitializer = cred
        });

        // Set up a single label-detection request for the captured image.
        var request = new Google.Apis.Vision.v1.Data.AnnotateImageRequest();
        request.Image = new Google.Apis.Vision.v1.Data.Image();
        request.Image.Content = bitmapString;
        request.Features = new List<Google.Apis.Vision.v1.Data.Feature>();
        request.Features.Add(new Google.Apis.Vision.v1.Data.Feature() { Type = "LABEL_DETECTION" });

        var batch = new Google.Apis.Vision.v1.Data.BatchAnnotateImagesRequest();
        batch.Requests = new List<Google.Apis.Vision.v1.Data.AnnotateImageRequest>();
        batch.Requests.Add(request);

        // Send request. Note that Execute() blocks the calling thread; you might
        // want to use ExecuteAsync instead.
        var apiResult = client.Images.Annotate(batch).Execute();

        // FIX: guard the [0] indexer — Vision may return zero labels, in which case
        // the original code threw ArgumentOutOfRangeException.
        var annotations = apiResult.Responses[0].LabelAnnotations;
        if (annotations != null && annotations.Count > 0)
        {
            thing = annotations[0].Description;
            FindViewById<TextView>(Resource.Id.yourpic).Text += thing;
        }
    }

    // NOTE(review): GC.Collect() does not dispose the Java-side bitmap;
    // prefer Dispose() when the bitmap is no longer needed.
    System.GC.Collect();
}
/// <summary>
/// Called automatically whenever an activity finishes. Stores the camera thumbnail
/// in the shared image object, displays it on the game layout, runs Google Vision
/// label detection on it, and saves the returned labels as the image's tags.
/// </summary>
/// <param name="requestCode">Identifies which request is returning.</param>
/// <param name="resultCode">Result status reported by the finished activity.</param>
/// <param name="data">Intent carrying the result payload (camera thumbnail in the "data" extra).</param>
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    base.OnActivityResult(requestCode, resultCode, data);

    // FIX: guard against a cancelled capture; previously data.Extras threw a
    // NullReferenceException when the user backed out of the camera.
    if (data == null || data.Extras == null)
    {
        return;
    }

    // Save the bitmap into the image object.
    image.SetBitmap((Android.Graphics.Bitmap)data.Extras.Get("data"));

    // Hopefully saves bitmap to memory
    // SaveBitmap(bitmap);

    // Set the image on the GameView layout.
    ImageView takenPic = FindViewById<ImageView>(Resource.Id.gameImage);
    takenPic.Visibility = ViewStates.Visible;
    if (image.CheckBitmap() != false)
    {
        takenPic.SetImageBitmap(image.GetBitmap());
    }

    // Convert bitmap into a base64 string to be sent to the Google API.
    string bitmapString = "";
    using (var stream = new System.IO.MemoryStream())
    {
        // FIX: quality was 0 (maximum compression), which degrades the image so far
        // that label detection becomes unreliable; sibling methods in this file use 100.
        image.GetBitmap().Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 100, stream);
        var bytes = stream.ToArray();
        bitmapString = System.Convert.ToBase64String(bytes);
    }

    // Credential is stored in the "assets" folder; load it into object form.
    string credPath = "google_api.json";
    Google.Apis.Auth.OAuth2.GoogleCredential cred;
    using (var stream = Assets.Open(credPath))
    {
        cred = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
    }
    cred = cred.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

    // By default, the library client authenticates using the service account file
    // named by the GOOGLE_APPLICATION_CREDENTIALS environment variable.
    // We are specifying our own credentials via json file.
    var client = new Google.Apis.Vision.v1.VisionService(new Google.Apis.Services.BaseClientService.Initializer()
    {
        ApplicationName = "mobile-apps-tutorial",
        HttpClientInitializer = cred
    });

    // Set up request.
    var request = new Google.Apis.Vision.v1.Data.AnnotateImageRequest();
    request.Image = new Google.Apis.Vision.v1.Data.Image();
    request.Image.Content = bitmapString;

    // Tell Google that we want to perform label detection.
    request.Features = new List<Google.Apis.Vision.v1.Data.Feature>();
    request.Features.Add(new Google.Apis.Vision.v1.Data.Feature() { Type = "LABEL_DETECTION" });

    var batch = new Google.Apis.Vision.v1.Data.BatchAnnotateImagesRequest();
    batch.Requests = new List<Google.Apis.Vision.v1.Data.AnnotateImageRequest>();
    batch.Requests.Add(request);

    // Send request. Note that Execute() blocks the calling thread; you might want
    // to use ExecuteAsync instead.
    var apiResult = client.Images.Annotate(batch).Execute();

    // Collect the returned labels and store them on the image object.
    List<string> details = new List<string>();
    foreach (var item in apiResult.Responses[0].LabelAnnotations)
    {
        details.Add(item.Description);
    }
    image.SetTags(details);

    // NOTE(review): GC.Collect() does not dispose the Java-side bitmap;
    // prefer Dispose() when the bitmap is no longer needed.
    System.GC.Collect();
}
/// <summary>
/// Sends the supplied bitmap to the Google Vision API for label detection, displays
/// the bitmap in _imageView, and appends the returned label descriptions to the
/// words collection.
/// </summary>
/// <param name="bitmap">Captured photo to analyze and display.</param>
private void apiwork(Android.Graphics.Bitmap bitmap)
{
    // Convert bitmap into a base64 string to be sent to the Google API.
    string bitmapString = "";
    using (var stream = new System.IO.MemoryStream())
    {
        bitmap.Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 100, stream);
        var bytes = stream.ToArray();
        bitmapString = System.Convert.ToBase64String(bytes);
    }

    // Credential is stored in the "assets" folder.
    string credPath = "google_api.json";
    Google.Apis.Auth.OAuth2.GoogleCredential cred;

    // Load credentials into object form.
    using (var stream = Assets.Open(credPath))
    {
        cred = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
    }
    cred = cred.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

    // By default, the library client authenticates using the service account file
    // named by the GOOGLE_APPLICATION_CREDENTIALS environment variable.
    // We are specifying our own credentials via json file.
    var client = new Google.Apis.Vision.v1.VisionService(new Google.Apis.Services.BaseClientService.Initializer()
    {
        ApplicationName = "subtle-isotope-190917",
        HttpClientInitializer = cred
    });

    // Set up request.
    var request = new Google.Apis.Vision.v1.Data.AnnotateImageRequest();
    request.Image = new Google.Apis.Vision.v1.Data.Image();
    request.Image.Content = bitmapString;

    // Tell Google that we want to perform label detection.
    request.Features = new List<Google.Apis.Vision.v1.Data.Feature>();
    request.Features.Add(new Google.Apis.Vision.v1.Data.Feature() { Type = "LABEL_DETECTION" });

    var batch = new Google.Apis.Vision.v1.Data.BatchAnnotateImagesRequest();
    batch.Requests = new List<Google.Apis.Vision.v1.Data.AnnotateImageRequest>();
    batch.Requests.Add(request);

    // Send request. Note that Execute() blocks the calling thread; you might want
    // to use ExecuteAsync instead.
    // FIX: the second half of this comment previously sat on a line with no "//"
    // marker, which was a compile error.
    var apiResult = client.Images.Annotate(batch).Execute();

    if (bitmap != null)
    {
        _imageView.SetImageBitmap(bitmap);
        _imageView.Visibility = Android.Views.ViewStates.Visible;
        // NOTE(review): nulling a by-value parameter has no effect outside this
        // method; if the goal is to release the bitmap, call Dispose() instead.
        bitmap = null;
    }

    // Record the returned label descriptions.
    foreach (var item in apiResult.Responses[0].LabelAnnotations)
    {
        words.Add(item.Description);
    }

    // NOTE(review): GC.Collect() does not dispose the Java-side bitmap.
    System.GC.Collect();
}
/// <summary>
/// Called automatically whenever an activity finishes. Scores the player's photo:
/// runs Google Vision label detection on the camera thumbnail, checks whether the
/// assigned word appears among the labels, awards points, and wires the next-turn button.
/// </summary>
/// <param name="requestCode">Identifies which request is returning.</param>
/// <param name="resultCode">Result status reported by the finished activity.</param>
/// <param name="data">Intent carrying the result payload (camera thumbnail in the "data" extra).</param>
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    int rank = -1;
    float score = 0;
    bool found = false;
    base.OnActivityResult(requestCode, resultCode, data);
    SetContentView(Resource.Layout.Results);
    TextView result = (TextView)FindViewById(Resource.Id.result);
    TextView percentage = (TextView)FindViewById(Resource.Id.percentage);
    ImageButton nextTurn = (ImageButton)FindViewById(Resource.Id.nextTurn);

    // Test to make sure the user took a picture.
    if (data != null)
    {
        // Display in ImageView. We will resize the bitmap to fit the display.
        // Loading the full sized image will consume too much memory
        // and cause the application to crash.
        imageView = FindViewById<ImageView>(Resource.Id.takenPictureImageView);
        int height = Resources.DisplayMetrics.HeightPixels;
        // NOTE(review): width is read from the view's Height — looks like a copy/paste slip; confirm.
        int width = imageView.Height;

        // AC: workaround for not passing actual files — the camera returns a
        // thumbnail in the "data" extra instead of the full-resolution image.
        bitmap = (Android.Graphics.Bitmap)data.Extras.Get("data");
    }
    else
    {
        Toast.MakeText(this, "Error: Starting over", ToastLength.Short).Show();
        StartMainLayout();
    }

    // Test to make sure we have the bitmap.
    if (bitmap != null)
    {
        // Convert bitmap into a base64 string to be sent to the Google API.
        string bitmapString = "";
        using (var stream = new System.IO.MemoryStream())
        {
            bitmap.Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 95, stream);
            var bytes = stream.ToArray();
            bitmapString = System.Convert.ToBase64String(bytes);
        }

        // Credential is stored in the "assets" folder; load it into object form.
        string credPath = "API-Game-7e75c497f0b6.json";
        Google.Apis.Auth.OAuth2.GoogleCredential cred;
        using (var stream = Assets.Open(credPath))
        {
            cred = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
        }
        cred = cred.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

        // By default, the library client authenticates using the service account
        // file named by the GOOGLE_APPLICATION_CREDENTIALS environment variable.
        // We are specifying our own credentials via json file.
        // FIX: the second half of this comment previously sat on a line with no
        // "//" marker, which was a compile error.
        var client = new Google.Apis.Vision.v1.VisionService(new Google.Apis.Services.BaseClientService.Initializer()
        {
            ApplicationName = "api-game-195221",
            HttpClientInitializer = cred
        });

        // Set up request.
        var request = new Google.Apis.Vision.v1.Data.AnnotateImageRequest();
        request.Image = new Google.Apis.Vision.v1.Data.Image();
        request.Image.Content = bitmapString;

        // Tell Google that we want to perform label detection.
        request.Features = new List<Google.Apis.Vision.v1.Data.Feature>();
        request.Features.Add(new Google.Apis.Vision.v1.Data.Feature() { Type = "LABEL_DETECTION" });

        // Add to the list of items to send to Google.
        var batch = new Google.Apis.Vision.v1.Data.BatchAnnotateImagesRequest();
        batch.Requests = new List<Google.Apis.Vision.v1.Data.AnnotateImageRequest>();
        batch.Requests.Add(request);

        // Send request. Note that Execute() blocks the calling thread; you might
        // want to use ExecuteAsync instead.
        var apiResult = client.Images.Annotate(batch).Execute();

        // Look for the assigned word among the returned labels (case-sensitive,
        // exact match — consider a case-insensitive comparison).
        for (int i = 0; i < apiResult.Responses[0].LabelAnnotations.Count; i++)
        {
            if (GivenWord == apiResult.Responses[0].LabelAnnotations[i].Description)
            {
                rank = i;
                found = true;
            }
        }

        // Check to make sure the user found the object.
        if (found)
        {
            score = (float)(apiResult.Responses[0].LabelAnnotations[rank].Score);
            score *= 100;
            result.Text = ("Correct!! +10 Points");
            percentage.Text = ("Your picture was " + score + "% accurate!");
            total_points += 10;
        }
        else
        {
            result.Text = ("You did not take a picture of a " + GivenWord);
        }

        // NOTE(review): this handler is re-subscribed each time the method runs,
        // so clicks accumulate across turns; consider attaching once in OnCreate.
        nextTurn.Click += delegate
        {
            myVib.Vibrate(30);
            if (total_points >= 30)
            {
                Gameover();
            }
            else
            {
                StartFindIt();
            }
        };

        imageView.SetImageBitmap(bitmap);
        bitmap = null;
    }

    // NOTE(review): GC.Collect() does not dispose the Java-side bitmap;
    // prefer Dispose() when the bitmap is no longer needed.
    System.GC.Collect();
}
/// <summary>
/// Called automatically whenever an activity finishes. Sends the camera thumbnail to
/// the Google Vision API for label detection and displays the thumbnail on screen.
/// </summary>
/// <param name="requestCode">Identifies which request is returning.</param>
/// <param name="resultCode">Result status reported by the finished activity.</param>
/// <param name="data">Intent carrying the result payload (camera thumbnail in the "data" extra).</param>
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
    base.OnActivityResult(requestCode, resultCode, data);

    // Display in ImageView. We will resize the bitmap to fit the display.
    // Loading the full sized image will consume too much memory
    // and cause the application to crash.
    ImageView imageView = FindViewById<ImageView>(Resource.Id.takenPictureImageView);
    int height = Resources.DisplayMetrics.HeightPixels;
    // NOTE(review): width is read from the view's Height — looks like a copy/paste slip; confirm.
    int width = imageView.Height;

    // FIX: guard against a cancelled capture; previously data.Extras threw a
    // NullReferenceException when the user backed out of the camera.
    if (data == null || data.Extras == null)
    {
        return;
    }

    // AC: workaround for not passing actual files — the camera returns a thumbnail
    // in the "data" extra instead of the full-resolution image.
    Android.Graphics.Bitmap bitmap = (Android.Graphics.Bitmap)data.Extras.Get("data");

    // Convert bitmap into a base64 string to be sent to the Google API.
    string bitmapString = "";
    using (var stream = new System.IO.MemoryStream())
    {
        // FIX: quality was 0 (maximum compression), which degrades the image so far
        // that label detection becomes unreliable; sibling methods in this file use 95-100.
        bitmap.Compress(Android.Graphics.Bitmap.CompressFormat.Jpeg, 100, stream);
        var bytes = stream.ToArray();
        bitmapString = System.Convert.ToBase64String(bytes);
    }

    // Credential is stored in the "assets" folder; load it into object form.
    string credPath = "API-Game-5a4cd35415e2.json";
    Google.Apis.Auth.OAuth2.GoogleCredential cred;
    using (var stream = Assets.Open(credPath))
    {
        cred = Google.Apis.Auth.OAuth2.GoogleCredential.FromStream(stream);
    }
    cred = cred.CreateScoped(Google.Apis.Vision.v1.VisionService.Scope.CloudPlatform);

    // By default, the library client authenticates using the service account file
    // named by the GOOGLE_APPLICATION_CREDENTIALS environment variable.
    // We are specifying our own credentials via json file.
    var client = new Google.Apis.Vision.v1.VisionService(new Google.Apis.Services.BaseClientService.Initializer()
    {
        ApplicationName = "api-game-195221",
        HttpClientInitializer = cred
    });

    // Set up request.
    var request = new Google.Apis.Vision.v1.Data.AnnotateImageRequest();
    request.Image = new Google.Apis.Vision.v1.Data.Image();
    request.Image.Content = bitmapString;

    // Tell Google that we want to perform label detection.
    request.Features = new List<Google.Apis.Vision.v1.Data.Feature>();
    request.Features.Add(new Google.Apis.Vision.v1.Data.Feature() { Type = "LABEL_DETECTION" });

    // Add to the list of items to send to Google.
    var batch = new Google.Apis.Vision.v1.Data.BatchAnnotateImagesRequest();
    batch.Requests = new List<Google.Apis.Vision.v1.Data.AnnotateImageRequest>();
    batch.Requests.Add(request);

    // Send request. Note that Execute() blocks the calling thread; you might want
    // to use ExecuteAsync instead. The result is currently unused.
    var apiResult = client.Images.Annotate(batch).Execute();

    if (bitmap != null)
    {
        imageView.SetImageBitmap(bitmap);
        imageView.Visibility = Android.Views.ViewStates.Visible;
        bitmap = null;
    }

    // NOTE(review): GC.Collect() does not dispose the Java-side bitmap;
    // prefer Dispose() when the bitmap is no longer needed.
    System.GC.Collect();
}