/// <summary>
/// Creates a temporary Azure Face person group from <paramref name="personDictionary"/>,
/// trains it, identifies the faces found in <paramref name="sourceImageFileName"/>, and
/// returns the best-matching person together with a human-readable progress log.
/// The person group is always deleted before returning, even on failure.
/// </summary>
/// <param name="client">Authenticated Face API client.</param>
/// <param name="personDictionary">Known persons; each entry carries a name and its training image names.</param>
/// <param name="url">Base URL for the training images.</param>
/// <param name="url2">Base URL for the source (probe) image.</param>
/// <param name="sourceImageFileName">File name of the image containing the face(s) to identify.</param>
/// <param name="recognitionModel">Face recognition model identifier (e.g. Recognition04).</param>
/// <returns>View model with the highest-confidence person, its confidence, and the progress log.</returns>
/// <exception cref="InvalidOperationException">Thrown when person-group training fails.</exception>
public static async Task<GetRecognisedUserViewModel> IdentifyInPersonGroup(IFaceClient client, List<InPutDictlist> personDictionary, string url, string url2, string sourceImageFileName, string recognitionModel)
{
    var viewModel = new GetRecognisedUserViewModel();
    // StringBuilder instead of repeated string concatenation in the loops below.
    var output = new StringBuilder("Initializing..... \n Creating new Personal Model \n ");
    output.Append("person group identity: ").Append(personGroupId).Append(". \n");

    await client.PersonGroup.CreateAsync(personGroupId, personGroupId, recognitionModel: recognitionModel);

    var bestPerson = new Microsoft.Azure.CognitiveServices.Vision.Face.Models.Person();
    try
    {
        // Register every known person and their training faces.
        foreach (var groupedFace in personDictionary)
        {
            // Limit TPS against the Face API throttling quota.
            await Task.Delay(250);
            Microsoft.Azure.CognitiveServices.Vision.Face.Models.Person person =
                await client.PersonGroupPerson.CreateAsync(personGroupId, groupedFace.Name);

            foreach (var similarImage in groupedFace.ImageName)
            {
                // Fix: log the person's name, not the object (which printed the type name).
                output.Append("Add face to the person group person( ").Append(groupedFace.Name)
                      .Append(") from image `").Append(similarImage).Append("` \n ");
                await client.PersonGroupPerson.AddFaceFromUrlAsync(personGroupId, person.PersonId, url + similarImage, similarImage);
            }
        }

        // Train and poll until the service reports a terminal state.
        output.Append("\n \n Now training Person Group: ").Append(personGroupId).Append('\n');
        await client.PersonGroup.TrainAsync(personGroupId);

        while (true)
        {
            await Task.Delay(1000);
            var trainingStatus = await client.PersonGroup.GetTrainingStatusAsync(personGroupId);
            output.Append("Training status: ").Append(trainingStatus.Status).Append(". \n");
            if (trainingStatus.Status == TrainingStatusType.Succeeded)
            {
                break;
            }
            // Fix: the original looped forever when training failed.
            if (trainingStatus.Status == TrainingStatusType.Failed)
            {
                throw new InvalidOperationException("Person group training failed: " + trainingStatus.Message);
            }
        }

        // Detect faces in the probe image and collect their ids.
        List<DetectedFace> detectedFaces = await DetectFaceRecognize(client, $"{url2}{sourceImageFileName}", recognitionModel);
        var sourceFaceIds = new List<Guid>();
        foreach (var detectedFace in detectedFaces)
        {
            sourceFaceIds.Add(detectedFace.FaceId.Value);
        }

        // Identify the faces against the person group and keep the best candidate.
        var identifyResults = await client.Face.IdentifyAsync(sourceFaceIds, personGroupId);
        double bestConfidence = 0.0;
        foreach (var identifyResult in identifyResults)
        {
            // Fix: a face may have no candidates; indexing [0] unconditionally threw.
            if (identifyResult.Candidates == null || identifyResult.Candidates.Count == 0)
            {
                continue;
            }
            var topCandidate = identifyResult.Candidates[0];
            Microsoft.Azure.CognitiveServices.Vision.Face.Models.Person person =
                await client.PersonGroupPerson.GetAsync(personGroupId, topCandidate.PersonId);
            output.Append("Person: '").Append(person.Name).Append("' is identified for face in: ")
                  .Append(sourceImageFileName).Append(" - ").Append(identifyResult.FaceId).Append(" \n");

            if (topCandidate.Confidence > bestConfidence)
            {
                // Fix: the original never updated the running maximum ("numberstream"),
                // so the LAST candidate with any confidence won, not the most confident one.
                bestConfidence = topCandidate.Confidence;
                bestPerson.Name = person.Name;
                bestPerson.PersonId = person.PersonId;
                bestPerson.PersistedFaceIds = new List<Guid> { identifyResult.FaceId };
                viewModel.ConfidenceLevel = topCandidate.Confidence;
            }
        }
    }
    finally
    {
        // Fix: always delete the transient person group, otherwise any exception above
        // leaked it and the next CreateAsync with the same id would fail.
        await DeletePersonGroup(client, personGroupId);
        output.Append("\n \n Person Id group deleted.");
    }

    viewModel.Answers = output.ToString();
    viewModel.highestperson = bestPerson;
    return viewModel;
}
/// <summary>
/// Click handler: uploads the captured image to blob storage, identifies the person via the
/// Face API, and on a confident match (&gt; 0.7) records an attendance row and greets the user
/// by speech; otherwise speaks a low-confidence message. Any failure (no face detected,
/// network error, unparseable person name) is reported to the user via speech.
/// </summary>
/// <remarks><c>async void</c> is acceptable here only because this is a top-level UI event handler.</remarks>
private async void Detect_Person_Button_Click(object sender, RoutedEventArgs e)
{
    var fileInfo = new FileInfo(imagepathHolder);
    BlobConstructors.UploadFile(fileInfo, BlobConstructors.Blobconnectionstring, BlobConstructors.containername2);

    IFaceClient client = CompareFaceConstructor.Authenticate(ENDPOINT, SUBSCRIPTION_KEY);
    string dbPath = Database_Connection._dbpath;

    try
    {
        GetRecognisedUserViewModel identity = await CompareFaceConstructor.IdentifyInPersonGroup(
            client, personDictionary, BlobConstructors.blobBaseUrl, BlobConstructors.blobBaseUrl2, imageName, RECOGNITION_MODEL4);

        // Fix: the original opened two connections (one unused, "dz") and disposed neither.
        using (var db = new SQLiteConnection(dbPath))
        {
            db.CreateTable<Models.Person>();
            db.CreateTable<Models.Attendance>();

            // The recognised person's Name carries the database Id as a string.
            int personId = int.Parse(identity.highestperson.Name);
            var p = db.Table<Models.Person>().Where(m => m.Id == personId).FirstOrDefault();
            // Fix: FirstOrDefault may return null; the original NRE'd into the blanket catch.
            if (p == null)
            {
                readText("No matching person record was found in the database.");
                return;
            }

            int attendanceCount = db.Table<Models.Attendance>().Count();

            if (identity.ConfidenceLevel > 0.7)
            {
                // Fix: the original string literal contained a raw line break, which is
                // invalid C#; the newline is now an escape sequence.
                string message = "Welcome back " + p.LastName + " " + p.FirstName +
                                 " , You have been logged in. \nHave a pleasant day!";
                Books_Label.Text = identity.Answers;
                var attendance = new Attendance()
                {
                    Id = attendanceCount + 1,
                    FirstName = p.FirstName,
                    Date_Signed_In_Date_and_Time = DateTime.UtcNow,
                    LastName = p.LastName,
                    User_ID = p.Id
                };
                db.Insert(attendance);
                readText(message);
            }
            else
            {
                var confidencePercentage = identity.ConfidenceLevel * 100;
                string message = "I am not comfortable this is who i think this is. i am only " +
                                 confidencePercentage + " percent sure, that this is" +
                                 p.FirstName + " " + p.LastName + ".";
                readText(message);
            }
        }
    }
    catch (Exception)
    {
        // Best-effort user feedback: identification, parsing, or DB access failed.
        readText("Face not detected from Image or Server cannot be reached");
    }
}