/// <summary> Function which submits a frame to the Face API. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
/// and containing the faces returned by the API. </returns>
private async Task<LiveCameraResult> FacesAnalysisFunction(VideoFrame frame)
{
    // Encode image.
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);
    // Submit image to API.
    var attrs = new List<FaceAttributeType>
    {
        FaceAttributeType.Age,
        FaceAttributeType.Gender,
        FaceAttributeType.HeadPose
    };
    var faces = await _faceClient.DetectAsync(jpg, returnFaceAttributes: attrs);
    // Count the API call.
    Properties.Settings.Default.FaceAPICallCount++;
    // Output.
    LiveCameraResult _result = new LiveCameraResult
    {
        Faces = faces,
        TimeStamp = DateTime.Now,
        SelectedCamera = camo
    };
    TotalAPIResults.Add(_result);
    ApiResult = _result;
    analysisLog.SaveData(_result);
    return _result;
}
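Every example on this page reads from or writes to a LiveCameraResult container, but the class itself is never shown. The sketch below is only inferred from the properties these examples touch (Faces, EmotionScores, CelebrityNames, Tags, Caption, Identities, TimeStamp); the property types are assumptions based on the ProjectOxford client contracts, and individual forks add their own members (UserFace, VideoFrame, SelectedCamera) that are not listed here.

// Inferred sketch of the result container used throughout these examples.
// Names and types are assumptions; each project's actual class may differ.
public class LiveCameraResult
{
    public Microsoft.ProjectOxford.Face.Contract.Face[] Faces { get; set; }
    public Microsoft.ProjectOxford.Common.Contract.EmotionScores[] EmotionScores { get; set; }
    public string[] CelebrityNames { get; set; }
    public Microsoft.ProjectOxford.Vision.Contract.Tag[] Tags { get; set; }
    public Microsoft.ProjectOxford.Vision.Contract.Caption[] Caption { get; set; }
    public Dictionary<Guid, Microsoft.ProjectOxford.Face.Contract.Face> Identities { get; set; }
    public DateTime TimeStamp { get; set; }
}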
public Dictionary<Guid, int> ComputeFrameScorePerPlayer(LiveCameraResult apiResult)
{
    var scoresDictionary = new Dictionary<Guid, int>();
    if (apiResult.Identities != null && apiResult.Identities.Count > 0)
    {
        KeyValuePair<string, float> currDominantEmotion;
        Guid personId;
        foreach (var item in apiResult.Identities)
        {
            personId = item.Key;
            currDominantEmotion = getDominantEmotion(apiResult.Identities[personId].FaceAttributes.Emotion);
            double delta = Math.Abs(currDominantEmotion.Value - this.targetScore);
            if (currDominantEmotion.Key == this.targetEmotion.ToString() && delta <= Delta)
            {
                scoresDictionary[personId] = 10 * (int)Math.Round(1 + 10 * (Delta - delta), 1);
            }
            else
            {
                scoresDictionary[personId] = 0;
            }
        }
    }
    return scoresDictionary;
}
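To make the scoring line concrete with assumed numbers: if Delta were 0.2 and a player's dominant emotion matched the target with delta = 0.05, the award would be 10 * (int)Math.Round(1 + 10 * (0.2 - 0.05), 1) = 10 * (int)2.5 = 20 points; a non-matching emotion, or a delta above Delta, scores 0.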
/// <summary> Function which submits a frame to the Face API. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
/// and containing the faces returned by the API. </returns>
private async Task<LiveCameraResult> FacesAnalysisFunction(VideoFrame frame)
{
    // Encode image.
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);
    //TODO Hackathon:
    // Submit image to API.
    var result = new LiveCameraResult();
    try
    {
        var attrs = new List<FaceAttributeType>
        {
            FaceAttributeType.Age,
            FaceAttributeType.Gender,
            FaceAttributeType.Glasses,
            FaceAttributeType.FacialHair,
            FaceAttributeType.Emotion,
            FaceAttributeType.Smile
        };
        result.Faces = await _faceClient.DetectAsync(jpg, returnFaceAttributes: attrs);
    }
    catch (FaceAPIException faceException)
    {
        MessageBox.Show(faceException.ErrorMessage, faceException.ErrorCode);
    }
    // Count the API call.
    Properties.Settings.Default.FaceAPICallCount++;
    // Output.
    return result;
}
private async Task<string> IdentifyPerson(string personGroupId)
{
    string writeBack = "";
    IList<IdentifyResult> result = new List<IdentifyResult>();
    LiveCameraResult lc = new LiveCameraResult();
    writeBack = $"Face ID: {lc.UserFace.ToString()}";
    //IList<Guid> faceId = lc.UserFace.Select(face => face.FaceId.GetValueOrDefault()).ToList();
    //var results = await faceClient.Face.IdentifyAsync(faceId, personGroupId);
    //foreach (var identifyResult in results)
    //{
    //    writeBack += ($"Result of face: {identifyResult.FaceId} ");
    //    if (identifyResult.Candidates.Count == 0)
    //    {
    //        writeBack += "No one identified ";
    //    }
    //    else
    //    {
    //        // Get top 1 among all candidates returned
    //        var candidateId = identifyResult.Candidates[0].PersonId;
    //        var person = await faceClient.PersonGroupPerson.GetAsync(personGroupId, candidateId);
    //        writeBack += ($"Identified as {person.Name} ");
    //    }
    //}
    return writeBack;
}
/// <summary> Function which submits a frame to the Computer Vision API for celebrity
/// detection. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
/// and containing the celebrities returned by the API. </returns>
private async Task<LiveCameraResult> CelebrityAnalysisFunction(VideoFrame frame)
{
    // Encode image.
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);
    // Submit image to API.
    var result = await _visionClient.AnalyzeImageInDomainAsync(jpg, "celebrities");
    // Count the API call.
    Properties.Settings.Default.VisionAPICallCount++;
    // Output.
    var celebs = JsonConvert.DeserializeObject<CelebritiesResult>(result.Result.ToString()).Celebrities;
    LiveCameraResult _result = new LiveCameraResult
    {
        TimeStamp = DateTime.Now,
        // Extract face rectangles from results.
        Faces = celebs.Select(c => CreateFace(c.FaceRectangle)).ToArray(),
        // Extract celebrity names from results.
        CelebrityNames = celebs.Select(c => c.Name).ToArray()
    };
    TotalAPIResults.Add(_result);
    ApiResult = _result;
    analysisLog.SaveData(_result);
    return _result;
}
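CreateFace is referenced here (and in the emotion example further down) but not shown. In the VideoFrameAnalyzer sample it simply copies a Vision-API face rectangle into a Face API contract so the shared visualization code can draw both result types alike; a sketch along those lines follows, with the parameter type adjusted per overload (the emotion example passes a Microsoft.ProjectOxford.Common.Rectangle instead).

// Sketch of the CreateFace helper used above: wraps a Computer Vision face
// rectangle in a Face API contract so both result types can be drawn alike.
private Microsoft.ProjectOxford.Face.Contract.Face CreateFace(Microsoft.ProjectOxford.Vision.Contract.FaceRectangle rect)
{
    return new Microsoft.ProjectOxford.Face.Contract.Face
    {
        FaceRectangle = new Microsoft.ProjectOxford.Face.Contract.FaceRectangle
        {
            Left = rect.Left,
            Top = rect.Top,
            Width = rect.Width,
            Height = rect.Height
        }
    };
}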
private void SavePlayerImages(BitmapSource image, LiveCameraResult result)
{
    if (result == null || result.Identities == null || this.gameState != GameState.Game)
    {
        return;
    }
    if (DateTime.Now.AddSeconds(-playerImagesTimeOffsetSec) > this.lastPlayerImagesTime)
    {
        this.groupImages.Add(image);
        SaveImageToFile(image);
        foreach (var player in result.Identities)
        {
            int offset = 0;
            Int32Rect faceRectangle = new Int32Rect(
                player.Value.FaceRectangle.Left + offset,
                player.Value.FaceRectangle.Top + offset,
                player.Value.FaceRectangle.Width + offset,
                player.Value.FaceRectangle.Height + offset);
            CroppedBitmap playerImage = new CroppedBitmap(image, faceRectangle);
            if (playerImages.ContainsKey(player.Key))
            {
                playerImages[player.Key].Add(playerImage);
            }
            else
            {
                playerImages[player.Key] = new List<CroppedBitmap>() { playerImage };
            }
            lastPlayerImagesTime = DateTime.Now;
        }
    }
}
private BitmapSource VisualizeResult(VideoFrame frame)
{
    // Draw any results on top of the image.
    BitmapSource visImage = frame.Image.ToBitmapSource();
    LiveCameraResult result = _latestResultsToDisplay;
    if (result != null)
    {
        // See if we have local face detections for this image.
        var clientFaces = (OpenCvSharp.Rect[])frame.UserData;
        if (clientFaces != null && result.Faces != null)
        {
            // If so, then the analysis results might be from an older frame. We need to match
            // the client-side face detections (computed on this frame) with the analysis
            // results (computed on the older frame) that we want to display.
            MatchAndReplaceFaceRectangles(result.Faces, clientFaces);
        }
        if (this.gameState == GameState.Explain)
        {
            this.Dispatcher.BeginInvoke((Action)(() =>
            {
                RightImage.Source = ImageProvider.Instructions;
                //visImage = Visualization.DrawExplain(visImage);
            }));
        }
        else if (this.gameState == GameState.RoundBegin)
        {
            visImage = VisualizeStartRound(frame);
        }
        else if (this.gameState == GameState.RoundEnd)
        {
            visImage = VisualizeEndRound(frame);
        }
        else if (this.gameState == GameState.Game)
        {
            // Compute round score
            Dictionary<Guid, int> scores = round.ComputeFrameScorePerPlayer(result);
            scoringSystem.AddToCurrentRound(scores);
            visImage = Visualization.DrawFaces(visImage, round, result.Identities, scoringSystem, _mode);
            SavePlayerImages(frame.Image.ToBitmapSource(), result);
        }
        else if (this.gameState == GameState.Participants)
        {
            visImage = Visualization.DrawParticipants(visImage, result.Faces);
        }
        else if (this.gameState == GameState.GameEnd)
        {
            _grabber.StopProcessingAsync();
            visImage = VisualizeEndGame(frame);
        }
    }
    return visImage;
}
public MainWindow()
{
    InitializeComponent();
    // Create grabber.
    _grabber = new FrameGrabber<LiveCameraResult>();
    // Set up a listener for when the client receives a new frame.
    _grabber.NewFrameProvided += (s, e) =>
    {
        // The callback may occur on a different thread, so we must use the
        // MainWindow.Dispatcher when manipulating the UI.
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            // Display the image in the left pane.
            LeftImage.Source = e.Frame.Image.ToBitmapSource();
        }));
        // See if auto-stop should be triggered.
        if (Properties.Settings.Default.AutoStopEnabled &&
            (DateTime.Now - _startTime) > Properties.Settings.Default.AutoStopTime)
        {
            _grabber.StopProcessingAsync();
        }
    };
    // Set up a listener for when the client receives a new result from an API call.
    _grabber.NewResultAvailable += (s, e) =>
    {
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            if (e.TimedOut)
            {
                MessageArea.Text = "API call timed out.";
            }
            else if (e.Exception != null)
            {
                string apiName = "";
                string message = e.Exception.Message;
                var faceEx = e.Exception as FaceAPIException;
                if (faceEx != null)
                {
                    apiName = "Face";
                    message = faceEx.ErrorMessage;
                }
                MessageArea.Text = string.Format("{0} API call failed on frame {1}. Exception: {2}",
                    apiName, e.Frame.Metadata.Index, message);
            }
            else
            {
                _latestResultsToDisplay = e.Analysis;
                RightImage.Source = VisualizeResult(e.Frame);
            }
        }));
    };
    // Create local face detector.
    _localFaceDetector.Load("Data/haarcascade_frontalface_alt2.xml");
}
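None of the constructors on this page show how an analysis function actually gets attached to the FrameGrabber. In the VideoFrameAnalyzer sample this is done before starting the camera, roughly as in the sketch below; the member names follow that sample, and the interval and camera index are placeholder values.

// Hook one of the analysis functions above into the grabber, then start the camera.
// Property and method names follow the VideoFrameAnalyzer sample; adjust if a fork renamed them.
_grabber.AnalysisFunction = FacesAnalysisFunction;
_grabber.TriggerAnalysisOnInterval(TimeSpan.FromSeconds(3)); // analyze roughly one frame every 3 s
await _grabber.StartProcessingCameraAsync(0);                // 0 = default camera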
/// <summary> Function which submits a frame to the Emotion API. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
/// and containing the emotions returned by the API. </returns>
private async Task<LiveCameraResult> EmotionAnalysisFunction(VideoFrame frame)
{
    // Encode image.
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);
    // Submit image to API.
    Emotion[] emotions = null;
    // See if we have local face detections for this image.
    var localFaces = (OpenCvSharp.Rect[])frame.UserData;
    if (localFaces == null)
    {
        // If localFaces is null, we're not performing local face detection.
        // Use Cognitive Services to do the face detection.
        Properties.Settings.Default.EmotionAPICallCount++;
        emotions = await _emotionClient.RecognizeAsync(jpg);
    }
    else if (localFaces.Count() > 0)
    {
        // If we have local face detections, we can call the API with them.
        // First, convert the OpenCvSharp rectangles.
        var rects = localFaces.Select(f => new Microsoft.ProjectOxford.Common.Rectangle
        {
            Left = f.Left,
            Top = f.Top,
            Width = f.Width,
            Height = f.Height
        });
        Properties.Settings.Default.EmotionAPICallCount++;
        emotions = await _emotionClient.RecognizeAsync(jpg, rects.ToArray());
    }
    else
    {
        // Local face detection found no faces; don't call Cognitive Services.
        emotions = new Emotion[0];
    }
    // Output.
    LiveCameraResult _result = new LiveCameraResult
    {
        TimeStamp = DateTime.Now,
        Faces = emotions.Select(e => CreateFace(e.FaceRectangle)).ToArray(),
        // Extract emotion scores from results.
        EmotionScores = emotions.Select(e => e.Scores).ToArray()
    };
    TotalAPIResults.Add(_result);
    ApiResult = _result;
    analysisLog.SaveData(_result);
    return _result;
}
/// <summary> Function which submits a frame to the Face API. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
/// and containing the faces returned by the API. </returns>
private async Task<LiveCameraResult> FacesAnalysisFunction(VideoFrame frame)
{
    // Encode image.
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);
    // Submit image to API.
    var attrs = new List<FaceAPI.FaceAttributeType>
    {
        FaceAPI.FaceAttributeType.Age,
        FaceAPI.FaceAttributeType.Gender,
        FaceAPI.FaceAttributeType.HeadPose,
        FaceAPI.FaceAttributeType.Glasses,
        FaceAPI.FaceAttributeType.FacialHair,
    };
    var faces = await _faceClient.DetectAsync(jpg, returnFaceAttributes: attrs);
    // Count the API call.
    Properties.Settings.Default.FaceAPICallCount++;
    //IRUEL GET
    LiveCameraResult result = new LiveCameraResult { Faces = faces };
    Microsoft.ProjectOxford.Face.Contract.Face xx = new Microsoft.ProjectOxford.Face.Contract.Face();
    xx = result.Faces[0];
    Microsoft.ProjectOxford.Face.Contract.FaceAttributes dd = new Microsoft.ProjectOxford.Face.Contract.FaceAttributes();
    dd = xx.FaceAttributes;
    Trace.WriteLine(dd.Age.ToString() + ' ' + dd.Gender.ToString() + ' ' + dd.Glasses.ToString());
    //return new LiveCameraResult
    //{
    //    // Extract face rectangles from results.
    //    Faces = celebs.Select(c => CreateFace(c.FaceRectangle)).ToArray(),
    //    // Extract celebrity names from results.
    //    CelebrityNames = celebs.Select(c => c.Name).ToArray()
    //};
    // Output.
    return new LiveCameraResult { Faces = faces };
}
private async Task<LiveCameraResult> DescriptionAnalysisFunction(VideoFrame frame)
{
    // Encode image.
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);
    // Submit image to API.
    var result = await _visionClient.DescribeAsync(jpg);
    // Count the API call.
    Properties.Settings.Default.VisionAPICallCount++;
    // Output.
    LiveCameraResult cResult = new LiveCameraResult();
    cResult.Caption = result.Description.Captions;
    return new LiveCameraResult { Caption = result.Description.Captions };
}
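A small usage sketch for the description result above: picking the highest-confidence caption for display. The Text and Confidence properties follow the ProjectOxford Vision contracts, while the MessageArea control is an assumption carried over from the other examples on this page.

// Pick the most confident caption returned by DescribeAsync and show it.
var best = result.Description.Captions
                 .OrderByDescending(c => c.Confidence)
                 .FirstOrDefault();
MessageArea.Text = best != null
    ? $"{best.Text} ({best.Confidence:P0})"
    : "No caption returned.";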
public async Task<bool> FindSimilar(LiveCameraResult liveCameraResult)
{
    try
    {
        if (string.IsNullOrWhiteSpace(DocumentImagePath))
        {
            return false;
        }
        IList<Guid?> targetFaceIds = new List<Guid?>();
        using (var jpg = File.OpenRead(DocumentImagePath))
        {
            // Detect faces in the loaded image.
            var detectWithStreamCmd = new DetectWithStreamCmd();
            var faces = await detectWithStreamCmd.DetectWithStreamAsync(jpg);
            // Add detected faceId to list of GUIDs.
            if (faces.Count <= 0)
            {
                // "No faces were detected in the image."
                MessageArea.Text = $"No se detectaron rostros en la imagen.";
                return false;
            }
            targetFaceIds.Add(faces[0].FaceId.Value);
        }
        var verifyFaceToFaceCmd = new VerifyFaceToFaceCmd();
        var similarResults = await verifyFaceToFaceCmd.VerifyFaceToFaceAsync(
            liveCameraResult.Faces.First().FaceId.Value, targetFaceIds.First().Value);
        if (similarResults.IsIdentical)
        {
            RightImage.Source = VisualizeResult(liveCameraResult.VideoFrame);
            // "The faces are similar, with a confidence of {similarResults.Confidence}."
            MessageArea.Text = $"Los rostros son similares con una confianza de: {similarResults.Confidence}.";
            return true;
        }
        else
        {
            // "The faces are not identical."
            MessageArea.Text = $"Los rostros no son identicos.";
            return true;
        }
    }
    catch (Exception ex)
    {
        // "An error has occurred: {ex.Message}"
        MessageArea.Text = $"Se ha presentado un error: {ex.Message}";
        return false;
    }
}
/// <summary> Function which submits a frame to the Computer Vision API for tagging. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
/// and containing the tags returned by the API. </returns>
private async Task<LiveCameraResult> TaggingAnalysisFunction(VideoFrame frame)
{
    // Encode image.
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);
    // Submit image to API.
    var analysis = await _visionClient.GetTagsAsync(jpg);
    // Count the API call.
    Properties.Settings.Default.VisionAPICallCount++;
    // Output.
    LiveCameraResult _result = new LiveCameraResult
    {
        Tags = analysis.Tags,
        TimeStamp = DateTime.Now
    };
    TotalAPIResults.Add(_result);
    ApiResult = _result;
    analysisLog.SaveData(_result);
    return _result;
}
public Dictionary<Guid, int> ComputeFrameScorePerPlayer(LiveCameraResult apiResult)
{
    var scoresDictionary = new Dictionary<Guid, int>();
    if (apiResult.Identities != null && apiResult.Identities.Count > 0 && apiResult.Identities.ContainsKey(leader))
    {
        KeyValuePair<string, float> currDominantEmotion;
        Guid personId;
        KeyValuePair<string, float> leaderEmotion;
        int mimicPlayersCount = 0;
        leaderEmotion = RoundEmotion.getDominantEmotion(apiResult.Identities[leader].FaceAttributes.Emotion);
        foreach (var item in apiResult.Identities)
        {
            personId = item.Key;
            if (personId == leader)
            {
                continue;
            }
            currDominantEmotion = RoundEmotion.getDominantEmotion(apiResult.Identities[personId].FaceAttributes.Emotion);
            double delta = Math.Abs(currDominantEmotion.Value - leaderEmotion.Value);
            if (currDominantEmotion.Key == leaderEmotion.Key && delta <= Delta)
            {
                scoresDictionary[personId] = 10 * (int)Math.Round(1 + 10 * (Delta - delta), 1);
                mimicPlayersCount++;
            }
            else
            {
                scoresDictionary[personId] = 0;
            }
        }
        int totalPlayersCount = apiResult.Identities.Count;
        // Handle leader scoring.
        scoresDictionary[leader] = (totalPlayersCount - mimicPlayersCount) * 10;
    }
    return scoresDictionary;
}
/// <summary> Function which submits a frame to the Face API for detection and identification. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
/// and containing the emotions returned by the API. </returns>
private async Task<LiveCameraResult> AnalysisFunction(VideoFrame frame)
{
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);
    var attrs = new List<FaceAttributeType> { FaceAttributeType.Age, FaceAttributeType.Emotion };
    Face[] faces = await _faceClient.DetectAsync(jpg, returnFaceAttributes: attrs);
    Guid[] faceIds = faces.Select(face => face.FaceId).ToArray();
    var liveCameraResult = new LiveCameraResult
    {
        Faces = faces,
        EmotionScores = faces.Select(f => f.FaceAttributes.Emotion).ToArray()
    };
    try
    {
        IdentifyResult[] identities = await _faceClient.IdentifyAsync(currentGroupId, faceIds);
        var identityDict = new Dictionary<Guid, Face>();
        foreach (var identity in identities)
        {
            if (identity.Candidates.Length > 0 && identity.Candidates[0].Confidence > 0.6)
            {
                identityDict[identity.Candidates[0].PersonId] = faces.First(f => f.FaceId == identity.FaceId);
            }
        }
        liveCameraResult.Identities = identityDict;
    }
    catch (Exception e)
    {
        // Identification is best-effort; failures (e.g. an untrained person group) are ignored.
    }
    return liveCameraResult;
}
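IdentifyAsync only succeeds against a person group that has already been trained; the catch block above swallows the failure you get otherwise. A minimal sketch of the one-time setup this function assumes, using the same ProjectOxford Face client calls that appear in the longer constructor example further down (group name, person name and image path are placeholders):

// One-time setup assumed by AnalysisFunction: create, populate and train the person group.
await _faceClient.CreatePersonGroupAsync(currentGroupId, "Players");
var person = await _faceClient.CreatePersonAsync(currentGroupId, "Player 1");
using (var image = File.OpenRead(@"player1.jpg"))
{
    await _faceClient.AddPersonFaceAsync(currentGroupId, person.PersonId, image);
}
await _faceClient.TrainPersonGroupAsync(currentGroupId);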
/// <summary> Function which submits a frame to the Computer Vision API for tagging. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
/// and containing the tags returned by the API. </returns>
private async Task<LiveCameraResult> TaggingAnalysisFunction(VideoFrame frame)
{
    // Encode image.
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);
    //TODO Hackathon:
    // Submit image to API, count the API call, and return the output.
    var result = new LiveCameraResult();
    try
    {
        var tagsResult = await _visionClient.GetTagsAsync(jpg);
        result.Tags = tagsResult.Tags;
    }
    catch (Exception e)
    {
        MessageBox.Show(e.ToString());
    }
    return result;
}
public Dictionary<Guid, int> ComputeFrameScorePerPlayer(LiveCameraResult apiResult)
{
    var scoresDictionary = new Dictionary<Guid, int>();
    if (apiResult.Identities != null && apiResult.Identities.Count > 0)
    {
        Guid personId;
        double personAverageAge;
        double deltaFromAverage;
        double age;
        foreach (var item in apiResult.Identities)
        {
            personId = item.Key;
            if (!agesCount.ContainsKey(personId))
            {
                agesCount[personId] = 0;
                agesSum[personId] = 0.0;
                agesAverage[personId] = 0.0;
            }
            age = apiResult.Identities[personId].FaceAttributes.Age;
            personAverageAge = this.agesAverage[personId];
            deltaFromAverage = age - personAverageAge;
            if (deltaFromAverage > 0 && personAverageAge > 0)
            {
                scoresDictionary[item.Key] = 10 * (int)Math.Round(deltaFromAverage / 2);
            }
            this.agesCount[personId]++;
            this.agesSum[personId] += age;
            this.agesAverage[personId] = this.agesSum[personId] / this.agesCount[personId];
        }
    }
    return scoresDictionary;
}
public MainWindow()
{
    InitializeComponent();
    // Create grabber.
    _grabber = new FrameGrabber<LiveCameraResult>();
    // Set up a listener for when the client receives a new frame.
    _grabber.NewFrameProvided += (s, e) =>
    {
        if (_mode == AppMode.EmotionsWithClientFaceDetect)
        {
            // Local face detection.
            var rects = _localFaceDetector.DetectMultiScale(e.Frame.Image);
            // Attach faces to frame.
            e.Frame.UserData = rects;
        }
        // The callback may occur on a different thread, so we must use the
        // MainWindow.Dispatcher when manipulating the UI.
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            // Display the image in the left pane.
            LeftImage.Source = e.Frame.Image.ToBitmapSource();
            // If we're fusing client-side face detection with remote analysis, show the
            // new frame now with the most recent analysis available.
            if (_fuseClientRemoteResults)
            {
                RightImage.Source = VisualizeResult(e.Frame);
            }
        }));
        // See if auto-stop should be triggered.
        if (Properties.Settings.Default.AutoStopEnabled &&
            (DateTime.Now - _startTime) > Properties.Settings.Default.AutoStopTime)
        {
            _grabber.StopProcessingAsync();
        }
    };
    // Set up a listener for when the client receives a new result from an API call.
    _grabber.NewResultAvailable += (s, e) =>
    {
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            if (e.TimedOut)
            {
                MessageArea.Text = "API call timed out.";
            }
            else if (e.Exception != null)
            {
                string apiName = "";
                string message = e.Exception.Message;
                var faceEx = e.Exception as FaceAPI.FaceAPIException;
                var emotionEx = e.Exception as Common.ClientException;
                var visionEx = e.Exception as VisionAPI.ClientException;
                if (faceEx != null)
                {
                    apiName = "Face";
                    message = faceEx.ErrorMessage;
                }
                else if (emotionEx != null)
                {
                    apiName = "Emotion";
                    message = emotionEx.Error.Message;
                }
                else if (visionEx != null)
                {
                    apiName = "Computer Vision";
                    message = visionEx.Error.Message;
                }
                MessageArea.Text = string.Format("{0} API call failed on frame {1}. Exception: {2}",
                    apiName, e.Frame.Metadata.Index, message);
            }
            else
            {
                _latestResultsToDisplay = e.Analysis;
                // Display the image and visualization in the right pane.
                if (!_fuseClientRemoteResults)
                {
                    RightImage.Source = VisualizeResult(e.Frame);
                }
            }
        }));
    };
    // Create local face detector.
    _localFaceDetector.Load("Data/haarcascade_frontalface_alt2.xml");
}
public MainWindow()
{
    InitializeComponent();
    int onetime = 0;
    // Create grabber.
    _grabber = new FrameGrabber<LiveCameraResult>();
    // DB call
    string queryString = "SELECT imageurl from API_transnet_Secondary";
    string connectionString = "Server=tcp:transnetserver.database.windows.net,1433;Initial Catalog=API_transnet;Persist Security Info=False;User ID=admin_server;Password=romir123123@;MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;";
    string image_link = "";
    using (SqlConnection connection = new SqlConnection(connectionString))
    {
        // Create the Command and Parameter objects.
        SqlCommand command = new SqlCommand(queryString, connection);
        try
        {
            connection.Open();
            SqlDataReader reader = command.ExecuteReader();
            int x = 0;
            while (reader.Read())
            {
                image_link = reader[0].ToString();
                byte[] binaryUrl = StrToByteArray(image_link);
                System.Drawing.Image imageUrl = CreateImage(binaryUrl);
                imageUrl.Save("C:\\Users\\romir\\Documents\\Visual Studio 2017\\Projects\\Transnetwork\\Secondary_DB_images\\THEBUYER" + x + ".bmp");
                // NOW WE SHOULD COMPARE BETWEEN THE IMAGE URL (PIC FROM APP) AND STATION URL (PIC FROM STATION) USING AZURE API
                //MakeAnalysisRequest("C:\\Users\\romir\\Documents\\Visual Studio 2017\\Windows\\VideoFrameAnalyzer\\THEBUYER.bmp");
                //MakeAnalysisRequest(stationUrl);
                x++;
            }
            reader.Close();
        }
        catch (Exception)
        {
            // Rethrow the original exception rather than discarding it in a new, empty one.
            throw;
        }
    }
    // DB call ended
    // Set up a listener for when the client receives a new frame.
    _grabber.NewFrameProvided += (s, e) =>
    {
        if (_mode == AppMode.EmotionsWithClientFaceDetect)
        {
            // Local face detection.
            var rects = _localFaceDetector.DetectMultiScale(e.Frame.Image);
            // Attach faces to frame.
            e.Frame.UserData = rects;
        }
        // The callback may occur on a different thread, so we must use the
        // MainWindow.Dispatcher when manipulating the UI.
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            // Display the image in the left pane.
            LeftImage.Source = e.Frame.Image.ToBitmapSource();
            // If we're fusing client-side face detection with remote analysis, show the
            // new frame now with the most recent analysis available.
            if (_fuseClientRemoteResults)
            {
                RightImage.Source = VisualizeResult(e.Frame);
            }
        }));
        // See if auto-stop should be triggered.
        if (Properties.Settings.Default.AutoStopEnabled &&
            (DateTime.Now - _startTime) > Properties.Settings.Default.AutoStopTime)
        {
            _grabber.StopProcessingAsync();
        }
    };
    // Set up a listener for when the client receives a new result from an API call.
    _grabber.NewResultAvailable += (s, e) =>
    {
        this.Dispatcher.BeginInvoke((Action)(async () =>
        {
            if (e.TimedOut)
            {
                MessageArea.Text = "API call timed out.";
            }
            else if (e.Exception != null)
            {
                string apiName = "";
                string message = e.Exception.Message;
                var faceEx = e.Exception as FaceAPIException;
                var emotionEx = e.Exception as Microsoft.ProjectOxford.Common.ClientException;
                var visionEx = e.Exception as Microsoft.ProjectOxford.Vision.ClientException;
                if (faceEx != null)
                {
                    apiName = "Face";
                    message = faceEx.ErrorMessage;
                }
                else if (emotionEx != null)
                {
                    apiName = "Emotion";
                    message = emotionEx.Error.Message;
                }
                else if (visionEx != null)
                {
                    apiName = "Computer Vision";
                    message = visionEx.Error.Message;
                }
                MessageArea.Text = string.Format("{0} API call failed on frame {1}. Exception: {2}",
                    apiName, e.Frame.Metadata.Index, message);
            }
            else
            {
                _latestResultsToDisplay = e.Analysis;
                double x = 2;
                double y = 2;
                // Display the image and visualization in the right pane.
                if (!_fuseClientRemoteResults)
                {
                    //onetime++;
                    RightImage.Source = VisualizeResult(e.Frame);
                    var image = e.Frame.Image;
                    image.SaveImage("C:\\Users\\romir\\Documents\\Visual Studio 2017\\Projects\\Transnetwork\\Person\\NEW" + x + ".bmp");
                    //onetime++;
                    // faceServiceClient = new FaceServiceClient("23c757e5234948b4a67015d008f287b1");
                    y++;
                    string personGroupId = "romir" + y;
                    y++;
                    await faceServiceClient.CreatePersonGroupAsync(personGroupId, "My Friends");
                    // Define Anna
                    CreatePersonResult friend1 = await faceServiceClient.CreatePersonAsync(
                        // Id of the person group that the person belonged to
                        personGroupId,
                        // Name of the person
                        "Anna" + y);
                    const string friend1ImageDir = @"C:\Users\romir\Documents\Visual Studio 2017\Projects\Transnetwork\Person\";
                    foreach (string imagePath in Directory.GetFiles(friend1ImageDir, "*.jpg"))
                    {
                        using (Stream si = File.OpenRead(imagePath))
                        {
                            // Detect faces in the image and add to Anna
                            await faceServiceClient.AddPersonFaceAsync(personGroupId, friend1.PersonId, si);
                        }
                    }
                    await faceServiceClient.TrainPersonGroupAsync(personGroupId);
                    TrainingStatus trainingStatus = null;
                    while (true)
                    {
                        trainingStatus = await faceServiceClient.GetPersonGroupTrainingStatusAsync(personGroupId);
                        if (trainingStatus.Status.ToString() != "running")
                        {
                            break;
                        }
                        await Task.Delay(5000);
                    }
                    // Call API
                    bool authenticated = false;
                    const string friend2ImageDir = @"C:\Users\romir\Documents\Visual Studio 2017\Projects\Transnetwork\Secondary_DB_images\";
                    foreach (string imagePath in Directory.GetFiles(friend2ImageDir, "*.jpg"))
                    {
                        string testImageFile = imagePath;
                        using (Stream si = File.OpenRead(testImageFile))
                        {
                            var faces = await faceServiceClient.DetectAsync(si);
                            var faceIds = faces.Select(face => face.FaceId).ToArray();
                            var results = await faceServiceClient.IdentifyAsync(personGroupId, faceIds);
                            foreach (var identifyResult in results)
                            {
                                Console.WriteLine("Result of face: {0}", identifyResult.FaceId);
                                if (identifyResult.Candidates.Length == 0)
                                {
                                    Console.WriteLine("No one identified");
                                }
                                else
                                {
                                    // Get top 1 among all candidates returned
                                    authenticated = true;
                                }
                            }
                        }
                        if (authenticated)
                        {
                            throw new Exception(); //alert
                        }
                    }
                    x++;
                }
            }
        }));
    };
    // Create local face detector.
    _localFaceDetector.Load("Data/haarcascade_frontalface_alt2.xml");
}
public MainWindow()
{
    currentGroupId = currentGroupName;
    InitializeComponent();
    StartTimer();
    this.backgroundMusic = SoundProvider.Ukulele;
    this.backgroundMusic.Volume = 0.05;
    this.backgroundMusic.MediaEnded += new EventHandler((object sender, EventArgs e) =>
    {
        this.backgroundMusic.Position = TimeSpan.Zero;
        this.backgroundMusic.Play();
    });
    this.backgroundMusic.Play();
    t.Elapsed += T_Elapsed;
    // Create grabber.
    _grabber = new FrameGrabber<LiveCameraResult>();
    updateMode(AppMode.Participants);
    // Set up a listener for when the client receives a new frame.
    _grabber.NewFrameProvided += (s, e) =>
    {
        if (_mode == AppMode.EmotionsWithClientFaceDetect)
        {
            // Local face detection.
            var rects = _localFaceDetector.DetectMultiScale(e.Frame.Image);
            // Attach faces to frame.
            e.Frame.UserData = rects;
        }
        // The callback may occur on a different thread, so we must use the
        // MainWindow.Dispatcher when manipulating the UI.
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            // Display the image in the left pane.
            LeftImage.Source = e.Frame.Image.ToBitmapSource();
            // If we're fusing client-side face detection with remote analysis, show the
            // new frame now with the most recent analysis available.
            if (_fuseClientRemoteResults)
            {
                RightImage.Source = VisualizeResult(e.Frame);
            }
        }));
        if (DateTime.Now - currentTimeTaskStart > currentTimerTask)
        {
            if (gameState == GameState.Explain)
            {
                roundStart = DateTime.Now;
                nextRound();
            }
            else if (gameState == GameState.RoundBegin)
            {
                currentTimerTask = TimeSpan.FromSeconds(15);
                currentTimeTaskStart = DateTime.Now;
                gameState = GameState.Game;
                roundStart = DateTime.Now;
            }
            else if (gameState == GameState.Game)
            {
                currentTimerTask = TimeSpan.FromSeconds(6);
                currentTimeTaskStart = DateTime.Now;
                gameState = GameState.RoundEnd;
                scoringSystem.AddRoundToGameScore();
            }
            else if (gameState == GameState.RoundEnd)
            {
                if (roundNumber == NumOfRounds)
                {
                    this.sound = SoundProvider.TheWinner;
                    this.sound.Play();
                    currentTimerTask = TimeSpan.FromSeconds(3);
                    gameState = GameState.GameEnd;
                    this.Dispatcher.BeginInvoke((Action)(() =>
                    {
                        StartEndImages();
                        button.Visibility = Visibility.Visible;
                    }));
                }
                else
                {
                    nextRound();
                    roundStart = DateTime.Now;
                }
            }
        }
    };
    // Set up a listener for when the client receives a new result from an API call.
    _grabber.NewResultAvailable += (s, e) =>
    {
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            if (e.TimedOut)
            {
                MessageArea.Text = "API call timed out.";
            }
            else if (e.Exception != null)
            {
                string apiName = "";
                string message = e.Exception.Message;
                var faceEx = e.Exception as FaceAPIException;
                var emotionEx = e.Exception as Microsoft.ProjectOxford.Common.ClientException;
                var visionEx = e.Exception as Microsoft.ProjectOxford.Vision.ClientException;
                if (faceEx != null)
                {
                    apiName = "Face";
                    message = faceEx.ErrorMessage;
                }
                else if (emotionEx != null)
                {
                    apiName = "Emotion";
                    message = emotionEx.Error.Message;
                }
                else if (visionEx != null)
                {
                    apiName = "Computer Vision";
                    message = visionEx.Error.Message;
                }
                MessageArea.Text = string.Format("{0} API call failed on frame {1}. Exception: {2}",
                    apiName, e.Frame.Metadata.Index, message);
            }
            else
            {
                _latestResultsToDisplay = e.Analysis;
                // Display the image and visualization in the right pane.
                if (!_fuseClientRemoteResults)
                {
                    RightImage.Source = VisualizeResult(e.Frame);
                }
                if (gameState == GameState.Game || gameState == GameState.RoundBegin)
                {
                    bool drawIndicator = false;
                    if (gameState == GameState.Game)
                    {
                        drawIndicator = true;
                    }
                    RightImage.Source = VisualizeTimer(drawIndicator);
                }
            }
        }));
    };
    // Create local face detector.
    _localFaceDetector.Load("Data/haarcascade_frontalface_alt2.xml");
}
public void SaveData(LiveCameraResult AnalysisFunction)
{
    LogHelper.Log(LogTarget.Binary, AnalysisFunction, filePath);
}
public MainWindow()
{
    InitializeComponent();
    LeftT.Visibility = Visibility.Collapsed;
    RightT.Visibility = Visibility.Collapsed;
    VideoT.Visibility = Visibility.Collapsed;
    Emo.Visibility = Visibility.Collapsed;
    // Create grabber.
    _grabber = new FrameGrabber<LiveCameraResult>();
    // Set up a listener for when the client receives a new frame.
    _grabber.NewFrameProvided += (s, e) =>
    {
        if (_mode == AppMode.EmotionsWithClientFaceDetect)
        {
            // Local face detection.
            var rects = _localFaceDetector.DetectMultiScale(e.Frame.Image);
            // Attach faces to frame.
            e.Frame.UserData = rects;
        }
        // The callback may occur on a different thread, so we must use the
        // MainWindow.Dispatcher when manipulating the UI.
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            // Display the image in the left pane.
            LeftImage.Source = e.Frame.Image.ToBitmapSource();
            // If we're fusing client-side face detection with remote analysis, show the
            // new frame now with the most recent analysis available.
            if (_fuseClientRemoteResults)
            {
                RightImage.Source = VisualizeResult(e.Frame);
            }
        }));
        // See if auto-stop should be triggered.
        if (Properties.Settings.Default.AutoStopEnabled &&
            (DateTime.Now - _startTime) > Properties.Settings.Default.AutoStopTime)
        {
            _grabber.StopProcessingAsync();
        }
    };
    // Set up a listener for when the client receives a new result from an API call.
    _grabber.NewResultAvailable += (s, e) =>
    {
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            if (e.TimedOut)
            {
                MessageArea.Text = "API call timed out.";
            }
            else if (e.Exception != null)
            {
                string apiName = "";
                string message = e.Exception.Message;
                var faceEx = e.Exception as FaceAPIException;
                var emotionEx = e.Exception as Microsoft.ProjectOxford.Common.ClientException;
                var visionEx = e.Exception as Microsoft.ProjectOxford.Vision.ClientException;
                if (faceEx != null)
                {
                    apiName = "Face";
                    message = faceEx.ErrorMessage;
                }
                else if (emotionEx != null)
                {
                    apiName = "Emotion";
                    message = emotionEx.Error.Message;
                }
                else if (visionEx != null)
                {
                    apiName = "Computer Vision";
                    message = visionEx.Error.Message;
                }
                MessageArea.Text = string.Format("{0} API call failed on frame {1}. Exception: {2}",
                    apiName, e.Frame.Metadata.Index, message);
            }
            else
            {
                if (e.Analysis.EmotionScores.Count() > 0)
                {
                    var emotion = e.Analysis.EmotionScores.First().ToRankedList().First();
                    emotions.Add(startTime - DateTime.Now, emotion);
                    Console.WriteLine($"{emotion.Key}: {emotion.Value}");
                    string src = @"C:\Users\Uddal\Downloads\Compressed\Cognitive-Samples-VideoFrameAnalysis-master\Windows\Emo\";
                    Emo.Visibility = Visibility.Visible;
                    // ToRankedList keys use the EmotionScores property names ("Anger", "Happiness", ...).
                    if (emotion.Key.Equals("Anger"))
                    {
                        Emo.Source = new BitmapImage(new Uri(src + "angry.png", UriKind.Absolute));
                    }
                    else if (emotion.Key.Equals("Happiness"))
                    {
                        Emo.Source = new BitmapImage(new Uri(src + "happy.png", UriKind.Absolute));
                    }
                    else if (emotion.Key.Equals("Contempt"))
                    {
                        Emo.Source = new BitmapImage(new Uri(src + "contempt.jpg", UriKind.Absolute));
                    }
                    else if (emotion.Key.Equals("Disgust"))
                    {
                        Emo.Source = new BitmapImage(new Uri(src + "disrupt.png", UriKind.Absolute));
                    }
                    else if (emotion.Key.Equals("Fear"))
                    {
                        Emo.Source = new BitmapImage(new Uri(src + "fear.png", UriKind.Absolute));
                    }
                    else if (emotion.Key.Equals("Neutral"))
                    {
                        Emo.Source = new BitmapImage(new Uri(src + "neutral.png", UriKind.Absolute));
                    }
                    else if (emotion.Key.Equals("Sadness"))
                    {
                        Emo.Source = new BitmapImage(new Uri(src + "sad.png", UriKind.Absolute));
                    }
                    else if (emotion.Key.Equals("Surprise"))
                    {
                        Emo.Source = new BitmapImage(new Uri(src + "surprise.png", UriKind.Absolute));
                    }
                }
                _latestResultsToDisplay = e.Analysis;
                // Display the image and visualization in the right pane.
                if (!_fuseClientRemoteResults)
                {
                    RightImage.Source = VisualizeResult(e.Frame);
                }
                /* int milliseconds = 2000;
                 * Thread.Sleep(milliseconds); */
            }
        }));
    };
    // Create local face detector.
    _localFaceDetector.Load("Data/haarcascade_frontalface_alt2.xml");
}
public MainWindow()
{
    InitializeComponent();
    ServicePointManager.SecurityProtocol = SecurityProtocolType.Tls12;
    // Create grabber.
    _grabber = new FrameGrabber<LiveCameraResult>();
    // Set up a listener for when the client receives a new frame.
    _grabber.NewFrameProvided += (s, e) =>
    {
        // The callback may occur on a different thread, so we must use the
        // MainWindow.Dispatcher when manipulating the UI.
        this.Dispatcher.BeginInvoke((Action)(() =>
        {
            // Display the image in the left pane.
            LeftImage.Source = e.Frame.Image.ToBitmapSource();
        }));
        // See if auto-stop should be triggered.
        if (Properties.Settings.Default.AutoStopEnabled &&
            (DateTime.Now - _startTime) > Properties.Settings.Default.AutoStopTime)
        {
            _grabber.StopProcessingAsync().GetAwaiter().GetResult();
        }
    };
    // Set up a listener for when the client receives a new result from an API call.
    _grabber.NewResultAvailable += (s, e) =>
    {
        this.Dispatcher.BeginInvoke((Action)(async () =>
        {
            if (e.TimedOut)
            {
                MessageArea.Text = "API call timed out.";
            }
            else if (e.Exception != null)
            {
                string apiName = "";
                string message = e.Exception.Message;
                var faceEx = e.Exception as FaceAPI.Models.APIErrorException;
                if (faceEx != null)
                {
                    apiName = "Face";
                    message = faceEx.Message;
                }
                MessageArea.Text = string.Format("{0} API call failed on frame {1}. Exception: {2}",
                    apiName, e.Frame.Metadata.Index, message);
            }
            else
            {
                _latestResultsToDisplay = e.Analysis;
                if (_latestResultsToDisplay != null && _latestResultsToDisplay.Faces.Any())
                {
                    if (!string.IsNullOrWhiteSpace(DocumentImagePath))
                    {
                        // "Verifying face..."
                        MessageArea.Text = "Verificando Rostro...";
                        if (await FindSimilar(_latestResultsToDisplay))
                        {
                            await _grabber.StopProcessingAsync();
                        }
                    }
                    else
                    {
                        // "Please select an image."
                        MessageArea.Text = "Por favor seleccione una imagen";
                    }
                }
            }
        }));
    };
}