#pragma warning restore 1416 /// <summary> /// Detect objects in the image. /// </summary> /// <param name="image"></param> /// <returns></returns> private async Task DetectObjects(ImageMetaData metadata) { var image = metadata.Image; var fileName = new FileInfo(image.FullPath); if (!fileName.Exists) { return; } try { var thumbSize = ThumbSize.Large; var medThumb = new FileInfo(_thumbService.GetThumbPath(fileName, thumbSize)); bool enableAIProcessing = _configService.GetBool(ConfigSettings.EnableAIProcessing, true); MetaDataService.GetImageSize(medThumb.FullName, out var thumbWidth, out var thumbHeight); var foundObjects = new List <ImageObject>(); var foundFaces = new List <ImageObject>(); if (enableAIProcessing || _azureFaceService.DetectionType == AzureFaceService.AzureDetection.AllImages) { Logging.Log($"Processing AI image detection for {fileName.Name}..."); } if (!File.Exists(medThumb.FullName)) { // The thumb isn't ready yet. return; } var bitmap = SafeLoadBitmap(medThumb.FullName); if (bitmap != null && _imageClassifier != null && enableAIProcessing) { var colorWatch = new Stopwatch("DetectObjects"); var dominant = _imageClassifier.DetectDominantColour(bitmap); var average = _imageClassifier.DetectAverageColor(bitmap); colorWatch.Stop(); image.MetaData.AverageColor = average.ToHex(); image.MetaData.DominantColor = dominant.ToHex(); Logging.LogVerbose($"Image {image.FullPath} has dominant colour {dominant.ToHex()}, average {average.ToHex()}"); } // Next, look for faces. We need to determine if we: // a) Use only local (Accord.Net) detection // b) Use local detection, and then if we find a face, or a person object, submit to Azure // c) Always submit every image to Azure. // This is a user config. bool useAzureDetection = false; // For the object detector, we need a successfully loaded bitmap if (bitmap != null && enableAIProcessing) { var objwatch = new Stopwatch("DetectObjects"); // First, look for Objects var objects = await _objectDetector.DetectObjects(bitmap); objwatch.Stop(); if (objects.Any()) { Logging.Log($" Yolo found {objects.Count()} objects in {fileName}..."); var newTags = await CreateNewTags(objects); var newObjects = objects.Select(x => new ImageObject { RecogntionSource = ImageObject.RecognitionType.MLNetObject, ImageId = image.ImageId, RectX = (int)x.Rect.Left, RectY = (int)x.Rect.Top, RectHeight = (int)x.Rect.Height, RectWidth = (int)x.Rect.Width, TagId = x.IsFace ? 0 : newTags[x.Tag], Type = ImageObject.ObjectTypes.Object.ToString(), Score = x.Score }).ToList(); if (UseAzureForRecogition(objects)) { useAzureDetection = true; } ScaleObjectRects(image, newObjects, thumbWidth, thumbHeight); foundObjects.AddRange(newObjects); } } if (_azureFaceService.DetectionType == AzureFaceService.AzureDetection.AllImages) { // Skip local face detection and just go straight to Azure useAzureDetection = true; } else if (enableAIProcessing) { if (_emguFaceService.ServiceAvailable) { var emguwatch = new Stopwatch("EmguFaceDetect"); var rects = _emguFaceService.DetectFaces(medThumb.FullName); emguwatch.Stop(); if (UseAzureForRecogition(rects)) { // Filter out the faces if we're using Azure rects = rects.Where(x => !x.IsFace).ToList(); useAzureDetection = true; } if (rects.Any()) { // Azure is disabled, so just use what we've got. Logging.Log($" Emgu found {rects.Count} faces in {fileName}..."); var newTags = await CreateNewTags(rects); var newObjects = rects.Select(x => new ImageObject { RecogntionSource = ImageObject.RecognitionType.Emgu, ImageId = image.ImageId, RectX = x.Rect.Left, RectY = x.Rect.Top, RectHeight = x.Rect.Height, RectWidth = x.Rect.Width, TagId = newTags[x.Tag], Type = x.IsFace ? ImageObject.ObjectTypes.Face.ToString() : ImageObject.ObjectTypes.Object.ToString(), Score = 0 }).ToList(); ScaleObjectRects(image, newObjects, thumbWidth, thumbHeight); foundFaces.AddRange(newObjects); } } else { var accordwatch = new Stopwatch("AccordFaceDetect"); // Emgu isn't available, so use Accord.Net instead var rects = _accordFaceService.DetectFaces(bitmap); accordwatch.Stop(); if (rects.Any()) { if (UseAzureForRecogition(rects)) { useAzureDetection = true; } else { // Azure is disabled, so just use what we've got. Logging.Log($" Accord.Net found {rects.Count} faces in {fileName}..."); var newTags = await CreateNewTags(rects); var newObjects = rects.Select(x => new ImageObject { ImageId = image.ImageId, RectX = x.Rect.Left, RectY = x.Rect.Top, RectHeight = x.Rect.Height, RectWidth = x.Rect.Width, Type = ImageObject.ObjectTypes.Face.ToString(), // Accord only does faces. TagId = newTags[x.Tag], RecogntionSource = ImageObject.RecognitionType.Accord, Score = 0 }).ToList(); ScaleObjectRects(image, newObjects, thumbWidth, thumbHeight); foundFaces.AddRange(newObjects); } } } } if (useAzureDetection) { var faceTag = await _metdataService.CreateTagsFromStrings(new List <string> { "Face" }); var faceTagId = faceTag.FirstOrDefault()?.TagId ?? 0; var azurewatch = new Stopwatch("AzureFaceDetect"); Logging.LogVerbose($"Processing {medThumb.FullName} with Azure Face Service"); // We got predictions or we're scanning everything - so now let's try the image with Azure. var azureFaces = await _azureFaceService.DetectFaces(medThumb.FullName, _imageProcessor); azurewatch.Stop(); if (azureFaces.Any()) { Logging.Log($" Azure found {azureFaces.Count} faces in {fileName}..."); // Get a list of the Azure Person IDs var peopleIds = azureFaces.Select(x => x.PersonId.ToString()); // Create any new ones, or pull existing ones back from the cache await CreateMissingPeople(peopleIds); // Now convert into ImageObjects. Note that if the peopleCache doesn't // contain the key, it means we didn't create a person record successfully // for that entry - so we skip it. var newObjects = azureFaces.Select(x => new ImageObject { ImageId = image.ImageId, RectX = x.Left, RectY = x.Top, RectHeight = x.Height, RectWidth = x.Width, Type = ImageObject.ObjectTypes.Face.ToString(), TagId = faceTagId, RecogntionSource = ImageObject.RecognitionType.Azure, Score = x.Score, PersonId = GetPersonIDFromCache(x.PersonId) }).ToList(); ScaleObjectRects(image, newObjects, thumbWidth, thumbHeight); foundFaces.AddRange(newObjects); var peopleToAdd = foundFaces.Select(x => x.Person); // Add them } else { // If we're scanning because local face detection found a face, log the result. if (_azureFaceService.DetectionType == AzureFaceService.AzureDetection.ImagesWithFaces) { Logging.Log($"Azure found no faces in image {fileName}"); } else { Logging.LogVerbose($"Azure found no faces in image {fileName}"); } } } if (foundFaces.Any()) { // We've found some faces. Add a tagID. const string faceTagName = "Face"; var tags = await _metdataService.CreateTagsFromStrings(new List <string> { faceTagName }); var faceTagId = tags.Single().TagId; foundFaces.ForEach(x => x.TagId = faceTagId); } if (foundObjects.Any() || foundFaces.Any()) { var objWriteWatch = new Stopwatch("WriteDetectedObjects"); var allFound = foundObjects.Union(foundFaces).ToList(); using var db = new ImageContext(); // First, clear out the existing faces and objects - we don't want dupes // TODO: Might need to be smarter about this once we add face names and // Object identification details. await db.BatchDelete(db.ImageObjects.Where(x => x.ImageId.Equals(image.ImageId) && x.RecogntionSource != ImageObject.RecognitionType.ExternalApp)); // Now add the objects and faces. await db.BulkInsert(db.ImageObjects, allFound); WriteAITagsToImages(image, allFound); objWriteWatch.Stop(); } } catch (Exception ex) { Logging.LogError($"Exception during AI detection for {fileName}: {ex}"); } }