/// <summary>
/// Runs Azure face detection on a media item's stream, stores the result JSON
/// in the item's "FaceMetadata" field, publishes the item to web, and refreshes
/// the face search indexes.
/// </summary>
/// <param name="args">Carries the media item and (optionally) a pre-opened stream.</param>
/// <remarks>
/// NOTE: async void is retained because this is invoked as a fire-and-forget
/// pipeline/event handler; all exceptions are caught below so none can escape
/// the unobservable async-void context.
/// </remarks>
private async void RunProcess(DetectFacesArgs args)
{
    if (args.MediaItem == null)
    {
        return;
    }

    try
    {
        Log.Info("Image recognition starting...", this);
        IFaceDetection faceDetection = new AzureFaceDetection();

        // Prefer the stream supplied on args; fall back to the item's own media stream.
        // NOTE(review): args.Stream may be owned by the caller, so it is not disposed here.
        var mediaStream = args.Stream ?? args.MediaItem.GetMediaStream();
        if (mediaStream == null)
        {
            Log.Info("Weird.... media item media stream is empty...", this);
            return;
        }

        string faceMetadataJson;
        using (var memoryStream = new MemoryStream())
        {
            await mediaStream.CopyToAsync(memoryStream);
            // BUG FIX: rewind before reading. After CopyToAsync the position is at
            // the END of the stream, so the detector would otherwise see no data.
            memoryStream.Position = 0;
            var result = await faceDetection.DetectFacesAsync(memoryStream);
            faceMetadataJson = JsonConvert.SerializeObject(result);
        }

        // Write the metadata back onto the item; SecurityDisabler bypasses
        // item-level security for this background edit.
        using (new SecurityDisabler())
        {
            args.MediaItem.InnerItem.Editing.BeginEdit();
            args.MediaItem.InnerItem["FaceMetadata"] = faceMetadataJson;
            args.MediaItem.InnerItem.Editing.EndEdit(true, false);
        }

        // Push the updated item to the web database (no deep/related publish).
        PublishManager.PublishItem(
            args.MediaItem,
            new[] { Database.GetDatabase("web"), },
            new[] { Language.Current },
            false,
            false);

        // Refresh both face indexes so the new metadata is searchable immediately.
        var index = ContentSearchManager.GetIndex("face_master_index");
        index.Refresh(new SitecoreIndexableItem(args.MediaItem), IndexingOptions.Default);
        index = ContentSearchManager.GetIndex("face_web_index");
        index.Refresh(new SitecoreIndexableItem(args.MediaItem), IndexingOptions.Default);

        Log.Info("Image recognition completed...", this);
    }
    catch (Exception e)
    {
        // FIX: log failures at Error level (was Log.Info) so they surface in monitoring.
        Log.Error("Image recognition failed: " + e.Message, e, this);
    }
}
/// <summary>
/// Unity lifecycle hook: resolves collaborating components and wires up the
/// registration-flow callbacks on the Azure face detection service.
/// </summary>
void Start()
{
    // Component on this same GameObject.
    m_DetectionManager = GetComponent<DetectionManager>();

    // Scene-level collaborators.
    m_CaptureManager = FindObjectOfType<CaptureManager>();
    m_AzureFaceDetection = FindObjectOfType<AzureFaceDetection>();
    m_StatusManager = FindObjectOfType<StatusManager>();

    // Fresh buffer for the photos captured during registration.
    m_PhotosToRegister = new List<Texture2D>();

    // Once faces have been uploaded for a person, kick off training.
    m_AzureFaceDetection.OnFacesAddedToPerson += Train;

    // Deliberately drop any previously registered training-success handlers
    // before installing ours, so only DetectAgain runs after training.
    m_AzureFaceDetection.OnTrainingSuccess = null;
    m_AzureFaceDetection.OnTrainingSuccess += DetectAgain;
}
// Start is called before the first frame update
void Start()
{
    // Locate the scene-level managers this component depends on.
    m_AzureManager = FindObjectOfType<AzureManager>();
    m_AzureFaceDetection = FindObjectOfType<AzureFaceDetection>();
    m_CaptureManager = FindObjectOfType<CaptureManager>();

    // Guard clause: bail out with a diagnostic when any dependency is missing.
    bool anyMissing = m_AzureFaceDetection == null
                      || m_AzureManager == null
                      || m_CaptureManager == null;
    if (anyMissing)
    {
        Debug.Log(Constants.MANAGERS_NOT_PRESENT);
        return;
    }

    Initialize();
}
// Start is called before the first frame update.
// Sets up runtime state, resolves collaborators, and wires the event-driven
// face-recognition state machine: capture -> person-group setup -> person
// registration/training -> detection -> identification.
void Start()
{
    // Placeholder texture; presumably overwritten by each runtime capture — TODO confirm.
    runtimeShot = new Texture2D(1, 1);
    m_PersonsInGroup = new List<PersonInGroup.Person>();
    // Absolute path where the captured frame is written for detection.
    m_RuntimeImage = Application.dataPath + Constants.PREFIX_DETECTION_IMAGES_PATH + "main.jpg";

    // Scene-level collaborators and sibling component.
    m_StatusManager = FindObjectOfType<StatusManager>();
    m_SoundManager = FindObjectOfType<SoundManager>();
    m_AzureFaceDetection = FindObjectOfType<AzureFaceDetection>();
    m_CaptureManager = FindObjectOfType<CaptureManager>();
    m_RegistrationManager = GetComponent<RegistrationManager>();

    // Subscribe to dropdown UI to get person group ID. Not critical for the demo
    // TODO

    // When capture button is pressed, start detection process
    m_CaptureManager.OnCapture += Entry;
    // When debug button is pressed, delete all Person Groups
    m_CaptureManager.OnDebug += DebugReset;

    // Subscribe to the Azure face detection component.
    // Person-group lifecycle: ensure the group exists, then fetch its persons.
    m_AzureFaceDetection.OnPersonGroupNotExisted += CreatePersonGroup;
    m_AzureFaceDetection.OnPersonGroupExists += GetPersonListInGroup;
    m_AzureFaceDetection.OnPersonGroupCreated += GetPersonListInGroup;
    // Person lifecycle: validate existing persons or create the first one.
    m_AzureFaceDetection.OnPersonListNotEmpty += CheckAllPersonsHaveFaces;
    m_AzureFaceDetection.OnPersonListEmpty += CreatePersonInGroup;
    m_AzureFaceDetection.OnPersonInGroupDeleted += GetPersonListInGroup;
    m_AzureFaceDetection.OnPersonCreated += StartRegistration;
    // Training: train when needed; both trained paths proceed to face-area detection.
    m_AzureFaceDetection.OnPersonGroupNotTrained += Train;
    m_AzureFaceDetection.OnPersonGroupTrained += DetermineFaceArea;
    m_AzureFaceDetection.OnTrainingSuccess += DetermineFaceArea;
    // Detection/identification: retry on no faces; unknown faces become new persons.
    m_AzureFaceDetection.OnFacesNotFound += RestartFlow;
    m_AzureFaceDetection.OnFacesFound += Identify;
    m_AzureFaceDetection.OnFaceNotIdentified += CreatePersonInGroup;
    m_AzureFaceDetection.OnFaceIdentified += CheckIdentifiedFaceIsKnown;

    // Debug
    m_AzureFaceDetection.OnPersonGroupDeleted += DebugSuccessful;
}