/// <summary>
/// Uploads the captured frame file to blob storage and sends a
/// CamFrameAnalysis cognitive request to IoT Hub describing that frame.
/// </summary>
/// <param name="filePath">Local path of the captured frame image.</param>
/// <param name="detectedFaces">Number of faces detected locally in the frame.</param>
private void SendFrame(string filePath, int detectedFaces)
{
    // Using Azure Storage SDK to upload the file is another option.
    // NOTE: this is sync-over-async; GetAwaiter().GetResult() is used instead of
    // Wait() so failures surface as the original exception rather than an
    // AggregateException. Consider making this method async Task in a follow-up.
    _storageRepo.CreateFileAsync(Path.GetFileName(filePath), System.IO.File.ReadAllBytes(filePath))
        .GetAwaiter().GetResult();

    // IoT Hub has built-in support for blobs upload.
    // File will be uploaded to a folder with the device name. This might require some code changes.
    //_iotHub.UploadFile(Path.GetFileName(filePath), new MemoryStream(System.IO.File.ReadAllBytes(filePath))).Wait();

    CognitiveRequest req = new CognitiveRequest
    {
        CreatedAt = DateTime.UtcNow,
        DeviceId = deviceId,
        FileUrl = Path.GetFileName(filePath),
        Id = Guid.NewGuid().ToString(),
        IsActive = true,
        IsDeleted = false,
        IsProcessed = false,
        Origin = "Device.Web.V1.0.0",
        Status = "Submitted",
        TakenAt = DateTime.UtcNow,
        TargetAction = CognitiveTargetAction.CamFrameAnalysis.ToString()
    };

    // Message application properties let downstream consumers route/filter
    // without deserializing the body.
    Dictionary<string, string> properties = new Dictionary<string, string>
    {
        { "DeviceId", deviceId },
        { "DetectedFacesCount", detectedFaces.ToString() }
    };

    _iotHub.SendEventAsync(JsonConvert.SerializeObject(req), properties)
        .GetAwaiter().GetResult();
}
/// <summary>
/// Serializes the cognitive request and publishes it to the camera-frame
/// analyzer Service Bus topic.
/// </summary>
/// <param name="request">Cognitive request describing the frame to analyze.</param>
public void CamFrameAnalysis(CognitiveRequest request)
{
    var payload = Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(request));
    camFrameAnalyzerServiceBus.PublishMessage(new Message(payload));
}
/// <summary>
/// Accepts a document upload, stores it in blob storage under a timestamped
/// name, and publishes a cognitive request to Service Bus for processing.
/// </summary>
/// <param name="deviceId">Identifier of the submitting device.</param>
/// <param name="docType">Requested processing type; must parse to a known <c>CognitiveTargetAction</c>.</param>
/// <param name="doc">The uploaded document.</param>
/// <returns>200 with the created request on success; 400 for invalid input.</returns>
public async Task<IActionResult> SubmitDoc(string deviceId, string docType, IFormFile doc)
{
    if (doc == null || doc.Length == 0)
    {
        return BadRequest("file not selected");
    }

    // Reject unknown types and the explicit Unidentified sentinel up front.
    var proposedDocType = CognitiveTargetAction.Unidentified;
    var isValidType = Enum.TryParse<CognitiveTargetAction>(docType, out proposedDocType);
    if (!isValidType || proposedDocType == CognitiveTargetAction.Unidentified)
    {
        return BadRequest("Invalid document type");
    }

    long size = doc.Length;
    string docName = "NA";
    string docUri = null;

    if (size > 0)
    {
        using (var stream = doc.OpenReadStream())
        {
            // FIX: Path.GetExtension returns "" for filenames without a dot,
            // whereas Substring(LastIndexOf('.')) threw ArgumentOutOfRangeException.
            var docExtention = Path.GetExtension(doc.FileName);
            docName = $"{deviceId}-{DateTime.UtcNow.ToString("ddMMyyHHmmss")}{docExtention}";
            docUri = await storageRepository.CreateFileAsync(docName, stream);
        }
    }
    else
    {
        return BadRequest("Submitted file size is 0");
    }

    CognitiveRequest req = new CognitiveRequest
    {
        CreatedAt = DateTime.UtcNow,
        DeviceId = deviceId,
        FileUrl = docName,
        Id = Guid.NewGuid().ToString(),
        IsActive = true,
        IsDeleted = false,
        IsProcessed = false,
        Origin = "CognitiveOrchestrator.API.V1.0.0",
        Status = "Submitted",
        TakenAt = DateTime.UtcNow,
        TargetAction = proposedDocType.ToString()
    };

    // Publish result is not inspected; discard keeps the await explicit.
    var sbMessage = new Microsoft.Azure.ServiceBus.Message(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(req)));
    _ = await serviceBusRepository.PublishMessage(sbMessage);

    return Ok(req);
}
/// <summary>
/// Service Bus triggered entry point: parses the cognitive request, creates a
/// Face API client from configuration and downloads the referenced frame.
/// </summary>
/// <param name="request">JSON-serialized <c>CognitiveRequest</c> from the topic.</param>
/// <param name="log">Function logger.</param>
public async Task Run(
    [ServiceBusTrigger(AppConstants.SBTopic, AppConstants.SBSubscription, Connection = "SB_Connection")] string request,
    ILogger log)
{
    CognitiveRequest cognitiveRequest = null;
    try
    {
        cognitiveRequest = JsonConvert.DeserializeObject<CognitiveRequest>(request);
    }
    catch (Exception ex)
    {
        log.LogError($"FUNC (CamFrameAnalyzer): camframe-analysis topic triggered and failed to parse message: {JsonConvert.SerializeObject(request)} with the error: {ex.Message}");
    }

    // FIX: the original fell through with cognitiveRequest == null after a parse
    // failure and dereferenced it below (guaranteed NullReferenceException).
    if (cognitiveRequest == null)
    {
        return;
    }

    try
    {
        key = GlobalSettings.GetKeyValue("cognitiveKey");
        endpoint = GlobalSettings.GetKeyValue("cognitiveEndpoint");

        faceClient = new FaceClient(
            new Microsoft.Azure.CognitiveServices.Vision.Face.ApiKeyServiceClientCredentials(key),
            new System.Net.Http.DelegatingHandler[] { })
        {
            Endpoint = endpoint
        };

        var data = await filesStorageRepo.GetFileAsync(cognitiveRequest.FileUrl);
        // NOTE(review): frameAnalysis and data are created but not used further in
        // this block — presumably a work-in-progress; confirm intended behavior.
        var frameAnalysis = new CamFrameAnalysis();
    }
    catch (Exception ex)
    {
        // FIX: this catch guards processing, not parsing — log message corrected.
        log.LogError($"FUNC (CamFrameAnalyzer): camframe-analysis topic triggered and failed to process message: {JsonConvert.SerializeObject(cognitiveRequest)} with the error: {ex.Message}");
    }

    log.LogInformation($"FUNC (CamFrameAnalyzer): camframe-analysis topic triggered and processed message: {JsonConvert.SerializeObject(cognitiveRequest)}");
}
/// <summary>
/// Service Bus triggered entry point: parses the cognitive request, downloads
/// the referenced frame, runs mask detection and persists the analysis.
/// </summary>
/// <param name="request">JSON-serialized <c>CognitiveRequest</c> from the topic.</param>
/// <param name="log">Function logger.</param>
/// <returns>The serialized persisted <c>MaskAnalysis</c> on success; otherwise null.</returns>
public async Task<string> Run(
    [ServiceBusTrigger(AppConstants.SBTopic, AppConstants.SBSubscription, Connection = "serviceBusConnection")] string request,
    ILogger log)
{
    DateTime startTime = DateTime.UtcNow;
    log.LogInformation($"FUNC (MaskDetector): camframe-analysis topic triggered processing message: {JsonConvert.SerializeObject(request)}");

    CognitiveRequest cognitiveRequest = null;
    try
    {
        cognitiveRequest = JsonConvert.DeserializeObject<CognitiveRequest>(request);
    }
    catch (Exception ex)
    {
        log.LogError($"FUNC (MaskDetector): camframe-analysis topic triggered and failed to parse message: {JsonConvert.SerializeObject(request)} with the error: {ex.Message}");
    }

    // FIX: the original dereferenced cognitiveRequest after a swallowed parse
    // failure, guaranteeing a NullReferenceException on bad payloads.
    if (cognitiveRequest == null)
    {
        return null;
    }

    // Starting mask detection: seed the analysis record in a Processing state.
    MaskAnalysis result = new MaskAnalysis
    {
        Id = $"{Guid.NewGuid()}-{cognitiveRequest.TakenAt.Month}{cognitiveRequest.TakenAt.Year}",
        Request = cognitiveRequest,
        CreatedAt = startTime,
        TimeKey = $"{cognitiveRequest.TakenAt.Month}{cognitiveRequest.TakenAt.Year}",
        IsDeleted = false,
        IsSuccessful = false,
        Origin = "MaskDetector",
        Status = ProcessingStatus.Processing.ToString()
    };

    // We need only the filename, not the FQDN, from FileUrl.
    var fileName = cognitiveRequest.FileUrl.Substring(cognitiveRequest.FileUrl.LastIndexOf("/") + 1);
    var data = await filesStorageRepo.GetFileAsync(fileName);

    try
    {
        // FIX: MemoryStream was never disposed; using-statement bounds its lifetime.
        using (var imageStream = new MemoryStream(data))
        {
            var detectionResult = await maskDetectionApi.DetectImage(new StreamPart(imageStream, "image.jpg", "multipart/form-data"));

            // FIX: the threshold filter was re-evaluated four times; filter once
            // and derive every count from the materialized list.
            var confidentPredictions = detectionResult.Predictions
                .Where(p => p.Probability >= AppConstants.MaskDetectionThreshold)
                .ToList();

            result.DetectionResult = detectionResult;
            result.DetectionResult.Predictions = confidentPredictions;
            result.IsSuccessful = true;
            result.Status = ProcessingStatus.Successful.ToString();
            result.TotalDetected = confidentPredictions.Count;
            result.TotalDetectedWithMasks = confidentPredictions.Count(p => p.TagName == "MASK");
            result.TotalDetectedWithoutMasks = confidentPredictions.Count(p => p.TagName == "NOMASK");
            result.MaskDetectionThreshold = AppConstants.MaskDetectionThreshold;
            result.TotalProcessingTime = (int)(DateTime.UtcNow - startTime).TotalMilliseconds;

            var dbResult = await maskAnalysisRepo.AddAsync(result);
            return JsonConvert.SerializeObject(dbResult);
        }
    }
    catch (Exception ex)
    {
        // FIX: this catch guards processing, not parsing — log message corrected.
        log.LogError($"FUNC (MaskDetector): camframe-analysis topic triggered and failed to process message: {JsonConvert.SerializeObject(cognitiveRequest)} with the error: {ex.Message}");
    }

    return null;
}
// NOTE(review): intentionally empty no-op — presumably a placeholder for a static
// camera-frame analysis handler that was never implemented or was moved elsewhere;
// confirm whether any caller relies on it before removing.
public static void CamFrameAnalysis(CognitiveRequest request) { }
/// <summary>
/// Service Bus triggered entry point: parses the cognitive request, configures
/// the Face service helpers, downloads the referenced frame, runs face analysis
/// and persists the result.
/// </summary>
/// <param name="request">JSON-serialized <c>CognitiveRequest</c> from the topic.</param>
/// <param name="log">Function logger.</param>
/// <returns>The serialized <c>CamFrameAnalysis</c> when analysis succeeded; otherwise null.</returns>
public async Task<string> Run(
    [ServiceBusTrigger(AppConstants.SBTopic, AppConstants.SBSubscription, Connection = "serviceBusConnection")] string request,
    ILogger log)
{
    DateTime startTime = DateTime.UtcNow;
    log.LogInformation($"FUNC (CamFrameAnalyzer): camframe-analysis topic triggered processing message: {JsonConvert.SerializeObject(request)}");

    CognitiveRequest cognitiveRequest = null;
    try
    {
        cognitiveRequest = JsonConvert.DeserializeObject<CognitiveRequest>(request);
    }
    catch (Exception ex)
    {
        log.LogError($"FUNC (CamFrameAnalyzer): camframe-analysis topic triggered and failed to parse message: {JsonConvert.SerializeObject(request)} with the error: {ex.Message}");
    }

    // FIX: the original dereferenced cognitiveRequest after a swallowed parse
    // failure, turning a bad payload into a NullReferenceException downstream.
    if (cognitiveRequest == null)
    {
        return null;
    }

    try
    {
        key = GlobalSettings.GetKeyValue("cognitiveKey");
        endpoint = GlobalSettings.GetKeyValue("cognitiveEndpoint");
        faceWorkspaceDataFilter = GlobalSettings.GetKeyValue("faceWorkspaceDataFilter");

        FaceServiceHelper.ApiKey = key;
        FaceServiceHelper.ApiEndpoint = endpoint;
        FaceListManager.FaceListsUserDataFilter = faceWorkspaceDataFilter;

        frameAnalysis = new CamFrameAnalysis
        {
            Id = $"{Guid.NewGuid()}-{cognitiveRequest.TakenAt.Month}{cognitiveRequest.TakenAt.Year}",
            Request = cognitiveRequest,
            CreatedAt = startTime,
            TimeKey = $"{cognitiveRequest.TakenAt.Month}{cognitiveRequest.TakenAt.Year}",
            IsDeleted = false,
            IsSuccessful = false,
            Origin = "CamFrameAnalyzer",
            Status = ProcessingStatus.Processing.ToString()
        };

        // Get image data. We need only the filename, not the FQDN in FileUrl.
        var fileName = cognitiveRequest.FileUrl.Substring(cognitiveRequest.FileUrl.LastIndexOf("/") + 1);
        var data = await filesStorageRepo.GetFileAsync(fileName);

        // Load the analyzer with data
        CognitiveFacesAnalyzer.PeopleGroupsUserDataFilter = faceWorkspaceDataFilter;
        cognitiveFacesAnalyzer = new CognitiveFacesAnalyzer(data);

        await AnalyzeCameFrame(log);
        UpdateAnalysisSummary();
        frameAnalysis.TotalProcessingTime = (int)(DateTime.UtcNow - startTime).TotalMilliseconds;
        await SaveAnalysisAsync();

        log.LogInformation($"FUNC (CamFrameAnalyzer): camframe-analysis COMPLETED: {JsonConvert.SerializeObject(frameAnalysis)}");

        // Only publish a new analysis when face detection was successful with faces.
        if (frameAnalysis.IsSuccessful)
        {
            return JsonConvert.SerializeObject(frameAnalysis);
        }
        else
        {
            return null;
        }
    }
    catch (Exception ex)
    {
        // FIX: this catch guards processing, not parsing — log message corrected.
        log.LogError($"FUNC (CamFrameAnalyzer): camframe-analysis topic triggered and failed to process message: {JsonConvert.SerializeObject(cognitiveRequest)} with the error: {ex.Message}");
    }

    return null;
}