Example No. 1
        public async Task SaveAudioFileAsync(Stream stream, string boundary, long instanceId, long campaignId, long importNumber)
        {
            Transcription transcription = new Transcription();

            await transactionService.CommitAsync(new[] { TransactionContextScope.Main }, async () =>
            {
                using (var memoryStream = new MemoryStream())
                {
                    // Buffer the incoming stream so it can be read more than once.
                    await stream.CopyToAsync(memoryStream);

                    // Rewind before handing the buffer to the audio service (assumes it reads from the current position).
                    memoryStream.Seek(0, SeekOrigin.Begin);
                    transcription.AudioId = await labelingAudioService.SaveAudioFileAsync(memoryStream, boundary, instanceId, campaignId);
                    if (transcription.AudioId == 0)
                    {
                        // The audio could not be stored, so there is nothing to persist.
                        return;
                    }

                    if (labelingModuleConfiguration.DeepSpeechControl.IsEnabled)
                    {
                        // Rewind again so the transcription service re-reads the buffer from the start.
                        memoryStream.Seek(0, SeekOrigin.Begin);
                        var result = await GetDeepSpeechTranscription(memoryStream, boundary);
                        transcription.DeepSpeechTranscription = result.Transcript;
                        transcription.MetricsId = await SaveTranscriptionMetrics(result.Score);
                    }

                    // An import number of 0 means "not provided".
                    transcription.ImportNumber = importNumber == 0 ? null : (long?)importNumber;

                    await transcriptionRepository.AddAsync(transcription);
                }
            });
        }
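
For context, a minimal sketch of how SaveAudioFileAsync might be wired to an ASP.NET Core endpoint that receives the raw multipart body. The controller, route, and ILabelingTranscriptionService interface are hypothetical and not part of the example; only the boundary extraction via Microsoft.Net.Http.Headers follows the standard pattern.

using System.IO;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Net.Http.Headers;

// Hypothetical interface exposing the method from Example No. 1.
public interface ILabelingTranscriptionService
{
    Task SaveAudioFileAsync(Stream stream, string boundary, long instanceId, long campaignId, long importNumber);
}

[ApiController]
[Route("api/labeling")]
public class LabelingAudioController : ControllerBase
{
    private readonly ILabelingTranscriptionService labelingService;

    public LabelingAudioController(ILabelingTranscriptionService labelingService)
    {
        this.labelingService = labelingService;
    }

    // Hypothetical endpoint: streams the request body straight into SaveAudioFileAsync.
    [HttpPost("audio/{instanceId:long}/{campaignId:long}/{importNumber:long}")]
    public async Task<IActionResult> UploadAudio(long instanceId, long campaignId, long importNumber)
    {
        // The multipart boundary comes from the request's Content-Type header.
        var contentType = MediaTypeHeaderValue.Parse(Request.ContentType);
        var boundary = HeaderUtilities.RemoveQuotes(contentType.Boundary).Value;

        await labelingService.SaveAudioFileAsync(Request.Body, boundary, instanceId, campaignId, importNumber);
        return Ok();
    }
}
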
Example No. 2
        public async Task<SaveAudioResult> SaveAudioWithDataAsync(Stream stream, string boundary)
        {
            return await transactionService.CommitAsync(new[] { TransactionContextScope.Main }, async () =>
            {
                MultipartSection section;
                var reader = new MultipartReader(boundary, stream);
                var momentData = new FCMomentDataModel[] { };
                var audioFileInfo = new FCMomentAudioFileInfo();

                // Walk the multipart body section by section; the metadata sections are
                // expected to arrive before the audio file section itself.
                while ((section = await reader.ReadNextSectionAsync()) != null)
                {
                    if (section.HasHeaderInfo("audioFileInfo"))
                    {
                        audioFileInfo = await DeserializeSection<FCMomentAudioFileInfo>(section);
                    }

                    if (section.HasHeaderInfo("momentData"))
                    {
                        momentData = await DeserializeSection<FCMomentDataModel[]>(section);
                    }

                    // Only persist the file once both metadata sections have been read.
                    if (section.HasHeaderFile() && audioFileInfo != null && momentData.Any())
                    {
                        var audioId = await labelingAudioService.SaveAudioFileAsync(section.Body, audioFileInfo);
                        if (audioId == 0)
                        {
                            return SaveAudioResult.UploadingError;
                        }

                        // Create one FCMoment per metadata entry, all linked to the stored audio.
                        var moments = momentData.Select(item => new FCMoment
                        {
                            InUse = false,
                            AudioId = audioId,
                            PossibleAms = item.PossibleAMs,
                            ImportNumber = audioFileInfo.ImportNumber,
                            SourceInputName = item.SourceInputName
                        }).ToArray();

                        await fcMomentsRepository.AddRangeAsync(moments);

                        // Persist each moment's audio message log, matched by source input name.
                        foreach (var item in moments)
                        {
                            var amLogs = momentData.Single(model => model.SourceInputName == item.SourceInputName);
                            await SaveAudioMessageLog(amLogs.AudioMessageLog, item.Id);
                        }
                    }
                }

                return SaveAudioResult.Ok;
            });
        }
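
The HasHeaderInfo, HasHeaderFile, and DeserializeSection helpers used above are not shown in the example. Below is a minimal sketch of what they might look like, assuming Content-Disposition matching on the section name and JSON-encoded metadata sections; Newtonsoft.Json and the extension-method form are assumptions (in the original, DeserializeSection appears to be a private method of the service class).

using System.IO;
using System.Threading.Tasks;
using Microsoft.AspNetCore.WebUtilities;
using Microsoft.Extensions.Primitives;
using Microsoft.Net.Http.Headers;
using Newtonsoft.Json;

internal static class MultipartSectionExtensions
{
    // True when the section's Content-Disposition "name" matches the given field name.
    public static bool HasHeaderInfo(this MultipartSection section, string fieldName)
    {
        return ContentDispositionHeaderValue.TryParse(section.ContentDisposition, out var disposition)
            && disposition.Name.Equals(fieldName);
    }

    // True when the section carries an uploaded file (a FileName is present).
    public static bool HasHeaderFile(this MultipartSection section)
    {
        return ContentDispositionHeaderValue.TryParse(section.ContentDisposition, out var disposition)
            && !StringSegment.IsNullOrEmpty(disposition.FileName);
    }

    // Reads the section body as text and deserializes it from JSON.
    public static async Task<T> DeserializeSection<T>(this MultipartSection section)
    {
        using (var reader = new StreamReader(section.Body))
        {
            var json = await reader.ReadToEndAsync();
            return JsonConvert.DeserializeObject<T>(json);
        }
    }
}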