Example #1
0
        /// <summary>
        /// Verifies that "transcript create" without the required --name option
        /// writes the validation error message to the app's output writer.
        /// </summary>
        public void Create_NoName_Return_Required()
        {
            // ARRANGE
            var mock = new Mock<ISpeechServicesAPIv20>();

            mock.SetupAllProperties();

            var app = InitApp(mock.Object);

            // ACT
            var args = CommandIntoArgs("transcript create --model ABCD");

            app.Execute(args);

            // ASSERT
            Assert.Equal("The --name field is required.\r\n", ((MockTestWriter)app.Out).ReadAsString());
        }
            /// <summary>
            /// Builds a transcription definition from the parsed command options
            /// (including diarization / timestamp / sentiment flags) and submits
            /// it, optionally waiting for completion.
            /// </summary>
            int OnExecute()
            {
                var properties = SplitProperties(Properties);

                HandleOptionProp(ref properties, "AddDiarization", Diarization);
                HandleOptionProp(ref properties, "AddWordLevelTimestamps", WordLevelTimestamps);
                HandleOptionProp(ref properties, "AddSentiment", Sentiment);

                var definition = new TranscriptionDefinition(Recording, Locale ?? "en-us", Name, properties: properties);

                definition.ModelsProperty = new List<ModelIdentity>();

                // Attach the acoustic and/or language model when an id was supplied.
                foreach (var modelId in new[] { Model, LanguageModel })
                {
                    if (!string.IsNullOrEmpty(modelId))
                    {
                        definition.ModelsProperty.Add(new ModelIdentity(Guid.Parse(modelId)));
                    }
                }

                _console.WriteLine("Creating transcript...");

                var exitCode = CreateAndWait(
                    () => _speechApi.CreateTranscription(definition),
                    Wait,
                    _speechApi.GetTranscription);

                return exitCode;
            }
        /// <summary>
        /// Submits a batch transcription job for the stored episode's audio and
        /// returns the identifier of the created transcription.
        /// </summary>
        private async Task <Guid> TranscribeEpisode(SrStoredEpisode storedEpisode, SpeechBatchClient speechBatchClient)
        {
            // Strip any SAS/query parameters before handing the URL to the API.
            var cleanAudioUrl = RemoveQueryString(storedEpisode.AudioUrl);

            var definition = TranscriptionDefinition.Create(
                $"RadioText - Episode {storedEpisode.Episode.Id}",
                "RadioText",
                storedEpisode.AudioLocale,
                new Uri(cleanAudioUrl));

            var location = await speechBatchClient.PostTranscriptionAsync(definition);

            return GetTranscriptionGuid(location);
        }
Example #4
0
        /// <summary>
        /// Blob-triggered function that submits a newly uploaded recording to the
        /// Speech-to-Text v2.0 batch transcription API via a SAS URL.
        /// </summary>
        /// <param name="bytes">Stream of the triggering blob (unreferenced; the blob is passed by SAS URL instead).</param>
        /// <param name="name">Blob name, used as the transcription name.</param>
        /// <param name="uri">Full URI of the triggering blob.</param>
        /// <param name="log">Function logger.</param>
        // Changed from 'async void' to 'async Task' so the Functions runtime can
        // await completion and observe exceptions ('async void' exceptions are lost).
        public static async Task ProcessRecording(
            [BlobTrigger(containerName + "/{name}", Connection = "AzureWebJobsStorage")] Stream bytes,
            string name,
            Uri uri,
            ILogger log
            )
        {
            var sasUri    = GetBlobSASToken(uri, blobPolicyName, log);
            var recording = new TranscriptionDefinition {
                Name = name, RecordingsUrl = sasUri, Locale = "en-US", Models = new Collection<ModelIdentity>(), Description = "something"
            };
            var postUri = new Uri(baseApiUri, "api/speechtotext/v2.0/transcriptions");

            // Submit recording: POST /api/speechtotext/v2.0/transcriptions
            var req = new HttpRequestMessage(HttpMethod.Post, postUri);

            req.Headers.Add("Ocp-Apim-Subscription-Key", speechApiToken);
            req.Content = new System.Net.Http.StringContent(
                Newtonsoft.Json.JsonConvert.SerializeObject(recording),
                Encoding.UTF8,
                "application/json"
                );

            try {
                var resp = await client.SendAsync(req);

                if (resp.StatusCode == HttpStatusCode.Accepted)
                {
                    // 202 Accepted: the Location header points at the new transcription resource.
                    var ret = resp.Headers.Location;
                    log.LogInformation($"Transcript available at: {ret.AbsoluteUri}");
                }
                else
                {
                    log.LogWarning($"Transcription request not accepted: {resp.StatusCode}");
                }
            } catch (HttpRequestException ex) {
                // Pass the exception as the first argument so ILogger records it;
                // previously it was passed as an unused message-template argument.
                log.LogError(ex, "Speech API request failed");
            }
        }
Example #5
0
            /// <summary>
            /// Assembles a transcription definition from the parsed command
            /// options and submits it, optionally waiting for the result.
            /// </summary>
            int OnExecute()
            {
                var definition = new TranscriptionDefinition(
                    Recording,
                    Locale ?? "en-us",
                    Name,
                    properties: SplitProperties(Properties));

                var models = new List<ModelIdentity>();

                if (!string.IsNullOrEmpty(Model))
                {
                    models.Add(new ModelIdentity(Guid.Parse(Model)));
                }

                if (!string.IsNullOrEmpty(LanguageModel))
                {
                    models.Add(new ModelIdentity(Guid.Parse(LanguageModel)));
                }

                definition.ModelsProperty = models;

                _console.WriteLine("Creating transcript...");

                return CreateAndWait(
                    () => _speechApi.CreateTranscription(definition),
                    Wait,
                    _speechApi.GetTranscription);
            }
        /// <summary>
        /// Starts one batch transcription job covering the audio files referenced
        /// by the given service bus messages, then enqueues a delayed message so
        /// the fetch function can poll for the result.
        /// </summary>
        /// <param name="messages">Service bus messages, each carrying one audio file reference.</param>
        /// <param name="jobName">Name under which the transcription job is created.</param>
        private async Task StartBatchTranscriptionJobAsync(IEnumerable <Message> messages, string jobName)
        {
            if (messages == null || !messages.Any())
            {
                Logger.LogError("Invalid service bus message(s).");
                return;
            }

            var fetchingDelay      = GetInitialFetchingDelay(messages.Count());
            var serviceBusMessages = messages.Select(message => JsonConvert.DeserializeObject <ServiceBusMessage>(Encoding.UTF8.GetString(message.Body)));

            try
            {
                var properties = GetTranscriptionPropertyBag();

                var sasUrls        = new List <string>();
                var audioFileInfos = new List <AudioFileInfo>();

                foreach (var serviceBusMessage in serviceBusMessages)
                {
                    var sasUrl = StorageConnectorInstance.CreateSas(serviceBusMessage.Data.Url);
                    sasUrls.Add(sasUrl);
                    audioFileInfos.Add(new AudioFileInfo(serviceBusMessage.Data.Url.AbsoluteUri, serviceBusMessage.RetryCount));
                }

                // Use a custom model only when a valid model id is configured.
                ModelIdentity modelIdentity = null;

                if (Guid.TryParse(StartTranscriptionEnvironmentVariables.CustomModelId, out var customModelId))
                {
                    modelIdentity = ModelIdentity.Create(StartTranscriptionEnvironmentVariables.AzureSpeechServicesRegion, customModelId);
                }

                var transcriptionDefinition = TranscriptionDefinition.Create(jobName, "StartByTimerTranscription", Locale, sasUrls, properties, modelIdentity);

                var transcriptionLocation = await BatchClient.PostTranscriptionAsync(
                    transcriptionDefinition,
                    HostName,
                    SubscriptionKey,
                    Logger).ConfigureAwait(false);

                Logger.LogInformation($"Location: {transcriptionLocation}");

                var transcriptionMessage = new TranscriptionStartedMessage(
                    transcriptionLocation.AbsoluteUri,
                    jobName,
                    Locale,
                    modelIdentity != null,
                    audioFileInfos,
                    0,
                    0);

                // Delay the fetch message so the service has time to start processing.
                await ServiceBusUtilities.SendServiceBusMessageAsync(FetchQueueClientInstance, transcriptionMessage.CreateMessageString(), Logger, fetchingDelay).ConfigureAwait(false);
            }
            catch (WebException e)
            {
                // BUGFIX: WebException.Response is null for connection-level
                // failures (DNS errors, timeouts before any response) — the
                // previous unconditional cast/dereference threw
                // NullReferenceException in exactly the failure path this
                // handler exists for.
                if (e.Response is HttpWebResponse webResponse && BatchClient.IsThrottledOrTimeoutStatusCode(webResponse.StatusCode))
                {
                    var errorMessage = $"Throttled or timeout while creating post. Error Message: {e.Message}";
                    Logger.LogError(errorMessage);
                    await RetryOrFailMessagesAsync(messages, errorMessage).ConfigureAwait(false);
                }
                else
                {
                    var errorMessage = $"Start Transcription in job with name {jobName} failed with WebException {e} and message {e.Message}";
                    Logger.LogError(errorMessage);

                    if (e.Response != null)
                    {
                        using (var reader = new StreamReader(e.Response.GetResponseStream()))
                        {
                            var responseMessage = await reader.ReadToEndAsync().ConfigureAwait(false);

                            errorMessage += "\nResponse message:" + responseMessage;
                        }
                    }

                    await WriteFailedJobLogToStorageAsync(serviceBusMessages, errorMessage, jobName).ConfigureAwait(false);
                }

                throw;
            }
            catch (TimeoutException e)
            {
                var errorMessage = $"Timeout while creating post, re-enqueueing transcription start. Message: {e.Message}";
                Logger.LogError(errorMessage);
                await RetryOrFailMessagesAsync(messages, errorMessage).ConfigureAwait(false);

                throw;
            }
            catch (Exception e)
            {
                var errorMessage = $"Start Transcription in job with name {jobName} failed with exception {e} and message {e.Message}";
                Logger.LogError(errorMessage);
                await WriteFailedJobLogToStorageAsync(serviceBusMessages, errorMessage, jobName).ConfigureAwait(false);

                throw;
            }

            Logger.LogInformation($"Fetch transcription queue successfully informed about job at: {jobName}");
        }
Example #7
0
        /// <summary>
        /// Creates a batch transcription from a recordings blob and polls the
        /// Speech-to-Text API until the job completes.
        /// </summary>
        /// <returns>The transcription results text on success; "Error" when the job could not be started.</returns>
        static async Task <string> TranscribeAsync(string SubscriptionKey, string Region, int Port, string Locale, string Name, string Description, string SpeechToTextBasePath, string RecordingsBlobUri)
        {
            Console.WriteLine("Starting transcriptions client...");
            string resultstring = string.Empty;
            // Create the client object and authenticate.
            // NOTE(review): a new HttpClient per call risks socket exhaustion if this
            // method is invoked frequently — consider a shared instance.
            var client = new HttpClient
            {
                Timeout               = TimeSpan.FromMinutes(25),
                BaseAddress           = new UriBuilder(Uri.UriSchemeHttps, $"{Region}.cris.ai", Port).Uri,
                DefaultRequestHeaders =
                {
                    { "Ocp-Apim-Subscription-Key", SubscriptionKey }
                }
            };

            var transcriptionDefinition =
                TranscriptionDefinition.Create(
                    Name,
                    Description,
                    Locale,
                    new Uri(RecordingsBlobUri));

            var res = JsonConvert.SerializeObject(transcriptionDefinition);
            var sc  = new StringContent(res);

            sc.Headers.ContentType = JsonMediaTypeFormatter.DefaultMediaType;

            Uri transcriptionLocation = null;

            using (var response = await client.PostAsync($"{SpeechToTextBasePath}Transcriptions/", sc))
            {
                if (!response.IsSuccessStatusCode)
                {
                    Console.WriteLine("Error {0} starting transcription.", response.StatusCode);
                    return "Error";
                }

                transcriptionLocation = response.Headers.Location;
            }

            Console.WriteLine($"Created transcription at location {transcriptionLocation}.");
            Console.WriteLine("Checking status.");

            var completed = false;

            // Check the status of our transcription periodically.
            while (!completed)
            {
                Transcription transcription = null;

                using (var response = await client.GetAsync(transcriptionLocation.AbsolutePath))
                {
                    var contentType = response.Content.Headers.ContentType;
                    if (response.IsSuccessStatusCode &&
                        string.Equals(contentType.MediaType, "application/json", StringComparison.OrdinalIgnoreCase))
                    {
                        transcription = await response.Content.ReadAsAsync <Transcription>();
                    }
                    else
                    {
                        Console.WriteLine("Error with status {0} getting transcription result", response.StatusCode);
                        // BUGFIX: wait before retrying. The original 'continue'
                        // skipped the delay at the bottom of the loop, producing a
                        // tight busy-loop that hammered the API on repeated errors.
                        await Task.Delay(TimeSpan.FromSeconds(5));
                        continue;
                    }
                }

                // Dispatch on the reported job status.
                switch (transcription.Status)
                {
                case "Failed":
                    completed = true;
                    Console.WriteLine("Transcription failed. Status: {0}", transcription.StatusMessage);
                    break;

                case "Succeeded":
                    completed = true;
                    var filename = Path.GetTempFileName();
                    // BUGFIX: dispose the WebClient instead of leaking it.
                    using (var webClient = new WebClient())
                    {
                        webClient.DownloadFile(transcription.ResultsUrls["channel_0"], filename);
                    }
                    var results = File.ReadAllText(filename);
                    Console.WriteLine($"Transcription succeeded. Results: {Environment.NewLine}{results}");
                    resultstring = results;
                    File.Delete(filename);
                    break;

                case "Running":
                    Console.WriteLine("Transcription is still running.");
                    break;

                case "NotStarted":
                    Console.WriteLine("Transcription has not started.");
                    break;
                }

                await Task.Delay(TimeSpan.FromSeconds(5));
            }
            return resultstring;
        }
Example #8
0
 /// <summary>
 /// Translation of a transcription definition is not supported.
 /// </summary>
 /// <exception cref="NotImplementedException">Always thrown.</exception>
 public Task Translate(TranscriptionDefinition transcriptionDefinition) =>
     throw new NotImplementedException();