Example #1
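Text-to-speech: converts a TextRequest into a VoiceRespone by synthesizing WAV audio through Yandex SpeechKit; _language and cancellationToken are presumably fields of the enclosing class.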
            public override async Task<VoiceRespone> Invork(TextRequest textRequest)
            {
                VoiceRespone voiceRespone = new VoiceRespone()
                {
                    Id = textRequest.Id.Value
                };
                var apiSetttings = new SpeechKitClientOptions($"{AppConfig.YandexSpeechApiKey}", "MashaWebApi", Guid.Empty, "server");

                using (var client = new SpeechKitClient(apiSetttings))
                {
                    var options = new SynthesisOptions(textRequest.TextData, 1.1)
                    {
                        AudioFormat = SynthesisAudioFormat.Wav,
                        Language    = _language,
                        Emotion     = Emotion.Good,
                        Quality     = SynthesisQuality.High,
                        Speaker     = Speaker.Omazh
                    };

                    using (var textToSpechResult = await client.TextToSpeechAsync(options, cancellationToken).ConfigureAwait(false))
                    {
                        if (textToSpechResult.TransportStatus != TransportStatus.Ok || textToSpechResult.ResponseCode != HttpStatusCode.OK)
                        {
                            throw new Exception("YandexSpeechKit error: " + textToSpechResult.ResponseCode.ToString());
                        }
                        voiceRespone.VoiceData = textToSpechResult.Result.ToByteArray();
                    }
                }
                return voiceRespone;
            }
Example #2
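Speech-to-text: wraps the raw PCM voice bytes in a MemoryStream, sends them to SpeechKit for recognition, and returns the text of the first recognition variant.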
            public async Task<string> Invork(byte[] voiceData)
            {
                var apiSetttings = new SpeechKitClientOptions($"{AppConfig.YandexSpeechApiKey}", "MashaWebApi", Guid.Empty, "server");

                using (var client = new SpeechKitClient(apiSetttings))
                {
                    var speechRecognitionOptions = new SpeechRecognitionOptions(SpeechModel.Queries, RecognitionAudioFormat.Pcm16K, _language);
                    try
                    {
                        Stream mediaStream = new MemoryStream(voiceData);
                        var    result      = await client.SpeechToTextAsync(speechRecognitionOptions, mediaStream, cancellationToken).ConfigureAwait(false);

                        if (result.TransportStatus != TransportStatus.Ok || result.StatusCode != HttpStatusCode.OK)
                        {
                            throw new Exception("YandexSpeechKit error: " + result.TransportStatus.ToString());
                        }

                        if (!result.Result.Success)
                        {
                            throw new Exception("Unable to recognize speech");
                        }

                        Console.WriteLine(result);

                        var utterances = result.Result.Variants;
                        //Use recognition results
                        return utterances.First().Text;
                    }
                    catch (OperationCanceledException ex)
                    {
                        // Keep the cancellation as the inner exception so the cause is not lost
                        throw new Exception(ex.Message, ex);
                    }
                }
            }
Example #3
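Telegram bot handler: downloads an incoming voice message as an OGG file, recognizes it with SpeechKit, and replies to the chat with the recognized text (first letter capitalized).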
        private static async Task CheckMessagesAsync(Taikandi.Telebot.Types.Message message)
        {
            try
            {
                // Await the Telegram API calls so the async method does not block on them
                var file = await _telebot.GetFileAsync(message.Voice.FileId).ConfigureAwait(false);
                await _telebot.DownloadFileAsync(file, Path.GetFullPath(@".\voice.ogg"), overwrite: true).ConfigureAwait(false);
                var apiSetttings = new SpeechKitClientOptions("1774531e-6a7c-4973-878c-d84d121d9ae1", "Key #1", Guid.Empty, "pc");
                using (var stream = new FileStream(@".\voice.ogg", FileMode.Open))
                using (var client = new SpeechKitClient(apiSetttings))
                {
                    var speechRecognitionOptions = new SpeechRecognitionOptions(SpeechModel.Queries, RecognitionAudioFormat.Ogg, RecognitionLanguage.Russian);
                    try
                    {
                        var result = await client.SpeechToTextAsync(speechRecognitionOptions, stream, CancellationToken.None).ConfigureAwait(false);

                        if (result.TransportStatus != TransportStatus.Ok || result.StatusCode != HttpStatusCode.OK)
                        {
                            await TelegramMessager._telebot.SendMessageAsync(message.Chat.Id, "Ошибка передачи").ConfigureAwait(false);

                            return;
                        }

                        if (!result.Result.Success)
                        {
                            await TelegramMessager._telebot.SendMessageAsync(message.Chat.Id, "Ошибка распознавания").ConfigureAwait(false);

                            return;
                        }

                        var    utterances = result.Result.Variants;
                        string text       = utterances[0].Text;
                        text = text.First().ToString().ToUpper() + text.Substring(1);
                        await TelegramMessager._telebot.SendMessageAsync(message.Chat.Id, text).ConfigureAwait(false);

                        return;
                    }
                    catch (OperationCanceledException)
                    {
                        //Handle operation cancellation
                    }
                }
            }
            catch (Exception exc)
            {
                // Swallow any remaining errors so the handler does not crash the bot; exc could be logged here
            }
        }
Example #4
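Text-to-speech pipeline step: maps the message language to a SynthesisLanguage, synthesizes WAV audio, and returns it as a VoiceMessage.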
        public async System.Threading.Tasks.Task<VoiceMessage> ProcessAsync(TextMessage message)
        {
            switch (message.Language)
            {
            case Core.Enums.Language.English:
                this._language = SynthesisLanguage.English;
                break;

            case Core.Enums.Language.Russian:
                this._language = SynthesisLanguage.Russian;
                break;

            default:
                throw new Exceptions.InvalidMessageException(message.Id, "Invalid Language: " + message.Language.ToString());
            }
            var apiSetttings = new SpeechKitClientOptions($"{YandexCompmnentConfig.YandexSpeechApiKey}", "MashaWebApi", Guid.Empty, "server");

            using (var client = new SpeechKitClient(apiSetttings))
            {
                var options = new SynthesisOptions(message.Text, YandexCompmnentConfig.Speed)
                {
                    AudioFormat = SynthesisAudioFormat.Wav,
                    Language    = _language,
                    Emotion     = Emotion.Good,
                    Quality     = SynthesisQuality.High,
                    Speaker     = Speaker.Omazh
                };

                using (var textToSpechResult = await client.TextToSpeechAsync(options, cancellationToken).ConfigureAwait(false))
                {
                    if (textToSpechResult.TransportStatus != TransportStatus.Ok || textToSpechResult.ResponseCode != HttpStatusCode.OK)
                    {
                        throw new Exception("YandexSpeechKit error: " + textToSpechResult.ResponseCode.ToString());
                    }
                    VoiceMessage result = new VoiceMessage
                    {
                        Id       = message.Id,
                        Language = message.Language,
                        Vioce    = textToSpechResult.Result.ToByteArray()
                    };
                    return result;
                }
            }
        }
Example #5
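Speech-to-text pipeline step: recognizes WAV voice data and returns the highest-confidence recognition variant as a TextMessage.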
        public async System.Threading.Tasks.Task<TextMessage> ProcessAsync(VoiceMessage message)
        {
            switch (message.Language)
            {
            case Core.Enums.Language.English:
                this._language = RecognitionLanguage.English;
                break;

            case Core.Enums.Language.Russian:
                this._language = RecognitionLanguage.Russian;
                break;

            default:
                throw new Exceptions.InvalidMessageException(message.Id, "Invalid Language: " + message.Language.ToString());
            }
            var apiSetttings = new SpeechKitClientOptions($"{YandexCompmnentConfig.YandexSpeechApiKey}", "MashaWebApi", Guid.Empty, "server");

            using (var client = new SpeechKitClient(apiSetttings))
            {
                MemoryStream mediaStream = new MemoryStream(message.Vioce);
                // Recognize using the language selected from message.Language above
                var speechRecognitionOptions = new SpeechRecognitionOptions(SpeechModel.Queries, RecognitionAudioFormat.Wav, _language);
                try
                {
                    var result = await client.SpeechToTextAsync(speechRecognitionOptions, mediaStream, cancellationToken).ConfigureAwait(false);

                    if (result.TransportStatus != TransportStatus.Ok || result.StatusCode != HttpStatusCode.OK)
                    {
                        // Network or request-parameter error
                        throw new Exception("YandexSpeechKit error: " + result.TransportStatus.ToString());
                    }

                    if (!result.Result.Success)
                    {
                        throw new Exception("Unable to recognize speech");
                    }

                    var utterances = result.Result.Variants;
                    if (utterances.Count > 0)
                    {
                        var max = utterances[0];
                        foreach (var item in utterances)
                        {
                            if (item.Confidence > max.Confidence)
                            {
                                max = item;
                            }
                        }
                        TextMessage res = new TextMessage()
                        {
                            Id       = message.Id,
                            Language = message.Language,
                            Text     = max.Text
                        };
                        return res;
                    }
                    throw new Exception("invdlid answer");
                }
                catch (OperationCanceledException ex)
                {
                    // Surface the cancellation, preserving the original exception
                    throw new Exception("Speech recognition was cancelled", ex);
                }
            }
        }