Example #1
0
        private Boolean Work()
        {
            // Splits the multi-line input of textBox1 into "path<TAB>text" records,
            // synthesizes each text via the VoiceText Web API, and writes the result
            // to .\output\<path>.mp3. Returns false on the first error, true otherwise.
            string[] lines = textBox1.Text.Trim('\r', '\n').Split(new string[] { "\r\n" }, StringSplitOptions.None);

            // BUG FIX: System.Delegate is abstract -- `new Delegate(...)` does not compile.
            // Wrap the method group in a concrete delegate type for Control.Invoke.
            // (Assumes setMax/setValue each take a single int -- confirm against their declarations.)
            Invoke(new Action<int>(setMax), lines.Length);

            for (int i = 0; i < lines.Length; i++)
            {
                var line = lines[i];

                // Blank lines are skipped rather than reported as parse errors.
                if (line == "")
                {
                    continue;
                }

                string[] path_text = line.Split('\t');

                if (path_text.Length != 2)
                {
                    // "Line N cannot be parsed." (1-based line number for the user)
                    MessageBox.Show((i + 1) + "行目を解析できません。",
                                    "Error",
                                    MessageBoxButtons.OK,
                                    MessageBoxIcon.Error);
                    return false;
                }

                string path = path_text[0];
                string text = path_text[1];

                var bytes = client.GetVoiceAsync(text);
                try
                {
                    // Blocking on .Result here presumably happens off the UI thread
                    // (this method marshals UI work via Invoke) -- confirm the caller.
                    // Task faults surface as AggregateException below.
                    File.WriteAllBytes(".\\output\\" + path + ".mp3", bytes.Result);
                }
                catch (AggregateException e)
                {
                    // BUG FIX: the "unauthorized" text is carried by the inner exception,
                    // not by the AggregateException wrapper's own Message (the generic
                    // branch below already reads e.InnerException.Message).
                    if (e.InnerException != null && e.InnerException.Message == "unauthorized")
                    {
                        // "Cannot authenticate with the specified API key."
                        MessageBox.Show("指定されたAPIキーで認証できません。",
                                        "Error",
                                        MessageBoxButtons.OK,
                                        MessageBoxIcon.Error);
                        return false;
                    }
                    // "The VoiceText Web API returned an error."
                    MessageBox.Show("VoiceText Web APIからエラーを返却されました。\n" + e.InnerException.Message,
                                    "Error",
                                    MessageBoxButtons.OK,
                                    MessageBoxIcon.Error);
                    return false;
                }
                Invoke(new Action<int>(setValue), i + 1);
            }
            return true;
        }
Example #2
0
        /// <summary>
        /// Azure Function (HTTP trigger): takes a DialogFlow webhook request, renders the
        /// recognized utterance to MP3 via the VoiceText Web API, stores it in Azure Blob
        /// Storage, and replies with SSML pointing Google Home at the stored audio.
        /// </summary>
        /// <param name="req">Incoming HTTP request carrying the DialogFlow payload.</param>
        /// <param name="mp3Out">Blob binding for mp3/voice.mp3 (ファイル置き場, output target).</param>
        /// <param name="log">Azure Functions trace writer.</param>
        /// <returns>HTTP 200 with a fulfillment JSON body; rethrows on failure.</returns>
        public static async Task <HttpResponseMessage> Run(
            [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequestMessage req
            , /* Azure Blob Storage(ファイル置き場) への出力 */ [Blob("mp3/voice.mp3", FileAccess.ReadWrite)] CloudBlockBlob mp3Out
            , TraceWriter log
            )
        {
            log.Info("C# HTTP trigger function processed a request.");
            try
            {
                var data = await req.Content.ReadAsAsync <Models.DialogFlowResponseModel>();

                var say = data.QueryRequest.QueryText;
                log.Info("SAY: " + say);

                // VoiceText Web API に投げる処理 (call the VoiceText Web API)
                var key             = ConfigurationManager.AppSettings.Get("VoiceTextAPIKey");
                var voiceTextClient = new VoiceTextClient
                {
                    APIKey       = key,
                    Speaker      = Speaker.Bear,
                    Emotion      = Emotion.Anger,
                    EmotionLevel = EmotionLevel.High,
                    Format       = Format.MP3
                };
                var bytes = await voiceTextClient.GetVoiceAsync(text : say);

                // Write (save) the audio to Azure Blob Storage.
                await mp3Out.UploadFromByteArrayAsync(buffer : bytes, index : 0, count : bytes.Length);

                // URL for reaching the mp3 just written to Azure Blob Storage.
                var mp3Url = mp3Out.Uri;
                log.Info("MP3: " + mp3Url);

                var response =
                    "{" +
                    "  \"fulfillmentText\": " + $"\"<speak><audio src='{mp3Url}' /></speak>\"" +
                    "}";
                log.Info("Res: " + response);
                var result = req.CreateResponse(HttpStatusCode.OK, response);
                // NOTE(review): "ContentType" is not a standard header name; the intent is
                // presumably "Content-Type" on result.Content.Headers -- confirm before changing,
                // since CreateResponse already sets a content type via its formatter.
                result.Headers.Add("ContentType", "application/json");
                return(result);
            }
            catch (Exception e)
            {
                log.Info(e.GetType().Name + "\n" + e.StackTrace);
                // BUG FIX: `throw e;` resets the stack trace to this frame; a bare
                // `throw;` rethrows the original exception with its trace intact (CA2200).
                throw;
            }
        }
        /// <summary>
        /// Demo entry point: asks the VoiceText Web API for a Haruka/Happiness
        /// rendering of a greeting and saves the returned WAV next to the executable.
        /// </summary>
        static async Task Main()
        {
            var voiceTextClient = new VoiceTextClient
            {
                APIKey       = "{your API key here.}",
                Speaker      = Speaker.Haruka,
                Emotion      = Emotion.Happiness,
                EmotionLevel = EmotionLevel.High,
                Volume       = 50,
                Speed        = 120,
                Pitch        = 120,
                Format       = Format.WAV
            };

            // "こんにちは。" = "Hello." -- the text to synthesize.
            var waveBytes = await voiceTextClient.GetVoiceAsync("こんにちは。");
            File.WriteAllBytes(".\\result.wav", waveBytes);
        }
Example #4
0
        /// <summary>
        /// Azure Function (HTTP trigger): synthesizes the DialogFlow query text to MP3
        /// via the VoiceText Web API, stores it in Azure Blob Storage, and answers with
        /// a speech/displayText payload for Google Home / Google Assistant.
        /// </summary>
        /// <param name="req">Incoming HTTP request carrying the DialogFlow payload.</param>
        /// <param name="mp3Out">Blob binding for mp3/voice.mp3 (ファイル置き場, output target).</param>
        /// <param name="log">Azure Functions trace writer.</param>
        /// <returns>HTTP 200 whose body holds the SSML reply and display text.</returns>
        public static async Task <HttpResponseMessage> Run(
            [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequestMessage req
            , /* Azure Blob Storage(ファイル置き場) への出力 */ [Blob("mp3/voice.mp3", FileAccess.ReadWrite)] CloudBlockBlob mp3Out
            , TraceWriter log
            )
        {
            log.Info("C# HTTP trigger function processed a request.");

            // Pull the recognized utterance out of the DialogFlow webhook payload.
            var request = await req.Content.ReadAsAsync <Models.DialogFlowResponseModel>();
            var queryText = request.Result.ResolvedQuery;

            // Ask the VoiceText Web API to synthesize the utterance as MP3 audio.
            var client = new VoiceTextClient
            {
                APIKey       = Keys.APIKeys.VoiceTextWebApiKey,
                Speaker      = Speaker.Bear,
                Emotion      = Emotion.Anger,
                EmotionLevel = EmotionLevel.High,
                Format       = Format.MP3
            };
            var voiceBytes = await client.GetVoiceAsync(text : queryText);

            // Persist the audio to Azure Blob Storage so it is reachable by URL.
            await mp3Out.UploadFromByteArrayAsync(buffer : voiceBytes, index : 0, count : voiceBytes.Length);
            var blobUri = mp3Out.Uri;

            var payload = new
            {
                // SSML handed to Google Home so it plays the stored mp3.
                speech = $"<speak><audio src='{blobUri}' /></speak>",
                // Text echoed in the Google Assistant chat view.
                displayText = $"「{queryText}」"
            };
            var response = req.CreateResponse(HttpStatusCode.OK, payload);
            response.Headers.Add("ContentType", "application/json");
            return response;
        }
Example #5
0
        /// <summary>
        /// Returns an MP3 rendering of <paramref name="value"/>: the VoiceText Web API
        /// produces WAV, which is transcoded to MP3 via LAME; results are cached per input.
        /// </summary>
        /// <param name="value">Text to synthesize (also part of the cache key).</param>
        /// <returns>A file result with content type "audio/mp3".</returns>
        public async Task<ActionResult> Get(string value)
        {
            // GUID prefix namespaces this controller's entries within the shared cache.
            var cacheKey = "14a49c010e4043d19ba3a17219b41421/" + value;
            var cache = this.HttpContext.Cache;
            var voiceMP3Bytes = cache.Get(cacheKey) as byte[];
            
            if (voiceMP3Bytes == null)
            {
                // Convert text to voice data (wave format).
                var voiceTextClient = new VoiceTextClient
                {
                    APIKey = AppSettings.VoiceTextAPI.Key,
                    Speaker = Speaker.Hikari,
                    Emotion = Emotion.Happiness,
                    EmotionLevel = EmotionLevel.High
                };
                var voiceWaveBytes = await voiceTextClient.GetVoiceAsync(value);

                // Convert voice data format from wave to MP3.
                // AddBinPath() presumably makes the native LAME binaries resolvable
                // before LameMP3FileWriter is used -- confirm against its definition.
                AddBinPath();
                var bitRate = int.Parse(AppSettings.Mp3.BitRate);
                using (var msDst = new MemoryStream())
                {
                    // NOTE: the inner usings dispose the MP3 writer BEFORE
                    // msDst.ToArray(), so any buffered audio is flushed into msDst
                    // first. Keep this nesting/order intact.
                    using (var msSrc = new MemoryStream(voiceWaveBytes, writable: false))
                    using (var reader = new WaveFileReader(msSrc))
                    using (var writer = new LameMP3FileWriter(msDst, reader.WaveFormat, bitRate))
                        reader.CopyTo(writer);
                    voiceMP3Bytes = msDst.ToArray();
                }

                // Keep in cache.
                cache.Insert(cacheKey, voiceMP3Bytes);
            }
            
            return File(voiceMP3Bytes, "audio/mp3");
        }
Example #6
0
        /// <summary>
        /// Azure Function (HTTP trigger) for DialogFlow v2: synthesizes the query text
        /// to MP3 via the VoiceText Web API, stores it in Azure Blob Storage, and returns
        /// a WebhookResponse-shaped payload that makes Google Home play the audio.
        /// </summary>
        /// <param name="req">Incoming HTTP request carrying the DialogFlow v2 payload.</param>
        /// <param name="mp3Out">Blob binding for mp3/voice.mp3 (ファイル置き場, output target).</param>
        /// <param name="log">Azure Functions trace writer.</param>
        /// <returns>HTTP 200 with the webhook response, or HTTP 500 on error.</returns>
        public static async Task <HttpResponseMessage> Run(
            [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequestMessage req
            , /* Azure Blob Storage(ファイル置き場) への出力 */ [Blob("mp3/voice.mp3", FileAccess.ReadWrite)] CloudBlockBlob mp3Out
            , TraceWriter log
            )
        {
            try
            {
                log.Info("C# HTTP trigger function processed a request.");

                // Extract the user's utterance from the DialogFlow v2 webhook request.
                var request = await req.Content.ReadAsAsync <Models.DialogFlowRequestModel>();
                var queryText = request.QueryResult.QueryText;

                // Synthesize the utterance as MP3 via the VoiceText Web API.
                var client = new VoiceTextClient
                {
                    APIKey       = Keys.APIKeys.VoiceTextWebApiKey,
                    Speaker      = Speaker.Bear,
                    Emotion      = Emotion.Anger,
                    EmotionLevel = EmotionLevel.High,
                    Format       = Format.MP3
                };
                var voiceBytes = await client.GetVoiceAsync(text : queryText);

                // Save the audio to Azure Blob Storage and capture its URL.
                await mp3Out.UploadFromByteArrayAsync(buffer : voiceBytes, index : 0, count : voiceBytes.Length);
                var blobUri = mp3Out.Uri;

                // DialogFlow v2 requires the webhook reply to follow the standard
                // WebhookResponse data shape; build it bottom-up from named pieces.
                // See https://developers.google.com/assistant/actions/build/json/dialogflow-webhook-json
                // and https://cloud.google.com/dialogflow/docs/reference/rpc/google.cloud.dialogflow.v2#webhookresponse
                var simpleResponse = new Models.SimpleResponse
                {
                    // SSML handed to Google Home so it plays the stored mp3.
                    SSML = $"<speak><audio src='{blobUri}' /></speak>",
                    // Text shown in the Google Assistant chat view.
                    DisplayText = $"「{queryText}」"
                };
                var richResponse = new Models.RichResponse
                {
                    Items = new Models.Item[]
                    {
                        new Models.Item { SimpleResponse = simpleResponse }
                    }
                };
                var webhookResponse = new Models.DialogFlowResponseModel
                {
                    Payload = new Models.Payload
                    {
                        Google = new Models.Google
                        {
                            ExpectUserResponse = false,
                            RichResponse       = richResponse
                        }
                    }
                };

                var reply = req.CreateResponse(HttpStatusCode.OK, webhookResponse);
                reply.Headers.Add("ContentType", "application/json");
                return reply;
            }
            catch (Exception ex)
            {
                log.Error("An exception occurred in GoogleHome.Run", ex);
                return req.CreateErrorResponse(HttpStatusCode.InternalServerError, ex);
            }
        }