/// <summary>
        /// Generates a WAVE file.
        /// </summary>
        /// <param name="textToSpeak">
        /// Text to speak</param>
        /// <param name="wave">
        /// Path of the WAVE file</param>
        private void CreateWave(
            string textToSpeak,
            string wave)
        {
            var client = new VoiceTextClient()
            {
                APIKey       = Settings.Default.HOYASettings.APIKey,
                Speaker      = Settings.Default.HOYASettings.Speaker,
                Emotion      = Settings.Default.HOYASettings.Emotion,
                EmotionLevel = Settings.Default.HOYASettings.EmotionLevel,
                Volume       = Settings.Default.HOYASettings.Volume,
                Speed        = Settings.Default.HOYASettings.Speed,
                Pitch        = Settings.Default.HOYASettings.Pitch,
                Format       = Format.WAV,
            };

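            // Allow TLS 1.1 / 1.2 explicitly so the HTTPS call to the VoiceText
            // Web API succeeds on older .NET Framework runtimes.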
            ServicePointManager.SecurityProtocol =
                SecurityProtocolType.SystemDefault |
                SecurityProtocolType.Tls11 |
                SecurityProtocolType.Tls12;

            var waveData = client.GetVoice(textToSpeak);

            File.WriteAllBytes(wave, waveData);
        }
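A minimal usage sketch for the helper above; the greeting text and output path are illustrative, not from the original:

            // Synthesize a short greeting and write it out as a WAVE file.
            CreateWave("こんにちは。", @".\voice.wav");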
Example #2
        public static async Task<HttpResponseMessage> Run(
            [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequestMessage req
            , /* Output to Azure Blob Storage (file store) */ [Blob("mp3/voice.mp3", FileAccess.ReadWrite)] CloudBlockBlob mp3Out
            , TraceWriter log
            )
        {
            log.Info("C# HTTP trigger function processed a request.");
            try
            {
                var data = await req.Content.ReadAsAsync<Models.DialogFlowResponseModel>();

                var say = data.QueryRequest.QueryText;
                log.Info("SAY: " + say);

                // Call the VoiceText Web API
                var key             = ConfigurationManager.AppSettings.Get("VoiceTextAPIKey");
                var voiceTextClient = new VoiceTextClient
                {
                    APIKey       = key,
                    Speaker      = Speaker.Bear,
                    Emotion      = Emotion.Anger,
                    EmotionLevel = EmotionLevel.High,
                    Format       = Format.MP3
                };
                var bytes = await voiceTextClient.GetVoiceAsync(text: say);

                // Write (save) to Azure Blob Storage
                await mp3Out.UploadFromByteArrayAsync(buffer: bytes, index: 0, count: bytes.Length);

                // URL for accessing the mp3 written to Azure Blob Storage
                var mp3Url = mp3Out.Uri;
                log.Info("MP3: " + mp3Url);

                var response =
                    "{" +
                    "  \"fulfillmentText\": " + $"\"<speak><audio src='{mp3Url}' /></speak>\"" +
                    "}";
                log.Info("Res: " + response);
                // The body is already a JSON string, so return it as-is instead of
                // letting CreateResponse serialize (and double-encode) it.
                var result = req.CreateResponse(HttpStatusCode.OK);
                result.Content = new StringContent(response, System.Text.Encoding.UTF8, "application/json");
                return result;
            }
            catch (Exception e)
            {
                log.Error(e.GetType().Name + "\n" + e.StackTrace);
                throw; // rethrow without resetting the stack trace
            }
        }
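The Models.DialogFlowResponseModel type read above is not shown on this page. A minimal sketch of what the deserialization assumes, using Newtonsoft.Json attributes; mapping the properties to Dialogflow v2's queryResult.queryText is an assumption, not part of the original:

        // Hypothetical minimal request model for the example above; the
        // JsonProperty names (Dialogflow v2 "queryResult.queryText") are assumed.
        public class DialogFlowResponseModel
        {
            [JsonProperty("queryResult")]
            public QueryRequestModel QueryRequest { get; set; }
        }

        public class QueryRequestModel
        {
            [JsonProperty("queryText")]
            public string QueryText { get; set; }
        }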
Example #3
        static async Task Main()
        {
            var client = new VoiceTextClient
            {
                APIKey       = "{your API key here.}",
                Speaker      = Speaker.Haruka,
                Emotion      = Emotion.Happiness,
                EmotionLevel = EmotionLevel.High,
                Volume       = 50,
                Speed        = 120,
                Pitch        = 120,
                Format       = Format.WAV
            };
            var bytes = await client.GetVoiceAsync("こんにちは。");

            File.WriteAllBytes(".\\result.wav", bytes);
        }
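To hear the result right away, one option is the standard System.Media.SoundPlayer API; the call below is an illustration, not part of the original sample:

            // Play the freshly written WAVE file synchronously (Windows only).
            using (var player = new System.Media.SoundPlayer(".\\result.wav"))
            {
                player.PlaySync();
            }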
Example #4
        /// <summary>
        /// Generates a WAVE file.
        /// </summary>
        /// <param name="textToSpeak">
        /// Text to speak</param>
        /// <param name="wave">
        /// Path of the WAVE file</param>
        private void CreateWave(
            string textToSpeak,
            string wave)
        {
            var client = new VoiceTextClient()
            {
                APIKey       = TTSYukkuriConfig.Default.HOYASettings.APIKey,
                Speaker      = TTSYukkuriConfig.Default.HOYASettings.Speaker,
                Emotion      = TTSYukkuriConfig.Default.HOYASettings.Emotion,
                EmotionLevel = TTSYukkuriConfig.Default.HOYASettings.EmotionLevel,
                Volume       = TTSYukkuriConfig.Default.HOYASettings.Volume,
                Speed        = TTSYukkuriConfig.Default.HOYASettings.Speed,
                Pitch        = TTSYukkuriConfig.Default.HOYASettings.Pitch,
            };

            var waveData = client.GetVoice(textToSpeak);

            File.WriteAllBytes(wave, waveData);
        }
Example #5
        public static async Task<HttpResponseMessage> Run(
            [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequestMessage req
            , /* Output to Azure Blob Storage (file store) */ [Blob("mp3/voice.mp3", FileAccess.ReadWrite)] CloudBlockBlob mp3Out
            , TraceWriter log
            )
        {
            log.Info("C# HTTP trigger function processed a request.");
            var data = await req.Content.ReadAsAsync<Models.DialogFlowResponseModel>();

            //log.Info(data);
            var say = data.Result.ResolvedQuery;

            // Call the VoiceText Web API
            var voiceTextClient = new VoiceTextClient
            {
                APIKey       = Keys.APIKeys.VoiceTextWebApiKey,
                Speaker      = Speaker.Bear,
                Emotion      = Emotion.Anger,
                EmotionLevel = EmotionLevel.High,
                Format       = Format.MP3
            };
            var bytes = await voiceTextClient.GetVoiceAsync(text: say);

            // Write (save) to Azure Blob Storage
            await mp3Out.UploadFromByteArrayAsync(buffer: bytes, index: 0, count: bytes.Length);

            // URL for accessing the mp3 written to Azure Blob Storage
            var mp3Url = mp3Out.Uri;

            var result = req.CreateResponse(HttpStatusCode.OK, new
            {
                // The text Google Home should speak (an mp3 in this case)
                speech = $"<speak><audio src='{mp3Url}' /></speak>",
                // The string to show on the Google Assistant chat screen
                displayText = $"「{say}」"
            });

            // Set Content-Type on the response content; CreateResponse has
            // already serialized the anonymous object as JSON.
            result.Content.Headers.ContentType =
                new System.Net.Http.Headers.MediaTypeHeaderValue("application/json");
            return result;
        }
Example #6
        /// <summary>
        /// Generates a WAVE file.
        /// </summary>
        /// <param name="textToSpeak">
        /// Text to speak</param>
        /// <param name="wave">
        /// Path of the WAVE file</param>
        private void CreateWave(
            string textToSpeak,
            string wave)
        {
            var client = new VoiceTextClient()
            {
                APIKey       = Settings.Default.HOYASettings.APIKey,
                Speaker      = Settings.Default.HOYASettings.Speaker,
                Emotion      = Settings.Default.HOYASettings.Emotion,
                EmotionLevel = Settings.Default.HOYASettings.EmotionLevel,
                Volume       = Settings.Default.HOYASettings.Volume,
                Speed        = Settings.Default.HOYASettings.Speed,
                Pitch        = Settings.Default.HOYASettings.Pitch,
                Format       = Format.WAV,
            };

            // Set the TLS protocol
            EnvironmentHelper.SetTLSProtocol();

            var waveData = client.GetVoice(textToSpeak);

            File.WriteAllBytes(wave, waveData);
        }
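EnvironmentHelper.SetTLSProtocol() is not shown on this page. Judging from the ServicePointManager setup in the first example, a plausible sketch of the helper (its body is an assumption):

        // Hypothetical helper mirroring the ServicePointManager setup from the
        // first example on this page (SystemDefault requires .NET Framework 4.7+).
        public static class EnvironmentHelper
        {
            public static void SetTLSProtocol()
            {
                System.Net.ServicePointManager.SecurityProtocol =
                    System.Net.SecurityProtocolType.SystemDefault |
                    System.Net.SecurityProtocolType.Tls11 |
                    System.Net.SecurityProtocolType.Tls12;
            }
        }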
Example #7
        public async Task<ActionResult> Get(string value)
        {
            var cacheKey = "14a49c010e4043d19ba3a17219b41421/" + value;
            var cache = this.HttpContext.Cache;
            var voiceMP3Bytes = cache.Get(cacheKey) as byte[];
            
            if (voiceMP3Bytes == null)
            {
                // Convert text to voice data (wave format).
                var voiceTextClient = new VoiceTextClient
                {
                    APIKey = AppSettings.VoiceTextAPI.Key,
                    Speaker = Speaker.Hikari,
                    Emotion = Emotion.Happiness,
                    EmotionLevel = EmotionLevel.High
                };
                var voiceWaveBytes = await voiceTextClient.GetVoiceAsync(value);

                // Convert voice data format from wave to MP3.
                AddBinPath();
                var bitRate = int.Parse(AppSettings.Mp3.BitRate);
                using (var msDst = new MemoryStream())
                {
                    using (var msSrc = new MemoryStream(voiceWaveBytes, writable: false))
                    using (var reader = new WaveFileReader(msSrc))
                    using (var writer = new LameMP3FileWriter(msDst, reader.WaveFormat, bitRate))
                        reader.CopyTo(writer);
                    voiceMP3Bytes = msDst.ToArray();
                }

                // Keep in cache.
                cache.Insert(cacheKey, voiceMP3Bytes);
            }
            
            return File(voiceMP3Bytes, "audio/mp3");
        }
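AddBinPath() is referenced above but not defined on this page. NAudio.Lame loads a native MP3 encoder DLL at run time, so the helper most likely puts the web app's bin folder on the DLL search path; the sketch below is an assumption, not the original helper:

        // Hypothetical helper: prepend the web app's bin folder to PATH so the
        // native LAME encoder DLL used by LameMP3FileWriter can be found.
        private static void AddBinPath()
        {
            var binPath = System.Web.HttpRuntime.BinDirectory;
            var path    = Environment.GetEnvironmentVariable("PATH") ?? "";
            if (path.IndexOf(binPath, StringComparison.OrdinalIgnoreCase) < 0)
            {
                Environment.SetEnvironmentVariable("PATH", binPath + ";" + path);
            }
        }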
Example #8
        public static async Task<HttpResponseMessage> Run(
            [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequestMessage req
            , /* Output to Azure Blob Storage (file store) */ [Blob("mp3/voice.mp3", FileAccess.ReadWrite)] CloudBlockBlob mp3Out
            , TraceWriter log
            )
        {
            try
            {
                log.Info("C# HTTP trigger function processed a request.");
                var data = await req.Content.ReadAsAsync<Models.DialogFlowRequestModel>();

                //log.Info(data);
                var say = data.QueryResult.QueryText;

                // Call the VoiceText Web API
                var voiceTextClient = new VoiceTextClient
                {
                    APIKey       = Keys.APIKeys.VoiceTextWebApiKey,
                    Speaker      = Speaker.Bear,
                    Emotion      = Emotion.Anger,
                    EmotionLevel = EmotionLevel.High,
                    Format       = Format.MP3
                };
                var bytes = await voiceTextClient.GetVoiceAsync(text: say);

                // Write (save) to Azure Blob Storage
                await mp3Out.UploadFromByteArrayAsync(buffer: bytes, index: 0, count: bytes.Length);

                // URL for accessing the mp3 written to Azure Blob Storage
                var mp3Url = mp3Out.Uri;
                // In Dialogflow v2, the webhook response to Dialogflow must use the standard data format.
                // See https://developers.google.com/assistant/actions/build/json/dialogflow-webhook-json and https://cloud.google.com/dialogflow/docs/reference/rpc/google.cloud.dialogflow.v2#webhookresponse
                var response = new Models.DialogFlowResponseModel
                {
                    Payload = new Models.Payload
                    {
                        Google = new Models.Google
                        {
                            ExpectUserResponse = false,
                            RichResponse       = new Models.RichResponse
                            {
                                Items = new Models.Item[]
                                {
                                    new Models.Item
                                    {
                                        SimpleResponse = new Models.SimpleResponse
                                        {
                                            // The text Google Home should speak (an mp3 in this case)
                                            SSML = $"<speak><audio src='{mp3Url}' /></speak>",
                                            // The string to show on the Google Assistant chat screen
                                            DisplayText = $"「{say}」"
                                        }
                                    }
                                }
                            }
                        }
                    }
                };
                var result = req.CreateResponse(HttpStatusCode.OK, response);
                result.Content.Headers.ContentType =
                    new System.Net.Http.Headers.MediaTypeHeaderValue("application/json");
                return result;
            }
            catch (Exception ex)
            {
                log.Error("An exception occurred in GoogleHome.Run", ex);
                var result = req.CreateErrorResponse(HttpStatusCode.InternalServerError, ex);
                return result;
            }
        }
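The Models.* response classes are not shown on this page. For the webhook JSON linked above to come out right, the SimpleResponse members need camel-cased names on the wire; a minimal sketch with Newtonsoft.Json attributes, where the exact attribute usage is an assumption:

        // Hypothetical sketch of Models.SimpleResponse; the JsonProperty names
        // follow the Dialogflow v2 webhook JSON format linked above.
        public class SimpleResponse
        {
            [JsonProperty("ssml")]
            public string SSML { get; set; }

            [JsonProperty("displayText")]
            public string DisplayText { get; set; }
        }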
Example #9
        public Form1()
        {
            InitializeComponent();

            XmlSerializer serializer  = new XmlSerializer(typeof(Settings));
            Settings      appSettings = new Settings();

            if (!File.Exists("settings.xml"))
            {
                // new
                StreamWriter sw = new StreamWriter("settings.xml", false, new UTF8Encoding(false));
                serializer.Serialize(sw, appSettings);
                sw.Close();
            }
            StreamReader sr = new StreamReader("settings.xml", new UTF8Encoding(false));

            appSettings = (Settings)serializer.Deserialize(sr);
            sr.Close();

            if (appSettings.Apikey == "ENTER YOUR APIKEY HERE")
            {
                MessageBox.Show("settings.xmlでApikeyを設定してください。",
                                "Error",
                                MessageBoxButtons.OK,
                                MessageBoxIcon.Error);
                Close();
                return;
            }

            if (appSettings.Speaker == "show")
            {
                speaker = Speaker.Show;
            }
            else if (appSettings.Speaker == "haruka")
            {
                speaker = Speaker.Haruka;
            }
            else if (appSettings.Speaker == "hikari")
            {
                speaker = Speaker.Hikari;
            }
            else if (appSettings.Speaker == "takeru")
            {
                speaker = Speaker.Takeru;
            }
            else if (appSettings.Speaker == "santa")
            {
                speaker = Speaker.Santa;
            }
            else if (appSettings.Speaker == "bear")
            {
                speaker = Speaker.Bear;
            }
            client = new VoiceTextClient
            {
                APIKey  = appSettings.Apikey,
                Speaker = speaker,
                Volume  = appSettings.Volume,
                Speed   = appSettings.Speed,
                Pitch   = appSettings.Pitch,
                Format  = Format.MP3
            };

            if (!Directory.Exists(".\\output\\"))
            {
                Directory.CreateDirectory(".\\output\\");
            }

            label2.Text = "Setting\n" +
                          "Speaker: " + appSettings.Speaker + "\n" +
                          "Volume: " + appSettings.Volume + "\n" +
                          "Speed: " + appSettings.Speed + "\n" +
                          "Pitch: " + appSettings.Pitch;
        }
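The Settings class serialized above is not shown on this page. A minimal sketch of the members this constructor reads, with types inferred from usage; the default values are assumptions (except the Apikey placeholder, which appears in the code above):

        // Hypothetical minimal settings class inferred from the usage above.
        public class Settings
        {
            public string Apikey  { get; set; } = "ENTER YOUR APIKEY HERE";
            public string Speaker { get; set; } = "haruka";
            public int    Volume  { get; set; } = 100;
            public int    Speed   { get; set; } = 100;
            public int    Pitch   { get; set; } = 100;
        }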