예제 #1
0
        /// <summary>
        /// Fetches a random dad joke from icanhazdadjoke.com, synthesizes it with
        /// Amazon Polly (neural engine) and posts the resulting MP3 as a file
        /// attachment, then deletes the invoking message.
        /// </summary>
        /// <param name="ctx">Command context of the invoking Discord message.</param>
        public async Task JokeDump(CommandContext ctx)
        {
            var client  = new RestClient("https://icanhazdadjoke.com");
            var request = new RestRequest();

            request.Method = Method.GET;
            request.AddHeader("Accept", "text/plain"); // ask the API for plain text instead of JSON
            var response = client.Execute(request);

            // Bail out early if the joke service did not return usable text
            // (the original passed a possibly-null Content straight to Polly).
            if (!response.IsSuccessful || string.IsNullOrWhiteSpace(response.Content))
            {
                await ctx.RespondAsync("Could not fetch a joke right now, try again later.");
                return;
            }

            var actualVoice = VoiceId.FindValue(Settings.PSettings.PollyVoice);

            var speechResponse = await Program.Polly.SynthesizeSpeechAsync(new SynthesizeSpeechRequest
            {
                Engine       = Engine.Neural,
                LanguageCode = LanguageCode.EnUS,
                OutputFormat = OutputFormat.Mp3,
                SampleRate   = "24000",
                TextType     = TextType.Text,
                Text         = response.Content,
                VoiceId      = actualVoice
            });

            // Attach the synthesized audio stream directly; no temp file is needed
            // (the unused Guid/path locals from the old file-based approach are gone).
            await ctx.RespondWithFileAsync(DateTime.UtcNow.ToString("F") + ".Mp3", speechResponse.AudioStream);

            await ctx.Message.DeleteAsync();
        }
예제 #2
0
        /// <summary>
        /// Sets the active speaker and resolves its Polly voice from the i18n
        /// table, falling back to a gender-specific default, then the global
        /// default, then <see cref="VoiceId.Salli"/>.
        /// </summary>
        /// <param name="name">Speaker name, used as the i18n lookup key.</param>
        /// <param name="female">Whether the gender fallback uses the female default voice.</param>
        public static void setVoice(string name, bool female = true)
        {
            speakerName = name;

            string t = PelicanTTSMod.i18n.Get(name);

            // No translation entry for this speaker: use the gender-specific default.
            // (string.IsNullOrEmpty also survives a null lookup result, unlike the
            // previous t.ToString() == "" check.)
            if (string.IsNullOrEmpty(t))
            {
                t = PelicanTTSMod.i18n.Get("default_" + (female ? "female" : "male"));
            }

            if (VoiceId.FindValue(t) is VoiceId vId1)
            {
                currentVoice = vId1;
            }
            else if (VoiceId.FindValue(PelicanTTSMod.i18n.Get("default")) is VoiceId vId2)
            {
                currentVoice = vId2;
            }
            else
            {
                // Last resort: reset the speaker and use a voice that always exists.
                speakerName  = "default";
                currentVoice = VoiceId.Salli;
            }
        }
예제 #3
0
        /// <summary>
        /// Sets the active speaker and resolves its Polly voice. For English
        /// locales a per-speaker override from the mod config takes precedence
        /// over the i18n table; fallbacks are gender default, global default,
        /// then <see cref="VoiceId.Salli"/>.
        /// </summary>
        /// <param name="name">Speaker name, used as the i18n/config lookup key.</param>
        /// <param name="female">Whether the gender fallback uses the female default voice.</param>
        public static void setVoice(string name, bool female = true)
        {
            speakerName = name;

            string t = PelicanTTSMod.i18n.Get(name);

            // English players may override individual speaker voices in the config.
            if (PelicanTTSMod.i18n.LocaleEnum == LocalizedContentManager.LanguageCode.en && PelicanTTSMod.config.Voices.ContainsKey(name))
            {
                t = PelicanTTSMod.config.Voices[name].Voice;
            }

            // No entry found for this speaker: use the gender-specific default.
            // (string.IsNullOrEmpty also survives a null lookup result, unlike the
            // previous t.ToString() == "" check.)
            if (string.IsNullOrEmpty(t))
            {
                t = PelicanTTSMod.i18n.Get("default_" + (female ? "female" : "male"));
            }

            if (VoiceId.FindValue(t) is VoiceId vId1)
            {
                currentVoice = vId1;
            }
            else if (VoiceId.FindValue(PelicanTTSMod.i18n.Get("default")) is VoiceId vId2)
            {
                currentVoice = vId2;
            }
            else
            {
                // Last resort: reset the speaker and use a voice that always exists.
                speakerName  = "default";
                currentVoice = VoiceId.Salli;
            }
        }
예제 #4
0
        /// <summary>
        /// Reports the currently configured Polly voice in the channel and
        /// removes the invoking message.
        /// </summary>
        /// <param name="ctx">Command context of the invoking Discord message.</param>
        public async Task getVoice(CommandContext ctx)
        {
            var configuredVoice = VoiceId.FindValue(Settings.PSettings.PollyVoice);
            await ctx.RespondAsync($"Current voice: {configuredVoice}");

            await ctx.Message.DeleteAsync();
        }
        /// <summary>
        /// Creates the synthesizer. When configuration options are present, the
        /// configured voice-id string is resolved to a Polly <c>VoiceId</c>.
        /// </summary>
        /// <param name="amazonPolly">Polly client used for synthesis.</param>
        /// <param name="configOptions">Optional synthesizer configuration carrying the voice id.</param>
        public PollySpeechSynthesizer(IAmazonPolly amazonPolly, IOptions <PollySpeechSynthesizerConfiguration> configOptions)
        {
            // Null-conditional access collapses the old two-step null check.
            if (configOptions?.Value != null)
            {
                voiceId = VoiceId.FindValue(configOptions.Value.VoiceId);
            }

            _amazonPolly = amazonPolly;
        }
예제 #6
0
 /// <summary>
 /// Configures the <c>AllowedConversationVoice</c> entity: a composite key of
 /// channel + voice, with the Polly <c>VoiceId</c> persisted as its string name.
 /// </summary>
 /// <param name="modelBuilder">EF Core model builder to configure.</param>
 protected internal static void BuildModel(ModelBuilder modelBuilder)
 {
     modelBuilder.Entity <AllowedConversationVoice>(builder =>
     {
         builder.HasKey(nameof(ChannelId), nameof(VoiceId));

         // VoiceId is not a plain enum, so store its string form in the column
         // and re-resolve it through FindValue when materializing.
         builder.Property(e => e.VoiceId).HasConversion(
             voice => voice.ToString(),
             stored => VoiceId.FindValue(stored));
     });
 }
예제 #7
0
        /// <summary>
        /// Synthesizes SSML input with Amazon Polly and plays the result in the
        /// caller's voice channel via Lavalink. User mentions in the text are
        /// replaced with the member's nickname (or display name) before synthesis.
        /// </summary>
        /// <param name="ctx">Command context of the invoking Discord message.</param>
        /// <param name="textToSpeak">SSML markup to synthesize.</param>
        public async Task speakssml(CommandContext ctx, [RemainingText] string textToSpeak)
        {
            try
            {
                // Reuse an existing player for this guild or join the caller's channel.
                var player = Program.audioService.GetPlayer <QueuedLavalinkPlayerV2>(ctx.Guild.Id)
                             ?? await Program.audioService.JoinAsync <QueuedLavalinkPlayerV2>(ctx.Guild.Id,
                                                                                              ctx.Member.VoiceState.Channel.Id);

                var actualVoice = VoiceId.FindValue(Settings.PSettings.PollyVoice);

                // Replace raw mention tokens with readable names so Polly speaks them.
                foreach (var user in ctx.Message.MentionedUsers)
                {
                    var member  = await ctx.Guild.GetMemberAsync(user.Id);
                    var callout = member.Nickname.IsNullOrEmpty() ? member.DisplayName : member.Nickname;
                    textToSpeak = textToSpeak.Replace(user.Mention.ToString(), callout);
                }

                var speechResponse = await Program.Polly.SynthesizeSpeechAsync(new SynthesizeSpeechRequest
                {
                    Engine       = Engine.Standard,
                    LanguageCode = LanguageCode.EnUS,
                    OutputFormat = OutputFormat.Mp3,
                    SampleRate   = "24000",
                    TextType     = TextType.Ssml,
                    Text         = textToSpeak,
                    VoiceId      = actualVoice
                });

                // Persist the audio to a temp file so Lavalink can load it as a track.
                // 'using' guarantees the stream is closed even if the copy throws
                // (the old Flush/Close pair leaked the handle on exceptions).
                string path = $@"C:\temp\{Guid.NewGuid()}.Mp3";
                using (var file = new FileStream(path, FileMode.CreateNew, FileAccess.ReadWrite))
                {
                    await speechResponse.AudioStream.CopyToAsync(file);
                }

                var track = await Program.audioService.GetTrackAsync(HttpUtility.UrlEncode(path));
                await player.PlayAsync(track);

                await ctx.Message.DeleteAsync();
            }
            catch (Exception e)
            {
                // Log the failure itself, not just the input text as before —
                // the exception was silently discarded.
                Console.WriteLine(e);
                Console.WriteLine(textToSpeak);
            }
        }
        /// <summary>
        /// Sends a speech-synthesis request to Amazon Polly for the given voice
        /// and text and returns the raw MP3 audio stream.
        /// </summary>
        /// <param name="voice">Voice descriptor; must be an <c>AmazonPollyVoice</c>.</param>
        /// <param name="text">Text to synthesize.</param>
        /// <returns>The MP3 audio stream produced by Polly.</returns>
        public async Task <Stream> SynthesizeTextToStreamAsync(IVoice voice, string text)
        {
            var pollyVoice = (AmazonPollyVoice)voice;

            var request = new SynthesizeSpeechRequest
            {
                Text         = text,
                VoiceId      = VoiceId.FindValue(pollyVoice.VoiceId),
                OutputFormat = OutputFormat.Mp3
            };

            var response = await _client.SynthesizeSpeechAsync(request);

            return response.AudioStream;
        }
예제 #9
0
 /// <summary>
 /// Selects the current Polly voice by raw id, falling back to the translated
 /// default voice and finally to <see cref="VoiceId.Salli"/> when neither resolves.
 /// </summary>
 /// <param name="id">Polly voice id to activate.</param>
 public static void setVoiceById(string id)
 {
     if (VoiceId.FindValue(id) is VoiceId requested)
     {
         currentVoice = requested;
     }
     else if (VoiceId.FindValue(PelicanTTSMod.i18n.Get("default")) is VoiceId fallback)
     {
         currentVoice = fallback;
     }
     else
     {
         // Nothing resolved: reset the speaker and use a guaranteed voice.
         speakerName  = "default";
         currentVoice = VoiceId.Salli;
     }
 }
예제 #10
0
 /// <summary>
 /// Configures the <c>Reward</c> entity: column defaults for its flags and
 /// playback speed, plus string conversions for the Polly voice and engine.
 /// </summary>
 /// <param name="modelBuilder">EF Core model builder to configure.</param>
 protected internal static void BuildModel(ModelBuilder modelBuilder)
 {
     modelBuilder.Entity <Reward>(builder =>
     {
         // Sensible database defaults for newly inserted rewards.
         builder.Property(e => e.IsConversation).HasDefaultValue(true);
         builder.Property(e => e.IsSubOnly).HasDefaultValue(false);
         builder.Property(e => e.Cooldown).HasDefaultValue(0);
         builder.Property(e => e.DefaultPlaybackSpeed).HasDefaultValue(1.0f);

         // Polly's VoiceId/Engine are not plain enums, so persist their string
         // names and re-resolve them through FindValue when materializing.
         builder.Property(e => e.VoiceId).HasConversion(
             voice => voice.ToString(),
             stored => VoiceId.FindValue(stored));
         builder.Property(e => e.VoiceEngine).HasConversion(
             engine => engine.ToString(),
             stored => Engine.FindValue(stored));
     });
 }
예제 #11
0
        /// <summary>
        /// Synthesizes plain text with Amazon Polly (neural engine) and posts the
        /// resulting MP3 as a file attachment, then deletes the invoking message.
        /// User mentions are replaced with the member's nickname before synthesis.
        /// </summary>
        /// <param name="ctx">Command context of the invoking Discord message.</param>
        /// <param name="textToSpeak">Plain text to synthesize.</param>
        public async Task speakdump(CommandContext ctx, [RemainingText] string textToSpeak)
        {
            try
            {
                var actualVoice = VoiceId.FindValue(Settings.PSettings.PollyVoice);

                // Replace raw mention tokens with readable names so Polly speaks them.
                foreach (var user in ctx.Message.MentionedUsers)
                {
                    var member  = await ctx.Guild.GetMemberAsync(user.Id);
                    var callout = member.Nickname.IsNullOrEmpty() ? member.DisplayName : member.Nickname;
                    textToSpeak = textToSpeak.Replace(user.Mention.ToString(), callout);
                }

                var speechResponse = await Program.Polly.SynthesizeSpeechAsync(new SynthesizeSpeechRequest
                {
                    Engine       = Engine.Neural,
                    LanguageCode = LanguageCode.EnUS,
                    OutputFormat = OutputFormat.Mp3,
                    SampleRate   = "24000",
                    TextType     = TextType.Text,
                    Text         = textToSpeak,
                    VoiceId      = actualVoice
                });

                // Attach the synthesized audio stream directly; no temp file is needed
                // (the unused Guid/path locals from the old file-based approach are gone).
                await ctx.RespondWithFileAsync(DateTime.UtcNow.ToString("F") + ".Mp3", speechResponse.AudioStream);

                await ctx.Message.DeleteAsync();
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
                throw; // rethrow with the original stack trace for the command framework
            }
        }
예제 #12
0
        /// <summary>
        /// Fetches a random dad joke from icanhazdadjoke.com, synthesizes it with
        /// Amazon Polly (neural engine) and plays it in the caller's voice channel
        /// via Lavalink, then deletes the invoking message.
        /// </summary>
        /// <param name="ctx">Command context of the invoking Discord message.</param>
        public async Task Joke(CommandContext ctx)
        {
            var client  = new RestClient("https://icanhazdadjoke.com");
            var request = new RestRequest();

            request.Method = Method.GET;
            request.AddHeader("Accept", "text/plain"); // ask the API for plain text instead of JSON
            var response = client.Execute(request);

            // Bail out early if the joke service did not return usable text
            // (the original passed a possibly-null Content straight to Polly).
            if (!response.IsSuccessful || string.IsNullOrWhiteSpace(response.Content))
            {
                await ctx.RespondAsync("Could not fetch a joke right now, try again later.");
                return;
            }

            // Reuse an existing player for this guild or join the caller's channel.
            var player = Program.audioService.GetPlayer <QueuedLavalinkPlayerV2>(ctx.Guild.Id)
                         ?? await Program.audioService.JoinAsync <QueuedLavalinkPlayerV2>(ctx.Guild.Id, ctx.Member.VoiceState.Channel.Id);

            var actualVoice = VoiceId.FindValue(Settings.PSettings.PollyVoice);

            var speechResponse = await Program.Polly.SynthesizeSpeechAsync(new SynthesizeSpeechRequest
            {
                Engine       = Engine.Neural,
                LanguageCode = LanguageCode.EnUS,
                OutputFormat = OutputFormat.Mp3,
                SampleRate   = "24000",
                TextType     = TextType.Text,
                Text         = response.Content,
                VoiceId      = actualVoice
            });

            // Persist the audio to a temp file so Lavalink can load it as a track.
            // 'using' guarantees the stream is closed even if the copy throws
            // (the old Flush/Close pair leaked the handle on exceptions).
            string path = $@"C:\temp\{Guid.NewGuid()}.Mp3";
            using (var file = new FileStream(path, FileMode.CreateNew, FileAccess.ReadWrite))
            {
                await speechResponse.AudioStream.CopyToAsync(file);
            }

            var track = await Program.audioService.GetTrackAsync(HttpUtility.UrlEncode(path));
            await player.PlayAsync(track);

            await ctx.Message.DeleteAsync();
        }
예제 #13
0
        /// <summary>
        /// Synthesizes the given word/phrase with Amazon Polly using the voice
        /// configured for the requested language and returns the MP3 bytes.
        /// </summary>
        /// <param name="query">Text to synthesize.</param>
        /// <param name="languageCode">Application language code used to look up the language/voice pair.</param>
        /// <returns>The synthesized audio as an MP3 byte array.</returns>
        public async Task <byte[]> Speech(string query, string languageCode)
        {
            var lang = _languageService.GetLanguage(languageCode);

            // The Polly client holds network resources; dispose it when done
            // (the original leaked a new client on every call).
            using (var client = new AmazonPollyClient(_awsPollyOptions.Value.awsAccessKeyId, _awsPollyOptions.Value.awsSecretAccessKey, RegionEndpoint.USEast2))
            {
                var synthesizeSpeechRequest = new SynthesizeSpeechRequest()
                {
                    OutputFormat = OutputFormat.Mp3,
                    LanguageCode = lang.LanguageCode,
                    VoiceId      = VoiceId.FindValue(lang.VoiceId),
                    Text         = query
                };

                var synthesizeSpeechResponse = await client.SynthesizeSpeechAsync(synthesizeSpeechRequest);

                using (MemoryStream mem = new MemoryStream())
                {
                    // Copy asynchronously so a thread is not blocked on network I/O.
                    await synthesizeSpeechResponse.AudioStream.CopyToAsync(mem);
                    return mem.ToArray();
                }
            }
        }
예제 #14
0
        /// <summary>
        /// PollyNotes-DictateFunction
        ///
        /// This lambda function is integrated with the following API methods:
        /// /notes/{id}/POST
        ///
        /// This function does the following:
        ///
        /// 1. Takes a JSON payload from API gateway and converts it into a DictateRequest
        /// 2. Queries DynamoDB for the note from the request to fetch the note text
        /// 3. Calls the Polly SynthesizeSpeech API to convert text to speech
        /// 4. Stores the resulting audio in an MP3 file
        /// 5. Uploads the MP3 file to S3
        /// 6. Creates a pre-signed URL for the MP3 file
        /// 7. Returns the pre-signed URL to API Gateway
        /// </summary>
        /// <param name="request">DictateRequest containing the voiceId and the note to create an mp3 file from</param>
        /// <param name="context">Lambda context</param>
        /// <returns>string of the URL for the pre-signed mp3 file from S3; empty string when the note lookup fails</returns>
        public string FunctionHandler(DictateRequest request, ILambdaContext context)
        {
            // The note object contains the voiceId, userId and noteId from the /notes/{id}/POST
            // {
            //   "voiceId": "...",
            //     "note": {
            //       "userId": "...",
            //       "noteId": "..."
            //     }
            // }

            Console.WriteLine("Initiating PollyNotes-DictateFunction...");
            Console.WriteLine("DictateRequest received: " + JsonConvert.SerializeObject(request));

            // Get the name of the bucketName from the environment variable MP3_BUCKET_NAME
            string bucketName = Environment.GetEnvironmentVariable("MP3_BUCKET_NAME");

            // Create the DynamoDB client and the Context for Object Persistence Model
            AmazonDynamoDBClient client     = new AmazonDynamoDBClient();
            DynamoDBContext      ddbcontext = new DynamoDBContext(client);

            // Use the LoadAsync method to fetch all of the attributes of the note from the request from DynamoDB and wait
            // This is really to get the note attribute from the userId and noteId of the request
            // NOTE(review): blocking with Wait()/Result is tolerated here because the
            // handler signature is synchronous; an async handler would avoid it.
            var ddbTask = ddbcontext.LoadAsync(request.note);

            ddbTask.Wait();

            // The result will be stored in note
            Note note;

            // If there are no Exceptions
            if (ddbTask.Exception == null)
            {
                Console.WriteLine("Successfully executed LoadAsync with userId: " + request.note.userId + " and noteId: " + request.note.noteId);

                // Set the note variable to the result of the LoadAsync from DynamoDB
                note = ddbTask.Result;
            }
            else
            {
                // There was an exception, log the entry data and the exception
                Console.WriteLine("Unable to LoadAsync note with userId: " + request.note.userId + " and noteId: " + request.note.noteId);
                Console.WriteLine(ddbTask.Exception);

                // Return an empty string
                return("");
            }

            // Invoke Polly API, which will transform text into audio using the note we fetched from DynamoDB and the voiceId from the request
            var polly = new AmazonPollyClient();
            SynthesizeSpeechRequest speechRequest = new SynthesizeSpeechRequest
            {
                OutputFormat = OutputFormat.Mp3,
                Text         = note.note,
                VoiceId      = VoiceId.FindValue(request.voiceId)
            };
            var pollyTask = polly.SynthesizeSpeechAsync(speechRequest);

            pollyTask.Wait();
            Console.WriteLine("Successfully synthesized the Note text");

            // Save the audio stream returned by Amazon Polly on Lambda's temp directory '/tmp'
            string path = Path.Combine(
                Path.GetTempPath(),
                bucketName,
                request.note.userId);
            string filename = Path.Combine(path, request.note.noteId + ".mp3");

            Directory.CreateDirectory(path);
            using (FileStream file = new FileStream(filename, FileMode.Create, System.IO.FileAccess.Write))
            {
                pollyTask.Result.AudioStream.CopyTo(file);
            }
            Console.WriteLine("Successfully saved the Polly AudioStream to " + filename);

            // Upload our local file to S3
            var s3     = new AmazonS3Client();
            var s3Task = s3.PutObjectAsync(new PutObjectRequest
            {
                BucketName = bucketName,
                Key        = String.Format("{0}/{1}.mp3", request.note.userId, request.note.noteId),
                FilePath   = filename
            });

            s3Task.Wait();
            Console.WriteLine("Successfully uploaded the MP3 file to S3");

            // Generate a pre-signed URL so that we can securely access our MP3
            // NOTE(review): DateTime.Now yields local time; the SDK normalizes the
            // expiry when signing, but DateTime.UtcNow would be clearer — confirm.
            string url = s3.GetPreSignedURL(new GetPreSignedUrlRequest
            {
                BucketName = bucketName,
                Key        = String.Format("{0}/{1}.mp3", request.note.userId, request.note.noteId),
                Expires    = DateTime.Now + TimeSpan.FromHours(1)
            });

            Console.WriteLine("Successfully generated a pre-signed URL for the MP3 file: " + url);

            // Return the presigned URL to API Gateway
            return(url);
        }
예제 #15
0
        /// <summary>
        /// Synthesizes a line of dialogue for the given speaker with Amazon Polly
        /// and plays it, caching the rendered audio on disk keyed by a hash of the
        /// final SSML. Runs on a background task so the game thread is not blocked.
        /// </summary>
        /// <param name="name">Speaker name; also used as the cache subdirectory.</param>
        /// <param name="voice">Polly voice id to use.</param>
        /// <param name="text">Raw dialogue text (game markup is stripped below).</param>
        /// <param name="rate">Speech rate in percent; -1 uses the configured rate.</param>
        /// <param name="pitch">Playback pitch; -1 uses the configured per-speaker pitch.</param>
        /// <param name="volume">Playback volume; -1 uses the configured volume.</param>
        internal static void configSay(string name, string voice, string text, int rate = -1, float pitch = -1, float volume = -1)
        {
            Task.Run(() =>
            {
                currentVoice = VoiceId.FindValue(voice);
                tmppath      = Path.Combine(PelicanTTSMod._helper.DirectoryPath, "TTS");

                // Lazily create the shared Polly client on first use.
                if (pc == null)
                {
                    pc = AWSHandler.getPollyClient();
                }

                bool mumbling    = PelicanTTSMod.config.MumbleDialogues;
                string language1 = "<lang xml:lang=\"" + getLanguageCode() + "\">";
                string language2 = "</lang>";

                // Strip game dialogue markup (the $x tokens are presumably Stardew
                // dialogue control codes — TODO confirm) and map [ ] to < > so
                // bracketed SSML written in dialogue text becomes real tags.
                text = text.Replace("0g", "0 gold").Replace("< ", " ").Replace("` ", "  ").Replace("> ", " ").Replace('^', ' ').Replace(Environment.NewLine, " ").Replace("$s", "").Replace("$h", "").Replace("$g", "").Replace("$e", "").Replace("$u", "").Replace("$b", "").Replace("$8", "").Replace("$l", "").Replace("$q", "").Replace("$9", "").Replace("$a", "").Replace("$7", "").Replace("<", "").Replace("$r", "").Replace("[", "<").Replace("]", ">");
                text = language1 + text + language2;

                bool neural = shouldUseNeuralEngine(voice, out string v);

                // shouldUseNeuralEngine may substitute a different voice id; adopt it
                // when the neural engine will not be used.
                if (!neural && voice != v)
                {
                    currentVoice = VoiceId.FindValue(v);
                }

                bool useNeuralEngine = !mumbling && neural;

                // Amazon-specific SSML effects; these are only valid on the standard
                // engine, hence the useNeuralEngine guards below.
                var amzeffectIn  = mumbling ? "<amazon:effect phonation='soft'><amazon:effect vocal-tract-length='-20%'>" : "<amazon:auto-breaths><amazon:effect phonation='soft'>";
                var amzeffectOut = mumbling ? "</amazon:effect></amazon:effect>" : "</amazon:effect></amazon:auto-breaths>";

                if (mumbling)
                {
                    // Mumbling replaces the words with "Dwarvish" gibberish.
                    text = @"<speak>" + (useNeuralEngine ? "" : amzeffectIn) + Dialogue.convertToDwarvish(text) + (useNeuralEngine ? "" : amzeffectOut) + @"</speak>";
                }
                else
                {
                    text = @"<speak>" + (useNeuralEngine ? "" : amzeffectIn) + "<prosody rate='" + (rate == -1 ? PelicanTTSMod.config.Rate : rate) + "%'>" + text + @"</prosody>" + (useNeuralEngine ? "" : amzeffectOut) + "</speak>";
                }

                // Cache key: hash of the final SSML plus the engine choice.
                int hash = (text + (useNeuralEngine ? "-neural" : "")).GetHashCode();
                if (!Directory.Exists(Path.Combine(tmppath, name)))
                {
                    Directory.CreateDirectory(Path.Combine(tmppath, name));
                }

                string file            = Path.Combine(Path.Combine(tmppath, name), "speech_" + currentVoice.Value + (mumbling ? "_mumble_" : "_") + hash + ".wav");
                SoundEffect nextSpeech = null;

                // Synthesize only on cache miss; Convert writes the .wav to disk.
                if (!File.Exists(file))
                {
                    SynthesizeSpeechRequest sreq = new SynthesizeSpeechRequest();
                    sreq.Text                     = text;
                    sreq.TextType                 = TextType.Ssml;
                    sreq.OutputFormat             = OutputFormat.Ogg_vorbis;
                    sreq.Engine                   = useNeuralEngine ? Engine.Neural : Engine.Standard;
                    sreq.VoiceId                  = currentVoice;
                    SynthesizeSpeechResponse sres = pc.SynthesizeSpeech(sreq);
                    using (var memStream = new MemoryStream())
                    {
                        sres.AudioStream.CopyTo(memStream);
                        nextSpeech = Convert(memStream, file);
                    }
                }
                // NOTE(review): this reload overwrites the nextSpeech returned by
                // Convert above — presumably intentional so both paths share one
                // load, but confirm Convert's return value is otherwise unused.
                using (FileStream stream = new FileStream(file, FileMode.Open))
                    nextSpeech = SoundEffect.FromStream(stream);

                // Stop any line that is still playing before starting the new one.
                if (currentSpeech != null)
                {
                    currentSpeech.Stop();
                }

                currentSpeech = nextSpeech.CreateInstance();

                speak = false;
                currentSpeech.Pitch  = (mumbling ? 0.5f : pitch == -1 ? PelicanTTSMod.config.Voices[name].Pitch : pitch);
                currentSpeech.Volume = volume == -1 ? PelicanTTSMod.config.Volume : volume;
                currentSpeech.Play();
            });
        }
예제 #16
0
        /// <summary>
        /// Synthesizes a line of dialogue for the given speaker with Amazon Polly
        /// (standard engine) and plays it, caching the rendered audio on disk keyed
        /// by a hash of the final SSML. Runs on a background task so the game
        /// thread is not blocked.
        /// </summary>
        /// <param name="name">Speaker name; also used as the cache subdirectory.</param>
        /// <param name="voice">Polly voice id to use.</param>
        /// <param name="text">Raw dialogue text (game markup is stripped below).</param>
        internal static void configSay(string name, string voice, string text)
        {
            Task.Run(() =>
            {
                currentVoice = VoiceId.FindValue(voice);
                tmppath      = Path.Combine(PelicanTTSMod._helper.DirectoryPath, "TTS");

                // Lazily create the shared Polly client on first use.
                if (pc == null)
                {
                    pc = AWSHandler.getPollyClient();
                }

                bool mumbling = PelicanTTSMod.config.MumbleDialogues;

                // Strip game dialogue markup (the $x tokens are presumably Stardew
                // dialogue control codes — TODO confirm) and map [ ] to < > so
                // bracketed SSML written in dialogue text becomes real tags.
                text = text.Replace("< ", " ").Replace("` ", "  ").Replace("> ", " ").Replace('^', ' ').Replace(Environment.NewLine, " ").Replace("$s", "").Replace("$h", "").Replace("$g", "").Replace("$e", "").Replace("$u", "").Replace("$b", "").Replace("$8", "").Replace("$l", "").Replace("$q", "").Replace("$9", "").Replace("$a", "").Replace("$7", "").Replace("<", "").Replace("$r", "").Replace("[", "<").Replace("]", ">");

                if (mumbling)
                {
                    // Mumbling replaces the words with "Dwarvish" gibberish.
                    text = @"<speak><amazon:effect phonation='soft'><amazon:effect vocal-tract-length='-20%'>" + Dialogue.convertToDwarvish(text) + @"</amazon:effect></amazon:effect></speak>";
                }
                else
                {
                    text = @"<speak><amazon:auto-breaths><amazon:effect phonation='soft'>" + text + @"</amazon:effect></amazon:auto-breaths></speak>";
                }


                // Cache key: hash of the final SSML.
                int hash = text.GetHashCode();
                if (!Directory.Exists(Path.Combine(tmppath, name)))
                {
                    Directory.CreateDirectory(Path.Combine(tmppath, name));
                }

                string file            = Path.Combine(Path.Combine(tmppath, name), "speech_" + currentVoice.Value + (mumbling ? "_mumble_" : "_") + hash + ".wav");
                SoundEffect nextSpeech = null;

                // Synthesize only on cache miss; Convert writes the .wav to disk.
                if (!File.Exists(file))
                {
                    SynthesizeSpeechRequest sreq = new SynthesizeSpeechRequest();
                    sreq.Text                     = text;
                    sreq.TextType                 = TextType.Ssml;
                    sreq.OutputFormat             = OutputFormat.Ogg_vorbis;
                    sreq.VoiceId                  = currentVoice;
                    SynthesizeSpeechResponse sres = pc.SynthesizeSpeech(sreq);
                    using (var memStream = new MemoryStream())
                    {
                        sres.AudioStream.CopyTo(memStream);
                        nextSpeech = Convert(memStream, file);
                    }
                }
                // NOTE(review): this reload overwrites the nextSpeech returned by
                // Convert above — presumably intentional so both paths share one
                // load, but confirm Convert's return value is otherwise unused.
                using (FileStream stream = new FileStream(file, FileMode.Open))
                    nextSpeech = SoundEffect.FromStream(stream);

                // Stop any line that is still playing before starting the new one.
                if (currentSpeech != null)
                {
                    currentSpeech.Stop();
                }

                currentSpeech = nextSpeech.CreateInstance();

                speak = false;
                currentSpeech.Pitch  = (mumbling ? 0.5f : PelicanTTSMod.config.Voices[name].Pitch);
                currentSpeech.Volume = PelicanTTSMod.config.Volume;

                currentSpeech.Play();
            });
        }