// Example #1 (score: 0)
        /// <summary>
        /// Moves the playback position by <paramref name="offset"/> episodes and resumes
        /// playback there. When the position runs off either end of the feed, it wraps
        /// around if looping is enabled; otherwise playback stops with a spoken notice.
        /// </summary>
        /// <param name="attributes">Current audio session state; mutated in place.</param>
        /// <param name="offset">Signed step: positive moves forward, negative backward.</param>
        /// <returns>The skill response that plays the new episode, or a stop response.</returns>
        private SkillResponse Increment(AudioSessionAttributes attributes, int offset)
        {
            bool movingForward = offset > 0;
            int target = attributes.Index + offset;

            // Did we step past the last episode (Count) or before the first (-1)?
            bool pastEnd = target == _feedItems.Count || target == -1;

            if (pastEnd && !attributes.Loop)
            {
                // Not looping: stop the player and tell the user which end they hit.
                attributes.State = Constants.States.StartMode;

                var stopResponse = ResponseBuilder.AudioPlayerStop();
                Speech.AddOutputSpeech(stopResponse, $"You have reached the {(movingForward ? "last" : "first")} episode.");
                return stopResponse;
            }

            if (pastEnd)
            {
                // Looping: wrap to the opposite end of the feed.
                target = movingForward ? 0 : _feedItems.Count - 1;
            }

            attributes.Index = target;
            attributes.OffsetInMilliseconds = 0;
            attributes.PlaybackIndexChanged = true;

            return Play(attributes, false);
        }
 /// <summary>Replaces the current skill response with an AudioPlayer stop directive.</summary>
 public void StopAudioPlayer()
 {
     var stopResponse = ResponseBuilder.AudioPlayerStop();
     Skill = stopResponse;
 }
// Example #3 (score: 0)
        public SkillResponse FunctionHandler(JObject inputObj, ILambdaContext context)
        {
            Log.logger = context.Logger;
            APLSkillRequest input   = new APLSkillRequest();
            SkillResponse   respond = ResponseBuilders.BuildResponse(null, false, null, null, null);

            try
            {
                new SystemRequestTypeConverter().AddToRequestConverter();
                new UserEventRequestHandler().AddToRequestConverter();
                //new APLRequestTypeConverter().AddToRequestConverter();

                //Getting input
                string inputString = JsonConvert.SerializeObject(inputObj);
                input = JsonConvert.DeserializeObject <APLSkillRequest>(inputString);
                //Logging input
                Log.Output("---INPUT---");
                context.Logger.LogLine(JsonConvert.SerializeObject(input));

                // Initialise data
                var  requestType  = input.GetRequestType(); // Get type of request
                bool VideoSupport = input.Context.System.Device.IsInterfaceSupported("VideoApp");
                bool APLSupport   = input.Context.System.Device.IsInterfaceSupported("Alexa.Presentation.APL");
                Log.Output("Video Support - APL - is: " + APLSupport);

                // ***REQUESTS*** //

                if (input.Request is LaunchRequest && APLSupport) // Launch Request for Video
                {
                    // Launch request for echo spot/show -> Return APL + Ask
                    Log.Output("Video App Launch Request");
                    respond = Dependencies.CreateAPL();
                }
                else if (input.Request is LaunchRequest && !APLSupport) // Launch Request for speakers
                {
                    Log.Output("Launch Request for smart speaker");
                    Reprompt reprompt = new Reprompt("How can I help you today?");
                    respond = ResponseBuilder.Ask("Welcome to ASMR video. Please ask for the list of songs or ask me to play a song", reprompt);
                }
                else if (input.Request is SessionEndedRequest) // SessionEndedRequest
                {
                    // End Session by playing message
                    Log.Output("Session Ended Request Called");
                    respond = ResponseBuilder.Tell("Thank you for using this skill. Goodbye.");
                    respond.Response.ShouldEndSession = true;
                }
                else if (input.Request is UserEventRequest usrEvent && APLSupport) // User Event Request for TouchWrappers
                {
                    Log.Output("User Event Launch requst");
                    var Id = Convert.ToInt32(usrEvent.Source.ComponentId); // Take ID as integer
                    Id = Id - 1;
                    Log.Output("ID of touchwrapper is : " + (Id + 1) + " , Index of number is: " + Id);
                    respond = Dependencies.BuildVideoResonse(videoUrls[Id]);
                    context.Logger.LogLine(JsonConvert.SerializeObject(respond));
                }
                else if (input.Request is PlaybackControllerRequest) // Playback controller request
                {
                    Log.Output("Playback Controller Request Called");
                    var playbackReq = input.Request as PlaybackControllerRequest;
                    switch (playbackReq.PlaybackRequestType)
                    {
                    case PlaybackControllerRequestType.Next:
                        break;

                    case PlaybackControllerRequestType.Pause:
                        break;

                    case PlaybackControllerRequestType.Play:
                        break;

                    case PlaybackControllerRequestType.Previous:
                        break;
                    }
                    respond = ResponseBuilder.AudioPlayerStop();
                }
                // ***INTENTS***
                else if (requestType == typeof(IntentRequest))          // INTENTS
                {
                    var intentRequest = input.Request as IntentRequest; // Get intent request
                    var intentName    = intentRequest.Intent.Name;
                    Log.Output("Intent Requests");

                    //Check request
                    switch (intentName)
                    {
                    // Play Song Intent
                    case "PlaySongIntent":
                        Log.Output("Play a song Intent");
                        var songSlot = intentRequest.Intent.Slots["songName"].Value;     // get slot

                        //int songNumIndex= Dependencies.SlotConverter(songSlot);
                        int songNumIndex = Convert.ToInt32(songSlot);
                        songNumIndex -= 1;
                        Log.Output("Song Slot is: " + songSlot + " , song Number index is : " + songNumIndex);

                        if (songNumIndex != -1)      // -1 = NOT FOUND
                        {
                            var audioRes = ResponseBuilders.AudioPlayerPlay(Alexa.NET.Response.Directive.PlayBehavior.ReplaceAll, audioUrls[songNumIndex], names[songNumIndex], null, 0);
                            respond = audioRes;
                            respond.Response.OutputSpeech = new PlainTextOutputSpeech {
                                Text = "Playing the song."
                            };
                        }
                        else     //Found
                        {
                            respond.Response.OutputSpeech = new PlainTextOutputSpeech {
                                Text = "I did not understand which song you asked me to pplay. Could you please repeat?"
                            };
                        }
                        break;

                    // ListSongsIntent
                    case "ListSongsIntent":
                        Log.Output("List Song Intent Request Called");
                        string text = "The ASMR songs are: ";
                        for (int i = 0; i < names.Length; i++)
                        {
                            string ch = " , ";
                            if (i == (names.Length - 1))
                            {
                                ch = ".";
                            }
                            text += ((i + 1) + ". " + names[i] + ch);
                        }
                        text += " Which song do you want me to play? Say \"Alexa, play song 1 \".";
                        Reprompt reprompt = new Reprompt("Which song should I play?");
                        respond = ResponseBuilder.Ask(text, reprompt);
                        break;

                    // Help Intent
                    case "AMAZON.HelpIntent":
                        Log.Output("Help Intent Request Called");
                        respond = ResponseBuilder.Tell("You can ask me 'What is ASMR' or ask me to play one of ASMR Darling's top ten videos or ask for a list of ASMR's top ten videos");
                        break;

                    //AMAZON StopIntent
                    case "AMAZON.StopIntent":
                        Log.Output("Stop Intent Request Called");
                        if (APLSupport)
                        {
                            // Stop when Video Present
                            respond = Dependencies.CreateAPL();
                        }
                        else
                        {
                            // Stop when Audio Present
                            Reprompt re = new Reprompt("How can I help you today?");
                            respond = ResponseBuilder.Ask("Welcome to ASMR video. Please ask for the list of songs or ask me to play a song", re);
                            respond.Response.Directives.Add(new StopDirective());
                        }
                        break;

                    case "AMAZON.CancelIntent":
                        if (APLSupport)
                        {
                            Log.Output("---CancelIntent with video Support---");
                            respond = Dependencies.CreateAPL();;
                        }
                        else
                        {
                            Log.Output("Cancel Intent Request(Audio Player) Called");
                            Reprompt re = new Reprompt("How can I help you today?");
                            respond = ResponseBuilder.Ask("Welcome to ASMR video. Please ask for the list of songs or ask me to play a song", re);
                            respond.Response.Directives.Add(new StopDirective());
                        }
                        break;

                    case "AMAZON.PauseIntent":
                        Log.Output("Pause Intent Request Called");
                        respond = ResponseBuilder.AudioPlayerStop();
                        break;

                    case "WhatIsASMRIntent":
                        // What is ASMR?
                        if (APLSupport)
                        {
                            Log.Output("What is ASMR Intent - VideoApp played");
                            respond = Dependencies.BuildVideoResonse(whatIsASMRvideo);     // Return response to play Video
                        }
                        else
                        {
                            Log.Output("What is ASMR- Audio played");
                            respond = ResponseBuilders.AudioPlayerPlay(Alexa.NET.Response.Directive.PlayBehavior.ReplaceAll, whatIsASMRaudio, "What is ASMR?", null, 0);;
                        }
                        break;

                    case "PlayVideoIntent":
                        Log.Output("Play a Video Intent - \"Alexa play video\"");
                        var videoSlot     = intentRequest.Intent.Slots["songName"].Value; // get slot
                        int videoNumIndex = Dependencies.SlotConverter(videoSlot);

                        respond = Dependencies.BuildVideoResonse(videoUrls[videoNumIndex]);
                        context.Logger.LogLine(JsonConvert.SerializeObject(respond));

                        break;

                    default:
                        Log.Output("Did not understand the intent request / Unexpected intent request");
                        respond = ResponseBuilder.Tell("I dont understand. Please ask me to list all songs or you can ask for help");
                        break;
                    }
                }
                else
                {
                    Log.Output("Unknown Request or Intent.");
                    Log.Output(JsonConvert.SerializeObject(input));
                    respond = ResponseBuilder.Tell("I dont understand. Please ask me to list all songs or ask for help");
                }

                return(respond);
            }
// Example #4 (score: 0)
        /// <summary>
        /// Azure Function HTTP entry point for the Gorilla Logic Alexa skill.
        /// Deserializes the Alexa request from the POST body, dispatches on the
        /// request/intent type, and returns the skill response as HTTP 200.
        /// </summary>
        /// <param name="req">Incoming HTTP POST whose body is the Alexa SkillRequest JSON.</param>
        /// <param name="log">Function logger (not currently used in this handler).</param>
        /// <returns>HTTP 200 wrapping the SkillResponse (may wrap null for unhandled intents).</returns>
        public static async Task <IActionResult> Run(
            [HttpTrigger(AuthorizationLevel.Anonymous, "post", Route = null)] HttpRequest req,
            ILogger log)
        {
            string json = await req.ReadAsStringAsync();

            var skillRequest = JsonConvert.DeserializeObject <SkillRequest>(json);
            var requestType  = skillRequest.GetRequestType();

            SkillResponse response = null;

            if (requestType == typeof(LaunchRequest))
            {
                // Greet the user and keep the session open so they can follow up.
                response = ResponseBuilder.Tell("Welcome to Gorilla Logic!");
                response.Response.ShouldEndSession = false;
            }
            else if (requestType == typeof(IntentRequest))
            {
                var intentRequest = skillRequest.Request as IntentRequest;

                // ToLowerInvariant: intent names are ASCII identifiers, so avoid
                // culture-sensitive lowering (e.g. the Turkish 'I' problem).
                switch (intentRequest.Intent.Name.ToLowerInvariant())
                {
                case "gorillalocation":
                    // BUG FIX: the original overwrote this response with an
                    // SsmlOutputSpeech whose Ssml was never assigned, so the skill
                    // said nothing. Keep the plain-text answer instead.
                    response = ResponseBuilder.Tell("Gorilla Logic is located in Ruta N medellin Colombia oficina 2020");
                    break;

                case "gorillamusic":
                case "amazon.resumeintent":
                    string audioUrl   = "{url}";
                    string audioToken = "Gorillaz song 19 - 20000";
                    // BUG FIX: Context.AudioPlayer can be null when playback has never
                    // started (a fresh "play music" request); start from offset 0 then.
                    int offsetMs = (int)(skillRequest.Context.AudioPlayer?.OffsetInMilliseconds ?? 0);
                    response = ResponseBuilder.AudioPlayerPlay(PlayBehavior.ReplaceAll, audioUrl, audioToken, offsetMs);
                    break;

                case "gorillainvitation":
                    var speechInvitation = new SsmlOutputSpeech();
                    speechInvitation.Ssml = "<speak><voice name=\"Enrique\"><lang xml:lang=\"es-ES\">Estan todos invitados al meetup del 25 de julio donde yo alexa seré la protagonista. Los esperamos en Ruta N</lang></voice></speak>";
                    response = ResponseBuilder.Tell(speechInvitation);
                    break;

                case "gorillacalculation":
                    // ROBUSTNESS: TryGetValue avoids KeyNotFoundException when a slot
                    // is missing entirely (the original indexer would have thrown).
                    if (intentRequest.Intent.Slots.Count > 0 &&
                        intentRequest.Intent.Slots.TryGetValue("date", out var dateSlot) &&
                        dateSlot?.Value != null &&
                        intentRequest.Intent.Slots.TryGetValue("year", out var yearSlot) &&
                        yearSlot?.Value != null)
                    {
                        // Combine the day/month from the "date" slot with the "year" slot.
                        DateTime birthDate = DateTime.Parse(dateSlot.Value.ToString());
                        birthDate = birthDate.AddYears(int.Parse(yearSlot.Value.ToString()) - birthDate.Year);

                        // BUG FIX: dividing elapsed days by 365 miscounts across leap
                        // years. Compute the calendar age directly instead.
                        DateTime today = DateTime.Today;
                        int result = today.Year - birthDate.Year;
                        if (birthDate.Date > today.AddYears(-result))
                        {
                            result--;      // birthday has not happened yet this year
                        }

                        response = ResponseBuilder.Tell($"you are {result} years old");
                        response.Response.ShouldEndSession = true;
                    }
                    else
                    {
                        response = ResponseBuilder.Ask("Please tell me, when were you born?", null);
                    }
                    break;

                case "amazon.pauseintent":
                    response = ResponseBuilder.AudioPlayerStop();
                    break;

                default:
                    break;
                }
            }
            else if (requestType == typeof(SessionEndedRequest))
            {
                response = ResponseBuilder.Tell("bye");
                response.Response.ShouldEndSession = true;
            }
            else if (requestType == typeof(AudioPlayerRequest))
            {
                // AudioPlayer lifecycle callbacks (PlaybackStopped, PlaybackNearlyFinished,
                // ...) are acknowledged but not acted upon yet.
            }
            else
            {
                response = ResponseBuilder.Empty();
            }

            return new OkObjectResult(response);
        }
        /// <summary>
        /// Lambda entry point for the audio-player sample skill. Restores the user's
        /// saved playback state from the state store, dispatches on request type
        /// (launch, intent, or AudioPlayer lifecycle event), persists the updated
        /// state, and returns the response (null when Alexa expects no reply, e.g.
        /// after PlaybackStopped).
        /// </summary>
        /// <param name="input">The deserialized Alexa skill request.</param>
        /// <param name="context">Lambda execution context; supplies the logger.</param>
        /// <returns>The skill response to send back to Alexa, or null.</returns>
        public async Task <SkillResponse> FunctionHandler(SkillRequest input, ILambdaContext context)
        {
            var log = context.Logger;

            SkillResponse returnResponse = new SkillResponse();
            var           audioItems     = AudioAssets.GetSampleAudioFiles();

            // Initialize a connection to the database; this also initializes the
            // context for the DynamoDB helper.
            var audioStateHelper = new AudioStateHelper();
            await audioStateHelper.VerifyTable();

            // AudioPlayer lifecycle requests arrive without a session, so fall back
            // to the user id carried in the request context when Session is null.
            string userId = "";

            if (input.Session != null)
            {
                userId = input.Session.User.UserId;
            }
            else
            {
                userId = input.Context.System.User.UserId;
            }

            var lastState = await audioStateHelper.GetAudioState(userId);

            var currentState = new AudioState()
            {
                UserId = userId
            };

            // Carry the previously saved playback state forward; the handlers below
            // mutate it and it is saved again before returning.
            currentState.State = lastState.State;

            if (input.GetRequestType() == typeof(LaunchRequest))
            {
                log.LogLine($"Default LaunchRequest made");
                var output = new PlainTextOutputSpeech()
                {
                    Text = "Welcome to the Alexa audio sample. "
                           + "You can say, play the audio, to begin."
                };
                var reprompt = new Reprompt()
                {
                    OutputSpeech = new PlainTextOutputSpeech()
                    {
                        Text = "You can say, play the audio, to begin."
                    }
                };
                returnResponse = ResponseBuilder.Ask(output, reprompt);

                await audioStateHelper.SaveAudioState(currentState);
            }
            else if (input.GetRequestType() == typeof(IntentRequest))
            {
                var intentRequest = (IntentRequest)input.Request;
                var output        = new PlainTextOutputSpeech();
                var reprompt      = new Reprompt();
                log.LogLine($"Triggered " + intentRequest.Intent.Name);
                switch (intentRequest.Intent.Name)
                {
                case "PlayAudio":
                    // Start the playlist from the top.
                    currentState.State.Token     = audioItems.FirstOrDefault().Title;
                    currentState.State.State     = "PLAY_MODE";
                    currentState.State.Index     = 0;
                    currentState.State.playOrder = new List <int> {
                        0, 1, 2, 3, 4
                    };
                    returnResponse = ResponseBuilder.AudioPlayerPlay(
                        PlayBehavior.ReplaceAll,
                        audioItems[currentState.State.Index].Url,
                        currentState.State.Token);

                    break;

                case BuiltInIntent.Help:
                    output.Text           = "You can say, play the audio, to begin.";
                    reprompt.OutputSpeech = new PlainTextOutputSpeech()
                    {
                        Text = "You can say, play the audio, to begin."
                    };
                    returnResponse = ResponseBuilder.Ask(output, reprompt);
                    break;

                // CONSISTENCY: Cancel, Stop and Pause had three byte-identical
                // handlers in the original; merged into one fall-through group.
                case BuiltInIntent.Cancel:
                case BuiltInIntent.Stop:
                case BuiltInIntent.Pause:
                    // Remember where playback was, then stop the player.
                    currentState.State.OffsetInMS = Convert.ToInt32(input.Context.AudioPlayer.OffsetInMilliseconds);
                    currentState.State.Token      = input.Context.AudioPlayer.Token;
                    currentState.State.State      = "PAUSE_MODE";
                    returnResponse = ResponseBuilder.AudioPlayerStop();
                    break;

                case BuiltInIntent.Next:
                    // Advance one item, wrapping to the start of the playlist.
                    currentState.State.Index++;
                    if (currentState.State.Index >= audioItems.Count)
                    {
                        currentState.State.Index = 0;
                    }
                    currentState.State.Token      = audioItems[currentState.State.Index].Title;
                    currentState.State.OffsetInMS = 0;
                    currentState.State.State      = "PLAY_MODE";
                    returnResponse = ResponseBuilder.AudioPlayerPlay(PlayBehavior.ReplaceAll,
                                                                     audioItems[currentState.State.Index].Url,
                                                                     currentState.State.Token);
                    break;

                case BuiltInIntent.Previous:
                    // Step back one item, clamping at the first item.
                    currentState.State.Index = currentState.State.Index - 1;
                    if (currentState.State.Index < 0)
                    {
                        currentState.State.Index = 0;
                    }

                    currentState.State.Token      = audioItems[currentState.State.Index].Title;
                    currentState.State.OffsetInMS = 0;
                    currentState.State.State      = "PLAY_MODE";
                    returnResponse = ResponseBuilder.AudioPlayerPlay(PlayBehavior.ReplaceAll,
                                                                     audioItems[currentState.State.Index].Url,
                                                                     currentState.State.Token);
                    break;

                case BuiltInIntent.Repeat:
                    // Restart the current item from offset 0.
                    currentState.State.Token      = audioItems[currentState.State.Index].Title;
                    currentState.State.OffsetInMS = 0;
                    currentState.State.State      = "PLAY_MODE";
                    returnResponse = ResponseBuilder.AudioPlayerPlay(PlayBehavior.ReplaceAll,
                                                                     audioItems[currentState.State.Index].Url,
                                                                     currentState.State.Token,
                                                                     0);
                    break;

                case BuiltInIntent.StartOver:
                    // Start everything from the beginning of the playlist.
                    currentState.State.Token      = audioItems[0].Title;
                    currentState.State.OffsetInMS = 0;
                    currentState.State.State      = "PLAY_MODE";
                    returnResponse = ResponseBuilder.AudioPlayerPlay(PlayBehavior.ReplaceAll,
                                                                     audioItems[0].Url,
                                                                     currentState.State.Token,
                                                                     0);
                    break;

                case BuiltInIntent.Resume:
                    // Resume the saved item from the saved offset in milliseconds.
                    returnResponse = ResponseBuilder.AudioPlayerPlay(PlayBehavior.ReplaceAll,
                                                                     audioItems[currentState.State.Index].Url,
                                                                     currentState.State.Token,
                                                                     currentState.State.OffsetInMS);

                    // BUG FIX: the original indexed audioItems[Index + 1] without a
                    // bound check and threw IndexOutOfRangeException when resuming
                    // the last item. Wrap to the first item instead, matching the
                    // wraparound used by Next and PlaybackNearlyFinished.
                    int nextIndex = (currentState.State.Index + 1) % audioItems.Count;

                    // If there was an enqueued item, re-enqueue its replacement.
                    if (currentState.State.EnqueuedToken != null)
                    {
                        returnResponse.Response.Directives.Add(new AudioPlayerPlayDirective()
                        {
                            PlayBehavior = PlayBehavior.Enqueue,
                            AudioItem    = new Alexa.NET.Response.Directive.AudioItem()
                            {
                                Stream = new AudioItemStream()
                                {
                                    Url   = audioItems[nextIndex].Url,
                                    Token = audioItems[nextIndex].Title,
                                    ExpectedPreviousToken = currentState.State.Token,
                                    OffsetInMilliseconds  = 0
                                }
                            }
                        });
                    }

                    currentState.State.EnqueuedToken = audioItems[nextIndex].Title;
                    currentState.State.State         = "PLAY_MODE";
                    break;

                default:
                    log.LogLine($"Unknown intent: " + intentRequest.Intent.Name);
                    // BUG FIX: corrected the "Pocast" typo in the spoken fallback text.
                    output.Text           = "Welcome to Podcast Player";
                    reprompt.OutputSpeech = new PlainTextOutputSpeech()
                    {
                        Text = "This is your reprompt. Please do something."
                    };
                    returnResponse = ResponseBuilder.TellWithReprompt(output, reprompt);
                    break;
                }
            }
            else if (input.GetRequestType() == typeof(AudioPlayerRequest))
            {
                var audioRequest = input.Request as AudioPlayerRequest;

                if (audioRequest.AudioRequestType == AudioRequestType.PlaybackStarted)
                {
                    log.LogLine($"PlaybackStarted Triggered ");
                    // Respond with Stop or ClearQueue.
                    returnResponse = ResponseBuilder.AudioPlayerClearQueue(ClearBehavior.ClearEnqueued);
                }
                else if (audioRequest.AudioRequestType == AudioRequestType.PlaybackFinished)
                {
                    // Audio came to an end on its own: play the enqueued item, if any.
                    log.LogLine($"PlaybackFinished Triggered ");
                    if (currentState.State.EnqueuedToken != null)
                    {
                        int itemIndex = audioItems.IndexOf(audioItems.Where(i => i.Title == currentState.State.EnqueuedToken).FirstOrDefault());
                        currentState.State.Token = audioItems[itemIndex].Title;
                        currentState.State.Index = itemIndex;
                        returnResponse           = ResponseBuilder.AudioPlayerPlay(PlayBehavior.ReplaceAll,
                                                                                   audioItems[itemIndex].Url,
                                                                                   currentState.State.Token);
                    }
                    else
                    {
                        // Nothing queued: just clear the queue.
                        returnResponse = ResponseBuilder.AudioPlayerClearQueue(ClearBehavior.ClearEnqueued);
                    }
                }
                else if (audioRequest.AudioRequestType == AudioRequestType.PlaybackStopped)
                {
                    // Playback was explicitly stopped: persist the position, reply with nothing.
                    log.LogLine($"PlaybackStopped Triggered ");
                    currentState.State.State         = "PAUSE_MODE";
                    currentState.State.Token         = audioRequest.Token;
                    currentState.State.EnqueuedToken = audioRequest.EnqueuedToken;
                    currentState.State.OffsetInMS    = Convert.ToInt32(audioRequest.OffsetInMilliseconds);
                    log.LogLine($"Saving AudioState: " + currentState.State.Token + " at " + currentState.State.OffsetInMS.ToString() + "ms");
                    returnResponse = null;
                }
                else if (audioRequest.AudioRequestType == AudioRequestType.PlaybackNearlyFinished)
                {
                    log.LogLine($"PlaybackNearlyFinished Triggered ");

                    // If an item is already enqueued, Alexa needs no response at all.
                    // NOTE(review): this early return skips SaveAudioState below — it
                    // matches the original behavior; confirm that is intentional.
                    if (audioRequest.HasEnqueuedItem)
                    {
                        return(null);
                    }

                    // Find the index of the currently playing item by its token.
                    int itemIndex = audioItems.IndexOf(audioItems.Where(i => i.Title == audioRequest.Token).FirstOrDefault());
                    if (itemIndex == -1)
                    {
                        log.LogLine($"Could not get the index of: " + audioRequest.Token);
                    }
                    itemIndex++;
                    if (itemIndex == audioItems.Count)
                    {
                        itemIndex = 0;      // wrap to the start of the playlist
                    }

                    currentState.State.EnqueuedToken = audioItems[itemIndex].Title;
                    currentState.State.Token         = audioRequest.Token;
                    // Enqueue the next item behind the one currently playing.
                    returnResponse = ResponseBuilder.AudioPlayerPlay(
                        PlayBehavior.Enqueue,
                        audioItems[itemIndex].Url,
                        currentState.State.EnqueuedToken,
                        currentState.State.Token,
                        0);
                }
                else if (audioRequest.AudioRequestType == AudioRequestType.PlaybackFailed)
                {
                    log.LogLine($"PlaybackFailed Triggered");
                    // Best-effort recovery: restart the first file. The original author
                    // flagged this as a placeholder strategy to be replaced per skill.
                    currentState.State.Token = audioItems.FirstOrDefault().Title;
                    currentState.State.Index = 0;
                    currentState.State.State = "PLAY_MODE";
                    returnResponse           = ResponseBuilder.AudioPlayerPlay(PlayBehavior.ReplaceAll, audioItems.FirstOrDefault().Url, currentState.State.Token);
                }
            }

            // Save our state.
            await audioStateHelper.SaveAudioState(currentState);

            // Return our response.
            return(returnResponse);
        }
Beispiel #6
0
        /// <summary>
        /// HTTP-triggered entry point for the KEXP live-stream Alexa skill.
        /// Deserializes the incoming Alexa request, rejects anything that fails
        /// validation, and dispatches on the request/intent type to build a
        /// <see cref="SkillResponse"/> (play/stop the stream, or a spoken reply).
        /// </summary>
        /// <param name="req">Raw HTTP POST from the Alexa service.</param>
        /// <param name="log">Function logger.</param>
        /// <returns>200 with the skill response (null body if unhandled), or 400 when validation fails.</returns>
        public static async Task<IActionResult> Run(
            [HttpTrigger(AuthorizationLevel.Anonymous, "post", Route = null)] HttpRequest req,
            ILogger log)
        {
            string json = await req.ReadAsStringAsync();

            var skillRequest = JsonConvert.DeserializeObject<SkillRequest>(json);

            // Guard clause: refuse requests that do not pass Alexa request validation.
            bool isValid = await ValidateRequest(req, log, skillRequest);
            if (!isValid)
            {
                return new BadRequestResult();
            }

            var requestType = skillRequest.GetRequestType();

            log.LogInformation("Request received, type: {0} id: {1}", requestType, skillRequest.Request.RequestId);

            SkillResponse response = null;

            if (requestType == typeof(LaunchRequest))
            {
                // Greet the user and keep the session open so they can say "play".
                response = ResponseBuilder.Tell("Welcome to KEXP 90.3FM");
                response.Response.ShouldEndSession = false;
            }
            else if (requestType == typeof(IntentRequest) && skillRequest.Request is IntentRequest intentRequest)
            {
                log.LogInformation("-> IntentRequest, name: {0}", intentRequest.Intent.Name);

                switch (intentRequest.Intent.Name)
                {
                    case "Play":
                    case "AMAZON.ResumeIntent":
                    case "AMAZON.StartOverIntent":
                        //handle the intent
                        // https://live-aacplus-64.streamguys1.com/ -- NOTE should we go with higher bitrate?
                        response = ResponseBuilder.AudioPlayerPlay(Alexa.NET.Response.Directive.PlayBehavior.ReplaceAll, "https://live-aacplus-64.streamguys1.com/", "token");
                        break;

                    case "AMAZON.CancelIntent":
                        response = ResponseBuilder.AudioPlayerStop();
                        break;

                    case "AMAZON.HelpIntent":
                        // Keep the session open after the help prompt so the user can respond.
                        response = ResponseBuilder.Tell("Say PLAY to play the KEXP live stream");
                        response.Response.ShouldEndSession = false;
                        break;

                    case "AMAZON.PauseIntent":
                        response = ResponseBuilder.AudioPlayerStop();
                        break;

                    case "AMAZON.StopIntent":
                        response = ResponseBuilder.AudioPlayerStop();
                        break;

                    case "AMAZON.NextIntent":
                    case "AMAZON.PreviousIntent":
                        response = ResponseBuilder.Tell("Sorry, Next and Previous are not supported.");
                        break;

                    case "AMAZON.LoopOnIntent":
                    case "AMAZON.LoopOffIntent":
                        response = ResponseBuilder.Tell("Sorry, looping is not supported.");
                        break;

                    case "AMAZON.ShuffleOnIntent":
                    case "AMAZON.ShuffleOffIntent":
                        response = ResponseBuilder.Tell("Sorry, shuffle is not supported.");
                        break;

                    case "AMAZON.RepeatIntent":
                        response = ResponseBuilder.Tell("Sorry, Repeat is not supported.");
                        break;
                }
            }
            else if (requestType == typeof(AudioPlayerRequest) && skillRequest.Request is AudioPlayerRequest audioPlayerRequest)
            {
                // Don't do anything with these for now, but handle them gracefully.
                // More info: https://developer.amazon.com/en-US/docs/alexa/custom-skills/audioplayer-interface-reference.html
                log.LogInformation("-> AudioPlayerRequest: {0}", audioPlayerRequest.AudioRequestType);
                response = ResponseBuilder.Empty();
            }
            else if (requestType == typeof(SessionEndedRequest))
            {
                log.LogInformation("-> Session ended");
                response = ResponseBuilder.Empty();
                response.Response.ShouldEndSession = true;
            }

            // Anything we did not recognize above: log the raw payload for later investigation.
            if (response == null)
            {
                log.LogError("*** Unhandled request:");
                log.LogError(json);
            }

            return new OkObjectResult(response);
        }
Beispiel #7
0
 /// <summary>
 /// Handles a stop request: issues an AudioPlayer.Stop directive to halt playback.
 /// The session attributes are accepted for interface symmetry but not consulted.
 /// </summary>
 /// <param name="attributes">Current audio session state (unused).</param>
 /// <returns>A skill response carrying the stop directive.</returns>
 public SkillResponse Stop(AudioSessionAttributes attributes) => ResponseBuilder.AudioPlayerStop();