/// <summary>
/// Handles an Alexa SessionEndedRequest. The only work is logging that the
/// session ended; no response content is composed here.
/// </summary>
public async Task HandleRequest()
{
    await RequestProcessHelper.ProcessRequest(
        "SessionEndedRequestHandler.HandleRequest()",
        "Session Ended Request",
        () =>
        {
            // Fixed typo in the log message ("eneded" -> "ended") and dropped the
            // async modifier from a lambda that never awaited (matches the
            // Task.CompletedTask style used by the launch handler).
            Core.Logger.Write("SessionEndedRequestHandler.HandleRequest()", "Session ended");
            return Task.CompletedTask;
        });
}
/// <summary>
/// Handles a generic Alexa request by dispatching on its sub request type.
/// A GUI user event (APL touch selection) carries the media title and source
/// url as its first two arguments and launches video playback; anything else
/// falls through to a "did not understand" response.
/// </summary>
public async Task HandleRequest()
{
    await RequestProcessHelper.ProcessRequest(
        "AlexaRequestHandler.HandleRequest()",
        "Alexa Request",
        (System.Func<Task>)(() =>
        {
            string subRequestType = Core.Request.GetSubRequestType();
            switch (subRequestType)
            {
                // handle user gui event request
                case AlexaRequestType.UserEventRequest:
                    // Pattern match instead of an unchecked "as" cast: the old code
                    // dereferenced the cast result and threw NRE if the payload was
                    // not actually a UserEventRequest.
                    if (Core.Request.GetRequest().Request is UserEventRequest userEventRequest)
                    {
                        // assumes arguments[0] is the title and arguments[1] is the
                        // source url, as sent by the APL menu — TODO confirm against
                        // the APL document's SendEvent arguments
                        string title = userEventRequest.Arguments[0] as string;
                        string url = userEventRequest.Arguments[1] as string;
                        Core.Logger.Write("AlexaRequestHandler.HandleRequest()", $"Media file title: {title}");
                        Core.Logger.Write("AlexaRequestHandler.HandleRequest()", $"Media file source url: {url}");
                        Core.State.UserState.Stage = Stage.Video;
                        Core.Response.AddVideoApp(url, title, title);
                    }
                    else
                    {
                        // malformed user event payload: treat it like an unknown request
                        Core.State.UserState.Stage = Stage.Menu;
                        Core.Response.SetSpeech(false, false, (string)SpeechTemplate.NoUnderstand);
                    }
                    break;

                // handle unknown request
                default:
                    Core.State.UserState.Stage = Stage.Menu;
                    Core.Response.SetSpeech(false, false, (string)SpeechTemplate.NoUnderstand);
                    break;
            }
            return Task.CompletedTask;
        }));
}
/// <summary>
/// Handles the Alexa LaunchRequest: greets the user (returning vs. first-time),
/// asks for a review on the 5th and 10th launch, increments the launch counter,
/// and adds an APL visual menu on devices with a screen.
/// </summary>
public async Task HandleRequest()
{
    await RequestProcessHelper.ProcessRequest("LaunchRequestHandler.HandleRequest()", "Launch Request", () =>
    {
        // set user stage
        Core.State.UserState.Stage = Stage.Menu;

        // compose output speech: welcome line depends on whether the user has played before
        StringBuilder sb = new StringBuilder();
        string welcome = Core.State.UserState.NumTimesPlayed > 0 ? SpeechTemplate.WelcomeBack : SpeechTemplate.WelcomeNew;
        sb.Append(welcome);

        // on the 5th and 10th launch, ask for a review instead of the standard intro
        if (Core.State.UserState.NumTimesPlayed == 5 || Core.State.UserState.NumTimesPlayed == 10)
        {
            sb.Append(AskForReview());
        }
        else
        {
            sb.Append(SpeechTemplate.Intro);
        }

        // increment play count
        Core.State.UserState.NumTimesPlayed++;
        Core.Response.SetSpeech(false, false, sb.ToString());

        // set apl response if the device has a display
        if (Core.Device.HasScreen)
        {
            Core.Logger.Write("LaunchRequestHandler.HandleRequest()", "Generating visual response for display interface");
            if (Core.Device.IsRound)
            {
                Core.Response.AddAplPage("SpotAplMenu", AplTemplate.GetSpotMenu()); // spot device
            }
            else
            {
                Core.Response.AddAplPage("ShowAplMenu", AplTemplate.GetShowMenu()); // show device
            }
        }

        return Task.CompletedTask;
    });
}
/// <summary>
/// Handles an Alexa IntentRequest by routing it to the built-in or custom
/// intent handler based on the main intent type. Unrecognized intents produce
/// a "did not understand" reprompt; the session is ended once the user has
/// been reprompted more than five times.
/// </summary>
public async Task HandleRequest()
{
    await RequestProcessHelper.ProcessRequest("IntentRequestHandler.HandleRequest()", "Intent Request", async () =>
    {
        // get main intent type
        string mainIntentType = Core.Request.GetMainIntentType();

        // direct intent into a matching handler
        switch (mainIntentType)
        {
            // handle built in intent
            case AlexaRequestType.BuiltInIntent:
                BuiltInIntentHandler builtInIntentHandler = new BuiltInIntentHandler();
                await builtInIntentHandler.HandleRequest();
                break;

            // handle custom intent
            case AlexaRequestType.CustomIntent:
                CustomIntentHandler customIntentHandler = new CustomIntentHandler();
                await customIntentHandler.HandleRequest();
                break;

            // handle unknown intent
            default:
                // end the session after more than five consecutive reprompts
                bool endSession = Core.State.UserState.NumReprompt > 5;
                Core.Logger.Write("IntentRequestHandler.HandleRequest()", "Intent was not recognized, directing into the default case handler");
                Core.Response.SetSpeech(false, endSession, SpeechTemplate.NoUnderstand);
                Core.State.UserState.Stage = Stage.Menu;
                Core.State.UserState.NumReprompt++;
                if (endSession)
                {
                    Core.State.UserState.NumReprompt = 0; // reset the counter for the next session
                }
                break;
        }
    });
}
/// <summary>
/// Handles the custom media-selection intent. Resolves the spoken slot value
/// (entity resolution when available, raw value otherwise), then either lists
/// the recordings ("List"), plays a random one ("Random" / "Shuffle"), or
/// searches for a recording whose title matches the spoken value.
/// </summary>
public async Task HandleRequest()
{
    await RequestProcessHelper.ProcessRequest("CustomIntentHandler.HandleRequest()", "Custom Intent", async () =>
    {
        IntentRequest request = Core.Request.GetRequest().Request as IntentRequest;
        ProgressiveResponse progressiveResponse = new ProgressiveResponse(Core.Request.GetRequest());

        // get slot resolution
        Slot slot = request.Intent.Slots[AlexaRequestType.CustomSlot];
        string rawValue = slot.Value;
        Core.Logger.Write("CustomIntentHandler.HandleRequest()", $"Sub intent raw slot value: {rawValue}");

        // Prefer the first entity-resolution match, falling back to the raw value.
        // The null-conditional chain replaces a bare catch-all that previously
        // swallowed every exception just to detect a missing resolution.
        string slotValue = slot.Resolution?.Authorities?.FirstOrDefault()?.Values?.FirstOrDefault()?.Value?.Name ?? rawValue;
        Core.Logger.Write("CustomIntentHandler.HandleRequest()", $"Sub intent processed slot value: {slotValue}");

        // if user says list
        // (string.Equals is null-safe; the old slotValue.Equals(...) threw NRE when
        // the slot carried no value at all)
        if (string.Equals(slotValue, "List"))
        {
            Core.State.UserState.Stage = Stage.Menu;
            Core.Response.SetSpeech(false, false, SpeechTemplate.ListItems);
        }
        // if user says random or shuffle
        else if (string.Equals(slotValue, "Random") || string.Equals(slotValue, "Shuffle"))
        {
            Random mediaRandom = new Random();
            List<MediaItem> mediaItems = MediaItems.GetMediaItems();
            MediaItem currentMediaItem = mediaItems[mediaRandom.Next(mediaItems.Count)];

            // update database context
            Core.State.UserState.Shuffle = true;
            Core.State.UserState.OffsetInMS = 0;
            Core.State.UserState.Index = currentMediaItem.Id;
            Core.State.UserState.Token = currentMediaItem.FileName;

            await progressiveResponse.SendSpeech($"Playing a random recording. {currentMediaItem.Title}. To call me back, say cancel. ");
            if (Core.Device.HasScreen)
            {
                Core.State.UserState.Stage = Stage.Video;
                Core.Response.AddVideoApp(currentMediaItem.VideoSource, currentMediaItem.Title, currentMediaItem.FileName);
            }
            else
            {
                Core.State.UserState.Stage = Stage.Audio;
                Core.Response.AddAudioPlayer(PlayBehavior.ReplaceAll, currentMediaItem.AudioSource, currentMediaItem.FileName);
            }
        }
        // otherwise treat the value as a title search; an empty/missing value is ignored
        else if (!string.IsNullOrEmpty(slotValue))
        {
            List<MediaItem> mediaItems = MediaItems.GetMediaItems();
            List<MediaItem> selectedMedia = ItemSelectHelper.SelectItems(mediaItems, "Title", slotValue); // query possible entries
            if (selectedMedia.Any())
            {
                if (selectedMedia.Count > 1)
                {
                    // multiple matches: read the candidates back and ask the user to pick one
                    StringBuilder sb = new StringBuilder();
                    sb.Append($"I found {selectedMedia.Count} recordings matching {slotValue}. ");
                    Core.Logger.Write("CustomIntentHandler.HandleRequest()", $"{selectedMedia.Count} matching items found for {slotValue}");
                    foreach (MediaItem mediaItem in selectedMedia)
                    {
                        sb.Append(mediaItem.Title + ". ");
                    }
                    sb.Append("Please choose one of the following. ");
                    Core.Response.SetSpeech(false, false, sb.ToString());
                }
                else
                {
                    // single match: play it
                    MediaItem mediaItem = selectedMedia[0];
                    Core.Logger.Write("CustomIntentHandler.HandleRequest()", $"Item requested: {mediaItem.Title}");
                    Core.State.UserState.Index = mediaItem.Id;
                    Core.State.UserState.Token = mediaItem.FileName;

                    // source type will differ based on the display interface
                    string url = Core.Device.HasScreen ? mediaItem.VideoSource : mediaItem.AudioSource;
                    Core.Logger.Write("CustomIntentHandler.HandleRequest()", $"File source url: {url}");

                    await progressiveResponse.SendSpeech($"Playing {mediaItem.Title}. ");
                    if (Core.Device.HasScreen)
                    {
                        Core.State.UserState.Stage = Stage.Video;
                        Core.Response.AddVideoApp(url, mediaItem.Title, mediaItem.FileName);
                    }
                    else
                    {
                        Core.State.UserState.Stage = Stage.Audio;
                        Core.Response.AddAudioPlayer(PlayBehavior.ReplaceAll, url, Core.State.UserState.Token);
                    }
                }
            }
            else
            {
                // no recording matched the spoken value
                Core.State.UserState.Stage = Stage.Menu;
                Core.Response.SetSpeech(false, false, $"I could not find any recording matching {slotValue}. Try saying list, or shuffle. ");
            }
        }
    });
}
/// <summary>
/// Handles Amazon built-in intents (help, stop, pause, cancel, next, previous,
/// start over, resume, repeat) and drives the audio player accordingly.
/// Unrecognized intents produce a reprompt; the session is ended once the user
/// has been reprompted more than five times.
/// </summary>
public async Task HandleRequest()
{
    await RequestProcessHelper.ProcessRequest("BuiltInIntentHandler.HandleRequest()", "Built In Intent", async () =>
    {
        // get the most recent media index & sub intent
        // NOTE(review): Index is read unguarded; if it ever exceeds the media list
        // this indexer throws — confirm Index is always kept in range by the writers.
        int currentMediaIndex = Core.State.UserState.Index;
        string subIntentType = Core.Request.GetSubIntentType();
        Core.Logger.Write("BuiltInIntentHandler.HandleRequest()", $"Current media index: {currentMediaIndex}");
        Core.Logger.Write("BuiltInIntentHandler.HandleRequest()", $"User is at: {Core.State.UserState.Stage}");

        // get media item components
        List<MediaItem> mediaItems = MediaItems.GetMediaItems();
        MediaItem currentMediaItem = mediaItems[currentMediaIndex];

        // set response components
        Random random = new Random();
        ProgressiveResponse progressiveResponse = new ProgressiveResponse(Core.Request.GetRequest());
        List<string> stopSpeeches = new List<string> { SpeechTemplate.GoodBye, SpeechTemplate.SeeYouSoon, SpeechTemplate.SeeYouNextTime };

        // direct sub intent into a matching handler
        switch (subIntentType)
        {
#warning check if help is accessible from audioplayer and what happens
            // handle help intent
            case AlexaRequestType.BuiltInHelp:
                Core.State.UserState.Stage = Stage.Menu;
                Core.Response.SetSpeech(false, false, SpeechTemplate.Help);
                break;

            // handle stop intent: stop playback and say goodbye
            case AlexaRequestType.BuiltInStop:
                Core.State.UserState.Stage = Stage.Menu;
                Core.Response.StopAudioPlayer();
                Core.Response.SetSpeech(false, true, stopSpeeches[random.Next(stopSpeeches.Count)]);
                break;

            // handle pause intent: persist token and offset so playback can resume later
            case AlexaRequestType.BuiltInPause:
                Core.State.UserState.Stage = Stage.Menu;
                Core.State.UserState.Token = Core.Request.GetRequest().Context.AudioPlayer.Token;
                Core.State.UserState.OffsetInMS = Convert.ToInt32(Core.Request.GetRequest().Context.AudioPlayer.OffsetInMilliseconds);
                Core.Response.StopAudioPlayer();
                Core.Response.SetSpeech(false, true, stopSpeeches[random.Next(stopSpeeches.Count)]);
                break;

            // handle cancel intent: only meaningful while audio is playing
            case AlexaRequestType.BuiltInCancel:
                if (Core.State.UserState.Stage.Equals(Stage.Audio))
                {
                    Core.State.UserState.Stage = Stage.Menu;
                    Core.State.UserState.Token = Core.Request.GetRequest().Context.AudioPlayer.Token;
                    Core.State.UserState.OffsetInMS = Convert.ToInt32(Core.Request.GetRequest().Context.AudioPlayer.OffsetInMilliseconds);
                    Core.Response.StopAudioPlayer();
                    Core.Response.SetSpeech(false, false, SpeechTemplate.Intro);
                }
                else
                {
                    Core.Response.SetSpeech(false, false, SpeechTemplate.NoUnderstand);
                }
                break;

            // handle next intent
            case AlexaRequestType.BuiltInNext:
                if (Core.State.UserState.Stage.Equals(Stage.Audio))
                {
                    MediaItem nextMediaItem = mediaItems.Find(m => m.Id.Equals(currentMediaIndex + 1));
                    if (nextMediaItem != null)
                    {
                        await progressiveResponse.SendSpeech($"Playing next recording, {nextMediaItem.Title}. ");
                        Core.Logger.Write("BuiltInIntentHandler.HandleRequest()", $"Next media item name: {nextMediaItem.Title}");
                        Core.State.UserState.OffsetInMS = 0;
                        Core.State.UserState.Stage = Stage.Audio;
                        Core.State.UserState.Index = nextMediaItem.Id;
                        Core.State.UserState.Token = nextMediaItem.FileName;
                        Core.Response.AddAudioPlayer(PlayBehavior.ReplaceAll, nextMediaItem.AudioSource, nextMediaItem.FileName);
                    }
                    else
                    {
                        Core.Logger.Write("BuiltInIntentHandler.HandleRequest()", "Next media item is not available");
                        Core.Response.SetSpeech(false, false, SpeechTemplate.NoNext);
                        Core.State.UserState.Stage = Stage.Pause;
                    }
                }
                else
                {
                    Core.Response.SetSpeech(false, false, SpeechTemplate.NoUnderstand);
                }
                break;

            // handle previous intent
            case AlexaRequestType.BuiltInPrevious:
                if (Core.State.UserState.Stage.Equals(Stage.Audio))
                {
                    MediaItem previousMediaItem = mediaItems.Find(m => m.Id.Equals(currentMediaIndex - 1));
                    if (previousMediaItem != null)
                    {
                        await progressiveResponse.SendSpeech($"Playing previous recording, {previousMediaItem.Title}. ");
                        Core.Logger.Write("BuiltInIntentHandler.HandleRequest()", $"Previous media item name: {previousMediaItem.Title}");
                        Core.State.UserState.OffsetInMS = 0;
                        Core.State.UserState.Stage = Stage.Audio;
                        Core.State.UserState.Index = previousMediaItem.Id;
                        Core.State.UserState.Token = previousMediaItem.FileName;
                        Core.Response.AddAudioPlayer(PlayBehavior.ReplaceAll, previousMediaItem.AudioSource, previousMediaItem.FileName);
                    }
                    else
                    {
                        Core.Logger.Write("BuiltInIntentHandler.HandleRequest()", "Previous media item is not available");
                        Core.Response.SetSpeech(false, false, SpeechTemplate.NoPrevious);
                        Core.State.UserState.Stage = Stage.Pause;
                    }
                }
                else
                {
                    Core.Response.SetSpeech(false, false, SpeechTemplate.NoUnderstand);
                }
                break;

            // handle start over intent: restart from the first recording
            case AlexaRequestType.BuiltInStartOver:
                if (Core.State.UserState.Stage.Equals(Stage.Audio))
                {
                    await progressiveResponse.SendSpeech($"Playing from the first recording. {mediaItems[0].Title}. ");
                    Core.State.UserState.OffsetInMS = 0;
                    Core.State.UserState.Stage = Stage.Audio;
                    // NOTE(review): this branch (and repeat below) uses Title as the
                    // token while next/previous use FileName — confirm which one the
                    // audio player request handler expects when matching tokens.
                    Core.State.UserState.Token = mediaItems[0].Title;
                    Core.Response.AddAudioPlayer(PlayBehavior.ReplaceAll, mediaItems[0].AudioSource, mediaItems[0].Title, 0);
                }
                else
                {
                    Core.Response.SetSpeech(false, false, SpeechTemplate.NoUnderstand);
                }
                break;

            // handle resume intent: continue from the saved offset, re-enqueueing any
            // previously enqueued follow-up track
            case AlexaRequestType.BuiltInResume:
                if (Core.State.UserState.Stage.Equals(Stage.Pause))
                {
                    Core.State.UserState.Stage = Stage.Audio;
                    Core.Response.AddAudioPlayer(PlayBehavior.ReplaceAll, currentMediaItem.AudioSource, Core.State.UserState.Token, Core.State.UserState.OffsetInMS);
                    if (Core.State.UserState.EnqueuedToken != null)
                    {
                        // NOTE(review): currentMediaIndex + 1 is indexed unguarded; if the
                        // current item is the last one this throws — confirm EnqueuedToken
                        // is never set while the last track is playing.
                        Core.Response.AddDirective(new AudioPlayerPlayDirective()
                        {
                            PlayBehavior = PlayBehavior.Enqueue,
                            AudioItem = new AudioItem()
                            {
                                Stream = new AudioItemStream()
                                {
                                    OffsetInMilliseconds = 0,
                                    Url = mediaItems[currentMediaIndex + 1].AudioSource,
                                    Token = mediaItems[currentMediaIndex + 1].Title,
                                    ExpectedPreviousToken = Core.State.UserState.Token
                                }
                            }
                        });
                    }
                }
                else
                {
                    Core.Response.SetSpeech(false, false, SpeechTemplate.NoUnderstand);
                }
                break;

            // handle repeat intent: restart the current recording from the beginning
            case AlexaRequestType.BuiltInRepeat:
                if (Core.State.UserState.Stage.Equals(Stage.Audio))
                {
                    await progressiveResponse.SendSpeech($"Repeating {currentMediaItem.Title}");
                    Core.State.UserState.OffsetInMS = 0;
                    Core.State.UserState.Stage = Stage.Audio;
                    Core.State.UserState.Token = currentMediaItem.Title;
                    Core.Response.AddAudioPlayer(PlayBehavior.ReplaceAll, currentMediaItem.AudioSource, currentMediaItem.Title, 0);
                }
                else
                {
                    Core.Response.SetSpeech(false, false, SpeechTemplate.Intro);
                }
                break;

            // handle unknown intent
            default:
                // end the session after more than five consecutive reprompts
                bool endSession = Core.State.UserState.NumReprompt > 5;
                Core.Logger.Write("BuiltInIntentHandler.HandleRequest()", "Intent was not recognized, directing into the default case handler");
                Core.Response.SetSpeech(false, endSession, SpeechTemplate.NoUnderstand);
                Core.State.UserState.Stage = Stage.Menu;
                Core.State.UserState.NumReprompt++;
                if (endSession)
                {
                    Core.State.UserState.NumReprompt = 0;
                }
                break;
        }
    });
}
/// <summary>
/// Handles AudioPlayer lifecycle requests (started, stopped, nearly finished,
/// finished, failed): persists the playback position, enqueues the next track
/// with wraparound, and restarts from the first recording on failure.
/// </summary>
public async Task HandleRequest()
{
    await RequestProcessHelper.ProcessRequest("AudioPlayerRequestHandler.HandleRequest()", "Audio Player Request", async () =>
    {
        AudioPlayerRequest request = Core.Request.GetRequest().Request as AudioPlayerRequest;
        List<MediaItem> mediaItems = MediaItems.GetMediaItems();

        switch (Core.Request.GetSubRequestType())
        {
            // handle playback started
            case AlexaRequestType.PlaybackStarted:
                Core.Response.ClearAudioPlayer();
                break;

            // handle playback stopped: persist token and offset so resume can pick up
            case AlexaRequestType.PlaybackStopped:
                Core.State.UserState.Token = request.Token;
                Core.State.UserState.EnqueuedToken = request.EnqueuedToken;
                Core.State.UserState.OffsetInMS = Convert.ToInt32(request.OffsetInMilliseconds);
                break;

            // handle playback nearly finished: enqueue the next track, wrapping to the
            // first one after the last
            case AlexaRequestType.PlaybackNearlyFinished:
                if (!request.HasEnqueuedItem && mediaItems.Count > 0)
                {
                    // FindIndex returns -1 when the token matches nothing; the modulo
                    // arithmetic maps that to track 0. The previous logic re-enqueued
                    // track 0 after itself and indexed past the end of the list after
                    // the last track.
                    int index = mediaItems.FindIndex(m => m.Title == request.Token);
                    int nextIndex = (index + 1) % mediaItems.Count;
                    Core.State.UserState.Token = request.Token;
                    Core.State.UserState.EnqueuedToken = mediaItems[nextIndex].Title;
                    Core.Response.AddAudioPlayer(PlayBehavior.Enqueue, mediaItems[nextIndex].AudioSource, Core.State.UserState.EnqueuedToken, Core.State.UserState.Token, 0);
                }
                break;

            // handle playback finished: advance to the enqueued track if there is one
            case AlexaRequestType.PlaybackFinished:
                if (Core.State.UserState.EnqueuedToken != null)
                {
                    int index = mediaItems.FindIndex(m => m.Title == Core.State.UserState.EnqueuedToken);
                    if (index >= 0)
                    {
                        Core.State.UserState.Index = index;
                        Core.State.UserState.Token = mediaItems[index].Title;
                        Core.Response.AddAudioPlayer(PlayBehavior.ReplaceAll, mediaItems[index].AudioSource, Core.State.UserState.Token);
                    }
                    else
                    {
                        // Enqueued token no longer matches any item; the previous code
                        // indexed mediaItems[-1] and threw. Stop cleanly instead.
                        Core.Response.ClearAudioPlayer();
                    }
                }
                else
                {
                    Core.Response.ClearAudioPlayer();
                }
                break;

            // handle playback failed: reset to the first recording and restart
            case AlexaRequestType.PlaybackFailed:
                Core.State.UserState.Index = 0;
                Core.State.UserState.Stage = Stage.Menu;
                Core.State.UserState.Token = mediaItems.FirstOrDefault().Title;
                Core.Response.AddAudioPlayer(PlayBehavior.ReplaceAll, mediaItems.FirstOrDefault().AudioSource, Core.State.UserState.Token);
                break;

            // handle unknown request
            default:
                break;
        }
    });
}