/// <summary>
        /// Responds when we navigate to this page.
        /// </summary>
        /// <param name="e">Event data</param>
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            //get device settings here
            await UpdateDeviceSettings();

            this.vp = await Infrastructure.VoicePackageService.VoicePlayerFactory();

            // VerbaliseSystemInformation
            if (Settings.GetBool(DeviceSettingKeys.VerbaliseSystemInformationOnBootKey))
            {
                LogStatusMessage($"The IP address is: {GetLocalIp()}", StatusSeverity.Info, true);
                LogStatusMessage($"The exhibit is {Settings.GetString(DeviceSettingKeys.DeviceExhibitKey)}", StatusSeverity.Info, true);
                LogStatusMessage($"The device label is {Settings.GetString(DeviceSettingKeys.DeviceLabelKey)}", StatusSeverity.Info, true);
            }

            // Only check microphone enabled and create speech objects if we're running in interactive (QnA) mode
            if (Settings.GetBool(DeviceSettingKeys.InteractiveKey))
            {
                // Prompt for permission to access the microphone. This request will only happen
                // once, it will not re-prompt if the user rejects the permission.
                if (!await AudioCapturePermissions.RequestMicrophonePermission())
                {
                    Say(AppSettings.GetString("MicrophonePrivacyDeclined"));
                }
                else
                {
                    try
                    {
                        Debug.WriteLine($"Initialising speech recognizer"); //This can fail randomly
                        SpeechRecognizer = new SpeechRecognizer();
                        SpeechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromMilliseconds(NumberMilliSecsForSpeechRecognitionTimeout);
                        await SpeechRecognizer.CompileConstraintsAsync();

                        Debug.WriteLine($"Speech recognizer initialised");
                    }
                    catch (Exception exp)
                    {
                        Say($"There was an error initialising the speech recognizer: {exp.Message}");
                    }
                }
            }

            if (faceTracker == null)
            {
                faceTracker = await FaceTracker.CreateAsync();

                ChangeDetectionState(DetectionStates.Startup);
            }
        }
        private async void ProcessCurrentStateAsync(ThreadPoolTimer timer)
        {
            try
            {
                Debug.WriteLine($"State machine is: {CurrentState.State}");

                switch (CurrentState.State)
                {
                case DetectionStates.Idle:
                    break;

                case DetectionStates.Startup:

                    break;

                case DetectionStates.OnBoarding:
                    var result = await ProcessCurrentVideoFrameForQRCodeAsync();

                    //if we now have a GUID, store it and then change the state
                    if (!string.IsNullOrEmpty(result))
                    {
                        //store the device id guid
                        Settings.Set(DeviceSettingKeys.DeviceIdKey, result);
                        LogStatusMessage($"Found a QR code with device id {result}.", StatusSeverity.Info, true);

                        // Get device settings
                        await this.UpdateDeviceSettings();

                        // Update voice package
                        LogStatusMessage("Downloading the voice package.", StatusSeverity.Info, true);
                        await Infrastructure.VoicePackageService.DownloadUnpackVoicePackage(Settings.GetString(DeviceSettingKeys.VoicePackageUrlKey));

                        LogStatusMessage("Got the voice package.", StatusSeverity.Info, true);

                        this.vp = await Infrastructure.VoicePackageService.VoicePlayerFactory(Settings.GetString(DeviceSettingKeys.VoicePackageUrlKey));

                        ChangeDetectionState(DetectionStates.WaitingForFaces);
                    }
                    break;

                case DetectionStates.WaitingForFaces:
                    CurrentState.ApiRequestParameters = await ProcessCurrentVideoFrameAsync();

                    if (CurrentState.ApiRequestParameters != null)
                    {
                        ChangeDetectionState(DetectionStates.FaceDetectedOnDevice);
                    }
                    break;

                case DetectionStates.FaceDetectedOnDevice:

                    //Should we play? MORE DESC REQUIRED
                    if (CurrentState.LastImageApiPush.AddMilliseconds(ApiIntervalMs) < DateTimeOffset.UtcNow &&
                        CurrentState.TimeVideoWasStopped.AddMilliseconds(NumberMillSecsBeforeWePlayAgain) < DateTimeOffset.UtcNow)
                    {
                        //ThreadPoolTimer.CreateTimer(
                        //    new TimerElapsedHandler(HelloAudioHandler),
                        //    TimeSpan.FromMilliseconds(NumberMilliSecsToWaitForHello));
                        if (Settings.GetBool(DeviceSettingKeys.InteractiveKey))
                        {
                            // Check we're not already running a speech recognition
                            if (!IsSpeechRecognitionInProgress)
                            {
                                // Kick off new speech recognizer
                                await SayAsync("I'm listening, talk to me");
                            }
                        }
                        else
                        {
                            HelloAudio();
                        }

                        CurrentState.LastImageApiPush = DateTimeOffset.UtcNow;
                        CurrentState.FacesFoundByApi  = await PostImageToApiAsync(CurrentState.ApiRequestParameters.Image);

                        LogStatusMessage($"Sending faces to api", StatusSeverity.Info, false);

                        ChangeDetectionState(DetectionStates.ApiResponseReceived);
                    }

                    break;

                case DetectionStates.ApiResponseReceived:

                    if (CurrentState.FacesFoundByApi != null && CurrentState.FacesFoundByApi.Any())
                    {
                        LogStatusMessage("Face(s) detected", StatusSeverity.Info, false);
                        ChangeDetectionState(DetectionStates.InterpretingApiResults);
                        CurrentState.FacesStillPresent = true;
                        break;
                    }
                    //ChangeDetectionState(DetectionStates.WaitingForFaces);
                    ChangeDetectionState(DetectionStates.WaitingForFacesToDisappear);
                    break;

                case DetectionStates.InterpretingApiResults:
                    // We have faces and data, so decide what to do here (play a sound etc).
                    // You'd probably kick this off in a background thread and track it by putting a
                    // reference into the CurrentState object (new property).

                    //play media if we are not currently playing
                    CurrentState.FacesStillPresent = true;

                    if (!vp.IsCurrentlyPlaying)
                    {
                        // For time being use the interactive flag to determine whether to play narration
                        // If this is played here it will be detected by the speech recognition
                        if (!Settings.GetBool(DeviceSettingKeys.InteractiveKey))
                        {
                            LogStatusMessage("Starting playlist", StatusSeverity.Info);
                            var play = Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, async() =>
                            {
                                //TODO This needs
                                vp.Play(CurrentState);
                            });
                        }
                    }

                    // Check here if the media has finished playing or the people have walked away.
                    //ChangeDetectionState(DetectionStates.WaitingForFaces);
                    ChangeDetectionState(DetectionStates.WaitingForFacesToDisappear);

                    break;

                //Some faces are on the device and the api has been called, and maybe the audio
                //  is now playing
                case DetectionStates.WaitingForFacesToDisappear:

                    CurrentState.FacesStillPresent = await AreFacesStillPresent();

                    LogStatusMessage($"Faces present: {CurrentState.FacesStillPresent} Speech Recognition active: {IsSpeechRecognitionInProgress}", StatusSeverity.Info, false);

                    //we dont have a face
                    if (!CurrentState.FacesStillPresent)
                    {
                        //TODO Refactor this out.
                        await Task.Delay(NumberMilliSecsForFacesToDisappear)
                        .ContinueWith((t =>
                        {
                            CurrentState.FacesStillPresent = AreFacesStillPresent().Result;
                            if (!CurrentState.FacesStillPresent)
                            {
                                LogStatusMessage($"Faces have gone for a few or more secs, stop the audio playback", StatusSeverity.Info, false);
                                ChangeDetectionState(DetectionStates.WaitingForFaces);
                                vp.Stop();
                                CurrentState.TimeVideoWasStopped = DateTimeOffset.UtcNow;
                                return;
                            }
                        }
                                       ));
                    }
                    break;

                default:
                    ChangeDetectionState(DetectionStates.Idle);
                    break;
                }
            }
            catch (Exception ex)
            {
                LogStatusMessage("Unable to process current frame. " + ex.ToString(), StatusSeverity.Error, false);
            }
            finally
            {
                // Check we're not already running a speech recognition
                if (!IsSpeechRecognitionInProgress)
                {
                    RunTimer();
                }
            }
        }