Speech Response to an audio file
Example 1
        public static string Process(byte[] audioRequest)
        {
            byte[] buffer = Deserialize(audioRequest);

            // Prepend a WAV header so the raw audio can be sent to the speech service.
            buffer = AddWavHeader(buffer);
            string text = GSpeechToText(buffer);

            if (text.Length > 0)
            {
                // The service replies with one JSON document per line and the transcript is
                // expected on the second line, so make sure that line exists before indexing it.
                string[] requestPossibilities = text.Split('\n');
                if (requestPossibilities.Length > 1)
                {
                    string         requestJson = requestPossibilities[1];
                    SpeechResponse m           = JsonConvert.DeserializeObject <SpeechResponse>(requestJson);
                    if (m != null)
                    {
                        return(m.Result.First().Alternative.First().Transcript);
                    }
                }
            }
            return("");
        }
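Example 1 only works if the SpeechResponse type mirrors the JSON returned by the speech endpoint. Below is a minimal sketch of such a model, assuming Json.NET and the {"result": [{"alternative": [{"transcript": ...}]}]} shape implied by the property chain in the example; the class and property names are illustrative, not taken from the original project.

        // Hypothetical model classes for JsonConvert.DeserializeObject<SpeechResponse>()
        // in Example 1; names and attributes are assumptions, not the original code.
        using System.Collections.Generic;
        using Newtonsoft.Json;

        public class SpeechResponse
        {
            [JsonProperty("result")]
            public List<SpeechResult> Result { get; set; }
        }

        public class SpeechResult
        {
            [JsonProperty("alternative")]
            public List<SpeechAlternative> Alternative { get; set; }
        }

        public class SpeechAlternative
        {
            [JsonProperty("transcript")]
            public string Transcript { get; set; }

            [JsonProperty("confidence")]
            public double? Confidence { get; set; }
        }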
Example 2
        private async void Transcript(object parameter)
        {
            try
            {
                IsEditableEnabled = false;
                TranscriptMessage = String.Empty;
                ErrorMessage      = String.Empty;
                SpeechResponse response = null;

                if (File != null)
                {
                    response = await _speechService.Send(File);
                }

                TranscriptMessage = response != null ? response.TranscriptText : String.Empty;
            }
            catch (Exception ex)
            {
                HandleException(ex);
            }
            finally
            {
                IsEditableEnabled = true;
            }
        }
Example 3
    /// <summary>
    /// Displays the result onto the page
    /// </summary>
    /// <param name="speechResponse">SpeechResponse received from api</param>
    private void DisplayResult(SpeechResponse speechResponse)
    {
        statusPanel.Visible = true;
        lblStatus.Text      = speechResponse.Recognition.Status;
        lblResponseId.Text  = speechResponse.Recognition.Responseid;
        foreach (NBest nbest in speechResponse.Recognition.NBest)
        {
            lblHypothesis.Text = nbest.Hypothesis;
            lblLanguageId.Text = nbest.LanguageId;
            lblResultText.Text = nbest.ResultText;
            lblGrade.Text      = nbest.Grade;
            lblConfidence.Text = nbest.Confidence.ToString();
            string words = "[ ";
            if (null != nbest.Words)
            {
                foreach (string word in nbest.Words)
                {
                    words += "\"" + word + "\", ";
                }
                words = words.Substring(0, words.LastIndexOf(","));
                words = words + " ]";
            }

            lblWords.Text = nbest.Words != null ? words : string.Empty;

            if (null != nbest.WordScores)
            {
                lblWordScores.Text = "[ " + string.Join(", ", nbest.WordScores.ToArray()) + " ]";
            }
        }
    }
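The bracketed word list above can also be built with string.Join instead of manual trimming; the snippet below is an equivalent alternative (it assumes a using System.Linq; directive and that nbest.Words enumerates strings), shown for comparison rather than as part of the original sample.

        // Equivalent, more compact formatting of the word list (requires System.Linq).
        string words = nbest.Words != null && nbest.Words.Any()
            ? "[ " + string.Join(", ", nbest.Words.Select(w => "\"" + w + "\"")) + " ]"
            : string.Empty;
        lblWords.Text = words;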
Example 4
        private static SpeechResponse IgnoreResponse(ProcedureBase procedure)
        {
            var resp = new SpeechResponse {
                res = "ignored"
            };

            if (procedure != null)
            {
                resp.procedure = procedure.GetType().Name;
            }
            return(resp);
        }
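For context, the SpeechResponse used here (and in Example 12 below) only needs the fields assigned in those methods. A minimal sketch inferred from that usage; the real class may carry more members.

        // Minimal shape inferred from Examples 4 and 12; illustrative only.
        public class SpeechResponse
        {
            public string res { get; set; }       // "ignored" or "executed"
            public string procedure { get; set; } // type name of the matched procedure, if any
            public int index { get; set; }        // echoes data.result_index back to the client
        }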
Example 5
    /// <summary>
    /// Method that calls the SpeechToTextCustom method of RequestFactory when the user clicks the submit button
    /// </summary>
    /// <param name="sender">sender that invoked this event</param>
    /// <param name="e">event args of the button</param>
    protected void SpeechToTextButton_Click(object sender, EventArgs e)
    {
        try
        {
            string fileToConvert = this.SpeechFilesDir + "/" + ddlAudioFile.SelectedValue;

            XSpeechCustomContext speechContext = XSpeechCustomContext.GenericHints;
            string contentLanguage             = string.Empty;

            switch (ddlSpeechContext.SelectedValue)
            {
            case "GenericHints":
                speechContext = XSpeechCustomContext.GenericHints;
                break;

            case "GrammarList":
                speechContext = XSpeechCustomContext.GrammarList;
                break;
            }

            string dictionaryFile = Request.MapPath(this.dictionaryFilePath);
            string grammarFile    = Request.MapPath(this.grammarFilePath);

            SpeechResponse response = this.requestFactory.SpeechToTextCustom(fileToConvert, dictionaryFile, grammarFile, speechContext);

            if (null != response)
            {
                resultsPanel.Visible = true;
                this.DrawPanelForSuccess(statusPanel, "Response Parameters listed below");
                this.DisplayResult(response);
            }
        }
        catch (InvalidScopeException invalidscope)
        {
            this.DrawPanelForFailure(statusPanel, invalidscope.Message);
        }
        catch (ArgumentException argex)
        {
            this.DrawPanelForFailure(statusPanel, argex.Message);
        }
        catch (InvalidResponseException ie)
        {
            this.DrawPanelForFailure(statusPanel, ie.Body);
        }
        catch (Exception ex)
        {
            this.DrawPanelForFailure(statusPanel, ex.Message);
        }
    }
Example 6
        private async void btnRecord_Click(object sender, RoutedEventArgs e)
        {
            if (!_isRecording)
            {
                _isRecording      = true;
                btnRecord.Content = "Go";
                _soundRecorder.StartRecording();
            }
            else
            {
                btnSendSMS.IsEnabled      = false;
                statusProgress.Visibility = Visibility.Visible;
                _isRecording               = false;
                btnRecord.Content          = "Converting...";
                btnRecord.IsEnabled        = false;
                txtSpeechOutput.IsReadOnly = true;
                _soundRecorder.StopRecording();

                //clientId = "your client id here";
                //clientSecret = "your client secret";
                try
                {
                    ContentInfo speechContentInfo = new ContentInfo();
                    speechContentInfo.Content = await _soundRecorder.GetBytes();

                    speechContentInfo.Name = _soundRecorder.FilePath;

                    SpeechService  speechService  = new SpeechService(new SDK.Entities.AttServiceSettings(clientId, clientSecret, new Uri(uriString)));
                    SpeechResponse speechResponse = await speechService.SpeechToText(speechContentInfo);

                    txtSpeechOutput.IsReadOnly = false;
                    if (null != speechResponse)
                    {
                        txtSpeechOutput.Text = speechResponse.GetTranscription();
                    }
                }
                catch (Exception ex)
                {
                    MessageBox.Show(ex.Message);
                }

                btnSendSMS.IsEnabled      = true;
                statusProgress.Visibility = Visibility.Collapsed;
                btnRecord.Content         = "Speak";
                btnRecord.IsEnabled       = true;
            }
        }
Example 7
        /// <summary>
        /// The main conversion API. Calling this function sends an audio file to the AT&amp;T service for transcription to text.
        /// </summary>
        /// <param name="attachment">Audio filename as it is stored on disk.</param>
        /// <param name="speechContext">Speech content.</param>
        /// <returns>Instance of <see cref="SpeechResponse"/> with sent speech response information.</returns>
        /// <exception cref="System.ArgumentNullException">Throws an exception when attachment is null.</exception>
        public async Task <SpeechResponse> SpeechToText(StorageFile attachment, XSpeechContext speechContext = XSpeechContext.Generic)
        {
            Argument.ExpectNotNull(() => attachment);

            byte[] audioFileBytes = await BinaryFileUtils.ReadAllBytes(attachment);

            var restEndPoint = new Uri(Settings.EndPoint, SendRelativeUrl);

            var    content     = new ByteArrayContent(audioFileBytes);
            string contentType = ContentTypeMapping.MapContentTypeFromExtension(attachment.FileType);

            content.Headers.ContentType = new MediaTypeHeaderValue(contentType);
            content.Headers.Add("X-SpeechContext", Enum.GetName(typeof(XSpeechContext), speechContext));

            string strResponse = await SendContentRequest(HttpMethod.Post, restEndPoint.ToString(), content);

            return(SpeechResponse.Parse(strResponse));
        }
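A hypothetical call site for the method above, assuming a WinRT file picker and that speechService is an instance of the class that defines SpeechToText; the handler and variable names are illustrative only.

        // Hypothetical usage sketch (WinRT); speechService is assumed to expose the
        // SpeechToText method defined above.
        private async void PickAndTranscribe_Click(object sender, RoutedEventArgs e)
        {
            var picker = new Windows.Storage.Pickers.FileOpenPicker();
            picker.FileTypeFilter.Add(".wav");
            picker.FileTypeFilter.Add(".amr");

            Windows.Storage.StorageFile file = await picker.PickSingleFileAsync();
            if (file != null)
            {
                SpeechResponse response = await speechService.SpeechToText(file, XSpeechContext.Generic);
                // Inspect response.Recognition.NBest as in the DisplayResult examples.
            }
        }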
Example 8
        private async void btnRecord_Click(object sender, RoutedEventArgs e)
        {
            if (!_isRecording)
            {
                _isRecording      = true;
                btnRecord.Content = "Stop";
                _soundRecorder.StartRecording();
            }
            else
            {
                statusProgress.Visibility = Visibility.Visible;
                _isRecording        = false;
                btnRecord.Content   = "Converting...";
                btnRecord.IsEnabled = false;
                _soundRecorder.StopRecording();

                clientId     = "your_att_app_key";
                clientSecret = "your_att_secret_key";
                uriString    = "https://api.att.com";
                try
                {
                    ContentInfo speechContentInfo = new ContentInfo();
                    speechContentInfo.Content = await _soundRecorder.GetBytes();

                    speechContentInfo.Name = _soundRecorder.FilePath;

                    SpeechService  speechService  = new SpeechService(new SDK.Entities.AttServiceSettings(clientId, clientSecret, new Uri(uriString)));
                    SpeechResponse speechResponse = await speechService.SpeechToText(speechContentInfo);

                    if (null != speechResponse)
                    {
                        txtSpeechOutput.Text = speechResponse.GetTranscription();
                    }
                }
                catch (Exception ex)
                {
                    txtSpeechOutput.Text = ex.Message;
                }

                statusProgress.Visibility = Visibility.Collapsed;
                btnRecord.Content         = "Start";
                btnRecord.IsEnabled       = true;
            }
        }
        /// <summary>
        /// Sends the audio file to the Speech service API and retrieves the recognition result.
        /// </summary>
        /// <param name="attachment">Audio file.</param>
        /// <returns>Recognition result.</returns>
        public async Task <SpeechResponse> Send(StorageFile attachment)
        {
            Argument.ExpectNotNull(() => attachment);

            var recognition = new StringBuilder();

            WinRTSDK.Entities.SpeechResponse taskResp = await _service.SpeechToText(attachment);

            foreach (NBest nBest in taskResp.Recognition.NBest)
            {
                foreach (var text in nBest.ResultText)
                {
                    recognition.Append(text);
                }
            }

            var response = new SpeechResponse(recognition.ToString());

            return(response);
        }
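The Send method above wraps the SDK result in an application-level SpeechResponse, which is what Example 2 reads through its TranscriptText property. A minimal sketch of that wrapper, inferred from those two usages and not taken from the original project:

        // Minimal wrapper inferred from the Send method above and from Example 2; illustrative only.
        public class SpeechResponse
        {
            public SpeechResponse(string transcriptText)
            {
                TranscriptText = transcriptText;
            }

            public string TranscriptText { get; private set; }
        }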
Example 10
    /// <summary>
    /// Displays the result onto the page
    /// </summary>
    /// <param name="speechResponse">SpeechResponse received from api</param>
    private void DisplayResult(SpeechResponse speechResponse)
    {
        lblResponseId.Text = speechResponse.Recognition.ResponseId;
        lblStatus.Text     = speechResponse.Recognition.Status;
        if ((speechResponse.Recognition.NBest != null) && (speechResponse.Recognition.NBest.Count > 0))
        {
            foreach (NBest nbest in speechResponse.Recognition.NBest)
            {
                lblHypothesis.Text = nbest.Hypothesis;
                lblLanguageId.Text = nbest.LanguageId;
                lblResultText.Text = nbest.ResultText;
                lblGrade.Text      = nbest.Grade;
                lblConfidence.Text = nbest.Confidence.ToString();

                string strText = "[";
                foreach (string word in nbest.Words)
                {
                    strText += "\"" + word + "\", ";
                }
                strText = strText.Substring(0, strText.LastIndexOf(","));
                strText = strText + "]";

                lblWords.Text = nbest.Words != null ? strText : string.Empty;

                lblWordScores.Text = "[" + string.Join(", ", nbest.WordScores.ToArray()) + "]";
            }
        }
        else
        {
            hypoRow.Visible       = false;
            langRow.Visible       = false;
            confRow.Visible       = false;
            gradeRow.Visible      = false;
            resultRow.Visible     = false;
            wordsRow.Visible      = false;
            wordScoresRow.Visible = false;
        }
    }
Example 11
    /// <summary>
    /// Displays the result onto the page
    /// </summary>
    /// <param name="speechResponse">SpeechResponse received from api</param>
    private void DisplayResult(SpeechResponse speechResponse)
    {
        lblResponseId.Text = speechResponse.Recognition.ResponseId;
        foreach (NBest nbest in speechResponse.Recognition.NBest)
        {
            lblHypothesis.Text = nbest.Hypothesis;
            lblLanguageId.Text = nbest.LanguageId;
            lblResultText.Text = nbest.ResultText;
            lblGrade.Text      = nbest.Grade;
            lblConfidence.Text = nbest.Confidence.ToString();

            string strText = "[";
            foreach (string word in nbest.Words)
            {
                strText += "\"" + word + "\", ";
            }
            strText = strText.Substring(0, strText.LastIndexOf(","));
            strText = strText + "]";

            lblWords.Text = nbest.Words != null ? strText : string.Empty;

            lblWordScores.Text = "[" + string.Join(", ", nbest.WordScores.ToArray()) + "]";
        }
    }
Example 12
        private static SpeechResponse HandleSpeechResult(SpeechMessageSchema data,
                                                         IWebSocketConnection clientConn)
        {
            if (_handler != null)
            {
                _handler.Update(data.results, data.result_index);
            }
            else
            {
                _handler = new MessageHandler(data.results, data.result_index);
            }

            bool          executed  = _handler.Execute(clientConn);
            ProcedureBase procedure = _handler.GetProcedure();

            if (!executed)
            {
                var resp = new SpeechResponse {
                    res = "ignored"
                };
                if (procedure != null)
                {
                    resp.procedure = procedure.GetType().Name;
                }
                return(resp);
            }

            if (procedure == null)
            {
                _handler = null;
            }
            Console.WriteLine("Command executed, sending response to client");
            return(new SpeechResponse {
                res = "executed", index = data.result_index
            });
        }
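The caller of HandleSpeechResult is not shown. A minimal dispatch sketch, assuming Newtonsoft.Json for serialization and a Fleck-style IWebSocketConnection that exposes a Send(string) method (both assumptions):

            // Hypothetical dispatch of the result back to the websocket client.
            SpeechResponse resp = HandleSpeechResult(data, clientConn);
            string json = Newtonsoft.Json.JsonConvert.SerializeObject(resp);
            clientConn.Send(json);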
Example 13
    /// <summary>
    /// Displays the result onto the page
    /// </summary>
    /// <param name="speechResponse">SpeechResponse received from api</param>
    private void DisplayResult(SpeechResponse speechResponse)
    {
        lblResponseId.Text = speechResponse.Recognition.ResponseId;
        foreach (NBest nbest in speechResponse.Recognition.NBest)
        {
            lblHypothesis.Text = nbest.Hypothesis;
            lblLanguageId.Text = nbest.LanguageId;
            lblResultText.Text = nbest.ResultText;
            lblGrade.Text = nbest.Grade;
            lblConfidence.Text = nbest.Confidence.ToString();

            string strText = "[";
            foreach (string word in nbest.Words)
            {
                strText += "\"" + word + "\", ";
            }
            strText = strText.Substring(0, strText.LastIndexOf(","));
            strText = strText + "]";

            lblWords.Text = nbest.Words != null ? strText : string.Empty;

            lblWordScores.Text = "[" + string.Join(", ", nbest.WordScores.ToArray()) + "]";
        }
    }
Example 14
    /// <summary>
    /// This function invokes the SpeechToText API to convert the given wav/amr file to text and displays the result.
    /// </summary>
    private void ConvertToSpeech()
    {
        Stream     postStream      = null;
        FileStream audioFileStream = null;

        try
        {
            string mmsFilePath = this.fileToConvert;
            audioFileStream = new FileStream(mmsFilePath, FileMode.Open, FileAccess.Read);
            BinaryReader reader     = new BinaryReader(audioFileStream);
            byte[]       binaryData = reader.ReadBytes((int)audioFileStream.Length);
            reader.Close();
            audioFileStream.Close();
            if (null != binaryData)
            {
                HttpWebRequest httpRequest = (HttpWebRequest)WebRequest.Create(string.Empty + this.fqdn + "/rest/1/SpeechToText");
                httpRequest.Headers.Add("Authorization", "Bearer " + this.accessToken);
                httpRequest.Headers.Add("X-SpeechContext", "Generic");

                string contentType = this.MapContentTypeFromExtension(Path.GetExtension(mmsFilePath));
                httpRequest.ContentLength = binaryData.Length;
                httpRequest.ContentType   = contentType;
                httpRequest.Accept        = "application/json";
                httpRequest.Method        = "POST";
                httpRequest.KeepAlive     = true;

                postStream = httpRequest.GetRequestStream();
                postStream.Write(binaryData, 0, binaryData.Length);
                postStream.Close();

                HttpWebResponse speechResponse = (HttpWebResponse)httpRequest.GetResponse();
                using (StreamReader streamReader = new StreamReader(speechResponse.GetResponseStream()))
                {
                    string speechResponseData = streamReader.ReadToEnd();
                    if (!string.IsNullOrEmpty(speechResponseData))
                    {
                        JavaScriptSerializer deserializeJsonObject = new JavaScriptSerializer();
                        SpeechResponse       deserializedJsonObj   = (SpeechResponse)deserializeJsonObject.Deserialize(speechResponseData, typeof(SpeechResponse));
                        if (null != deserializedJsonObj)
                        {
                            resultsPanel.Visible = true;
                            this.DrawPanelForSuccess(statusPanel, "Response Parameters listed below");
                            this.DisplayResult(deserializedJsonObj);
                        }
                        else
                        {
                            this.DrawPanelForFailure(statusPanel, "Empty speech to text response");
                        }
                    }
                    else
                    {
                        this.DrawPanelForFailure(statusPanel, "Empty speech to text response");
                    }

                    streamReader.Close();
                }
            }
            else
            {
                this.DrawPanelForFailure(statusPanel, "Empty speech to text response");
            }
        }
        catch (WebException we)
        {
            string errorResponse = string.Empty;

            try
            {
                using (StreamReader sr2 = new StreamReader(we.Response.GetResponseStream()))
                {
                    errorResponse = sr2.ReadToEnd();
                    sr2.Close();
                }
            }
            catch
            {
                errorResponse = "Unable to get response";
            }

            this.DrawPanelForFailure(statusPanel, errorResponse + Environment.NewLine + we.ToString());
        }
        catch (Exception ex)
        {
            this.DrawPanelForFailure(statusPanel, ex.ToString());
        }
        finally
        {
            if ((this.deleteFile == true) && (File.Exists(this.fileToConvert)))
            {
                File.Delete(this.fileToConvert);
                this.deleteFile = false;
            }
            if (null != postStream)
            {
                postStream.Close();
            }
        }
    }
    /// <summary>
    /// This function invokes the SpeechToText API to convert the given wav/amr file to text and displays the result.
    /// </summary>
    private void ConvertToSpeech(string parEndPoint, string parAccessToken, string parXspeechContext, string parXArgs, string parSpeechFilePath, bool parChunked)
    {
        Stream postStream = null;
        FileStream audioFileStream = null;
        try
        {
            audioFileStream = new FileStream(parSpeechFilePath, FileMode.Open, FileAccess.Read);
            BinaryReader reader = new BinaryReader(audioFileStream);
            byte[] binaryData = reader.ReadBytes((int)audioFileStream.Length);
            reader.Close();
            audioFileStream.Close();
            if (null != binaryData)
            {
                HttpWebRequest httpRequest = (HttpWebRequest)WebRequest.Create(string.Empty + parEndPoint );
                httpRequest.Headers.Add("Authorization", "Bearer " + parAccessToken);
                httpRequest.Headers.Add("X-SpeechContext", parXspeechContext);
                if (!string.IsNullOrEmpty(parXArgs))
                {
                    httpRequest.Headers.Add("X-Arg", parXArgs);
                }
                string contentType = this.MapContentTypeFromExtension(Path.GetExtension(parSpeechFilePath));
                httpRequest.ContentLength = binaryData.Length;
                httpRequest.ContentType = contentType;
                httpRequest.Accept = "application/json";
                httpRequest.Method = "POST";
                httpRequest.KeepAlive = true;
                httpRequest.SendChunked = parChunked;
                postStream = httpRequest.GetRequestStream();
                postStream.Write(binaryData, 0, binaryData.Length);
                postStream.Close();

                HttpWebResponse speechResponse = (HttpWebResponse)httpRequest.GetResponse();
                using (StreamReader streamReader = new StreamReader(speechResponse.GetResponseStream()))
                {
                    string speechRequestResponse = streamReader.ReadToEnd();
                    /*if (string.Compare(SpeechContext.SelectedValue, "TV") == 0)
                    {
                        speechErrorMessage = speechRequestResponse;
                        streamReader.Close();
                        return;
                    }*/
                    if (!string.IsNullOrEmpty(speechRequestResponse))
                    {
                        JavaScriptSerializer deserializeJsonObject = new JavaScriptSerializer();
                        SpeechResponse deserializedJsonObj = (SpeechResponse)deserializeJsonObject.Deserialize(speechRequestResponse, typeof(SpeechResponse));
                        if (null != deserializedJsonObj)
                        {
                            speechResponseData = deserializedJsonObj;
                            speechSuccessMessage = "true";
                            //speechErrorMessage = speechRequestResponse;
                        }
                        else
                        {
                            speechErrorMessage = "Empty speech to text response";
                        }
                    }
                    else
                    {
                        speechErrorMessage = "Empty speech to text response";
                    }

                    streamReader.Close();
                }
            }
            else
            {
                speechErrorMessage = "Empty speech to text response";
            }
        }
        catch (WebException we)
        {
            string errorResponse = string.Empty;

            try
            {
                using (StreamReader sr2 = new StreamReader(we.Response.GetResponseStream()))
                {
                    errorResponse = sr2.ReadToEnd();
                    sr2.Close();
                }
            }
            catch
            {
                errorResponse = "Unable to get response";
            }

            speechErrorMessage = errorResponse;
        }
        catch (Exception ex)
        {
            speechErrorMessage = ex.ToString();
        }
        finally
        {
            if (null != postStream)
            {
                postStream.Close();
            }
        }
    }
    /// <summary>
    /// Displays the result onto the page
    /// </summary>
    /// <param name="speechResponse">SpeechResponse received from api</param>
    private void DisplayResult(SpeechResponse speechResponse)
    {
        statusPanel.Visible = true;
        lblStatus.Text = speechResponse.Recognition.Status;
        lblResponseId.Text = speechResponse.Recognition.Responseid;
        foreach (NBest nbest in speechResponse.Recognition.NBest)
        {
            lblHypothesis.Text = nbest.Hypothesis;
            lblLanguageId.Text = nbest.LanguageId;
            lblResultText.Text = nbest.ResultText;
            lblGrade.Text = nbest.Grade;
            lblConfidence.Text = nbest.Confidence.ToString();
            string words = "[ ";
            if (null != nbest.Words)
            {
                foreach (string word in nbest.Words)
                {
                    words += "\"" + word + "\", ";
                }
                words = words.Substring(0, words.LastIndexOf(","));
                words = words + " ]";
            }

            lblWords.Text = nbest.Words != null ? words : string.Empty;

            if (null != nbest.WordScores)
            {
                lblWordScores.Text = "[ " + string.Join(", ", nbest.WordScores.ToArray()) + " ]";
            }
        }

        if (ddlSpeechContext.SelectedValue != "TV")
        {
            tvContextPanel.Visible = false;
            tvContextProgramPanel.Visible = false;
            tvContextShowtimePanel.Visible = false;
        }

        if (ddlSpeechContext.SelectedValue == "TV")
        {
            tvContextPanel.Visible = true;
            if (null != speechResponse.Recognition.Info)
            {
                lblInfoActionType.Text = speechResponse.Recognition.Info.ActionType;
                this.lblRecognized.Text = speechResponse.Recognition.Info.Recognized;
            }

            if (null != speechResponse.Recognition.Info.Interpretation)
            {
                lblInterpretation_genre_id.Text = speechResponse.Recognition.Info.Interpretation.Genre_id;
                lblInterpretation_genre_words.Text = speechResponse.Recognition.Info.Interpretation.Genre_words;
            }

            if (null != speechResponse.Recognition.Info.Metrics)
            {
                lblMetrics_audioBytes.Text = speechResponse.Recognition.Info.Metrics.AudioBytes.ToString();
                this.lblMetrics_audioTime.Text = speechResponse.Recognition.Info.Metrics.AudioTime.ToString();
            }

            List<Program> programs = null;

            if (null != speechResponse.Recognition.Info.Search && null != speechResponse.Recognition.Info.Search.Meta)
            {
                this.lblDescription.Text = speechResponse.Recognition.Info.Search.Meta.Description;

                if (null != speechResponse.Recognition.Info.Search.Meta.GuideDateStart)
                    this.lblGuideDateStart.Text = speechResponse.Recognition.Info.Search.Meta.GuideDateStart.ToString();

                if (null != speechResponse.Recognition.Info.Search.Meta.GuideDateEnd)
                    this.lblGuideDateEnd.Text = speechResponse.Recognition.Info.Search.Meta.GuideDateEnd.ToString();

                this.lblLineup.Text = speechResponse.Recognition.Info.Search.Meta.Lineup;
                this.lblMarket.Text = speechResponse.Recognition.Info.Search.Meta.Market;
                this.lblResultCount.Text = speechResponse.Recognition.Info.Search.Meta.ResultCount.ToString();

                programs = speechResponse.Recognition.Info.Search.Programs;

                if (null != programs)
                {
                    this.DisplayProgramDetails(programs);
                }
            }

            List<Showtime> showtimes = null;

            if (null != speechResponse.Recognition.Info.Search)
            {
                showtimes = speechResponse.Recognition.Info.Search.Showtimes;

                if (null != showtimes)
                {
                    this.DisplayShowTimeDetails(showtimes);
                }
            }
        }
    }
Example 17
    /// <summary>
    /// Method that calls the SpeechToText method of RequestFactory when the user clicks the submit button
    /// </summary>
    /// <param name="sender">sender that invoked this event</param>
    /// <param name="e">eventargs of the button</param>
    protected void SpeechToTextButton_Click(object sender, EventArgs e)
    {
        try
        {
            resultsPanel.Visible = false;
            this.Initialize();
            if (string.IsNullOrEmpty(fileUpload1.FileName))
            {
                if (!string.IsNullOrEmpty(ConfigurationManager.AppSettings["DefaultFile"]))
                {
                    this.fileToConvert = Request.MapPath(ConfigurationManager.AppSettings["DefaultFile"]);
                }
                else
                {
                    this.DrawPanelForFailure(statusPanel, "No file selected, and default file is not defined in web.config");
                    return;
                }
            }
            else
            {
                string fileName = fileUpload1.FileName;
                if (fileName.CompareTo("default.wav") == 0)
                {
                    fileName = "1" + fileUpload1.FileName;
                }
                fileUpload1.PostedFile.SaveAs(Request.MapPath("") + "/" + fileName);
                this.fileToConvert = Request.MapPath("").ToString() + "/" + fileName;
                this.deleteFile    = true;
            }

            SpeechResponse response = this.requestFactory.SpeechToText(this.fileToConvert, ddlSpeechContext.SelectedValue, this.xArgData);
            if (null != response)
            {
                this.DrawPanelForSuccess(statusPanel, "Response Parameters listed below");
                resultsPanel.Visible = true;
                this.DisplayResult(response);
            }
        }
        catch (InvalidScopeException invalidscope)
        {
            this.DrawPanelForFailure(statusPanel, invalidscope.Message);
        }
        catch (ArgumentException argex)
        {
            this.DrawPanelForFailure(statusPanel, argex.Message);
        }
        catch (InvalidResponseException ie)
        {
            this.DrawPanelForFailure(statusPanel, ie.Body);
        }
        catch (Exception ex)
        {
            this.DrawPanelForFailure(statusPanel, ex.Message);
        }
        finally
        {
            if ((this.deleteFile == true) && (File.Exists(this.fileToConvert)))
            {
                File.Delete(this.fileToConvert);
                this.deleteFile = false;
            }
        }
    }
    /// <summary>
    /// Displays the result onto the page
    /// </summary>
    /// <param name="speechResponse">SpeechResponse received from api</param>
    private void DisplayResult(SpeechResponse speechResponse)
    {
        lblResponseId.Text = speechResponse.Recognition.ResponseId;
        lblStatus.Text = speechResponse.Recognition.Status;
        if ((speechResponse.Recognition.NBest != null) && (speechResponse.Recognition.NBest.Count > 0))
        {
            foreach (NBest nbest in speechResponse.Recognition.NBest)
            {
                lblHypothesis.Text = nbest.Hypothesis;
                lblLanguageId.Text = nbest.LanguageId;
                lblResultText.Text = nbest.ResultText;
                lblGrade.Text = nbest.Grade;
                lblConfidence.Text = nbest.Confidence.ToString();

                string strText = "[";
                foreach (string word in nbest.Words)
                {
                    strText += "\"" + word + "\", ";
                }
                strText = strText.Substring(0, strText.LastIndexOf(","));
                strText = strText + "]";

                lblWords.Text = nbest.Words != null ? strText : string.Empty;

                lblWordScores.Text = "[" + string.Join(", ", nbest.WordScores.ToArray()) + "]";
            }
        }
        else
        {
            hypoRow.Visible = false;
            langRow.Visible = false;
            confRow.Visible = false;
            gradeRow.Visible = false;
            resultRow.Visible = false;
            wordsRow.Visible = false;
            wordScoresRow.Visible = false;
        }
    }
Example 19
    /// <summary>
    /// Method that calls the SpeechToText method of RequestFactory when the user clicks the submit button
    /// </summary>
    /// <param name="sender">sender that invoked this event</param>
    /// <param name="e">eventargs of the button</param>
    protected void SpeechToTextButton_Click(object sender, EventArgs e)
    {
        try
        {
            resultsPanel.Visible = false;
            this.Initialize();
            if (string.IsNullOrEmpty(fileUpload1.FileName))
            {
                if (!string.IsNullOrEmpty(ConfigurationManager.AppSettings["DefaultFile"]))
                {
                    this.fileToConvert = Request.MapPath(ConfigurationManager.AppSettings["DefaultFile"]);
                }
                else
                {
                    this.DrawPanelForFailure(statusPanel, "No file selected, and default file is not defined in web.config");
                    return;
                }
            }
            else
            {
                string fileName = fileUpload1.FileName;
                if (fileName.CompareTo("default.wav") == 0)
                {
                    fileName = "1" + fileUpload1.FileName;
                }
                fileUpload1.PostedFile.SaveAs(Request.MapPath("") + "/" + fileName);
                this.fileToConvert = Request.MapPath("").ToString() + "/" + fileName;
                this.deleteFile    = true;
            }

            XSpeechContext speechContext   = XSpeechContext.Generic;
            string         contentLanguage = string.Empty;
            this.xArgData = this.commonXArg;
            switch (ddlSpeechContext.SelectedValue)
            {
            case "Generic": speechContext = XSpeechContext.Generic; contentLanguage = ddlContentLang.SelectedValue; break;

            case "BusinessSearch": speechContext = XSpeechContext.BusinessSearch; break;

            case "TV": speechContext = XSpeechContext.TV; this.xArgData = this.xArgTVContext; break;

            case "Gaming": speechContext = XSpeechContext.Gaming; break;

            case "SocialMedia": speechContext = XSpeechContext.SocialMedia; this.xArgData = this.xArgSocialMediaContext; break;

            case "WebSearch": speechContext = XSpeechContext.WebSearch; break;

            case "SMS": speechContext = XSpeechContext.SMS; break;

            case "VoiceMail": speechContext = XSpeechContext.VoiceMail; break;

            case "QuestionAndAnswer": speechContext = XSpeechContext.QuestionAndAnswer; break;
            }

            string subContext = txtSubContext.Text;
            if (subContext.ToLower().Contains("example"))
            {
                subContext = string.Empty;
            }

            SpeechResponse response = this.requestFactory.SpeechToText(fileToConvert, speechContext, this.xArgData, contentLanguage, subContext, ddlAudioContentType.SelectedValue);

            if (null != response)
            {
                resultsPanel.Visible = true;
                this.DrawPanelForSuccess(statusPanel, "Response Parameters listed below");
                this.DisplayResult(response);
            }
        }
        catch (InvalidScopeException invalidscope)
        {
            this.DrawPanelForFailure(statusPanel, invalidscope.Message);
        }
        catch (ArgumentException argex)
        {
            this.DrawPanelForFailure(statusPanel, argex.Message);
        }
        catch (InvalidResponseException ie)
        {
            this.DrawPanelForFailure(statusPanel, ie.Body);
        }
        catch (Exception ex)
        {
            this.DrawPanelForFailure(statusPanel, ex.Message);
        }
        finally
        {
            if ((this.deleteFile == true) && (File.Exists(this.fileToConvert)))
            {
                File.Delete(this.fileToConvert);
                this.deleteFile = false;
            }
        }
    }
Example 20
    /// <summary>
    /// Displays the result onto the page
    /// </summary>
    /// <param name="speechResponse">SpeechResponse received from api</param>
    private void DisplayResult(SpeechResponse speechResponse)
    {
        statusPanel.Visible = true;
        lblStatus.Text      = speechResponse.Recognition.Status;
        lblResponseId.Text  = speechResponse.Recognition.Responseid;
        foreach (NBest nbest in speechResponse.Recognition.NBest)
        {
            lblHypothesis.Text = nbest.Hypothesis;
            lblLanguageId.Text = nbest.LanguageId;
            lblResultText.Text = nbest.ResultText;
            lblGrade.Text      = nbest.Grade;
            lblConfidence.Text = nbest.Confidence.ToString();
            string words = "[ ";
            if (null != nbest.Words)
            {
                foreach (string word in nbest.Words)
                {
                    words += "\"" + word + "\", ";
                }
                words = words.Substring(0, words.LastIndexOf(","));
                words = words + " ]";
            }

            lblWords.Text = nbest.Words != null ? words : string.Empty;

            if (null != nbest.WordScores)
            {
                lblWordScores.Text = "[ " + string.Join(", ", nbest.WordScores.ToArray()) + " ]";
            }
        }

        if (ddlSpeechContext.SelectedValue != "TV")
        {
            tvContextPanel.Visible         = false;
            tvContextProgramPanel.Visible  = false;
            tvContextShowtimePanel.Visible = false;
        }

        if (ddlSpeechContext.SelectedValue == "TV")
        {
            tvContextPanel.Visible = true;
            if (null != speechResponse.Recognition.Info)
            {
                lblInfoActionType.Text  = speechResponse.Recognition.Info.ActionType;
                this.lblRecognized.Text = speechResponse.Recognition.Info.Recognized;
            }

            if (null != speechResponse.Recognition.Info.Interpretation)
            {
                lblInterpretation_genre_id.Text    = speechResponse.Recognition.Info.Interpretation.Genre_id;
                lblInterpretation_genre_words.Text = speechResponse.Recognition.Info.Interpretation.Genre_words;
            }

            if (null != speechResponse.Recognition.Info.Metrics)
            {
                lblMetrics_audioBytes.Text     = speechResponse.Recognition.Info.Metrics.AudioBytes.ToString();
                this.lblMetrics_audioTime.Text = speechResponse.Recognition.Info.Metrics.AudioTime.ToString();
            }

            List <Program> programs = null;

            if (null != speechResponse.Recognition.Info.Search && null != speechResponse.Recognition.Info.Search.Meta)
            {
                this.lblDescription.Text = speechResponse.Recognition.Info.Search.Meta.Description;

                if (null != speechResponse.Recognition.Info.Search.Meta.GuideDateStart)
                {
                    this.lblGuideDateStart.Text = speechResponse.Recognition.Info.Search.Meta.GuideDateStart.ToString();
                }

                if (null != speechResponse.Recognition.Info.Search.Meta.GuideDateEnd)
                {
                    this.lblGuideDateEnd.Text = speechResponse.Recognition.Info.Search.Meta.GuideDateEnd.ToString();
                }

                this.lblLineup.Text      = speechResponse.Recognition.Info.Search.Meta.Lineup;
                this.lblMarket.Text      = speechResponse.Recognition.Info.Search.Meta.Market;
                this.lblResultCount.Text = speechResponse.Recognition.Info.Search.Meta.ResultCount.ToString();

                programs = speechResponse.Recognition.Info.Search.Programs;

                if (null != programs)
                {
                    this.DisplayProgramDetails(programs);
                }
            }

            List <Showtime> showtimes = null;

            if (null != speechResponse.Recognition.Info.Search)
            {
                showtimes = speechResponse.Recognition.Info.Search.Showtimes;

                if (null != showtimes)
                {
                    this.DisplayShowTimeDetails(showtimes);
                }
            }
        }
    }
Example 21
    /// <summary>
    /// This function invokes the SpeechToText API to convert the given wav/amr file to text and displays the result.
    /// </summary>
    private void ConvertToSpeech(string parEndPoint, string parAccessToken, string parXspeechContext, string parXArgs, string parSpeechFilePath, bool parChunked)
    {
        Stream     postStream      = null;
        FileStream audioFileStream = null;

        try
        {
            audioFileStream = new FileStream(parSpeechFilePath, FileMode.Open, FileAccess.Read);
            BinaryReader reader     = new BinaryReader(audioFileStream);
            byte[]       binaryData = reader.ReadBytes((int)audioFileStream.Length);
            reader.Close();
            audioFileStream.Close();
            if (null != binaryData)
            {
                HttpWebRequest httpRequest = (HttpWebRequest)WebRequest.Create(string.Empty + parEndPoint);
                httpRequest.Headers.Add("Authorization", "Bearer " + parAccessToken);
                httpRequest.Headers.Add("X-SpeechContext", parXspeechContext);
                if (!string.IsNullOrEmpty(parXArgs))
                {
                    httpRequest.Headers.Add("X-Arg", parXArgs);
                }
                string contentType = this.MapContentTypeFromExtension(Path.GetExtension(parSpeechFilePath));
                httpRequest.ContentLength = binaryData.Length;
                httpRequest.ContentType   = contentType;
                httpRequest.Accept        = "application/json";
                httpRequest.Method        = "POST";
                httpRequest.KeepAlive     = true;
                httpRequest.SendChunked   = parChunked;
                postStream = httpRequest.GetRequestStream();
                postStream.Write(binaryData, 0, binaryData.Length);
                postStream.Close();

                HttpWebResponse speechResponse = (HttpWebResponse)httpRequest.GetResponse();
                using (StreamReader streamReader = new StreamReader(speechResponse.GetResponseStream()))
                {
                    string speechRequestResponse = streamReader.ReadToEnd();

                    /*if (string.Compare(SpeechContext.SelectedValue, "TV") == 0)
                     * {
                     *  speechErrorMessage = speechRequestResponse;
                     *  streamReader.Close();
                     *  return;
                     * }*/
                    if (!string.IsNullOrEmpty(speechRequestResponse))
                    {
                        JavaScriptSerializer deserializeJsonObject = new JavaScriptSerializer();
                        SpeechResponse       deserializedJsonObj   = (SpeechResponse)deserializeJsonObject.Deserialize(speechRequestResponse, typeof(SpeechResponse));
                        if (null != deserializedJsonObj)
                        {
                            speechResponseData   = deserializedJsonObj;
                            speechSuccessMessage = "true";
                            //speechErrorMessage = speechRequestResponse;
                        }
                        else
                        {
                            speechErrorMessage = "Empty speech to text response";
                        }
                    }
                    else
                    {
                        speechErrorMessage = "Empty speech to text response";
                    }

                    streamReader.Close();
                }
            }
            else
            {
                speechErrorMessage = "Empty speech to text response";
            }
        }
        catch (WebException we)
        {
            string errorResponse = string.Empty;

            try
            {
                using (StreamReader sr2 = new StreamReader(we.Response.GetResponseStream()))
                {
                    errorResponse = sr2.ReadToEnd();
                    sr2.Close();
                }
            }
            catch
            {
                errorResponse = "Unable to get response";
            }

            speechErrorMessage = errorResponse;
        }
        catch (Exception ex)
        {
            speechErrorMessage = ex.ToString();
        }
        finally
        {
            if (null != postStream)
            {
                postStream.Close();
            }
        }
    }
Example 22
 /// <summary>
 /// Displays the result onto the page
 /// </summary>
 /// <param name="speechResponse">SpeechResponse received from api</param>
 private void DisplayResult(SpeechResponse speechResponse)
 {
     lblResponseId.Text = speechResponse.Recognition.ResponseId;
     foreach (NBest nbest in speechResponse.Recognition.NBest)
     {
         lblHypothesis.Text = nbest.Hypothesis;
         lblLanguageId.Text = nbest.LanguageId;
         lblResultText.Text = nbest.ResultText;
         lblGrade.Text = nbest.Grade;
         lblConfidence.Text = nbest.Confidence.ToString();
         lblWords.Text = nbest.Words != null ? string.Join(", ", nbest.Words.ToArray()) : string.Empty;
          lblWordScores.Text = nbest.WordScores != null ? string.Join(", ", nbest.WordScores.ToArray()) : string.Empty;
     }
 }
    /// <summary>
    /// This function invokes the SpeechToText API to convert the given wav/amr file to text and displays the result.
    /// </summary>
    private void ConvertToSpeech(string parEndPoint, string parAccessToken, string parXspeechContext, string parXArgs, string parSpeechFilePath)
    {
        Stream postStream = null;
        FileStream audioFileStream = null;
        audioFileStream = new FileStream(parSpeechFilePath, FileMode.Open, FileAccess.Read);
        BinaryReader reader = new BinaryReader(audioFileStream);
        try
        {

            byte[] binaryData = reader.ReadBytes((int)audioFileStream.Length);
            if (null != binaryData)
            {
                string boundary = "----------------------------" + DateTime.Now.Ticks.ToString("x");
                HttpWebRequest httpRequest = (HttpWebRequest)WebRequest.Create(string.Empty + parEndPoint);
                httpRequest.Headers.Add("Authorization", "Bearer " + parAccessToken);
                httpRequest.Headers.Add("X-SpeechContext", parXspeechContext);
                httpRequest.Headers.Add("Content-Language", "en-us");
                httpRequest.ContentType = "multipart/x-srgs-audio; " + "boundary=" + boundary;

                if (!string.IsNullOrEmpty(parXArgs))
                {
                    httpRequest.Headers.Add("X-Arg", parXArgs);
                }
                string filenameArgument = "filename";
                if (!string.IsNullOrEmpty(SpeechContext.SelectedValue))
                {
                    if (string.Compare("GenericHints", SpeechContext.SelectedValue) == 0)
                    {
                        filenameArgument = nameParam.SelectedValue.ToString();
                    }
                }

                string contentType = this.MapContentTypeFromExtension(Path.GetExtension(parSpeechFilePath));

                string data = string.Empty;

                data += "--" +boundary + "\r\n" + "Content-Disposition: form-data; name=\"x-dictionary\"; " + filenameArgument + "=\"speech_alpha.pls\"\r\nContent-Type: application/pls+xml\r\n";

                data += "\r\n" + xdictionaryContent + "\r\n\r\n\r\n";

                data += "--" + boundary + "\r\n" + "Content-Disposition: form-data; name=\"x-grammar\"";

                //data += "filename=\"prefix.srgs\" ";

                data += "\r\nContent-Type: application/srgs+xml \r\n" + "\r\n" + xgrammerContent + "\r\n\r\n\r\n" + "--" + boundary + "\r\n";

                data += "Content-Disposition: form-data; name=\"x-voice\"; " + filenameArgument + "=\"" + audio_file.SelectedValue + "\"";
                data += "\r\nContent-Type: " + contentType + "\r\n\r\n";
                UTF8Encoding encoding = new UTF8Encoding();
                byte[] firstPart = encoding.GetBytes(data);
                int newSize = firstPart.Length + binaryData.Length;

                var memoryStream = new MemoryStream(new byte[newSize], 0, newSize, true, true);
                memoryStream.Write(firstPart, 0, firstPart.Length);
                memoryStream.Write(binaryData, 0, binaryData.Length);

                byte[] postBytes = memoryStream.GetBuffer();

                byte[] byteLastBoundary = encoding.GetBytes("\r\n\r\n" + "--" + boundary + "--");
                int totalSize = postBytes.Length + byteLastBoundary.Length;

                var totalMS = new MemoryStream(new byte[totalSize], 0, totalSize, true, true);
                totalMS.Write(postBytes, 0, postBytes.Length);
                totalMS.Write(byteLastBoundary, 0, byteLastBoundary.Length);

                byte[] finalpostBytes = totalMS.GetBuffer();

                httpRequest.ContentLength = totalMS.Length;
                //httpRequest.ContentType = contentType;
                httpRequest.Accept = "application/json";
                httpRequest.Method = "POST";
                httpRequest.KeepAlive = true;
                postStream = httpRequest.GetRequestStream();
                postStream.Write(finalpostBytes, 0, finalpostBytes.Length);
                postStream.Close();

                HttpWebResponse speechResponse = (HttpWebResponse)httpRequest.GetResponse();
                using (StreamReader streamReader = new StreamReader(speechResponse.GetResponseStream()))
                {
                    string speechRequestResponse = streamReader.ReadToEnd();
                    if (!string.IsNullOrEmpty(speechRequestResponse))
                    {
                        JavaScriptSerializer deserializeJsonObject = new JavaScriptSerializer();
                        SpeechResponse deserializedJsonObj = (SpeechResponse)deserializeJsonObject.Deserialize(speechRequestResponse, typeof(SpeechResponse));
                        if (null != deserializedJsonObj)
                        {
                            speechResponseData = deserializedJsonObj;
                            speechSuccessMessage = "true";
                            //speechErrorMessage = speechRequestResponse;
                        }
                        else
                        {
                            speechErrorMessage = "Empty speech to text response";
                        }
                    }
                    else
                    {
                        speechErrorMessage = "Empty speech to text response";
                    }

                    streamReader.Close();
                }
            }
            else
            {
                speechErrorMessage = "Empty speech to text response";
            }
        }
        catch (WebException we)
        {
            string errorResponse = string.Empty;

            try
            {
                using (StreamReader sr2 = new StreamReader(we.Response.GetResponseStream()))
                {
                    errorResponse = sr2.ReadToEnd();
                    sr2.Close();
                }
            }
            catch
            {
                errorResponse = "Unable to get response";
            }

            speechErrorMessage = errorResponse;
        }
        catch (Exception ex)
        {
            speechErrorMessage = ex.ToString();
        }
        finally
        {
            reader.Close();
            audioFileStream.Close();
            if (null != postStream)
            {
                postStream.Close();
            }
        }
    }
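For reference, the multipart body assembled above has roughly the following layout (reconstructed from the string concatenation in the code; the boundary value, the filename attribute name, and the placeholder contents are illustrative):

    --<boundary>
    Content-Disposition: form-data; name="x-dictionary"; filename="speech_alpha.pls"
    Content-Type: application/pls+xml

    <PLS pronunciation lexicon: xdictionaryContent>

    --<boundary>
    Content-Disposition: form-data; name="x-grammar"
    Content-Type: application/srgs+xml

    <SRGS grammar: xgrammerContent>

    --<boundary>
    Content-Disposition: form-data; name="x-voice"; filename="<selected audio file>"
    Content-Type: <mapped audio content type>

    <raw audio bytes>

    --<boundary>--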
Example 24
        public SpeechResponse GetResponse(string Said, Mobile Speaker, Townsperson tp)
        {
            started = DateTime.Now;
            DataView dvReplies;

            DataRowView[] drvReplies;
            object        key;
            ArrayList     alReplies = new ArrayList();
            DataRow       drFound   = null;
            String        test      = null;
            bool          isWord    = false;

            if (Townsperson.Logging == Townsperson.LogLevel.Debug)
            {
                TownspersonLogging.WriteLine(Speaker, "Asynchronous call begun.");
            }

            if (SpeechData.blockRequests == true)
            {
                sr = new SpeechResponse(blockedReplies[Utility.Random(blockedReplies.Length)], Speaker, 0, 0, null, null);

                if (Townsperson.Logging == Townsperson.LogLevel.Debug)
                {
                    TownspersonLogging.WriteLine(Speaker, "Access to Database blocked.");
                }
            }
            else
            {
                String temp = Said.ToLower().Trim();
                if (isEmpty(temp))
                {
                    return(new SpeechResponse(blockedReplies[Utility.Random(blockedReplies.Length)], Speaker, 0, 0, null, null));
                }

                temp = ' ' + temp + ' ';
                foreach (DataRow dr in SpeechData.dsSpeechRules.Tables["dtTriggers"].Rows)
                {
                    test = dr["trigger"].ToString();

                    isWord = (bool)dr["word"];
                    if (isWord)
                    {
                        test = ' ' + test + ' ';
                    }

                    if (temp.IndexOf(test) >= 0)
                    {
                        drFound = dr;
                        if (Townsperson.Logging == Townsperson.LogLevel.Debug)
                        {
                            TownspersonLogging.WriteLine(Speaker, "Trigger matched: \"{0}\" : \"{1}\"", test, temp);
                        }

                        break;
                    }
                }

                dvReplies = new DataView(SpeechData.dsSpeechRules.Tables["dtResponses"]);
                dvReplies.RowStateFilter = DataViewRowState.CurrentRows;
                dvReplies.Sort           = "index";

                if (drFound == null)
                {
                    key = (object)0;

                    if (Townsperson.Logging == Townsperson.LogLevel.Basic || Townsperson.Logging == Townsperson.LogLevel.Debug)
                    {
                        TownspersonLogging.WriteLine(Speaker, "Default Rule: \"{0}\"", temp.Trim().ToUpper());
                    }
                }
                else
                {
                    key = (object)drFound[0].ToString();
                }

                drvReplies = dvReplies.FindRows(key);

                foreach (DataRowView drv in drvReplies)
                {
                    if ((int)drv["npcAttitude"] != 0 && (int)drv["npcAttitude"] != (int)tp.attitude)
                    {
                        continue;
                    }
                    if ((int)drv["playerGender"] != 0 && (int)drv["playerGender"] != (Speaker.Female ? 2 : 1))
                    {
                        continue;
                    }
                    if ((int)drv["npcGender"] != 0 && (int)drv["npcGender"] != (tp.Female ? 2 : 1))
                    {
                        continue;
                    }
                    if ((int)drv["timeOfDay"] != 0 && !(Townsperson.CheckTOD(tp, (int)drv["timeOfDay"])))
                    {
                        continue;
                    }
                    if (!isEmpty(drv["npcRegion"].ToString()) && drv["npcRegion"].ToString() != tp.Region.ToString())
                    {
                        continue;
                    }
                    if (!isEmpty(drv["npcTag"].ToString()) && drv["npcTag"].ToString() != tp.Tag)
                    {
                        continue;
                    }
                    if (!isEmpty(drv["npcName"].ToString()) && drv["npcName"].ToString() != tp.Name)
                    {
                        continue;
                    }
                    if (!isEmpty(drv["npcTitle"].ToString()) && drv["npcTitle"].ToString() != tp.Title)
                    {
                        continue;
                    }
                    if ((int)drv["objStatus"] != 0)
                    {
                        Item item = Townsperson.CheckInventory(Speaker, drv["questObject"].ToString());
                        if (item == null && (int)drv["objStatus"] == 1)
                        {
                            continue;
                        }
                        if (item != null && (int)drv["objStatus"] == 2)
                        {
                            continue;
                        }
                    }
                    alReplies.Add(drv);
                }

                int cnt = alReplies.Count;
                if (cnt == 0)
                {
                    // No response row passed the filters; fall back to a generic reply
                    // instead of indexing an empty list.
                    return(new SpeechResponse(blockedReplies[Utility.Random(blockedReplies.Length)], Speaker, 0, 0, null, null));
                }

                DataRowView reply = (DataRowView)alReplies[Utility.Random(cnt)];

                if (Townsperson.Logging == Townsperson.LogLevel.Debug)
                {
                    TownspersonLogging.WriteLine(Speaker, "Matched {0} Responses.", cnt);
                }

                string toSay = reply["response"].ToString();
                if (toSay == "{blank}")
                {
                    toSay = "";
                }
                int    anim   = (int)reply["npcAnimation"];
                int    react  = (int)reply["npcReaction"];
                string reward = reply["packObject"].ToString(); // is it better to pass empty string or null?
                string remove = null;
                if (!isEmpty(reply["questObject"].ToString()) && (bool)reply["questObjDelete"])
                {
                    remove = reply["questObject"].ToString();
                }

                sr = new SpeechResponse(toSay, Speaker, anim, react, reward, remove);
            }

            // Delay results for more realistic conversation
            TimeSpan timeused = DateTime.Now - started;
            TimeSpan timeleft = duration - timeused;

            if (Townsperson.Logging == Townsperson.LogLevel.Debug)
            {
                TownspersonLogging.WriteLine(Speaker, "Asynchronous call took {0} ms.", timeused.Milliseconds.ToString());
            }

            if (timeleft > TimeSpan.Zero && !Townsperson.Synchronous)
            {
                Thread.Sleep(timeleft);
            }

            return(sr);
        }
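
The matching loop above assumes that SpeechData.dsSpeechRules contains a dtTriggers and a dtResponses table. The sketch below reconstructs a minimal version of that layout from the column names used in GetResponse; the column types, the sample rows, and the helper name BuildSampleSpeechRules are illustrative assumptions only, and it assumes using System.Data.

        // A sketch of the rule tables GetResponse reads. Table and column names are taken
        // from the method above; the column types and sample rows are assumptions.
        private static DataSet BuildSampleSpeechRules()
        {
            DataSet ds = new DataSet("dsSpeechRules");

            DataTable triggers = ds.Tables.Add("dtTriggers");
            triggers.Columns.Add("index", typeof(int));      // key, looked up in dtResponses "index"
            triggers.Columns.Add("trigger", typeof(string)); // lower-case text to look for
            triggers.Columns.Add("word", typeof(bool));      // true = whole-word match only
            triggers.Rows.Add(1, "bank", true);

            DataTable responses = ds.Tables.Add("dtResponses");
            responses.Columns.Add("index", typeof(int));         // 0 = default rule
            responses.Columns.Add("response", typeof(string));   // "{blank}" means say nothing
            responses.Columns.Add("npcAnimation", typeof(int));
            responses.Columns.Add("npcReaction", typeof(int));
            responses.Columns.Add("npcAttitude", typeof(int));   // 0 = any attitude
            responses.Columns.Add("playerGender", typeof(int));  // 0 = any, 1 = male, 2 = female
            responses.Columns.Add("npcGender", typeof(int));
            responses.Columns.Add("timeOfDay", typeof(int));
            responses.Columns.Add("npcRegion", typeof(string));
            responses.Columns.Add("npcTag", typeof(string));
            responses.Columns.Add("npcName", typeof(string));
            responses.Columns.Add("npcTitle", typeof(string));
            responses.Columns.Add("objStatus", typeof(int));     // 1 = speaker must carry questObject, 2 = must not
            responses.Columns.Add("questObject", typeof(string));
            responses.Columns.Add("questObjDelete", typeof(bool));
            responses.Columns.Add("packObject", typeof(string));
            responses.Rows.Add(1, "The bank is just up the road.", 0, 0, 0, 0, 0, 0, "", "", "", "", 0, "", false, "");

            return ds;
        }

With rows loaded this way, the first column of a matched trigger row is the key that dvReplies.FindRows looks up in dtResponses, while key 0 selects the default rule.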
    /// <summary>
    /// This method invokes the SpeechToText API to convert the given wav/amr file and displays the result.
    /// </summary>
    private void ConvertToSpeech(string parEndPoint, string parAccessToken, string parXspeechContext, string parXArgs, string parSpeechFilePath)
    {
        Stream     postStream      = null;
        FileStream audioFileStream = null;

        audioFileStream = new FileStream(parSpeechFilePath, FileMode.Open, FileAccess.Read);
        BinaryReader reader = new BinaryReader(audioFileStream);

        try
        {
            byte[] binaryData = reader.ReadBytes((int)audioFileStream.Length);
            if (null != binaryData)
            {
                string         boundary    = "----------------------------" + DateTime.Now.Ticks.ToString("x");
                HttpWebRequest httpRequest = (HttpWebRequest)WebRequest.Create(parEndPoint);
                httpRequest.Headers.Add("Authorization", "Bearer " + parAccessToken);
                httpRequest.Headers.Add("X-SpeechContext", parXspeechContext);
                httpRequest.Headers.Add("Content-Language", "en-us");
                httpRequest.ContentType = "multipart/x-srgs-audio; " + "boundary=" + boundary;

                if (!string.IsNullOrEmpty(parXArgs))
                {
                    httpRequest.Headers.Add("X-Arg", parXArgs);
                }
                string filenameArgument = "filename";
                if (!string.IsNullOrEmpty(SpeechContext.SelectedValue))
                {
                    if (string.Compare("GenericHints", SpeechContext.SelectedValue) == 0)
                    {
                        filenameArgument = nameParam.SelectedValue.ToString();
                    }
                }

                string contentType = this.MapContentTypeFromExtension(Path.GetExtension(parSpeechFilePath));

                string data = string.Empty;


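                // Part 1 of the multipart body: the inline PLS pronunciation dictionary (x-dictionary).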
                data += "--" + boundary + "\r\n" + "Content-Disposition: form-data; name=\"x-dictionary\"; " + filenameArgument + "=\"speech_alpha.pls\"\r\nContent-Type: application/pls+xml\r\n";

                data += "\r\n" + xdictionaryContent + "\r\n\r\n\r\n";

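                // Part 2: the inline SRGS grammar (x-grammar).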
                data += "--" + boundary + "\r\n" + "Content-Disposition: form-data; name=\"x-grammar\"";

                //data += "filename=\"prefix.srgs\" ";

                data += "\r\nContent-Type: application/srgs+xml \r\n" + "\r\n" + xgrammerContent + "\r\n\r\n\r\n" + "--" + boundary + "\r\n";

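                // Part 3: headers for the audio (x-voice); the binary audio bytes are appended after this text block.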
                data += "Content-Disposition: form-data; name=\"x-voice\"; " + filenameArgument + "=\"" + audio_file.SelectedValue + "\"";
                data += "\r\nContent-Type: " + contentType + "\r\n\r\n";
                UTF8Encoding encoding  = new UTF8Encoding();
                byte[]       firstPart = encoding.GetBytes(data);
                int          newSize   = firstPart.Length + binaryData.Length;

                var memoryStream = new MemoryStream(new byte[newSize], 0, newSize, true, true);
                memoryStream.Write(firstPart, 0, firstPart.Length);
                memoryStream.Write(binaryData, 0, binaryData.Length);

                byte[] postBytes = memoryStream.GetBuffer();

                byte[] byteLastBoundary = encoding.GetBytes("\r\n\r\n" + "--" + boundary + "--");
                int    totalSize        = postBytes.Length + byteLastBoundary.Length;

                var totalMS = new MemoryStream(new byte[totalSize], 0, totalSize, true, true);
                totalMS.Write(postBytes, 0, postBytes.Length);
                totalMS.Write(byteLastBoundary, 0, byteLastBoundary.Length);

                byte[] finalpostBytes = totalMS.GetBuffer();

                httpRequest.ContentLength = totalMS.Length;
                //httpRequest.ContentType = contentType;
                httpRequest.Accept    = "application/json";
                httpRequest.Method    = "POST";
                httpRequest.KeepAlive = true;
                postStream            = httpRequest.GetRequestStream();
                postStream.Write(finalpostBytes, 0, finalpostBytes.Length);
                postStream.Close();

                HttpWebResponse speechResponse = (HttpWebResponse)httpRequest.GetResponse();
                using (StreamReader streamReader = new StreamReader(speechResponse.GetResponseStream()))
                {
                    string speechRequestResponse = streamReader.ReadToEnd();
                    if (!string.IsNullOrEmpty(speechRequestResponse))
                    {
                        JavaScriptSerializer deserializeJsonObject = new JavaScriptSerializer();
                        SpeechResponse       deserializedJsonObj   = (SpeechResponse)deserializeJsonObject.Deserialize(speechRequestResponse, typeof(SpeechResponse));
                        if (null != deserializedJsonObj)
                        {
                            speechResponseData   = deserializedJsonObj;
                            speechSuccessMessage = "true";
                            //speechErrorMessage = speechRequestResponse;
                        }
                        else
                        {
                            speechErrorMessage = "Empty speech to text response";
                        }
                    }
                    else
                    {
                        speechErrorMessage = "Empty speech to text response";
                    }

                    streamReader.Close();
                }
            }
            else
            {
                speechErrorMessage = "Empty speech to text response";
            }
        }
        catch (WebException we)
        {
            string errorResponse = string.Empty;

            try
            {
                using (StreamReader sr2 = new StreamReader(we.Response.GetResponseStream()))
                {
                    errorResponse = sr2.ReadToEnd();
                    sr2.Close();
                }
            }
            catch
            {
                errorResponse = "Unable to get response";
            }

            speechErrorMessage = errorResponse;
        }
        catch (Exception ex)
        {
            speechErrorMessage = ex.ToString();
        }
        finally
        {
            reader.Close();
            audioFileStream.Close();
            if (null != postStream)
            {
                postStream.Close();
            }
        }
    }
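
The request body assembled above is a multipart/x-srgs-audio payload: an x-dictionary part (PLS lexicon), an x-grammar part (SRGS grammar), an x-voice part carrying the audio bytes, and a closing boundary. A minimal sketch of the same layout built with a single MemoryStream follows; the helper name BuildSrgsAudioBody and its parameters are illustrative assumptions, and it assumes using System.IO and System.Text.

    // A sketch of the multipart/x-srgs-audio body built above, assembled with one stream.
    // Part names and headers mirror the code above; the helper name and parameters are illustrative only.
    private static byte[] BuildSrgsAudioBody(string boundary, string dictionaryPls, string grammarSrgs,
                                             string audioContentType, string audioFileName, byte[] audioBytes)
    {
        StringBuilder sb = new StringBuilder();

        // Part 1: inline PLS pronunciation dictionary.
        sb.Append("--").Append(boundary).Append("\r\n");
        sb.Append("Content-Disposition: form-data; name=\"x-dictionary\"; filename=\"speech_alpha.pls\"\r\n");
        sb.Append("Content-Type: application/pls+xml\r\n\r\n");
        sb.Append(dictionaryPls).Append("\r\n\r\n\r\n");

        // Part 2: inline SRGS grammar.
        sb.Append("--").Append(boundary).Append("\r\n");
        sb.Append("Content-Disposition: form-data; name=\"x-grammar\"\r\n");
        sb.Append("Content-Type: application/srgs+xml\r\n\r\n");
        sb.Append(grammarSrgs).Append("\r\n\r\n\r\n");

        // Part 3: headers for the audio; the binary payload follows them directly.
        sb.Append("--").Append(boundary).Append("\r\n");
        sb.Append("Content-Disposition: form-data; name=\"x-voice\"; filename=\"").Append(audioFileName).Append("\"\r\n");
        sb.Append("Content-Type: ").Append(audioContentType).Append("\r\n\r\n");

        byte[] header = Encoding.UTF8.GetBytes(sb.ToString());
        byte[] footer = Encoding.UTF8.GetBytes("\r\n\r\n--" + boundary + "--");

        using (MemoryStream body = new MemoryStream())
        {
            body.Write(header, 0, header.Length);
            body.Write(audioBytes, 0, audioBytes.Length);
            body.Write(footer, 0, footer.Length);
            return body.ToArray();
        }
    }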