// Allows the WebSocket client to receive messages in a background task
        private async Task Receiving(ClientWebSocket client)
        {
            try
            {
                var  buffer      = new byte[512];
                bool isReceiving = true;

                while (isReceiving)
                {
                    var wsResult = await client.ReceiveAsync(new ArraySegment <byte>(buffer), CancellationToken.None);

                    SpeechServiceResult wssr;

                    var resStr = Encoding.UTF8.GetString(buffer, 0, wsResult.Count);

                    switch (wsResult.MessageType)
                    {
                    // Incoming text messages can be hypotheses about the words the service recognized or the final
                    // phrase, which is a recognition result that won't change.
                    case WebSocketMessageType.Text:
                        wssr = ParseWebSocketSpeechResult(resStr);
                        Console.WriteLine(resStr + Environment.NewLine + "*** Message End ***" + Environment.NewLine);

                        // Set the recognized text field in the client for future lookup, this can be stored
                        // in either the Text property (for hypotheses) or DisplayText (for final phrases).
                        if (wssr.Path == SpeechServiceResult.SpeechMessagePaths.SpeechHypothesis)
                        {
                            RecognizedText = wssr.Result.Text;
                        }
                        else if (wssr.Path == SpeechServiceResult.SpeechMessagePaths.SpeechPhrase)
                        {
                            RecognizedText = wssr.Result.DisplayText;
                        }
                        // Raise an event with the message we just received.
                        // We also keep the last message received in case the client app didn't subscribe to the event.
                        LastMessageReceived = wssr;
                        if (OnMessageReceived != null)
                        {
                            OnMessageReceived.Invoke(wssr);
                        }
                        break;

                    case WebSocketMessageType.Binary:
                        Console.WriteLine("Binary messages are not suppported by this application.");
                        break;

                    case WebSocketMessageType.Close:
                        string description = client.CloseStatusDescription;
                        Console.WriteLine($"Closing WebSocket with Status: {description}");
                        await client.CloseOutputAsync(WebSocketCloseStatus.NormalClosure, "", CancellationToken.None);

                        isReceiving = false;
                        break;

                    default:
                        Console.WriteLine("The message type was not recognized.");
                        break;
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine("An exception occurred while receiving a message:" + Environment.NewLine + ex.Message);
            }
        }
        // --- Example #2 ---
        /// <summary>
        /// Parses a raw WebSocket text message from the Speech Service into a
        /// SpeechServiceResult. The first three non-empty lines carry headers
        /// (X-RequestId; Content-Type plus optional charset; Path); from the first
        /// line starting with '{' onward the remainder is treated as a JSON body
        /// and deserialized into the Result property.
        /// </summary>
        /// <param name="result">The full text payload of one WebSocket message.</param>
        /// <returns>A populated SpeechServiceResult; members are left at their
        /// defaults when the corresponding header or body section is absent.</returns>
        SpeechServiceResult ParseWebSocketSpeechResult(string result)
        {
            SpeechServiceResult wssr = new SpeechServiceResult();

            using (StringReader sr = new StringReader(result))
            {
                int    linecount = 0;
                string line;
                bool   isBodyStarted = false;
                string bodyJSON      = "";

                // Parse each line in the WebSocket results to extract the headers and JSON body.
                // The header is in the first 3 lines of the response, the rest is the body.
                while ((line = sr.ReadLine()) != null)
                {
                    line = line.Trim();
                    if (line.Length > 0)
                    {
                        switch (linecount)
                        {
                        case 0:      // X-RequestID
                            // StartsWith avoids the ArgumentOutOfRangeException that
                            // Substring(0, n) would throw on a short/malformed header line;
                            // OrdinalIgnoreCase keeps the match culture-independent.
                            if (line.StartsWith("x-requestid", StringComparison.OrdinalIgnoreCase) && line.Length >= 12)
                            {
                                wssr.RequestId = line.Substring(12);
                            }
                            break;

                        case 1:      // Content-Type & charset on the same line, separated by a semi-colon
                            var sublines     = line.Split(new[] { ';' });
                            var contentPart  = sublines[0].Trim();

                            if (contentPart.StartsWith("content-type", StringComparison.OrdinalIgnoreCase) && contentPart.Length >= 13)
                            {
                                wssr.ContentType = contentPart.Substring(13);

                                if (sublines.Length > 1)
                                {
                                    var charsetPart = sublines[1].Trim();
                                    if (charsetPart.StartsWith("charset", StringComparison.OrdinalIgnoreCase) && charsetPart.Length >= 8)
                                    {
                                        wssr.CharSet = charsetPart.Substring(8);
                                    }
                                }
                            }
                            break;

                        case 2:      // Path
                            if (line.StartsWith("path", StringComparison.OrdinalIgnoreCase) && line.Length >= 5)
                            {
                                string pathStr = line.Substring(5).Trim().ToLower();
                                switch (pathStr)
                                {
                                case "turn.start":
                                    wssr.Path = SpeechServiceResult.SpeechMessagePaths.TurnStart;
                                    break;

                                case "speech.startdetected":
                                    wssr.Path = SpeechServiceResult.SpeechMessagePaths.SpeechStartDetected;
                                    break;

                                case "speech.hypothesis":
                                    wssr.Path = SpeechServiceResult.SpeechMessagePaths.SpeechHypothesis;
                                    break;

                                case "speech.enddetected":
                                    wssr.Path = SpeechServiceResult.SpeechMessagePaths.SpeechEndDetected;
                                    break;

                                case "speech.phrase":
                                    wssr.Path = SpeechServiceResult.SpeechMessagePaths.SpeechPhrase;
                                    break;

                                case "turn.end":
                                    // NOTE(review): mapping "turn.end" to SpeechEndDetected looks
                                    // like a copy-paste slip — if the enum declares a TurnEnd
                                    // member it should probably be used here. Behavior preserved
                                    // pending confirmation against SpeechMessagePaths.
                                    wssr.Path = SpeechServiceResult.SpeechMessagePaths.SpeechEndDetected;
                                    break;

                                default:
                                    break;
                                }
                            }
                            break;

                        default:
                            if (!isBodyStarted)
                            {
                                // For all non-empty lines past the first three (header), once we encounter an opening brace '{'
                                // we treat the rest of the response as the main results body which is formatted in JSON.
                                if (line[0] == '{')
                                {
                                    isBodyStarted = true;
                                    bodyJSON     += line + lineSeparator;
                                }
                            }
                            else
                            {
                                bodyJSON += line + lineSeparator;
                            }
                            break;
                        }
                    }

                    // Counts every line, including blank ones, so the header cases above
                    // rely on the first three physical lines being the header.
                    linecount++;
                }

                // Once the full response has been parsed between header and body components,
                // we need to parse the JSON content of the body itself.
                if (bodyJSON.Length > 0)
                {
                    RecognitionContent srr = JsonConvert.DeserializeObject<RecognitionContent>(bodyJSON);
                    if (srr != null)
                    {
                        wssr.Result = srr;
                    }
                }
            }

            return wssr;
        }
        /// <summary>
        /// Event handler for the UWP MessageWebSocket: reads each incoming message as
        /// UTF-8 text, parses it into a SpeechServiceResult and surfaces it through
        /// RecognizedText, LastMessageReceived and the OnMessageReceived event.
        /// Unlike the ClientWebSocket loop, Close frames are delivered through the
        /// socket's Closed event rather than through this handler.
        /// </summary>
        /// <param name="sender">The socket that raised the event.</param>
        /// <param name="args">Event data exposing the message type and payload reader.</param>
        private void WebSocket_MessageReceived(MessageWebSocket sender, MessageWebSocketMessageReceivedEventArgs args)
        {
            try
            {
                SpeechServiceResult wssr;
                using (DataReader dataReader = args.GetDataReader())
                {
                    dataReader.UnicodeEncoding = Windows.Storage.Streams.UnicodeEncoding.Utf8;
                    string resStr = dataReader.ReadString(dataReader.UnconsumedBufferLength);
                    Console.WriteLine("Message received from MessageWebSocket: " + resStr);

                    switch (args.MessageType)
                    {
                    // Incoming text messages can be hypotheses about the words the service
                    // recognized or the final phrase, which is a recognition result that
                    // won't change.
                    case SocketMessageType.Utf8:
                        wssr = ParseWebSocketSpeechResult(resStr);
                        Console.WriteLine(resStr + Environment.NewLine + "*** Message End ***" + Environment.NewLine);

                        // Set the recognized text field in the client for future lookup;
                        // hypotheses arrive in Text, final phrases in DisplayText.
                        if (wssr.Path == SpeechServiceResult.SpeechMessagePaths.SpeechHypothesis)
                        {
                            RecognizedText = wssr.Result.Text;
                        }
                        else if (wssr.Path == SpeechServiceResult.SpeechMessagePaths.SpeechPhrase)
                        {
                            RecognizedText = wssr.Result.DisplayText;
                        }

                        // Keep the last message received in case the client app didn't
                        // subscribe to the event, then raise the event.
                        LastMessageReceived = wssr;
                        OnMessageReceived?.Invoke(wssr);
                        break;

                    case SocketMessageType.Binary:
                        Console.WriteLine("Binary messages are not supported by this application.");
                        break;

                    default:
                        Console.WriteLine("The WebSocket message type was not recognized.");
                        break;
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine("An exception occurred while receiving a message:" + Environment.NewLine + ex.Message);
                // Add additional code here to handle exceptions.
            }
        }