Example #1
        private static async Task CompleteContinuousRecognition(ConversationTranscriber recognizer, string conversationId)
        {
            TaskCompletionSource<int> m_taskCompletionSource = new TaskCompletionSource<int>();

            recognizer.SessionStopped += (s, e) =>
            {
                m_taskCompletionSource.TrySetResult(0);
            };
            string canceled = string.Empty;

            recognizer.Canceled += (s, e) =>
            {
                canceled = e.ErrorDetails;
                if (e.Reason == CancellationReason.Error)
                {
                    m_taskCompletionSource.TrySetResult(0);
                }
            };

            await recognizer.StartTranscribingAsync().ConfigureAwait(false);

            await Task.WhenAny(m_taskCompletionSource.Task, Task.Delay(TimeSpan.FromSeconds(10)));

            await recognizer.StopTranscribingAsync().ConfigureAwait(false);
        }
Example #2
 private static ConversationTranscriber TrackSessionId(ConversationTranscriber recognizer)
 {
     recognizer.SessionStarted += (s, e) =>
     {
         Console.WriteLine("SessionId: " + e.SessionId);
     };
     return recognizer;
 }
Example #3
        private static async Task<List<string>> GetRecognizerResult(ConversationTranscriber recognizer, string conversationId)
        {
            List<string> recognizedText = new List<string>();

            recognizer.Transcribed += (s, e) =>
            {
                if (e.Result.Text.Length > 0)
                {
                    recognizedText.Add(e.Result.Text);
                }
            };

            await CompleteContinuousRecognition(recognizer, conversationId);

            recognizer.Dispose();
            return recognizedText;
        }
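
The three helpers above (Examples 1-3) are test-style utilities: TrackSessionId logs the session id, GetRecognizerResult collects recognized phrases, and CompleteContinuousRecognition drives the start/stop cycle. Below is a minimal sketch of how they might be combined; it is an assumption rather than part of the original samples, and the subscription key, region, and wav file path are placeholders.

        // Hypothetical wrapper (assumption): transcribe a wav file using the helpers above.
        private static async Task<List<string>> TranscribeWavFileAsync(string wavFile, string key, string region)
        {
            var config = SpeechConfig.FromSubscription(key, region);
            config.SetProperty("ConversationTranscriptionInRoomAndOnline", "true");

            var conversationId = Guid.NewGuid().ToString();
            using (var conversation = await Conversation.CreateConversationAsync(config, conversationId))
            using (var audioInput = AudioConfig.FromWavFileInput(wavFile))
            {
                // GetRecognizerResult disposes the transcriber once recognition has finished.
                var transcriber = TrackSessionId(new ConversationTranscriber(audioInput));
                await transcriber.JoinConversationAsync(conversation);
                return await GetRecognizerResult(transcriber, conversationId);
            }
        }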
Example #4
        public static async Task TranscribeConversationsAsync(string conversationWaveFile, string subscriptionKey, string region)
        {
            var config = SpeechConfig.FromSubscription(subscriptionKey, region);

            config.SetProperty("ConversationTranscriptionInRoomAndOnline", "true");
            var stopRecognition = new TaskCompletionSource<int>();

            // Create an audio stream from a wav file or from the default microphone if you want to stream live audio from the supported devices
            using (var audioInput = AudioStreamReader.OpenWavFile(conversationWaveFile))
            {
                var meetingID = Guid.NewGuid().ToString();
                using (var conversation = await Conversation.CreateConversationAsync(config, meetingID))
                {
                    // Create a conversation transcriber using audio stream input
                    using (var conversationTranscriber = new ConversationTranscriber(audioInput))
                    {
                        // Subscribe to events
                        conversationTranscriber.Transcribing += (s, e) =>
                        {
                            Console.WriteLine($"TRANSCRIBING: Text={e.Result.Text} SpeakerId={e.Result.UserId}");
                        };

                        conversationTranscriber.Transcribed += (s, e) =>
                        {
                            if (e.Result.Reason == ResultReason.RecognizedSpeech)
                            {
                                Console.WriteLine($"TRANSCRIBED: Text={e.Result.Text} SpeakerId={e.Result.UserId}");
                            }
                            else if (e.Result.Reason == ResultReason.NoMatch)
                            {
                                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                            }
                        };

                        conversationTranscriber.Canceled += (s, e) =>
                        {
                            Console.WriteLine($"CANCELED: Reason={e.Reason}");

                            if (e.Reason == CancellationReason.Error)
                            {
                                Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                                Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                                Console.WriteLine($"CANCELED: Did you update the subscription info?");
                                stopRecognition.TrySetResult(0);
                            }
                        };

                        conversationTranscriber.SessionStarted += (s, e) =>
                        {
                            Console.WriteLine($"\nSession started event. SessionId={e.SessionId}");
                        };

                        conversationTranscriber.SessionStopped += (s, e) =>
                        {
                            Console.WriteLine($"\nSession stopped event. SessionId={e.SessionId}");
                            Console.WriteLine("\nStop recognition.");
                            stopRecognition.TrySetResult(0);
                        };

                        // Add participants to the conversation.
                        // Voice signature needs to be in the following format:
                        // { "Version": <Numeric value>, "Tag": "string", "Data": "string" }
                        var languageForUser1 = "User1PreferredLanguage"; // For example "en-US"
                        var speakerA         = Participant.From("User1", languageForUser1, voiceSignatureUser1);
                        var languageForUser2 = "User2PreferredLanguage"; // For example "en-US"
                        var speakerB         = Participant.From("User2", languageForUser2, voiceSignatureUser2);
                        await conversation.AddParticipantAsync(speakerA);

                        await conversation.AddParticipantAsync(speakerB);

                        // Join to the conversation.
                        await conversationTranscriber.JoinConversationAsync(conversation);

                        // Start transcribing the conversation. Use StopTranscribingAsync() to stop transcribing when all participants leave.
                        await conversationTranscriber.StartTranscribingAsync().ConfigureAwait(false);

                        // Waits for completion.
                        // Use Task.WaitAny to keep the task rooted.
                        Task.WaitAny(new[] { stopRecognition.Task });

                        // Stop transcribing the conversation.
                        await conversationTranscriber.StopTranscribingAsync().ConfigureAwait(false);
                    }
                }
            }
        }
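
A minimal invocation sketch for the method above (an assumption, not part of the original sample); the key, region, and file name are placeholders, and voiceSignatureUser1/voiceSignatureUser2 are assumed to be fields holding voice signature JSON strings created beforehand.

        // Hypothetical call site; replace the placeholders with real values.
        await TranscribeConversationsAsync("conversation.wav", "YourSubscriptionKey", "YourServiceRegion");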
Example #5
        public async Task TranscribeConversationsAsync(IEnumerable<string> voiceSignatureStringUsers)
        {
            uint samplesPerSecond = 16000;
            byte bitsPerSample    = 16;
            byte channels         = 8; // 7 + 1 channels

            var config = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);

            config.SetProperty("ConversationTranscriptionInRoomAndOnline", "true");
            var stopRecognition = new TaskCompletionSource<int>();

            using (var audioInput = AudioInputStream.CreatePushStream(AudioStreamFormat.GetWaveFormatPCM(samplesPerSecond, bitsPerSample, channels)))
            {
                var meetingID = Guid.NewGuid().ToString();
                using (var conversation = await Conversation.CreateConversationAsync(config, meetingID))
                {
                    // Create a conversation transcriber using audio stream input
                    using (this.conversationTranscriber = new ConversationTranscriber(AudioConfig.FromStreamInput(audioInput)))
                    {
                        conversationTranscriber.Transcribing += (s, e) =>
                        {
                            this.SetText($"TRANSCRIBING: Text={e.Result.Text} SpeakerId={e.Result.UserId}");
                        };

                        conversationTranscriber.Transcribed += (s, e) =>
                        {
                            if (e.Result.Reason == ResultReason.RecognizedSpeech)
                            {
                                this.SetText($"TRANSCRIBED: Text={e.Result.Text} SpeakerId={e.Result.UserId}");
                            }
                            else if (e.Result.Reason == ResultReason.NoMatch)
                            {
                                this.SetText($"NOMATCH: Speech could not be recognized.");
                            }
                        };

                        conversationTranscriber.Canceled += (s, e) =>
                        {
                            this.SetText($"CANCELED: Reason={e.Reason}");

                            if (e.Reason == CancellationReason.Error)
                            {
                                this.SetText($"CANCELED: ErrorCode={e.ErrorCode}");
                                this.SetText($"CANCELED: ErrorDetails={e.ErrorDetails}");
                                this.SetText($"CANCELED: Did you update the subscription info?");
                                stopRecognition.TrySetResult(0);
                            }
                        };

                        conversationTranscriber.SessionStarted += (s, e) =>
                        {
                            this.SetText($"\nSession started event. SessionId={e.SessionId}");
                        };

                        conversationTranscriber.SessionStopped += (s, e) =>
                        {
                            this.SetText($"\nSession stopped event. SessionId={e.SessionId}");
                            this.SetText("\nStop recognition.");
                            stopRecognition.TrySetResult(0);
                        };

                        // Add participants to the conversation.
                        int i = 1;
                        foreach (var voiceSignatureStringUser in voiceSignatureStringUsers)
                        {
                            var speaker = Participant.From($"User{i++}", "en-US", voiceSignatureStringUser);
                            await conversation.AddParticipantAsync(speaker);
                        }

                        // Join to the conversation and start transcribing
                        await conversationTranscriber.JoinConversationAsync(conversation);

                        await conversationTranscriber.StartTranscribingAsync().ConfigureAwait(false);

                        using (var p = Pipeline.Create())
                        {
                            var store   = PsiStore.Create(p, "Transcribe", @"D:\Temp");
                            var capture = new AudioCapture(p, WaveFormat.CreatePcm((int)samplesPerSecond, bitsPerSample, channels)).Write("Audio", store);
                            capture.Do(audio => audioInput.Write(audio.Data));
                            p.RunAsync();

                            // waits for completion, then stop transcription
                            await stopRecognition.Task;
                        }

                        await conversationTranscriber.StopTranscribingAsync().ConfigureAwait(false);
                    }
                }
            }
        }
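
As with the previous example, a small invocation sketch, assuming each participant's voice signature JSON has been saved to a local file (the paths are placeholders, not values from the original sample):

        // Hypothetical call site: load one voice signature string per expected speaker.
        var voiceSignatures = new List<string>
        {
            System.IO.File.ReadAllText("user1.signature.json"),
            System.IO.File.ReadAllText("user2.signature.json"),
        };
        await TranscribeConversationsAsync(voiceSignatures);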
Example #6
        public static async Task ConversationWithPullAudioStreamAsync(string filePath, string region, string key)
        {
            // Creates an instance of a speech config with specified subscription key and service region
            // Replace with your own subscription key and region
            var config = SpeechConfig.FromSubscription(key, region);

            config.SetProperty("ConversationTranscriptionInRoomAndOnline", "true");
            var stopTranscription = new TaskCompletionSource<int>();

            var callback   = Helper.CreateWavReader(filePath);
            var pullStream = AudioInputStream.CreatePullStream(callback, AudioStreamFormat.GetWaveFormatPCM(16000, 16, 8));

            // Create an audio stream from a wav file or from the default microphone if you want to stream live audio from the supported devices
            // Replace with your own audio file name and Helper class which implements AudioConfig using PullAudioInputStreamCallback (a minimal sketch of such a callback appears after this example)
            using (var audioInput = AudioConfig.FromStreamInput(pullStream))
            {
                var meetingId = Guid.NewGuid().ToString();
                using (var conversation = await Conversation.CreateConversationAsync(config, meetingId).ConfigureAwait(false))
                {
                    // Create a conversation transcriber using audio stream input
                    using (var conversationTranscriber = new ConversationTranscriber(audioInput))
                    {
                        await conversationTranscriber.JoinConversationAsync(conversation);

                        // Subscribe to events
                        conversationTranscriber.Transcribing += (s, e) =>
                        {
                            //Console.WriteLine($"TRANSCRIBING: Text={e.Result.Text}");
                        };

                        conversationTranscriber.Transcribed += (s, e) =>
                        {
                            if (e.Result.Reason == ResultReason.RecognizedSpeech)
                            {
                                if (!String.IsNullOrWhiteSpace(e.Result.Text))
                                {
                                    Console.WriteLine($"TRANSCRIBED: Text={e.Result.Text}, UserID={e.Result.UserId}");
                                }
                            }
                            else if (e.Result.Reason == ResultReason.NoMatch)
                            {
                                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                            }
                        };

                        conversationTranscriber.Canceled += (s, e) =>
                        {
                            Console.WriteLine($"CANCELED: Reason={e.Reason}");

                            if (e.Reason == CancellationReason.Error)
                            {
                                Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                                Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                                Console.WriteLine($"CANCELED: Did you update the subscription info?");
                                stopTranscription.TrySetResult(0);
                            }
                        };

                        conversationTranscriber.SessionStarted += (s, e) =>
                        {
                            Console.WriteLine("\nSession started event.");
                        };

                        conversationTranscriber.SessionStopped += (s, e) =>
                        {
                            Console.WriteLine("\nSession stopped event.");
                            Console.WriteLine("\nStop recognition.");
                            stopTranscription.TrySetResult(0);
                        };

                        // Add participants to the conversation.
                        // Create voice signatures using REST API described in the earlier section in this document.
                        // Voice signature needs to be in the following format:
                        // { "Version": <Numeric string or integer value>, "Tag": "string", "Data": "string" }

                        /*
                         * var speakerA = Participant.From("Katie", "en-us", KatieSignature);
                         * var speakerB = Participant.From("Steve", "en-us", SteveSignature);
                         * await conversation.AddParticipantAsync(speakerA);
                         * await conversation.AddParticipantAsync(speakerB);
                         */

                        // Start transcribing the conversation. Use StopTranscribingAsync() to stop transcribing when all participants leave.
                        await conversationTranscriber.StartTranscribingAsync().ConfigureAwait(false);

                        // Waits for completion.
                        // Use Task.WaitAny to keep the task rooted.
                        Task.WaitAny(new[] { stopTranscription.Task });

                        // Stop transcribing the conversation.
                        await conversationTranscriber.StopTranscribingAsync().ConfigureAwait(false);
                    }
                }
            }
        }
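
The Helper.CreateWavReader call above is assumed to return a PullAudioInputStreamCallback over the wav file. A minimal sketch of such a callback, modeled on the SDK's sample stream readers, is shown below; it simply hands raw PCM bytes to the SDK and assumes the wav header has already been consumed by the caller.

        // Hypothetical reader (assumption): streams PCM data from a BinaryReader into the SDK.
        public sealed class BinaryAudioStreamReader : PullAudioInputStreamCallback
        {
            private readonly System.IO.BinaryReader reader;

            public BinaryAudioStreamReader(System.IO.BinaryReader reader)
            {
                this.reader = reader;
            }

            // Fill the SDK-provided buffer; returning 0 signals end of stream.
            public override int Read(byte[] dataBuffer, uint size)
            {
                return this.reader.Read(dataBuffer, 0, (int)size);
            }

            public override void Close()
            {
                this.reader.Dispose();
                base.Close();
            }
        }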