/// <summary>
/// Retrieves all the speaker profiles asynchronously and adds them to the list.
/// </summary>
/// <returns>Task to track the status of the asynchronous operation.</returns>
public async Task UpdateAllSpeakersAsync()
{
    try
    {
        SpeakerIdentificationServiceClient serviceClient =
            new SpeakerIdentificationServiceClient(speakerAPISubscriptionKey);

        Title = "Retrieving All Profiles...";
        Profile[] allProfiles = await serviceClient.GetProfilesAsync();
        Title = "All Profiles Retrieved.";

        // Rebuild the list from scratch so stale entries are dropped.
        enrollVoiceList.Clear();
        foreach (Profile profile in allProfiles)
        {
            AddSpeaker(profile);
        }
    }
    catch (GetProfileException ex)
    {
        // Removed GC.Collect(): forcing collection in error paths is unnecessary.
        Console.WriteLine("Error Retrieving Profiles: " + ex.Message);
    }
    catch (Exception ex)
    {
        Console.WriteLine("Error: " + ex.Message);
    }
}
// Deletes up to the first six speaker profiles returned by the service.
// NOTE(review): the hard cap of six deletions per call is preserved from the
// original implementation — presumably an API rate-limit workaround; confirm.
private async void DeleteSpeaker()
{
    try
    {
        SpeakerIdentificationServiceClient serviceClient =
            new SpeakerIdentificationServiceClient(speakerAPISubscriptionKey);

        Title = String.Format("Deleting Profiles...");
        Profile[] allProfiles = await serviceClient.GetProfilesAsync();

        int deletedCount = 0;
        foreach (Profile profile in allProfiles)
        {
            deletedCount++;
            Delete(profile);
            if (deletedCount > 5)
            {
                return;
            }
        }

        // BUG FIX: the original announced "All Profiles Deleted." BEFORE any
        // deletion happened; report completion only after the loop finishes.
        Title = String.Format("Profiles Deleted.");
    }
    catch (Exception ex)
    {
        // Removed GC.Collect(): forcing collection here is unnecessary.
        Console.WriteLine("Error : " + ex.Message);
    }
}
/// <summary>Defines the prompts and steps of the dialog.</summary>
/// <param name="configuration">Application configuration; must contain the Speaker Recognition subscription key.</param>
public MainDialogSet(IConfiguration configuration)
{
    _configuration = configuration ?? throw new ArgumentNullException(nameof(configuration));

    // Pull the Speaker Recognition subscription key out of configuration.
    string subscriptionKey = configuration[SubscriptionKeySettingKey];
    if (string.IsNullOrWhiteSpace(subscriptionKey))
    {
        throw new ArgumentException($"{SubscriptionKeySettingKey} setting is missing from configuration", nameof(configuration));
    }

    // Communication client for Azure Speaker Recognition.
    _client = new SpeakerIdentificationServiceClient(subscriptionKey);

    // Register every dialog this set supports: main menu, profile management,
    // recognition, speech-to-text, and text sentiment analysis.
    AddMainDialog();
    AddProfileDialog();
    AddRecognizerDialog();
    AddSpeechToTextDialog();
    AddTextAnalyzes();
}
/// <summary>
/// Initializes the Record Conversation page: prepares the output WAV file,
/// hosts the shared speakers list, and starts background audio processing.
/// </summary>
public RecordConversationPage()
{
    InitializeComponent();

    _selectedFile = "abc0.wav";
    _speakersListFrame.Navigate(SpeakersListPage.SpeakersList);

    // 16 kHz mono output file.
    writer = new WaveFileWriter(_selectedFile, new WaveFormat(16000, 1));

    MainWindow window = (MainWindow)Application.Current.MainWindow;
    _serviceClient = new SpeakerIdentificationServiceClient(window.ScenarioControl.SubscriptionKey);

    // Read the audio on a background thread, then identify on completion.
    bw.DoWork += (o, args) =>
    {
        audioText = readAudio(_selectedFile);
    };
    bw.RunWorkerCompleted += (o, args) =>
    {
        identify(audioText);
    };
    bw.RunWorkerAsync();
}
/// <summary>
/// Builder to create a Registration controller. Creates a connection to the
/// database and loads profiles using the collection of associated user emails.
///
/// By default, uses the "en-us" enrollment locale and a delay between
/// concurrent requests of SPEAKER_RECOGNITION_API_INTERVAL.
/// </summary>
/// <param name="dbConnectionStr">Database connection string.</param>
/// <param name="registeredEmails">Emails of already-registered users.</param>
/// <param name="speakerIDSubKey">Azure Speaker Recognition subscription key.</param>
/// <param name="enrollmentLocale">Locale used for enrollment.</param>
/// <param name="apiInterval">Delay between API requests (milliseconds, per its use with Task.Delay elsewhere).</param>
public static RegistrationController BuildController(string dbConnectionStr,
    List <string> registeredEmails,
    string speakerIDSubKey = "",
    string enrollmentLocale = "en-us",
    int apiInterval = SPEAKER_RECOGNITION_API_INTERVAL)
{
    SpeakerIdentificationServiceClient enrollmentClient =
        new SpeakerIdentificationServiceClient(speakerIDSubKey);

    List <User> userProfiles = new List <User>();

    Console.WriteLine(">\tLoading All Attendees' Voice Profiles From Database...");

    // A failure loading one user must not abort the rest, so each load is
    // wrapped in its own try/catch. (Removed the redundant `email` shadow
    // variable — curEmail is already in scope in the catch.)
    foreach (var curEmail in registeredEmails)
    {
        try
        {
            DatabaseController.Initialize(dbConnectionStr);
            User curUser = DatabaseController.LoadUser(curEmail);
            if (curUser is null)
            {
                continue;
            }

            // Speaker Recognition expects 16 kHz audio.
            curUser.AudioStream = AudioFileSplitter.Resample(curUser.AudioStream, 16000);
            userProfiles.Add(curUser);
            Console.WriteLine($"\t-\t[Load Succeeded]\t{curEmail}");
        }
        catch (Exception ex)
        {
            Console.Error.WriteLine($"\t-\t[Load Failed]\t{curEmail} Reason: {ex.Message}");
        }
    }

    return(new RegistrationController(userProfiles, enrollmentClient, enrollmentLocale, apiInterval));
}
/// <summary>
/// Creates the shared speaker identification client on first construction;
/// subsequent instances reuse the existing client.
/// </summary>
public projectOxfordSpeaker()
{
    // Lazy, idempotent initialization of the shared service client.
    _serviceClient = _serviceClient ?? new SpeakerIdentificationServiceClient(_subscriptionKey);
}
/// <summary>
/// Enrolls the given users for testing purposes. In the final system,
/// enrollment will be done by the users themselves.
/// </summary>
/// <param name="speakerIDKey">Azure Speaker Recognition subscription key.</param>
/// <param name="voiceprints">Users whose audio streams will be enrolled.</param>
/// <param name="enrollmentLocale">Locale used when creating profiles.</param>
/// <returns>Task that completes when every user is in an enrolled state.</returns>
public static async Task EnrollUsers(string speakerIDKey, List <User> voiceprints, string enrollmentLocale = "en-us")
{
    /*Create REST client for enrolling users */
    SpeakerIdentificationServiceClient enrollmentClient =
        new SpeakerIdentificationServiceClient(speakerIDKey);

    /*Create a new enrollment profile for each user, throttled between requests */
    foreach (User curUser in voiceprints)
    {
        await Task.Delay(SPEAKER_RECOGNITION_API_INTERVAL);
        // Simplified from "await task; x = task.Result" — awaiting already
        // yields the result directly.
        curUser.ProfileGUID = await CreateUserProfile(enrollmentClient, curUser, enrollmentLocale);
    }

    var enrollmentTasks = new List <Task <OperationLocation> >();

    /*Start enrollment tasks for all user voiceprints */
    foreach (User curUser in voiceprints)
    {
        await Task.Delay(SPEAKER_RECOGNITION_API_INTERVAL);
        enrollmentTasks.Add(enrollmentClient.EnrollAsync(curUser.AudioStream, curUser.ProfileGUID, true));
    }

    /*Async wait for all speaker voiceprints to be submitted in request for enrollment */
    await Task.WhenAll(enrollmentTasks);

    /*Async wait for all enrollments to be in an enrolled state */
    await ConfirmEnrollment(enrollmentTasks, enrollmentClient);
}
/// <summary>
/// Retrieves all the speaker profiles asynchronously and adds them to the list.
/// </summary>
/// <returns>Task to track the status of the asynchronous operation.</returns>
public async Task UpdateAllSpeakersAsync()
{
    // NOTE(review): hard-coded key placeholder — move to configuration.
    SpeakerIdentificationServiceClient serviceClient =
        new SpeakerIdentificationServiceClient("Paste your speaker recognition API key here");

    try
    {
        Title = "Retrieving All Profiles...";
        Profile[] allProfiles = await serviceClient.GetProfilesAsync();
        Title = "All Profiles Retrieved.";

        // Rebuild the list from scratch so stale entries are dropped.
        enrollVoiceList.Clear();
        foreach (Profile profile in allProfiles)
        {
            AddSpeaker(profile);
        }
    }
    catch (GetProfileException ex)
    {
        Title = "Error Retrieving Profiles: " + ex.Message;
    }
    catch (Exception ex)
    {
        Console.WriteLine("Error: " + ex.Message);
    }
}
/// <summary>
/// Recreates the speaker identification client on the dispatcher thread using
/// the subscription key currently set in the main window.
/// </summary>
private void UpdateServiceClient()
{
    Dispatcher.Invoke((Action)(() =>
    {
        var mainWindow = (MainWindow)Application.Current.MainWindow;
        this.serviceClient = new SpeakerIdentificationServiceClient(mainWindow.ScenarioControl.SubscriptionKey);
    }));
}
/// <summary>
/// Loads the speaker list the first time the page is shown.
/// </summary>
private async void Page_Loaded(object sender, RoutedEventArgs e)
{
    // Guard clause: nothing to do once the speakers are already loaded.
    if (_speakersLoaded)
    {
        return;
    }

    var window = (MainWindow)Application.Current.MainWindow;
    _serviceClient = new SpeakerIdentificationServiceClient(window.ScenarioControl.SubscriptionKey);
    await UpdateAllSpeakersAsync();
}
/// <summary>
/// Initializes the Enroll Speakers page and its service client.
/// </summary>
public EnrollSpeakersPage()
{
    InitializeComponent();

    // Host the shared speakers list inside this page's frame.
    _speakersListFrame.Navigate(SpeakersListPage.SpeakersList);

    // Build the client from the subscription key entered in the main window.
    var window = (MainWindow)Application.Current.MainWindow;
    _serviceClient = new SpeakerIdentificationServiceClient(window.ScenarioControl.SubscriptionKey);
}
/// <summary>
/// On window load: greets the visitor, then checks whether the configured
/// face-API person group exists and starts the webcam if it does.
/// </summary>
private async void Window_Loaded(object sender, RoutedEventArgs e)
{
    try
    {
        speechSynthesizer.SpeakAsync("Hi Visitor, Welcome to the new world of NCR.");
        await Task.Delay(1000);
        speechSynthesizer.SpeakAsync("To verify your face. Please put your face clearly infront of the ATM.");

        // Removed an unused SpeakerIdentificationServiceClient local the
        // original created here and never used.
        bool groupExists = false;

        var faceServiceClient = new FaceServiceClient(faceAPISubscriptionKey);

        // Test whether the person group already exists.
        try
        {
            Title = String.Format("Request: Group {0} will be used to build a person database. Checking whether the group exists.", GroupName);
            Console.WriteLine("Request: Group {0} will be used to build a person database. Checking whether the group exists.", GroupName);

            await faceServiceClient.GetPersonGroupAsync(GroupName);
            groupExists = true;

            Title = String.Format("Response: Group {0} exists.", GroupName);
            Console.WriteLine("Response: Group {0} exists.", GroupName);
        }
        catch (FaceAPIException ex)
        {
            // "PersonGroupNotFound" simply means the group must be created;
            // any other error code aborts the load.
            if (ex.ErrorCode != "PersonGroupNotFound")
            {
                Title = String.Format("Response: {0}. {1}", ex.ErrorCode, ex.ErrorMessage);
                return;
            }
            else
            {
                Title = String.Format("Response: Group {0} did not exist previously.", GroupName);
            }
        }

        if (groupExists)
        {
            Title = String.Format("Success..... Now your Group {0} ready to use.", GroupName);
            webcam.Start();
            return;
        }
        else
        {
            Console.WriteLine("Group did not exist. First you need to create a group");
        }
    }
    catch (Exception ex)
    {
        // BUG FIX: the original passed ex.Message as an unused second argument
        // to a format string with no placeholder, so the exception text was
        // never printed.
        Console.WriteLine("Error : {0}", ex.Message);
    }
}
/// <summary>
/// On page load: builds the service client, refreshes the speakers list, and
/// switches the list to single-selection mode.
/// </summary>
private void Page_Loaded(object sender, RoutedEventArgs e)
{
    Dispatcher.Invoke(async delegate
    {
        MainWindow window = (MainWindow)Application.Current.MainWindow;
        _serviceClient = new SpeakerIdentificationServiceClient(window.ScenarioControl.SubscriptionKey);

        // BUG FIX: the original used ConfigureAwait(false), so execution
        // resumed on a thread-pool thread and the UI call below could run off
        // the dispatcher thread. Stay on the captured (UI) context instead.
        await SpeakersListPage.SpeakersList.UpdateAllSpeakersAsync();
        SpeakersListPage.SpeakersList.SetSingleSelectionMode();
    });
}
/// <summary>
/// Constructs the Enroll Speakers page, wiring up the shared speakers list
/// and creating the identification service client.
/// </summary>
public EnrollSpeakersPage()
{
    InitializeComponent();
    _speakersListFrame.Navigate(SpeakersListPage.SpeakersList);

    MainWindow mainWindow = (MainWindow)Application.Current.MainWindow;
    string subscriptionKey = mainWindow.ScenarioControl.SubscriptionKey;
    _serviceClient = new SpeakerIdentificationServiceClient(subscriptionKey);
}
/// <summary>
/// Creates a new user profile for a User and returns the GUID for that profile.
/// In the full system, this method should include a check to find out
/// if the user is already registered in persistent storage (i.e. database).
/// </summary>
/// <param name="client">Speaker identification service client.</param>
/// <param name="user">User the profile is created for.</param>
/// <param name="locale">Enrollment locale.</param>
/// <returns>GUID of the newly created profile.</returns>
public static async Task <Guid> CreateUserProfile(SpeakerIdentificationServiceClient client, User user, string locale = "en-us")
{
    // Simplified: the original built a TaskCompletionSource whose result was
    // never consumed, and read .Result after awaiting. Awaiting yields the
    // response directly.
    var profileResponse = await client.CreateProfileAsync(locale);
    return(profileResponse.ProfileId);
}
/// <summary>
/// Creates new identification-streaming recognition client
/// </summary>
/// <param name="clientId">ID associated with all requests related to this client</param>
/// <param name="speakerIds">Speaker ids for recognition</param>
/// <param name="stepSize">Frequency of sending requests to the server in seconds.
/// If set to 1, the client will send a request to the server for every second received from the user</param>
/// <param name="windowSize">Number of seconds sent per request</param>
/// <param name="audioFormat">Audio format</param>
/// <param name="resultCallBack">Value callback action consisted of identification result, client ID and request ID</param>
/// <param name="serviceClient">Client used in identifying the streamed audio file</param>
/// <param name="httpRequests">Task collection handed through to the recognition client</param>
/// <returns>Identification-Streaming and recognition client</returns>
public RecognitionClient CreateRecognitionClient(Guid clientId, Guid[] speakerIds, int stepSize, int windowSize, AudioFormat audioFormat, Action <RecognitionResult> resultCallBack, SpeakerIdentificationServiceClient serviceClient, List <Task> httpRequests)
{
    // Identification needs at least one candidate speaker.
    if (speakerIds.Length == 0)
    {
        throw new ArgumentException("Speakers count can't be smaller than 1.");
    }

    return(new RecognitionClient(clientId, speakerIds, stepSize, windowSize, audioFormat, resultCallBack, serviceClient, httpRequests));
}
/// <summary>
/// Resets (clears) all enrollments for the given speaker profile.
/// </summary>
/// <param name="speakerid">Id of the profile whose enrollments are reset.</param>
private async void DeleteEnrollment(Guid speakerid)
{
    try
    {
        SpeakerIdentificationServiceClient serviceClient =
            new SpeakerIdentificationServiceClient(speakerAPISubscriptionKey);

        // Replaced the original "In"/"Done" debug traces with messages that
        // say what actually happened.
        Console.WriteLine("Resetting enrollments for profile {0}...", speakerid);
        await serviceClient.ResetEnrollmentsAsync(speakerid);
        Console.WriteLine("Enrollments reset.");
    }
    catch (Exception ex)
    {
        Console.WriteLine("Error : " + ex.Message);
    }
}
/// <summary>
/// Ensures that all DiScribe User profiles have matching profiles in the
/// Azure Speaker Recognition service. Creates a valid RegistrationController
/// for registering additional users.
/// </summary>
/// <param name="userProfiles">Users previously loaded from persistent storage.</param>
/// <param name="enrollmentClient">REST client used for enrollment.</param>
/// <param name="enrollmentLocale">Locale used when enrolling profiles.</param>
/// <param name="apiInterval">Delay between API requests — presumably milliseconds (matches Task.Delay usage elsewhere); confirm.</param>
private RegistrationController(List <User> userProfiles, SpeakerIdentificationServiceClient enrollmentClient, string enrollmentLocale, int apiInterval)
{
    /*Create REST client for enrolling users */
    EnrollmentClient = enrollmentClient;
    EnrollmentLocale = enrollmentLocale;
    UserProfiles = userProfiles;

    if (userProfiles.Count > 0)
    {
        /*Ensure that all DiScribe users have profiles enrolled with the Azure Speaker Recognition endpoint */
        // NOTE(review): .Wait() blocks the constructor on async work — deadlock
        // risk if constructed on a thread with a synchronization context.
        EnrollVoiceProfiles().Wait();
    }
}
/// <summary>
/// Creates a speaker profile, enrolls the given audio stream against it, and
/// caches the profile in the in-memory "SpeakerProfiles" list.
/// </summary>
/// <param name="stream">Enrollment audio; read from the beginning.</param>
/// <exception cref="EnrollmentException">On enrollment failure or timeout.</exception>
private void EnrollSpeaker(Stream stream)
{
    // Reset pointer so the full recording is uploaded.
    stream.Seek(0, SeekOrigin.Begin);

    // NOTE(review): hard-coded subscription key — move to configuration.
    SpeakerIdentificationServiceClient speakerIDClient =
        new SpeakerIdentificationServiceClient("c6b005dcf13e45b6a91485d38763277b");

    // Create the speaker profile, then fetch it back.
    CreateProfileResponse creationResponse = speakerIDClient.CreateProfileAsync("en-US").Result;
    Profile profile = speakerIDClient.GetProfileAsync(creationResponse.ProfileId).Result;

    // Kick off enrollment, then poll until it succeeds, fails, or times out.
    OperationLocation processPollingLocation = speakerIDClient.EnrollAsync(stream, profile.ProfileId, false).Result;

    EnrollmentOperation enrollmentResult;
    int numOfRetries = 10;
    TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
    while (numOfRetries > 0)
    {
        // BUG FIX: the original called Task.Delay without awaiting or waiting
        // on it, so polling ran with no pause at all.
        Task.Delay(timeBetweenRetries).Wait();

        enrollmentResult = speakerIDClient.CheckEnrollmentStatusAsync(processPollingLocation).Result;
        if (enrollmentResult.Status == Status.Succeeded)
        {
            break;
        }
        else if (enrollmentResult.Status == Status.Failed)
        {
            throw new EnrollmentException(enrollmentResult.Message);
        }
        numOfRetries--;
    }
    if (numOfRetries <= 0)
    {
        throw new EnrollmentException("Enrollment operation timeout.");
    }

    // Store the profile in the memory cache.
    ObjectCache memCache = MemoryCache.Default;
    var profiles = memCache.Get("SpeakerProfiles") != null
        ? memCache.Get("SpeakerProfiles") as List <Profile>
        : new List <Profile>();

    // BUG FIX: the original re-cached the old list without ever adding the
    // newly enrolled profile, so it was never available for identification.
    profiles.Add(profile);

    memCache.Remove("SpeakerProfiles");
    memCache.Add("SpeakerProfiles", profiles, DateTimeOffset.UtcNow.AddHours(2));
}
/// <summary>
/// Identifies the speaker in the given audio file against all enrolled
/// profiles and writes the identified user (or "unknown") to the UI.
/// </summary>
/// <param name="_selectedFile">Path of the WAV file to identify.</param>
private async void identifySpeaker(string _selectedFile)
{
    SpeakerIdentificationServiceClient _serviceClient;
    OperationLocation processPollingLocation;

    // NOTE(review): hard-coded subscription key — move to configuration.
    _serviceClient = new SpeakerIdentificationServiceClient("e5404f463d1242ad8ce61c5422afc4bf");

    // Identify against every profile registered with the service.
    Profile[] allProfiles = await _serviceClient.GetProfilesAsync();
    Guid[] testProfileIds = new Guid[allProfiles.Length];
    for (int i = 0; i < testProfileIds.Length; i++)
    {
        testProfileIds[i] = allProfiles[i].ProfileId;
    }

    using (Stream audioStream = File.OpenRead(_selectedFile))
    {
        _selectedFile = "";
        processPollingLocation = await _serviceClient.IdentifyAsync(audioStream, testProfileIds, true);
    }

    // Poll for the identification result: up to 10 tries, 5 s apart.
    IdentificationOperation identificationResponse = null;
    int numOfRetries = 10;
    TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
    while (numOfRetries > 0)
    {
        await Task.Delay(timeBetweenRetries);
        identificationResponse = await _serviceClient.CheckIdentificationStatusAsync(processPollingLocation);
        if (identificationResponse.Status == Status.Succeeded)
        {
            // NOTE(review): this statement appears corrupted/redacted in the
            // source (the "******" run); it will not compile as-is and the
            // original logic here must be recovered before shipping.
            writeUser("User: "******"User: unknown");
            break;
        }
        numOfRetries--;
    }
    if (numOfRetries <= 0)
    {
        writeUser("User: unknown");
    }
}
/// <summary>
/// Identifies the speaker of a narration recording against all enrolled
/// profiles, polling the service until the operation completes.
/// </summary>
/// <param name="recordingFileName">Path of the recording to identify.</param>
/// <returns>The completed identification operation.</returns>
/// <exception cref="IdentificationException">On failure or timeout.</exception>
public async Task <IdentificationOperation> RecognizeSpeaker(string recordingFileName)
{
    var client = new SpeakerIdentificationServiceClient(Settings.Instance.SpeakerRecognitionApiKeyValue);

    // Candidate set: every profile known to the service.
    var profiles = await client.GetProfilesAsync();
    var candidateIds = new Guid[profiles.Length];
    int index = 0;
    foreach (var profile in profiles)
    {
        candidateIds[index++] = profile.ProfileId;
    }

    // IdentifyAsync is a long-running operation, so we poll for its result.
    OperationLocation pollingLocation;
    using (Stream audioStream = File.OpenRead(recordingFileName))
    {
        pollingLocation = await client.IdentifyAsync(audioStream, candidateIds, true);
    }

    IdentificationOperation response = null;
    int retriesLeft = 10;
    TimeSpan retryDelay = TimeSpan.FromSeconds(5.0);

    while (retriesLeft > 0)
    {
        await Task.Delay(retryDelay);
        response = await client.CheckIdentificationStatusAsync(pollingLocation);

        if (response.Status == Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Status.Succeeded)
        {
            break;
        }
        if (response.Status == Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Status.Failed)
        {
            throw new IdentificationException(response.Message);
        }
        retriesLeft--;
    }

    if (retriesLeft <= 0)
    {
        throw new IdentificationException("Identification operation timeout.");
    }

    return(response);
}
/// <summary>
/// Identify a stream of audio
/// </summary>
/// <param name="stream">Audio buffer to be recognized</param>
/// <param name="serviceClient">Client used in identifying the streamed audio wave</param>
/// <param name="clientId">Client ID</param>
/// <param name="requestId">Request ID</param>
public async Task IdentifyStreamAsync(Stream stream, SpeakerIdentificationServiceClient serviceClient, Guid clientId, int requestId)
{
    try
    {
        // Submit the audio for identification; forceShortAudio permits clips
        // shorter than the usual minimum length.
        OperationLocation pollingLocation = await serviceClient
            .IdentifyAsync(stream, this.speakerIds, forceShortAudio : true)
            .ConfigureAwait(false);

        // Polling parameters come from configuration, with fallbacks.
        int retriesLeft = int.Parse(_configuration["NumberOfPollingRetries"] ?? "5");
        TimeSpan retryDelay = TimeSpan.FromSeconds(int.Parse(_configuration["TimeSpanBetweenPollingRetries"] ?? "1"));

        while (retriesLeft > 0)
        {
            await Task.Delay(retryDelay);
            var response = await serviceClient.CheckIdentificationStatusAsync(pollingLocation);

            if (response.Status == Status.Succeeded)
            {
                await resultCallback(new RecognitionResult(response.ProcessingResult, clientId, requestId));
                break;
            }

            if (response.Status == Status.Failed)
            {
                await resultCallback(new RecognitionResult(false, response.Message, requestId));
                return;
            }

            retriesLeft--;
        }

        if (retriesLeft <= 0)
        {
            // Retries exhausted without reaching a terminal status.
            await resultCallback(new RecognitionResult(false, "Request timeout.", requestId));
            return;
        }
    }
    catch (Exception ex)
    {
        await resultCallback(new RecognitionResult(false, ex.Message, requestId));
    }
}
/// <summary>
/// Initializes the main page: configures the one-minute enrollment and
/// identification timers and creates the speaker identification client.
/// </summary>
public MainPage()
{
    this.InitializeComponent();

    CaptureMedia = null;

    // Enrollment recording is limited to 60 seconds.
    _etimer = new DispatcherTimer { Interval = new TimeSpan(0, 0, 60) };
    _etimer.Tick += EnrollmentTime_Over;

    // Identification recording is limited to 60 seconds as well.
    _itimer = new DispatcherTimer { Interval = new TimeSpan(0, 0, 60) };
    _itimer.Tick += IdentificationTime_Over;

    _subscriptionKey = "put_your_subscription_key_here";
    _serviceClient = new SpeakerIdentificationServiceClient(_subscriptionKey);
}
/// <summary>
/// Identify a stream of audio
/// </summary>
/// <param name="stream">Audio buffer to be recognized</param>
/// <param name="serviceClient">Client used in identifying the streamed audio wave</param>
/// <param name="clientId">Client ID</param>
/// <param name="requestId">Request ID</param>
public async Task IdentifyStreamAsync(Stream stream, SpeakerIdentificationServiceClient serviceClient, Guid clientId, int requestId)
{
    try
    {
        var pollingLocation = await serviceClient.IdentifyAsync(stream, _speakerIds, true).ConfigureAwait(false);

        // Poll up to three times for a terminal status.
        for (var retriesLeft = 3; retriesLeft > 0; retriesLeft--)
        {
            await Task.Delay(TimeSpan.FromSeconds(TimeSpanBetweenPollingRetries));

            IdentificationOperation response = await serviceClient.CheckIdentificationStatusAsync(pollingLocation);

            if (response.Status == Status.Succeeded)
            {
                _resultCallback(new RecognitionResult(response.ProcessingResult, clientId, requestId));
                return;
            }

            if (response.Status == Status.Failed)
            {
                _resultCallback(new RecognitionResult(false, response.Message, requestId));
                return;
            }
        }

        // All retries exhausted without a terminal status.
        _resultCallback(new RecognitionResult(false, "Request timeout.", requestId));
    }
    catch (Exception ex)
    {
        _resultCallback(new RecognitionResult(false, ex.Message, requestId));
    }
}
/// <summary>
/// Creates a speaker profile, enrolls the given audio data against it, and
/// polls until enrollment completes.
/// </summary>
/// <param name="data">Raw enrollment audio bytes.</param>
/// <param name="filename">Unused; kept for interface compatibility.</param>
/// <exception cref="EnrollmentException">On enrollment failure or timeout.</exception>
public async void voicetoprofile(byte[] data, string filename)
{
    // BUG FIX: the original wrapped everything in
    // catch (Exception ex) { throw ex; }, which only destroyed the stack
    // trace. A pointless catch-and-rethrow is removed entirely.

    // NOTE(review): hard-coded subscription key — move to configuration.
    SpeakerIdentificationServiceClient serviceClient =
        new SpeakerIdentificationServiceClient("xxxxxxxxxxxxxxxxxxxxxxx");

    // NOTE(review): CreateProfileAsync takes a locale elsewhere in this file;
    // passing the "name" text box contents looks wrong — confirm intent.
    CreateProfileResponse creationResponse = await serviceClient.CreateProfileAsync(name.Text.ToString());
    Profile profile = await serviceClient.GetProfileAsync(creationResponse.ProfileId);

    OperationLocation processPollingLocation;
    using (Stream audioStream = new MemoryStream(data))
    {
        processPollingLocation = await serviceClient.EnrollAsync(audioStream, profile.ProfileId);
    }

    // Poll for enrollment completion: up to 10 tries, 5 s apart.
    EnrollmentOperation enrollmentResult;
    int numOfRetries = 10;
    TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
    while (numOfRetries > 0)
    {
        await Task.Delay(timeBetweenRetries);
        enrollmentResult = await serviceClient.CheckEnrollmentStatusAsync(processPollingLocation);

        if (enrollmentResult.Status == Status.Succeeded)
        {
            break;
        }
        else if (enrollmentResult.Status == Status.Failed)
        {
            throw new EnrollmentException(enrollmentResult.Message);
        }
        numOfRetries--;
    }
    if (numOfRetries <= 0)
    {
        throw new EnrollmentException("Enrollment operation timeout.");
    }
}
/// <summary>
/// Identifies the speaker of the given audio stream against the cached
/// "SpeakerProfiles" list, polling until the operation completes.
/// </summary>
/// <param name="stream">Audio to identify; read from the beginning.</param>
/// <exception cref="IdentificationException">On failure or timeout.</exception>
private void RecognizeSpeaker(Stream stream)
{
    // Reset pointer so the full recording is uploaded.
    stream.Seek(0, SeekOrigin.Begin);

    // NOTE(review): hard-coded subscription key — move to configuration.
    SpeakerIdentificationServiceClient speakerIDClient =
        new SpeakerIdentificationServiceClient("c6b005dcf13e45b6a91485d38763277b");

    // Candidate profiles come from the in-memory cache populated at enrollment.
    ObjectCache memCache = MemoryCache.Default;
    var profiles = memCache.Get("SpeakerProfiles") != null
        ? memCache.Get("SpeakerProfiles") as List <Profile>
        : new List <Profile>();
    List <Guid> testProfileIds = (from prof in profiles select prof.ProfileId).ToList();

    OperationLocation processPollingLocation =
        speakerIDClient.IdentifyAsync(stream, testProfileIds.ToArray(), false).Result;

    IdentificationOperation identificationResponse = null;
    int numOfRetries = 10;
    TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
    while (numOfRetries > 0)
    {
        // BUG FIX: the original called Task.Delay without awaiting or waiting
        // on it, so polling ran with no pause at all.
        Task.Delay(timeBetweenRetries).Wait();

        identificationResponse = speakerIDClient.CheckIdentificationStatusAsync(processPollingLocation).Result;
        if (identificationResponse.Status == Status.Succeeded)
        {
            break;
        }
        else if (identificationResponse.Status == Status.Failed)
        {
            throw new IdentificationException(identificationResponse.Message);
        }
        numOfRetries--;
    }
    if (numOfRetries <= 0)
    {
        throw new IdentificationException("Identification operation timeout.");
    }

    // Identification done — results are now accessible.
    var _identificationResultTxtBlk = identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
    var _identificationConfidenceTxtBlk = identificationResponse.ProcessingResult.Confidence.ToString();
}
// Deletes the given speaker profile from the service.
private async void Delete(Profile speaker)
{
    try
    {
        SpeakerIdentificationServiceClient serviceClient =
            new SpeakerIdentificationServiceClient(speakerAPISubscriptionKey);

        Console.WriteLine("Deleting profile {0}...", speaker.ProfileId);

        // ProfileId is already a Guid; the original round-tripped it through
        // ToString()/Guid.Parse for no reason.
        await serviceClient.DeleteProfileAsync(speaker.ProfileId);

        // Removed GC.Collect(): forcing collection here is unnecessary.
        Console.WriteLine("Deleted...");
    }
    catch (Exception ex)
    {
        Console.WriteLine("Error : " + ex.Message);
    }
}
/// <summary>
/// Initializes a new instance of the RecognitionClient class.
/// </summary>
/// <param name="clientId">ID associated with all requests related to this client</param>
/// <param name="speakerIds">Speaker IDs for identification</param>
/// <param name="stepSize">Step size in seconds</param>
/// <param name="windowSize">Number of seconds sent per request</param>
/// <param name="audioFormat">Audio format</param>
/// <param name="resultCallback">Value callback action consisted of identification result, client ID and request ID</param>
/// <param name="serviceClient">Client used in identifying the streamed audio file</param>
internal RecognitionClient(Guid clientId, Guid[] speakerIds, int stepSize, int windowSize, AudioFormat audioFormat, Action <RecognitionResult> resultCallback, SpeakerIdentificationServiceClient serviceClient)
{
    this.ClientId = clientId;
    this.SpeakerIds = speakerIds;
    this.StepSize = stepSize;
    this.WindowSize = windowSize;
    this.requestID = 0;
    this.AudioFormat = audioFormat;
    this.audioFormatHandler = new AudioFormatHandler(audioFormat);
    this.serviceClient = serviceClient;

    // Processor is constructed with the window/step sizes — presumably it
    // buffers audio into windows for identification; confirm in AudioProcessor.
    this.audioProcessor = new AudioProcessor(this.WindowSize, this.StepSize, this.audioFormatHandler);
    this.idClient = new IdentificationClient(this.SpeakerIds, resultCallback);

    // Start SendingRequestsTask on the thread pool with a token owned by this
    // client, so the request loop can be cancelled later.
    this.requestingTaskCancelletionTokenSource = new CancellationTokenSource();
    this.requestingTask = Task.Run(async() =>
    {
        await SendingRequestsTask(requestingTaskCancelletionTokenSource.Token).ConfigureAwait(false);
    });
}
// Creates a new speaker profile in the service.
// INPUT:  none
// OUTPUT: functionResult — on success Result is true and Message holds the new
//         profile id; on failure Result is false and Message holds the error.
public async Task <functionResult> addSpeaker()
{
    var outcome = new functionResult();

    // Lazily create the service client from the stored subscription key.
    if (_serviceClient == null)
    {
        _serviceClient = new SpeakerIdentificationServiceClient(_subscriptionKey);
    }

    try
    {
        // Create the profile, then fetch it back to obtain its id.
        CreateProfileResponse creationResponse = await _serviceClient.CreateProfileAsync("en-us");
        Profile profile = await _serviceClient.GetProfileAsync(creationResponse.ProfileId);

        outcome.Result = true;
        outcome.Message = profile.ProfileId.ToString();
    }
    catch (CreateProfileException ex)
    {
        outcome.Result = false;
        outcome.Message = "Error Creating The Profile: " + ex.Message.ToString();
    }
    catch (GetProfileException ex)
    {
        outcome.Result = false;
        outcome.Message = "Error Retrieving The Profile: " + ex.Message.ToString();
    }
    catch (Exception ex)
    {
        outcome.Result = false;
        outcome.Message = "Error: " + ex.Message.ToString();
    }

    return(outcome);
}
/// <summary>
/// Initializes a new instance of the RecognitionClient class.
/// </summary>
/// <param name="clientId">ID associated with all requests related to this client</param>
/// <param name="speakerIds">Speaker IDs for identification</param>
/// <param name="stepSize">Step size in seconds</param>
/// <param name="windowSize">Number of seconds sent per request</param>
/// <param name="audioFormat">Audio format</param>
/// <param name="resultCallback">Value callback action consisted of identification result, client ID and request ID</param>
/// <param name="serviceClient">Client used in identifying the streamed audio file</param>
/// <param name="httpRequests">Task collection stored on the client — presumably tracks in-flight requests; confirm usage.</param>
internal RecognitionClient(Guid clientId, Guid[] speakerIds, int stepSize, int windowSize, AudioFormat audioFormat, Action <RecognitionResult> resultCallback, SpeakerIdentificationServiceClient serviceClient, List <Task> httpRequests)
{
    ClientId = clientId;
    SpeakerIds = speakerIds;
    StepSize = stepSize;
    WindowSize = windowSize;
    _requestId = 0;
    AudioFormat = audioFormat;
    var audioFormatHandler = new AudioFormatHandler(audioFormat);
    _serviceClient = serviceClient;
    _httpRequests = httpRequests;

    // Processor is constructed with the window/step sizes — presumably it
    // buffers audio into windows for identification; confirm in AudioProcessor.
    _audioProcessor = new AudioProcessor(WindowSize, StepSize, audioFormatHandler);
    _idClient = new IdentificationClient(SpeakerIds, resultCallback);

    // Start SendingRequestsTask on the thread pool with a token owned by this
    // client, so the request loop can be cancelled later.
    _requestingTaskCancelletionTokenSource = new CancellationTokenSource();
    _requestingTask = Task.Run(async() =>
    {
        await SendingRequestsTask(_requestingTaskCancelletionTokenSource.Token).ConfigureAwait(false);
    });
}
/// <summary>
/// Identifies the recorded voice against all enrolled profiles, looks up the
/// matching account in the database, cross-checks the voice identity against
/// the face identity, and finally triggers speaker verification.
/// </summary>
private async void voiceIdentification()
{
    try
    {
        _identificationResultStckPnl.Visibility = Visibility.Hidden;

        // Stop any in-progress recording and wait for the file to be saved.
        if (_waveIn != null)
        {
            _waveIn.StopRecording();
        }
        TimeSpan timeBetweenSaveAndIdentify = TimeSpan.FromSeconds(5.0);
        await Task.Delay(timeBetweenSaveAndIdentify);

        SpeakerIdentificationServiceClient _serviceClient = new SpeakerIdentificationServiceClient(speakerAPISubscriptionKey);

        // Collect the ids of every enrolled profile as identification candidates.
        List <Guid> list = new List <Guid>();
        Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Profile[] allProfiles = await _serviceClient.GetProfilesAsync();
        int itemsCount = 0;
        foreach (Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Profile profile in allProfiles)
        {
            list.Add(profile.ProfileId);
            itemsCount++;
        }
        Guid[] selectedIds = new Guid[itemsCount];
        for (int i = 0; i < itemsCount; i++)
        {
            selectedIds[i] = list[i];
        }

        if (_selectedFile == "")
        {
            throw new Exception("No File Selected.");
        }

        speechSynthesizer.SpeakAsync("Please wait we are verifying your voice.");
        Title = String.Format("Identifying File...");

        // Submit the recording for identification.
        OperationLocation processPollingLocation;
        Console.WriteLine("Selected file is : {0}", _selectedFile);
        using (Stream audioStream = File.OpenRead(_selectedFile))
        {
            //_selectedFile = "";
            Console.WriteLine("Start");
            Console.WriteLine("Audio File is : {0}", audioStream);
            processPollingLocation = await _serviceClient.IdentifyAsync(audioStream, selectedIds, true);
            Console.WriteLine("ProcesPolling Location : {0}", processPollingLocation);
            Console.WriteLine("Done");
        }

        // Poll for the identification result: up to 10 tries, 5 s apart.
        IdentificationOperation identificationResponse = null;
        int numOfRetries = 10;
        TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
        while (numOfRetries > 0)
        {
            await Task.Delay(timeBetweenRetries);
            identificationResponse = await _serviceClient.CheckIdentificationStatusAsync(processPollingLocation);
            Console.WriteLine("Response is : {0}", identificationResponse);
            if (identificationResponse.Status == Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Status.Succeeded)
            {
                break;
            }
            else if (identificationResponse.Status == Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Status.Failed)
            {
                Console.WriteLine("In");
                speechSynthesizer.SpeakAsync("Failed. Please make sure your voice is registered.");
                throw new IdentificationException(identificationResponse.Message);
            }
            numOfRetries--;
        }
        if (numOfRetries <= 0)
        {
            throw new IdentificationException("Identification operation timeout.");
        }
        Title = String.Format("Identification Done.");

        // Look up the account bound to the identified voice profile.
        // NOTE(review): SQL injection risk — the profile id is concatenated
        // directly into the query text; use a parameterized SqlCommand instead.
        conn.Open();
        SqlCommand cmd = conn.CreateCommand();
        cmd.CommandType = System.Data.CommandType.Text;
        cmd.CommandText = "Select AccountNo, CustomerName From AccountDetails where AccountNo = (Select AccountNo From AuthenticationDetails where VoiceId = '" + identificationResponse.ProcessingResult.IdentifiedProfileId.ToString() + "')";
        dr = cmd.ExecuteReader();
        if (dr.HasRows)
        {
            while (dr.Read())
            {
                accountNo = dr.GetInt32(0);
                voiceIdentifiedUserName = dr[1].ToString();
                Console.WriteLine("Account No is : " + accountNo);
                Console.WriteLine("Identified as :" + voiceIdentifiedUserName);
                _identificationResultTxtBlk.Text = voiceIdentifiedUserName;
            }
        }
        dr.Close();
        conn.Close();

        if (_identificationResultTxtBlk.Text == "")
        {
            // No account matched the identified profile id.
            _identificationResultTxtBlk.Text = identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
            speechSynthesizer.SpeakAsync("Sorry we have not found your data.");
            return;
        }
        else
        {
            // Cross-check: voice identity must agree with face identity.
            if (faceIdentifiedUserName == voiceIdentifiedUserName)
            {
                Console.WriteLine("Selected file is : {0}", _selectedFile);
                Stream stream = File.OpenRead(_selectedFile);
                verifySpeaker(stream);
                //speechSynthesizer.SpeakAsync("Hi.");
                //speechSynthesizer.SpeakAsync(_identificationResultTxtBlk.Text.ToString());
                //speechSynthesizer.SpeakAsync("Thanks to verify your face and voice.");
                //speechSynthesizer.SpeakAsync("Now you can do your transactions");
            }
            else
            {
                speechSynthesizer.SpeakAsync("Sorry we have found different voice identity from your face identity.");
                return;
            }
            _identificationConfidenceTxtBlk.Text =
                identificationResponse.ProcessingResult.Confidence.ToString();
            _identificationResultStckPnl.Visibility = Visibility.Visible;
            // NOTE(review): GC.Collect() in production code is unnecessary.
            GC.Collect();
        }
    }
    catch (IdentificationException ex)
    {
        Console.WriteLine("Speaker Identification Error : " + ex.Message);
        GC.Collect();
    }
    catch (Exception ex)
    {
        Console.WriteLine("Error : " + ex.Message);
        GC.Collect();
    }
}
/// <summary>
/// First-load handler: builds the service client and populates the speaker list.
/// </summary>
private async void Page_Loaded(object sender, RoutedEventArgs e)
{
    // Populate the list only once, on first load.
    if (!_speakersLoaded)
    {
        MainWindow mainWindow = (MainWindow)Application.Current.MainWindow;
        _serviceClient = new SpeakerIdentificationServiceClient(mainWindow.ScenarioControl.SubscriptionKey);
        await UpdateAllSpeakersAsync();
    }
}