Example #1
        private async void identifySpeaker(string _selectedFile)
        {
            SpeakerIdentificationServiceClient _serviceClient;
            OperationLocation processPollingLocation;

            _serviceClient = new SpeakerIdentificationServiceClient("e5404f463d1242ad8ce61c5422afc4bf");


            Profile[] allProfiles = await _serviceClient.GetProfilesAsync();

            Guid[] testProfileIds = new Guid[allProfiles.Length];
            for (int i = 0; i < testProfileIds.Length; i++)
            {
                testProfileIds[i] = allProfiles[i].ProfileId;
            }
            using (Stream audioStream = File.OpenRead(_selectedFile))
            {
                _selectedFile          = "";
                processPollingLocation = await _serviceClient.IdentifyAsync(audioStream, testProfileIds, true);
            }

            IdentificationOperation identificationResponse = null;
            int      numOfRetries       = 10;
            TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);

            while (numOfRetries > 0)
            {
                await Task.Delay(timeBetweenRetries);

                identificationResponse = await _serviceClient.CheckIdentificationStatusAsync(processPollingLocation);

                if (identificationResponse.Status == Status.Succeeded)
                {
                    writeUser("User: "******"User: unknown");
                    break;
                }
                numOfRetries--;
            }
            if (numOfRetries <= 0)
            {
                writeUser("User: unknown");
            }
        }
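
Every example in this listing repeats the same poll-until-complete loop around CheckIdentificationStatusAsync. A minimal helper that factors the loop out might look like the sketch below; PollIdentificationAsync is a hypothetical name, and it assumes the same Microsoft.ProjectOxford.SpeakerRecognition types used throughout (SpeakerIdentificationServiceClient, OperationLocation, IdentificationOperation, Status, IdentificationException).

        // Hypothetical helper (not part of the original samples): polls an
        // identification operation until it succeeds, fails, or times out.
        private static async Task<IdentificationOperation> PollIdentificationAsync(
            SpeakerIdentificationServiceClient client,
            OperationLocation location,
            int maxRetries = 10,
            double secondsBetweenRetries = 5.0)
        {
            for (int attempt = 0; attempt < maxRetries; attempt++)
            {
                await Task.Delay(TimeSpan.FromSeconds(secondsBetweenRetries));

                IdentificationOperation response = await client.CheckIdentificationStatusAsync(location);
                if (response.Status == Status.Succeeded)
                {
                    return response;
                }
                if (response.Status == Status.Failed)
                {
                    throw new IdentificationException(response.Message);
                }
            }
            throw new IdentificationException("Identification operation timeout.");
        }
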
Example #2
        /// <summary>
        /// Identify a stream of audio
        /// </summary>
        /// <param name="stream">Audio buffer to be recognized</param>
        /// <param name="serviceClient">Client used in identifying the streamed audio wave</param>
        /// <param name="clientId">Client ID</param>
        /// <param name="requestId">Request ID</param>
        public async Task IdentifyStreamAsync(Stream stream, SpeakerIdentificationServiceClient serviceClient, Guid clientId, int requestId)
        {
            try
            {
                OperationLocation processPollingLocation;
                processPollingLocation = await serviceClient.IdentifyAsync(stream, this.speakerIds, forceShortAudio : true).ConfigureAwait(false);

                int      numOfRetries       = int.Parse(_configuration["NumberOfPollingRetries"] ?? "5");
                TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(int.Parse(_configuration["TimeSpanBetweenPollingRetries"] ?? "1"));
                while (numOfRetries > 0)
                {
                    await Task.Delay(timeBetweenRetries);

                    var identificationResponse = await serviceClient.CheckIdentificationStatusAsync(processPollingLocation);

                    if (identificationResponse.Status == Status.Succeeded)
                    {
                        var result = new RecognitionResult(identificationResponse.ProcessingResult, clientId, requestId);
                        await resultCallback(result);

                        break;
                    }
                    else if (identificationResponse.Status == Status.Failed)
                    {
                        var failureResult = new RecognitionResult(false, identificationResponse.Message, requestId);
                        await resultCallback(failureResult);

                        return;
                    }

                    numOfRetries--;
                }

                if (numOfRetries <= 0)
                {
                    var failureResult = new RecognitionResult(false, "Request timeout.", requestId);
                    await resultCallback(failureResult);

                    return;
                }
            }
            catch (Exception ex)
            {
                var result = new RecognitionResult(false, ex.Message, requestId);
                await resultCallback(result);
            }
        }
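
A possible call site for IdentifyStreamAsync above; the variable recognizer (an instance of the surrounding class, which supplies speakerIds, _configuration, and resultCallback) and the subscription-key placeholder are assumptions for illustration.

        // Hypothetical usage; recognizer and the key placeholder are assumptions.
        var serviceClient = new SpeakerIdentificationServiceClient("<speaker-recognition-subscription-key>");
        using (Stream audioStream = File.OpenRead("sample.wav"))
        {
            await recognizer.IdentifyStreamAsync(audioStream, serviceClient, clientId: Guid.NewGuid(), requestId: 1);
        }
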
Example #3
        public async Task <IdentificationOperation> RecognizeSpeaker(string recordingFileName)
        {
            var srsc     = new SpeakerIdentificationServiceClient(Settings.Instance.SpeakerRecognitionApiKeyValue);
            var profiles = await srsc.GetProfilesAsync();

            //First we choose the set of profiles we want to match the speaker of the narration against
            Guid[] testProfileIds = new Guid[profiles.Length];
            for (int i = 0; i < testProfileIds.Length; i++)
            {
                testProfileIds[i] = profiles[i].ProfileId;
            }

            //IdentifyAsync is a long-running operation, so we need a result-polling mechanism
            OperationLocation processPollingLocation;

            using (Stream audioStream = File.OpenRead(recordingFileName))
            {
                processPollingLocation = await srsc.IdentifyAsync(audioStream, testProfileIds, true);
            }

            IdentificationOperation identificationResponse = null;
            int      numOfRetries       = 10;
            TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);

            //Poll until the identification operation succeeds, fails, or we run out of retries
            while (numOfRetries > 0)
            {
                await Task.Delay(timeBetweenRetries);

                identificationResponse = await srsc.CheckIdentificationStatusAsync(processPollingLocation);

                if (identificationResponse.Status == Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Status.Succeeded)
                {
                    break;
                }
                else if (identificationResponse.Status == Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Status.Failed)
                {
                    throw new IdentificationException(identificationResponse.Message);
                }
                numOfRetries--;
            }
            if (numOfRetries <= 0)
            {
                throw new IdentificationException("Identification operation timeout.");
            }
            return identificationResponse;
        }
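
A caller of RecognizeSpeaker typically reads the identified profile and the confidence from the returned operation; a minimal sketch, assuming a WAV recording on disk:

        // Hypothetical caller for RecognizeSpeaker above.
        IdentificationOperation operation = await RecognizeSpeaker("narration.wav");
        Console.WriteLine("Identified profile: " + operation.ProcessingResult.IdentifiedProfileId);
        Console.WriteLine("Confidence: " + operation.ProcessingResult.Confidence);
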
        /// <summary>
        ///     Identify a stream of audio
        /// </summary>
        /// <param name="stream">Audio buffer to be recognized</param>
        /// <param name="serviceClient">Client used in identifying the streamed audio wave</param>
        /// <param name="clientId">Client ID</param>
        /// <param name="requestId">Request ID</param>
        public async Task IdentifyStreamAsync(Stream stream, SpeakerIdentificationServiceClient serviceClient,
                                              Guid clientId, int requestId)
        {
            try
            {
                OperationLocation processPollingLocation = await serviceClient.IdentifyAsync(stream, _speakerIds, true).ConfigureAwait(false);

                var numberOfPollingRetries = 3;
                while (numberOfPollingRetries > 0)
                {
                    await Task.Delay(TimeSpan.FromSeconds(TimeSpanBetweenPollingRetries));

                    IdentificationOperation identificationResponse = await serviceClient.CheckIdentificationStatusAsync(processPollingLocation);

                    if (identificationResponse.Status == Status.Succeeded)
                    {
                        var result = new RecognitionResult(identificationResponse.ProcessingResult, clientId,
                                                           requestId);
                        _resultCallback(result);
                        break;
                    }

                    if (identificationResponse.Status == Status.Failed)
                    {
                        var failureResult = new RecognitionResult(false, identificationResponse.Message, requestId);

                        _resultCallback(failureResult);
                        return;
                    }

                    numberOfPollingRetries--;
                }


                if (numberOfPollingRetries <= 0)
                {
                    var failureResult = new RecognitionResult(false, "Request timeout.", requestId);
                    _resultCallback(failureResult);
                }
            }
            catch (Exception ex)
            {
                var result = new RecognitionResult(false, ex.Message, requestId);
                _resultCallback(result);
            }
        }
Example #5
        private void RecognizeSpeaker(Stream stream)
        {
            // Reset pointer
            stream.Seek(0, SeekOrigin.Begin);

            SpeakerIdentificationServiceClient speakerIDClient = new SpeakerIdentificationServiceClient("c6b005dcf13e45b6a91485d38763277b");

            // Fetch existing profiles
            ObjectCache memCache = MemoryCache.Default;
            var profiles = memCache.Get("SpeakerProfiles") as List<Profile> ?? new List<Profile>();

            List<Guid> testProfileIds = (from prof in profiles select prof.ProfileId).ToList();

            OperationLocation processPollingLocation = speakerIDClient.IdentifyAsync(stream, testProfileIds.ToArray(), false).Result;

            IdentificationOperation identificationResponse = null;
            int      numOfRetries       = 10;
            TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);

            while (numOfRetries > 0)
            {
                Task.Delay(timeBetweenRetries).Wait();   // this method is synchronous, so block between polls
                identificationResponse = speakerIDClient.CheckIdentificationStatusAsync(processPollingLocation).Result;

                if (identificationResponse.Status == Status.Succeeded)
                {
                    break;
                }
                else if (identificationResponse.Status == Status.Failed)
                {
                    throw new IdentificationException(identificationResponse.Message);
                }
                numOfRetries--;
            }
            if (numOfRetries <= 0)
            {
                throw new IdentificationException("Identification operation timeout.");
            }

            //"Identification Done."

            //Values now accessible!!
            var _identificationResultTxtBlk     = identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
            var _identificationConfidenceTxtBlk = identificationResponse.ProcessingResult.Confidence.ToString();
        }
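
The method above blocks on .Result, which can deadlock when called from a UI thread. An awaited sketch of the same flow, assuming the same cached profile list and the hypothetical PollIdentificationAsync helper sketched after Example #1:

        // Hypothetical async rewrite of the blocking method above.
        private async Task RecognizeSpeakerAsync(Stream stream)
        {
            stream.Seek(0, SeekOrigin.Begin);
            var speakerIDClient = new SpeakerIdentificationServiceClient("<speaker-recognition-subscription-key>");

            var profiles = MemoryCache.Default.Get("SpeakerProfiles") as List<Profile> ?? new List<Profile>();
            Guid[] testProfileIds = profiles.Select(p => p.ProfileId).ToArray();

            OperationLocation location = await speakerIDClient.IdentifyAsync(stream, testProfileIds, false);
            IdentificationOperation response = await PollIdentificationAsync(speakerIDClient, location);

            Console.WriteLine(response.ProcessingResult.IdentifiedProfileId + " (" + response.ProcessingResult.Confidence + ")");
        }
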
        private async void voiceIdentification()
        {
            try
            {
                _identificationResultStckPnl.Visibility = Visibility.Hidden;
                if (_waveIn != null)
                {
                    _waveIn.StopRecording();
                }

                TimeSpan timeBetweenSaveAndIdentify = TimeSpan.FromSeconds(5.0);
                await Task.Delay(timeBetweenSaveAndIdentify);

                SpeakerIdentificationServiceClient _serviceClient = new SpeakerIdentificationServiceClient(speakerAPISubscriptionKey);

                List <Guid> list = new List <Guid>();
                Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Profile[] allProfiles = await _serviceClient.GetProfilesAsync();

                int itemsCount = 0;
                foreach (Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Profile profile in allProfiles)
                {
                    list.Add(profile.ProfileId);
                    itemsCount++;
                }
                Guid[] selectedIds = new Guid[itemsCount];
                for (int i = 0; i < itemsCount; i++)
                {
                    selectedIds[i] = list[i];
                }
                if (_selectedFile == "")
                {
                    throw new Exception("No File Selected.");
                }

                speechSynthesizer.SpeakAsync("Please wait we are verifying your voice.");
                Title = String.Format("Identifying File...");
                OperationLocation processPollingLocation;
                Console.WriteLine("Selected file is : {0}", _selectedFile);
                using (Stream audioStream = File.OpenRead(_selectedFile))
                {
                    //_selectedFile = "";
                    Console.WriteLine("Start");
                    Console.WriteLine("Audio File is : {0}", audioStream);
                    processPollingLocation = await _serviceClient.IdentifyAsync(audioStream, selectedIds, true);

                    Console.WriteLine("ProcesPolling Location : {0}", processPollingLocation);
                    Console.WriteLine("Done");
                }

                IdentificationOperation identificationResponse = null;
                int      numOfRetries       = 10;
                TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
                while (numOfRetries > 0)
                {
                    await Task.Delay(timeBetweenRetries);

                    identificationResponse = await _serviceClient.CheckIdentificationStatusAsync(processPollingLocation);

                    Console.WriteLine("Response is : {0}", identificationResponse);

                    if (identificationResponse.Status == Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Status.Succeeded)
                    {
                        break;
                    }
                    else if (identificationResponse.Status == Microsoft.ProjectOxford.SpeakerRecognition.Contract.Identification.Status.Failed)
                    {
                        Console.WriteLine("In");
                        speechSynthesizer.SpeakAsync("Failed. Please make sure your voice is registered.");
                        throw new IdentificationException(identificationResponse.Message);
                    }
                    numOfRetries--;
                }
                if (numOfRetries <= 0)
                {
                    throw new IdentificationException("Identification operation timeout.");
                }

                Title = String.Format("Identification Done.");

                conn.Open();
                SqlCommand cmd = conn.CreateCommand();
                cmd.CommandType = System.Data.CommandType.Text;
                cmd.CommandText = "Select AccountNo, CustomerName From AccountDetails where AccountNo = (Select AccountNo From AuthenticationDetails where VoiceId = '" + identificationResponse.ProcessingResult.IdentifiedProfileId.ToString() + "')";
                dr = cmd.ExecuteReader();
                if (dr.HasRows)
                {
                    while (dr.Read())
                    {
                        accountNo = dr.GetInt32(0);
                        voiceIdentifiedUserName = dr[1].ToString();
                        Console.WriteLine("Account No is : " + accountNo);
                        Console.WriteLine("Identified as :" + voiceIdentifiedUserName);
                        _identificationResultTxtBlk.Text = voiceIdentifiedUserName;
                    }
                }
                dr.Close();
                conn.Close();
                if (_identificationResultTxtBlk.Text == "")
                {
                    _identificationResultTxtBlk.Text = identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
                    speechSynthesizer.SpeakAsync("Sorry we have not found your data.");
                    return;
                }
                else
                {
                    if (faceIdentifiedUserName == voiceIdentifiedUserName)
                    {
                        Console.WriteLine("Selected file is : {0}", _selectedFile);

                        Stream stream = File.OpenRead(_selectedFile);
                        verifySpeaker(stream);
                        //speechSynthesizer.SpeakAsync("Hi.");
                        //speechSynthesizer.SpeakAsync(_identificationResultTxtBlk.Text.ToString());
                        //speechSynthesizer.SpeakAsync("Thanks to verify your face and voice.");
                        //speechSynthesizer.SpeakAsync("Now you can do your transactions");
                    }
                    else
                    {
                        speechSynthesizer.SpeakAsync("Sorry we have found different voice identity from your face identity.");
                        return;
                    }
                    _identificationConfidenceTxtBlk.Text    = identificationResponse.ProcessingResult.Confidence.ToString();
                    _identificationResultStckPnl.Visibility = Visibility.Visible;
                    GC.Collect();
                }
            }
            catch (IdentificationException ex)
            {
                Console.WriteLine("Speaker Identification Error : " + ex.Message);
                GC.Collect();
            }
            catch (Exception ex)
            {
                Console.WriteLine("Error : " + ex.Message);
                GC.Collect();
            }
        }
Example #7
        async Task finishIdentification()
        {
            if (btnIdentify.IsEnabled == false)
            {
                return;                                 // guard: the timer event may fire after the user has already clicked
            }
            btnIdentify.Content   = "Start voice identification";
            btnIdentify.IsEnabled = false;
            await CaptureMedia.StopRecordAsync();

            Stream str = AudioStream.AsStream();

            str.Seek(0, SeekOrigin.Begin);

            Profile[] selectedProfiles = new Profile[lbProfiles.Items.Count];

            Guid[] testProfileIds = new Guid[selectedProfiles.Length];
            for (int i = 0; i < lbProfiles.Items.Count; i++)
            {
                testProfileIds[i] = Guid.Parse((lbProfiles.Items[i] as ListBoxItem).Content.ToString());
            }

            OperationLocation processPollingLocation;

            try
            {
                processPollingLocation = await _serviceClient.IdentifyAsync(str, testProfileIds);
            }
            catch (IdentificationException vx)
            {
                txtInfo.Text = vx.Message;
                CleanAfter();
                return;
            }
            catch (Exception vx)
            {
                txtInfo.Text = vx.Message;
                CleanAfter();
                return;
            }

            IdentificationOperation identificationResponse = null;
            int      numOfRetries       = 10;
            TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);

            while (numOfRetries > 0)
            {
                await Task.Delay(timeBetweenRetries);

                identificationResponse = await _serviceClient.CheckIdentificationStatusAsync(processPollingLocation);

                if (identificationResponse.Status == Status.Succeeded)
                {
                    break;
                }
                else if (identificationResponse.Status == Status.Failed)
                {
                    txtInfo.Text = identificationResponse.Message;
                    CleanAfter();
                    return;
                }
                numOfRetries--;
            }

            if (numOfRetries <= 0)
            {
                txtInfo.Text = "Identification operation timeout.";
            }
            else
            {
                txtInfo.Text = identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
                txtInfo.Text = txtInfo.Text + Environment.NewLine + identificationResponse.ProcessingResult.Confidence.ToString();
            }

            CleanAfter();
        }
Example #8
        private async void identify(string ou)
        {
            MainWindow window = (MainWindow)Application.Current.MainWindow;

            try
            {
                window.Log("Identifying File...");
                Profile[] selectedProfiles = SpeakersListPage.SpeakersList.GetSelectedProfiles();
                Guid[]    testProfileIds   = new Guid[selectedProfiles.Length];
                for (int i = 0; i < testProfileIds.Length; i++)
                {
                    testProfileIds[i] = selectedProfiles[i].ProfileId;
                }

                List <string> list = new List <string>();
                for (int j = 0; j < 15; j++)
                {
                    list.Add(_selectedFile);
                }
                //list.Add(_selectedFile);
                string _selectedFile1 = _selectedFile.Substring(0, _selectedFile.Length - 4) + "1" + ".wav";
                Concatenate(_selectedFile1, list);

                OperationLocation processPollingLocation;
                using (Stream audioStream = File.OpenRead(_selectedFile1))
                {
                    //for (int i = 1; i <= 2;i++ )
                    //    audioStream.CopyTo(audioStream);
                    //_selectedFile = "";
                    processPollingLocation = await _serviceClient.IdentifyAsync(audioStream, testProfileIds);
                }

                IdentificationOperation identificationResponse = null;
                int      numOfRetries       = 15;
                TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
                while (numOfRetries > 0)
                {
                    await Task.Delay(timeBetweenRetries);

                    identificationResponse = await _serviceClient.CheckIdentificationStatusAsync(processPollingLocation);

                    if (identificationResponse.Status == Status.Succeeded)
                    {
                        break;
                    }
                    else if (identificationResponse.Status == Status.Failed)
                    {
                        throw new IdentificationException(identificationResponse.Message);
                    }
                    numOfRetries--;
                }
                if (numOfRetries <= 0)
                {
                    throw new IdentificationException("Identification operation timeout.");
                }

                window.Log("Identification Done.");
                tblock.Text  = count + identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
                output.Text += "\n" + identificationResponse.ProcessingResult.IdentifiedProfileId.ToString() + ":" + ou;
                count++;
                //_identificationResultTxtBlk.Text = identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
                //_identificationConfidenceTxtBlk.Text = identificationResponse.ProcessingResult.Confidence.ToString();
                //_identificationResultStckPnl.Visibility = Visibility.Visible;
            }
            catch (IdentificationException ex)
            {
                window.Log("Speaker Identification Error: " + ex.Message);
            }
            catch (Exception ex)
            {
                window.Log("Error: " + ex.Message);
            }
        }
        //This functionality will verify the speaker using the passed audio stream and the passed GUID
        //INPUT: audio stream to verify, speaker ID, sample length
        //OUTPUT: speakerVerificationResult
        public async Task <speakerVerificationResult> verifySpeaker(Stream ms, Guid speakerID, double sampleLength)
        {
            speakerVerificationResult result = new speakerVerificationResult();

            if (sampleLength > 2.5)
            {
                try
                {
                    //If the audio stream is empty, then throw an error
                    if (ms == null)
                    {
                        throw new Exception("No Audio stream found.");
                    }

                    //Create an array of profiles to be used. We only authenticate against one.
                    //In the future we could add multiple speakers to this array so the system could tell us "who" is speaking, but that isn't
                    //the aim of this project. We only want a true/false answer for whether the current speaker is who we expect.
                    Guid[] testProfileIds = new Guid[1];

                    //Assign the selected enrollment item to the array
                    testProfileIds[0] = speakerID;

                    OperationLocation processPollingLocation;
                    using (Stream audioStream = ms)
                    {
                        audioStream.Position   = 0;
                        processPollingLocation = await _serviceClient.IdentifyAsync(audioStream, testProfileIds, true);
                    }

                    IdentificationOperation identificationResponse = null;
                    int      numOfRetries       = 10;
                    TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
                    while (numOfRetries > 0)
                    {
                        await Task.Delay(timeBetweenRetries);

                        identificationResponse = await _serviceClient.CheckIdentificationStatusAsync(processPollingLocation);

                        if (identificationResponse.Status == Status.Succeeded)
                        {
                            break;
                        }
                        else if (identificationResponse.Status == Status.Failed)
                        {
                            throw new IdentificationException(identificationResponse.Message);
                        }
                        numOfRetries--;
                    }
                    if (numOfRetries <= 0)
                    {
                        throw new IdentificationException("Identification operation timeout.");
                    }

                    result.Status = identificationResponse.Status.ToString();
                    result.IdentifiedProfileId = identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
                    result.Confidence          = identificationResponse.ProcessingResult.Confidence.ToString();

                    result.Result  = true;
                    result.Message = ("VerifciationResult passed back");
                }
                catch (IdentificationException ex)
                {
                    result.Result = false;
                    if (ex.Message.ToUpper().Contains("SPEAKERINVALID"))
                    {
                        result.Message = "Your voice sample could not be verified against the enrolled sample";
                    }
                    else
                    {
                        result.Message = ("Speaker Identification Error: " + ex.Message);
                    }
                }
                catch (Exception ex)
                {
                    result.Result  = false;
                    result.Message = ("Error: " + ex.Message);
                }
            }
            else
            {
                result.Result  = false;
                result.Message = ("Enroll: Audio sample need to be > 2.5 seconds");
            }
            return result;
        }
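
A possible call site for verifySpeaker above; the file name, the enrolled profile GUID, and the measured sample length are assumptions taken from whatever enrollment and recording flow the application uses.

        // Hypothetical usage of verifySpeaker; enrolledProfileId and the 4.0 s sample length are placeholders.
        using (Stream audio = File.OpenRead("sample.wav"))
        {
            speakerVerificationResult verification = await verifySpeaker(audio, enrolledProfileId, 4.0);
            Console.WriteLine(verification.Result + ": " + verification.Message + " (" + verification.Confidence + ")");
        }
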
        //private void loadFileBtn_Click(object sender, RoutedEventArgs e)
        //{
        //    identifyBtn.IsEnabled = true;
        //    OpenFileDialog openFileDialog = new OpenFileDialog();
        //    openFileDialog.Filter = "WAV Files(*.wav)|*.wav";
        //    bool? result = openFileDialog.ShowDialog();

        //    if (!(bool)result)
        //    {
        //        Title = String.Format("No File Selected.");
        //        return;
        //    }
        //    Title = String.Format("File Selected: " + openFileDialog.FileName);
        //    _selectedFile = openFileDialog.FileName;
        //}


        private async void identifyBtn_Click(object sender, RoutedEventArgs e)
        {
            _identificationResultStckPnl.Visibility = Visibility.Hidden;
            //First Stop Recording...
            recordBtn.IsEnabled   = true;
            identifyBtn.IsEnabled = false;
            if (_waveIn != null)
            {
                _waveIn.StopRecording();
            }

            TimeSpan timeBetweenSaveAndIdentify = TimeSpan.FromSeconds(5.0);
            await Task.Delay(timeBetweenSaveAndIdentify);

            //await UpdateAllSpeakersAsync();

            //Identify Voice
            //List<Guid> selectedItems = new List<Guid>(3);
            //selectedItems.Add(Guid.Parse("f7d5a9d2-9663-4504-b53c-ee0c2c975104")); //f7d5a9d2-9663-4504-b53c-ee0c2c975104
            //selectedItems.Add(Guid.Parse("acec28d0-cfd5-4bc4-8840-03bb523a43f7"));
            //selectedItems.Add(Guid.Parse("7ce81071-ef9d-46cf-9d87-02d465b1a972"));
            //selectedItems.Add(Guid.Parse("f7d5a9d2-9663-4504-b53c-ee0c2c975104"));
            //selectedItems.Add(Guid.Parse("0501e357-e56d-46b0-87ad-957ef8744d9c"));
            //selectedItems.Add(Guid.Parse("aeb46767-24b7-4d9d-a9ad-dd7dc965b0bb"));
            //selectedItems.Add(Guid.Parse("26c11b8d-6de2-4c5e-971c-8c4dc774d5e1"));
            //selectedItems.Add(Guid.Parse("f7d5a9d2-9663-4504-b53c-ee0c2c975104"));
            //selectedItems.Add(Guid.Parse("8f85b2c6-688a-4d44-a8c4-370be910b8bf"));

            //Guid[] selectedIds = new Guid[1];
            //int i = 0;
            //foreach (KeyValuePair<Guid, string> kv in enrollVoiceList)
            //{
            //    selectedIds[i] = kv.Key;
            //    i++;
            //}
            //selectedIds[0] = selectedItems[0];

            //for (int j = 0; j < 1; j++)
            //{
            //    selectedIds[i] = selectedItems[i];

            //}

            List <Guid> list = new List <Guid>();

            Profile[] allProfiles = await _serviceClient.GetProfilesAsync();

            int itemsCount = 0;

            foreach (Profile profile in allProfiles)
            {
                list.Add(profile.ProfileId);
                itemsCount++;
            }
            Guid[] selectedIds = new Guid[1];
            for (int i = 0; i < 1; i++)
            {
                selectedIds[i] = list[i];
            }

            try
            {
                if (_selectedFile == "")
                {
                    throw new Exception("No File Selected.");
                }

                Title = String.Format("Identifying File...");
                OperationLocation processPollingLocation;
                using (Stream audioStream = File.OpenRead(_selectedFile))
                {
                    _selectedFile = "";
                    Console.WriteLine("Audio File is : {0}", audioStream);
                    processPollingLocation = await _serviceClient.IdentifyAsync(audioStream, selectedIds, true);
                }

                IdentificationOperation identificationResponse = null;
                int      numOfRetries       = 10;
                TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
                while (numOfRetries > 0)
                {
                    await Task.Delay(timeBetweenRetries);

                    identificationResponse = await _serviceClient.CheckIdentificationStatusAsync(processPollingLocation);

                    Console.WriteLine("Response is : {0}", identificationResponse);
                    if (identificationResponse.Status == Status.Succeeded)
                    {
                        break;
                    }
                    else if (identificationResponse.Status == Status.Failed)
                    {
                        throw new IdentificationException(identificationResponse.Message);
                    }
                    numOfRetries--;
                }
                if (numOfRetries <= 0)
                {
                    throw new IdentificationException("Identification operation timeout.");
                }

                Title = String.Format("Identification Done.");

                if (identificationResponse.ProcessingResult.IdentifiedProfileId.ToString() == "aeb46767-24b7-4d9d-a9ad-dd7dc965b0bb")
                {
                    _identificationResultTxtBlk.Text = "Toshif"; //aeb46767 - 24b7 - 4d9d - a9ad - dd7dc965b0bb //f7d5a9d2-9663-4504-b53c-ee0c2c975104
                }

                else if (identificationResponse.ProcessingResult.IdentifiedProfileId.ToString() == "7ce81071-ef9d-46cf-9d87-02d465b1a972")
                {
                    _identificationResultTxtBlk.Text = "Aakash";
                }
                else if (identificationResponse.ProcessingResult.IdentifiedProfileId.ToString() == "acec28d0-cfd5-4bc4-8840-03bb523a43f7")
                {
                    _identificationResultTxtBlk.Text = "Mohammad Toshif Khan"; //0501e357 - e56d - 46b0 - 87ad - 957ef8744d9c
                }
                else if (identificationResponse.ProcessingResult.IdentifiedProfileId.ToString() == "f7d5a9d2-9663-4504-b53c-ee0c2c975104")
                {
                    _identificationResultTxtBlk.Text = "Nandini"; //f7d5a9d2 - 9663 - 4504 - b53c - ee0c2c975104
                }
                else
                {
                    _identificationResultTxtBlk.Text = identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
                }
                _identificationConfidenceTxtBlk.Text    = identificationResponse.ProcessingResult.Confidence.ToString();
                _identificationResultStckPnl.Visibility = Visibility.Visible;
            }
            catch (IdentificationException ex)
            {
                GC.Collect();
                Title = String.Format("Speaker Identification Error: " + ex.Message);
                Console.WriteLine("Speaker Identification Error : " + ex.Message);
            }
            catch (Exception ex)
            {
                GC.Collect();
                Title = String.Format("Error: " + ex.Message);
            }

            GC.Collect();
        }
Example #11
        private async void _identifyBtn_Click(object sender, RoutedEventArgs e)
        {
            MainWindow window = (MainWindow)Application.Current.MainWindow;

            try
            {
                if (_selectedFile == "")
                {
                    throw new Exception("No File Selected.");
                }

                window.Log("Identifying File...");
                Profile[] selectedProfiles = SpeakersListPage.SpeakersList.GetSelectedProfiles();
                Guid[]    testProfileIds   = new Guid[selectedProfiles.Length];
                for (int i = 0; i < testProfileIds.Length; i++)
                {
                    testProfileIds[i] = selectedProfiles[i].ProfileId;
                }

                OperationLocation processPollingLocation;
                using (Stream audioStream = File.OpenRead(_selectedFile))
                {
                    _selectedFile          = "";
                    processPollingLocation = await _serviceClient.IdentifyAsync(audioStream, testProfileIds, ((sender as Button) == _identifyShortAudioBtn));
                }

                IdentificationOperation identificationResponse = null;
                int      numOfRetries       = 10;
                TimeSpan timeBetweenRetries = TimeSpan.FromSeconds(5.0);
                while (numOfRetries > 0)
                {
                    await Task.Delay(timeBetweenRetries);

                    identificationResponse = await _serviceClient.CheckIdentificationStatusAsync(processPollingLocation);

                    if (identificationResponse.Status == Status.Succeeded)
                    {
                        break;
                    }
                    else if (identificationResponse.Status == Status.Failed)
                    {
                        throw new IdentificationException(identificationResponse.Message);
                    }
                    numOfRetries--;
                }
                if (numOfRetries <= 0)
                {
                    throw new IdentificationException("Identification operation timeout.");
                }

                window.Log("Identification Done.");

                _identificationResultTxtBlk.Text        = identificationResponse.ProcessingResult.IdentifiedProfileId.ToString();
                _identificationConfidenceTxtBlk.Text    = identificationResponse.ProcessingResult.Confidence.ToString();
                _identificationResultStckPnl.Visibility = Visibility.Visible;
            }
            catch (IdentificationException ex)
            {
                window.Log("Speaker Identification Error: " + ex.Message);
            }
            catch (Exception ex)
            {
                window.Log("Error: " + ex.Message);
            }
        }
Example #12
        /// <summary>
        /// Performs speaker recognition on TranscriberOutputs to set
        /// the Speaker property.
        /// set set their User property representing the speaker.
        ///
        /// Note that apiDelayInterval allows the time between API requests in MS to be set.
        /// It is set to 3000 by default
        /// </summary>
        /// <param name="transcription"></param>
        /// <param name="apiDelayInterval"></param>
        public async Task DoSpeakerRecognition(SortedList <long, TranscriptionOutput> TranscriptionOutputs, int apiDelayInterval = 3000)
        {
            Console.WriteLine(">\tBegin Speaker Recognition...");
            var recognitionComplete = new TaskCompletionSource <int>();

            /*Create REST client for enrolling users */
            SpeakerIdentificationServiceClient idClient = new SpeakerIdentificationServiceClient(Controller.SpeakerIDSubKey);

            /*Dictionary for efficient voiceprint lookup by enrollment GUID*/
            Dictionary <Guid, User> voiceprintDictionary = new Dictionary <Guid, User>();

            Guid[] userIDs = new Guid[Controller.Voiceprints.Count];

            foreach (var voiceprint in Controller.Voiceprints)
            {
                Console.WriteLine($">\tAdding profile to voice print dictionary for {voiceprint.Email}");

                try
                {
                    voiceprintDictionary.Add(voiceprint.ProfileGUID, voiceprint);
                }
                catch (Exception ex)
                {
                    Console.Error.WriteLine("Error adding profile to voiceprint dictionary. Continuing to any " +
                                            "remaining voiceprints. Reason: " + ex.Message);
                }
            }

            if (voiceprintDictionary.Count == 0)
            {
                Console.WriteLine(">\tNo Voice Profiles Detected");
                return;
            }
            voiceprintDictionary.Keys.CopyTo(userIDs, 0);                  //Hold GUIDs in userIDs array


            /*Iterate over each phrase and attempt to identify the user.
             * Passes the audio data as a stream and the user GUIDs associated with the
             * Azure SpeakerRecognition API profiles to the API via the IdentifyAsync() method.
             * Sets the Speaker property in each TranscriptionOutput object in TranscriptionOutputs*/
            try
            {
                foreach (var curPhrase in TranscriptionOutputs)
                {
                    try
                    {
                        /*Handle case where phrase is too short to perform speaker recognition */
                        if (curPhrase.Value.EndOffset - curPhrase.Value.StartOffset < 1000)
                        {
                            continue;
                        }


                        /*Write audio data in segment to a buffer containing wav file header */
                        byte[] wavBuf = AudioFileSplitter.WriteWavToBuf(curPhrase.Value.Segment.AudioData);


                        await Task.Delay(apiDelayInterval);

                        /*Create the task which submits the request to begin speaker recognition to the Speaker Recognition API.
                         * Request contains the stream of this phrase and the GUIDs of users that may be present.*/
                        Task <OperationLocation> idTask = idClient.IdentifyAsync(new MemoryStream(wavBuf), userIDs, true);

                        await idTask;

                        var resultLoc = idTask.Result;                                      //URL wrapper to check recognition status

                        /*Continue to check task status until it is completed */
                        Task <IdentificationOperation> idOutcomeCheck;
                        Boolean done = false;
                        Status  outcome;
                        do
                        {
                            await Task.Delay(apiDelayInterval);

                            idOutcomeCheck = idClient.CheckIdentificationStatusAsync(resultLoc);
                            await idOutcomeCheck;

                            outcome = idOutcomeCheck.Result.Status;
                            /*If recognition is complete or failed, stop checking for status*/
                            done = (outcome == Status.Succeeded || outcome == Status.Failed);
                        } while (!done);

                        User speaker = null;

                        /*Set user as unrecognizable if the API response indicates failure */
                        if (outcome == Status.Failed)
                        {
                            Console.Error.WriteLine("Recognition operation failed for this phrase.");
                        }

                        else
                        {
                            Guid profileID = idOutcomeCheck.Result.ProcessingResult.IdentifiedProfileId;           //Get profile ID for this identification.

                            /*If the recognition request succeeded but no user could be recognized */
                            if (outcome == Status.Succeeded &&
                                profileID.ToString() == "00000000-0000-0000-0000-000000000000")
                            {
                                speaker = null;
                            }

                            /*If the task succeeded and the profile ID matches an ID in
                             * the set of known user profiles, then set the associated user */
                            else if (idOutcomeCheck.Result.Status == Status.Succeeded &&
                                     voiceprintDictionary.ContainsKey(profileID))
                            {
                                Console.WriteLine($">\tRecognized {voiceprintDictionary[profileID].Email}");
                                speaker = voiceprintDictionary[profileID];
                            }
                        }

                        curPhrase.Value.Speaker = speaker;                     //Set speaker property in TranscriptionOutput object based on result.

                        //End-foreach
                    }
                    catch (Exception ex)
                    {
                        Console.Error.WriteLine(ex.Message);
                    }
                }
            }
            catch (AggregateException ex)
            {
                Console.Error.WriteLine("Id failed: " + ex.Message);
            }
            recognitionComplete.SetResult(0);
        }