// Function for converting text to a voiced wav file via text-to-speech
    public bool TextToWav(string FilePath, string text)
    {
        //VA.WriteToLog("creating wav file"); // Output info to event log
        SpFileStream stream = new SpFileStream();                                 // Create new SpFileStream instance

        try                                                                       // Attempt the following code
        {
            if (System.IO.File.Exists(FilePath) == true)                          // Check if voice recognition wav file already exists
            {
                System.IO.File.Delete(FilePath);                                  // Delete existing voice recognition wav file
            }
            stream.Format.Type = SpeechAudioFormatType.SAFT48kHz16BitStereo;      // Set the file stream audio format
            stream.Open(FilePath, SpeechStreamFileMode.SSFMCreateForWrite, true); // Open the specified file for writing with events enabled
            SpVoice voice = new SpVoice();                                        // Create new SPVoice instance
            voice.Volume = 100;                                                   // Set the volume level of the text-to-speech voice
            voice.Rate   = -2;                                                    // Set the rate at which text is spoken by the text-to-speech engine
            string NameAttribute = "Name = " + VA.GetText("~~TextToSpeechVoice"); // Build the SAPI attribute search string for the desired voice name
            voice.Voice = voice.GetVoices(NameAttribute).Item(0);                 // Select the first installed voice matching the requested name
            //voice.Speak(text);
            voice.AudioOutputStream = stream;                                          // Send the audio output to the file stream
            voice.Speak(text, SpeechVoiceSpeakFlags.SVSFDefault);                      // Internally "speak" the inputted text (which records it in the wav file)
            voice = null;                                                              // Set to null in preparation for garbage collection
        }
        catch                                                                          // Handle exceptions in above code
        {
            VA.SetText("~~RecognitionError", "Error during wav file creation (SAPI)"); // Send error detail back to VoiceAttack as text variable
            return(false);                                                             // Send "false" back to calling code line
        }
        finally                                                                        // Runs whether an exception is encountered or not
        {
            stream.Close();                                                            // Close the file stream
            stream = null;                                                             // Set to null in preparation for garbage collection
        }
        return(true);                                                                  // Send "true" back to calling code line
    }
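
    // GetVoices(NameAttribute) above returns an empty token collection when the requested voice is not
    // installed, in which case Item(0) throws. The helper below is a defensive sketch (not part of the
    // original code; the name TrySetVoice is illustrative) that could replace the direct Item(0)
    // assignment and fall back to the engine's default voice instead.
    private void TrySetVoice(SpVoice voice, string requestedName)
    {
        ISpeechObjectTokens tokens = voice.GetVoices("Name = " + requestedName); // Query installed voices matching the requested name attribute
        if (tokens.Count > 0)                                                    // At least one matching voice was found
        {
            voice.Voice = tokens.Item(0);                                        // Select the first matching voice token
        }
        // Otherwise keep the text-to-speech engine's default voice
    }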
Example #2
    public void main()
    {
        Console.WriteLine("Making API Call...");
        using (var client = new HttpClient(new HttpClientHandler {
            AutomaticDecompression = DecompressionMethods.GZip | DecompressionMethods.Deflate    // Accept gzip/deflate compressed responses
        }))
        {
            client.BaseAddress = new Uri("http://127.0.0.1:5000/play/" + VA.GetText("SongUri")); // Build the request URI from the VoiceAttack "SongUri" text variable
            HttpResponseMessage response = client.GetAsync("").Result;                           // Send the GET request synchronously
            response.EnsureSuccessStatusCode();                                                  // Throw if the response status is not a success code
            string result = response.Content.ReadAsStringAsync().Result;                         // Read the response body as a string
            Console.WriteLine("Result: " + result);
        }
        Console.ReadLine();                                                                      // Wait for the user to press Enter before exiting
    }
    bool UseDictation;     // Declare boolean variable for storing pronunciation dictation grammar setting
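    // Note: the following class-level fields are referenced in main() below but are not declared in
    // this snippet; their declarations are inferred from how they are used and are added here so the
    // example compiles as shown.
    string GrammarPath;         // Path to the speech grammar XML file (set from "~~GrammarFilePath")
    string AudioPath;           // Path to the voice recognition input wav file (set from "~~AudioFilePath")
    SpFileStream FileStream;    // File stream used to feed the wav file into the speech recognizer
    ISpeechRecoGrammar grammar; // Speech recognition grammar created from the recognition context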

    public void main()
    {
        // Reset relevant VoiceAttack text variables
        VA.SetText("~~RecognitionError", null);
        VA.SetText("~~RecognizedText", null);
        VA.SetText("~~SAPIPhonemes", null);
        VA.SetText("~~SAPIPhonemesRaw", null);
        //VA.SetText("~~FalseRecognitionFlag", null);

        // Retrieve the desired word data contained within VoiceAttack text variable
        string ProcessText = null;                     // Initialize string variable for storing the text of interest

        if (VA.GetText("~~ProcessText") != null)       // Check if user provided valid text in input variable
        {
            ProcessText = VA.GetText("~~ProcessText"); // Store text of interest held by VA text variable
        }
        else
        {
            VA.SetText("~~RecognitionError", "Error in input text string (SAPI)"); // Send error detail back to VoiceAttack as text variable
            return;                                                                // End code processing
        }

        // Retrieve path to speech grammar XML file from VoiceAttack
        GrammarPath = VA.GetText("~~GrammarFilePath");

        // Retrieve path to voice recognition input wav file from VoiceAttack
        AudioPath = VA.GetText("~~AudioFilePath");

        // Check if TTS engine is voicing the input for the speech recognition engine
        if (VA.GetBoolean("~~UserVoiceInput") == false)
        {
            //VA.WriteToLog("creating wav file");
            if (TextToWav(AudioPath, ProcessText) == false) // Create the wav file that voices the input text via text-to-speech, and check whether creation failed
            {
                return;                                     // Stop executing the code
            }
        }

        // Create speech recognizer and associated context
        SpInprocRecognizer  MyRecognizer = new SpInprocRecognizer();                              // Create new instance of SpInprocRecognizer
        SpInProcRecoContext RecoContext  = (SpInProcRecoContext)MyRecognizer.CreateRecoContext(); // Initialize the SpInProcRecoContext (in-process recognition context)

        try                                                                                       // Attempt the following code
        {
            // Open the created wav in a new FileStream
            FileStream = new SpFileStream();                                        // Create new instance of SpFileStream
            FileStream.Open(AudioPath, SpeechStreamFileMode.SSFMOpenForRead, true); // Open the specified file in the FileStream for reading with events enabled

            // Set the voice recognition input as the FileStream
            MyRecognizer.AudioInputStream = FileStream;             // Use the wav file stream as the audio input for the voice recognition engine

            // Set up recognition event handling
            RecoContext.Recognition      += new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);           // Register for successful voice recognition events
            RecoContext.FalseRecognition += new _ISpeechRecoContextEvents_FalseRecognitionEventHandler(RecoContext_FalseRecognition); // Register for failed (low confidence) voice recognition events
            if (VA.GetBoolean("~~ShowRecognitionHypothesis") == true)                                                                 // Check if user wants to show voice recognition hypothesis results
            {
                RecoContext.Hypothesis += new _ISpeechRecoContextEvents_HypothesisEventHandler(RecoContext_Hypothesis);               // Register for voice recognition hypothesis events
            }
            RecoContext.EndStream += new _ISpeechRecoContextEvents_EndStreamEventHandler(RecoContext_EndStream);                      // Register for end of file stream events

            // Set up the grammar
            grammar      = RecoContext.CreateGrammar();                     // Initialize the grammar object
            UseDictation = (bool?)VA.GetBoolean("~~UseDictation") ?? false; // Set UseDictation from the VoiceAttack boolean variable (default to false if unset)
            if (UseDictation == true)                                       // Check if pronunciation dictation grammar should be used with speech recognition
            {
                //grammar.DictationLoad("", SpeechLoadOption.SLOStatic); // Load blank dictation topic into the grammar
                grammar.DictationLoad("Pronunciation", SpeechLoadOption.SLOStatic);    // Load pronunciation dictation topic into the grammar so that the raw (unfiltered) phonemes may be retrieved
                grammar.DictationSetState(SpeechRuleState.SGDSActive);                 // Activate dictation grammar
            }
            else
            {
                grammar.CmdLoadFromFile(GrammarPath, SpeechLoadOption.SLODynamic);           // Load custom XML grammar file
                grammar.CmdSetRuleIdState(0, SpeechRuleState.SGDSActive);                    // Activate the loaded grammar
            }
            Application.Run();                                                               // Start a message loop on the current thread; this blocks until an event handler ends the loop (e.g. via Application.ExitThread())
        }
        catch                                                                                // Handle exceptions in above code
        {
            VA.SetText("~~RecognitionError", "Error during voice recognition setup (SAPI)"); // Send error detail back to VoiceAttack as text variable
            return;                                                                          // Stop executing the code
        }
        finally                                                                              // Runs whether an exception is encountered or not
        {
            MyRecognizer = null;                                                             // Set to null in preparation for garbage collection
            if (FileStream != null)                                                          // FileStream may be null if an exception occurred before the stream was created
            {
                FileStream.Close();                                                          // Close the input FileStream
                FileStream = null;                                                           // Set to null in preparation for garbage collection
            }

            // Close up recognition event handling
            RecoContext.Recognition      -= new _ISpeechRecoContextEvents_RecognitionEventHandler(RecoContext_Recognition);           // Unregister for successful voice recognition events
            RecoContext.FalseRecognition -= new _ISpeechRecoContextEvents_FalseRecognitionEventHandler(RecoContext_FalseRecognition); // Unregister for failed (low confidence) voice recognition events
            if (VA.GetBoolean("~~ShowRecognitionHypothesis") == true)                                                                 // Check if user wanted to show voice recognition hypothesis results
            {
                RecoContext.Hypothesis -= new _ISpeechRecoContextEvents_HypothesisEventHandler(RecoContext_Hypothesis);               // Unregister for voice recognition hypothesis events
            }
            RecoContext.EndStream -= new _ISpeechRecoContextEvents_EndStreamEventHandler(RecoContext_EndStream);                      // Unregister for end of file stream events
            RecoContext            = null;                                                                                            // Set to null in preparation for garbage collection
        }
        //VA.WriteToLog("voice recognition complete"); // Output info to event log
    }
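
    // The event handlers registered in main() (RecoContext_Recognition, RecoContext_FalseRecognition,
    // RecoContext_Hypothesis, RecoContext_EndStream) are not shown in this snippet. The sketches below
    // illustrate the SpeechLib delegate signatures and the key detail that Application.Run() only returns
    // once a handler ends the message loop; the handler bodies are illustrative assumptions, not the
    // original implementation.
    public void RecoContext_Recognition(int StreamNumber, object StreamPosition, SpeechRecognitionType RecognitionType, ISpeechRecoResult Result)
    {
        VA.SetText("~~RecognizedText", Result.PhraseInfo.GetText(0, -1, true)); // Pass the recognized phrase text back to VoiceAttack
    }

    public void RecoContext_FalseRecognition(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
    {
        VA.SetText("~~RecognitionError", "Low confidence recognition (SAPI)");  // Flag the low-confidence result for VoiceAttack
    }

    public void RecoContext_Hypothesis(int StreamNumber, object StreamPosition, ISpeechRecoResult Result)
    {
        VA.WriteToLog("Hypothesis: " + Result.PhraseInfo.GetText(0, -1, true)); // Log the interim recognition hypothesis
    }

    public void RecoContext_EndStream(int StreamNumber, object StreamPosition, bool StreamReleased)
    {
        Application.ExitThread(); // End the message loop started by Application.Run() so main() can finish and run its finally block
    }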