// Value equality: two format descriptors are equal when every scalar field
// matches and the format-specific data blobs are byte-for-byte identical.
// NOTE(review): overriding Equals normally pairs with GetHashCode — confirm
// it is overridden elsewhere in this class.
public override bool Equals(object obj)
{
    // A non-SpeechAudioFormatInfo (including null) can never be equal.
    SpeechAudioFormatInfo other = obj as SpeechAudioFormatInfo;
    if (other == null)
    {
        return false;
    }

    // All scalar format fields must agree.
    bool scalarsMatch = _averageBytesPerSecond.Equals(other._averageBytesPerSecond)
        && _bitsPerSample.Equals(other._bitsPerSample)
        && _blockAlign.Equals(other._blockAlign)
        && _encodingFormat.Equals(other._encodingFormat)
        && _channelCount.Equals(other._channelCount)
        && _samplesPerSecond.Equals(other._samplesPerSecond);
    if (!scalarsMatch)
    {
        return false;
    }

    // Format-specific payloads must be the same length and content.
    var mine = _formatSpecificData;
    var theirs = other._formatSpecificData;
    if (mine.Length != theirs.Length)
    {
        return false;
    }
    for (int i = 0; i < mine.Length; i++)
    {
        if (mine[i] != theirs[i])
        {
            return false;
        }
    }
    return true;
}
// Entry point: parses command-line options, wires up the speech engine and
// grammar manager, starts microphone capture into a shared stream buffer,
// then blocks until a key is pressed.
static void Main(string[] args)
{
    // Explicit en-US culture so "--confidence" parses '.' as the decimal separator
    // regardless of the machine's locale.
    CultureInfo culture = CultureInfo.CreateSpecificCulture("en-US");
    Console.OutputEncoding = System.Text.Encoding.UTF8;

    // Option defaults.
    string device = "";
    string recognizer = "";
    string language = "fr-FR";
    string hotword = "SARAH";
    double confidence = 0.5;
    // NOTE(review): deviceId is never derived from the "--device" option below,
    // so WaveInEvent always records from the default device (-1) — confirm intended.
    int deviceId = -1;
    var grammar = "";
    bool help = false;

    // Command-line option table (NDesk.Options style).
    var p = new OptionSet() {
        { "device=", "the device id", v => device = v },
        { "recognizer=", "the recognizer id", v => recognizer = v },
        { "language=", "the recognizer language", v => language = v },
        { "grammar=", "the grammar directory", v => grammar = v },
        { "hotword=", "the hotword (default is SARAH)", v => hotword = v },
        { "confidence=", "the reconizer confidence", v => confidence = Double.Parse(v, culture) },
        { "h|help", "show this message and exit", v => help = v != null },
    };

    List<string> extra;
    try
    {
        extra = p.Parse(args);
    }
    catch (OptionException e)
    {
        // Malformed command line: report and exit.
        Console.Write("Listen: ");
        Console.WriteLine(e.Message);
        Console.WriteLine("Try `Listen --help' for more information.");
        return;
    }
    if (help)
    {
        ShowHelp(p);
        return;
    }

    // Create Speech Engine & Grammar Manager
    SpeechEngine engine = new SpeechEngine(device, recognizer, language, confidence);
    GrammarManager.GetInstance().SetEngine(engine, language, hotword);

    // Load grammars from the given directory (depth 2) and watch it for changes;
    // otherwise fall back to the bundled per-language default grammar file.
    if (!String.IsNullOrEmpty(grammar))
    {
        grammar = Path.GetFullPath(grammar);
        GrammarManager.GetInstance().Load(grammar, 2);
        GrammarManager.GetInstance().Watch(grammar);
    }
    else
    {
        GrammarManager.GetInstance().LoadFile("default_" + language + ".xml");
    }
    // Order matters: grammars are loaded into the engine before Init().
    engine.Load(GrammarManager.GetInstance().Cache, false);
    engine.Init();

    // Create Stream: capture microphone audio (16 kHz, 2 channels) into a
    // shared buffer that the recognizer reads from.
    var buffer = new StreamBuffer();
    var waveIn = new WaveInEvent();
    waveIn.DeviceNumber = deviceId;
    waveIn.WaveFormat = new WaveFormat(16000, 2);
    waveIn.DataAvailable += (object sender, WaveInEventArgs e) =>
    {
        // Append the newly captured bytes while restoring the read position,
        // so the consumer keeps reading from where it left off; the lock
        // guards the position/write pair against a concurrent reader.
        lock (buffer)
        {
            var pos = buffer.Position;
            buffer.Write(e.Buffer, 0, e.BytesRecorded);
            buffer.Position = pos;
        }
    };
    waveIn.StartRecording();

    // Pipe Stream and start
    // NOTE(review): the KINECT format below declares 1 channel / 32000 B/s while
    // the capture above is stereo (2 ch @ 16 kHz/16-bit = 64000 B/s) — confirm
    // this mismatch is intended for the Kinect input path.
#if KINECT
    var info = new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(Microsoft.Speech.AudioFormat.EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null);
#else
    var info = new System.Speech.AudioFormat.SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Stereo);
#endif
    engine.SetInputToAudioStream(buffer, info);
    engine.Start();

    // Prevent console from closing
    Console.WriteLine("Waiting for key pressed...");
    Console.ReadLine();
}
// Parses an audio-format string of the form
// "<samplesPerSecond>_<bitsPerSample>_<Mono|Stereo>" (e.g. "16000_16_Stereo")
// and, on success, stores matching System.Speech and Microsoft.Speech format
// descriptors in the sysAF / msftAF fields.
// Returns true when the string was valid; otherwise prints a diagnostic to the
// console and returns false.
private static bool SetAudioFormat(string format)
{
    int samplesPerSecond; // first component is the sample rate, not bits-per-sample
    System.Speech.AudioFormat.AudioBitsPerSample sysAudioBitsPerSample;
    Microsoft.Speech.AudioFormat.AudioBitsPerSample msftAudioBitsPerSample;
    System.Speech.AudioFormat.AudioChannel sysAudioChannel;
    Microsoft.Speech.AudioFormat.AudioChannel msftAudioChannel;
    decimal rawInput;

    // A plain character split is sufficient; no regex needed for a literal '_'.
    string[] parts = format.Split('_');
    if (parts.Length != 3)
    {
        Console.WriteLine("Invalid Audio Format");
        return false;
    }

    // Component 0: sample rate — positive whole number that fits in an int.
    // (The int.MaxValue guard prevents an OverflowException from the decimal cast.)
    // NOTE(review): TryParse uses the current culture here, unlike Main's explicit
    // en-US culture — confirm whether invariant parsing is wanted.
    if (Decimal.TryParse(parts[0], out rawInput) && (rawInput % 1) == 0
        && rawInput > 0 && rawInput <= int.MaxValue)
    {
        samplesPerSecond = (int)rawInput;
    }
    else
    {
        Console.WriteLine("Invalid Audio Format: SamplePerSecond must be a positive integer");
        return false;
    }

    // Component 1: bit depth — only 8 and 16 are representable in the enums.
    if (Decimal.TryParse(parts[1], out rawInput) && (rawInput % 1) == 0 && rawInput > 0)
    {
        if (rawInput == 8.0M)
        {
            sysAudioBitsPerSample = System.Speech.AudioFormat.AudioBitsPerSample.Eight;
            msftAudioBitsPerSample = Microsoft.Speech.AudioFormat.AudioBitsPerSample.Eight;
        }
        else if (rawInput == 16.0M)
        {
            sysAudioBitsPerSample = System.Speech.AudioFormat.AudioBitsPerSample.Sixteen;
            msftAudioBitsPerSample = Microsoft.Speech.AudioFormat.AudioBitsPerSample.Sixteen;
        }
        else
        {
            Console.WriteLine("Invalid Audio Format: BitsPerSample must be either 8 or 16");
            return false;
        }
    }
    else
    {
        Console.WriteLine("Invalid Audio Format: BitsPerSample must be either 8 or 16");
        return false;
    }

    // Component 2: channel layout.
    if (parts[2].Equals("Mono"))
    {
        sysAudioChannel = System.Speech.AudioFormat.AudioChannel.Mono;
        msftAudioChannel = Microsoft.Speech.AudioFormat.AudioChannel.Mono;
    }
    else if (parts[2].Equals("Stereo"))
    {
        sysAudioChannel = System.Speech.AudioFormat.AudioChannel.Stereo;
        msftAudioChannel = Microsoft.Speech.AudioFormat.AudioChannel.Stereo;
    }
    else
    {
        Console.WriteLine("Invalid Audio Format: AudioChannel must be either Mono or Stereo");
        return false;
    }

    // Both descriptors take (samplesPerSecond, bitsPerSample, channel).
    sysAF = new System.Speech.AudioFormat.SpeechAudioFormatInfo(samplesPerSecond, sysAudioBitsPerSample, sysAudioChannel);
    msftAF = new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(samplesPerSecond, msftAudioBitsPerSample, msftAudioChannel);
    return true;
}
// Parses "<rate>_<bits>_<Mono|Stereo>" (e.g. "16000_16_Stereo") and fills the
// sysAF / msftAF format descriptors. Prints a console message and returns
// false for any malformed component; returns true on success.
private static bool SetAudioFormat(string format)
{
    string[] tokens = format.Split('_');
    if (tokens.Length != 3)
    {
        Console.WriteLine("Invalid Audio Format");
        return false;
    }

    decimal parsed;

    // Token 0: sample rate — a positive whole number.
    bool rateOk = Decimal.TryParse(tokens[0], out parsed)
        && (parsed % 1) == 0
        && parsed > 0;
    if (!rateOk)
    {
        Console.WriteLine("Invalid Audio Format: SamplePerSecond must be a positive integer");
        return false;
    }
    int sampleRate = (int) parsed;

    // Token 1: bit depth — the enums only cover 8 and 16.
    System.Speech.AudioFormat.AudioBitsPerSample sysBits;
    Microsoft.Speech.AudioFormat.AudioBitsPerSample msftBits;
    bool bitsNumeric = Decimal.TryParse(tokens[1], out parsed)
        && (parsed % 1) == 0
        && parsed > 0;
    if (bitsNumeric && parsed == 8.0M)
    {
        sysBits = System.Speech.AudioFormat.AudioBitsPerSample.Eight;
        msftBits = Microsoft.Speech.AudioFormat.AudioBitsPerSample.Eight;
    }
    else if (bitsNumeric && parsed == 16.0M)
    {
        sysBits = System.Speech.AudioFormat.AudioBitsPerSample.Sixteen;
        msftBits = Microsoft.Speech.AudioFormat.AudioBitsPerSample.Sixteen;
    }
    else
    {
        Console.WriteLine("Invalid Audio Format: BitsPerSample must be either 8 or 16");
        return false;
    }

    // Token 2: channel layout.
    System.Speech.AudioFormat.AudioChannel sysChannel;
    Microsoft.Speech.AudioFormat.AudioChannel msftChannel;
    switch (tokens[2])
    {
        case "Mono":
            sysChannel = System.Speech.AudioFormat.AudioChannel.Mono;
            msftChannel = Microsoft.Speech.AudioFormat.AudioChannel.Mono;
            break;
        case "Stereo":
            sysChannel = System.Speech.AudioFormat.AudioChannel.Stereo;
            msftChannel = Microsoft.Speech.AudioFormat.AudioChannel.Stereo;
            break;
        default:
            Console.WriteLine("Invalid Audio Format: AudioChannel must be either Mono or Stereo");
            return false;
    }

    sysAF = new System.Speech.AudioFormat.SpeechAudioFormatInfo(sampleRate, sysBits, sysChannel);
    msftAF = new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(sampleRate, msftBits, msftChannel);
    return true;
}