// Reads the setting `key` from the default group.
// The success flag of TryGetGroupValue is deliberately ignored: whatever it
// assigns to the out parameter on a miss (presumably default(T) — confirm
// against TryGetGroupValue) is returned to the caller.
public static T GetValue<T>(string key)
{
    INISetting.TryGetGroupValue<T>(INISetting.defaultGroup, key, out T result);
    return result;
}
// Entry point: reads the recording duration (seconds) from the INI settings,
// ensures the transcript output directory exists under the temp path, then
// runs the streaming speech-recognition task to completion. Any failure is
// persisted to error.txt (console app — no UI to surface it in).
public static void Main(string[] args)
{
    int valueWithAdd = INISetting.GetValueWithAdd<int>("Duration, s", 60);
    // Path.Combine instead of manual "\\" concatenation (portable, no
    // separator bugs); Directory.CreateDirectory is a no-op when the
    // directory already exists, so the Exists() pre-check was redundant.
    Directory.CreateDirectory(Path.Combine(Path.GetTempPath(), "speechtext"));
    Task<object> task = Program.StreamingMicRecognizeAsync(valueWithAdd);
    try
    {
        task.Wait();
    }
    catch (Exception ex)
    {
        // Wait() wraps failures in AggregateException; dump the full chain.
        File.WriteAllText("error.txt", ex.ToString());
    }
}
// Stores value.ToString() under (group, key): overwrites when the pair
// already exists, otherwise creates the group on demand and adds the key
// only if it is not already present.
public static void SetValue(string group, string key, object value)
{
    // Existing entry: replace in place and we're done.
    if (INISetting.Exists(group, key))
    {
        INISetting.groups[group][key] = new INISetting.ValueResult(value.ToString());
        return;
    }
    // Create the group lazily.
    if (!INISetting.groups.ContainsKey(group))
    {
        INISetting.groups.Add(group, new INISetting.INI_Values());
    }
    // Defensive re-check (mirrors the original): never clobber an entry that
    // appeared between the Exists() call and here.
    if (!INISetting.groups[group].ContainsKey(key))
    {
        INISetting.groups[group].Add(key, new INISetting.ValueResult(value.ToString()));
    }
}
// Streams live microphone audio to the Google Cloud Speech API for `seconds`
// seconds and mirrors the transcripts into a file under the temp directory.
// Returns boxed 0 on success, boxed -1 when no capture device exists.
private static async Task<object> StreamingMicRecognizeAsync(int seconds)
{
    // Bail out early when there is no microphone to record from.
    if (WaveIn.DeviceCount < 1)
    {
        File.WriteAllText("error.txt", "No microphone!");
        return ((object) -1);
    }
    // Path to the service-account credentials file, from settings.
    // NOTE(review): ToLower() on a file path is suspicious — on a
    // case-sensitive file system this may point at a different file; confirm intent.
    string lower = INISetting.GetValueWithAdd<string>("CredentialsFilePath", "credentials.json").ToLower();
    Console.WriteLine(lower);
    GoogleCredential googleCredential;
    using (Stream stream = (Stream) new FileStream(lower, FileMode.Open))
        googleCredential = GoogleCredential.FromStream(stream);
    // One bidirectional streaming-recognition session against the default endpoint.
    SpeechClient.StreamingRecognizeStream streamingCall = SpeechClient.Create(new Channel(SpeechClient.DefaultEndpoint.Host, googleCredential.ToChannelCredentials())).StreamingRecognize();
    // First message carries only configuration: 16 kHz LINEAR16 PCM, Russian,
    // with interim (partial) results enabled.
    await streamingCall.WriteAsync(new StreamingRecognizeRequest()
    {
        StreamingConfig = new StreamingRecognitionConfig()
        {
            Config = new RecognitionConfig()
            {
                Encoding = RecognitionConfig.Types.AudioEncoding.Linear16,
                SampleRateHertz = 16000,
                LanguageCode = "ru"
            },
            InterimResults = true
        }
    });
    // Background pump: for each response, take only the first alternative of
    // the first result, echo it to the console, and overwrite the transcript
    // temp file. Runs until the response stream ends.
    Task printResponses = Task.Run((Func<Task>) (async () =>
    {
        string s = "";
        while (true)
        {
            if (await streamingCall.ResponseStream.MoveNext(new CancellationToken()))
            {
                using (IEnumerator<StreamingRecognitionResult> enumerator1 = streamingCall.ResponseStream.Current.Results.GetEnumerator())
                {
                    if (enumerator1.MoveNext())
                    {
                        using (IEnumerator<SpeechRecognitionAlternative> enumerator2 = enumerator1.Current.Alternatives.GetEnumerator())
                        {
                            if (enumerator2.MoveNext())
                            {
                                SpeechRecognitionAlternative current = enumerator2.Current;
                                Console.WriteLine(current.Transcript);
                                s += current.Transcript;
                            }
                        }
                    }
                }
                // Overwrites on every response; s is reset, so each file write
                // reflects only the latest response's transcript.
                File.WriteAllText(Path.GetTempPath() + "\\speechtext\\speechtext.txt", s);
                s = "";
            }
            else
            {
                break;
            }
        }
    }));
    // writeLock guards writeMore and serializes gRPC writes coming from the
    // NAudio capture-callback thread.
    object writeLock = new object();
    bool writeMore = true;
    WaveInEvent waveIn = new WaveInEvent();
    waveIn.DeviceNumber = 0;
    // 16 kHz mono — must match SampleRateHertz in the RecognitionConfig above.
    waveIn.WaveFormat = new WaveFormat(16000, 1);
    waveIn.DataAvailable += (EventHandler<WaveInEventArgs>) ((sender, args) =>
    {
        lock (writeLock)
        {
            if (!writeMore)
            {
                return;
            }
            // NOTE(review): .Wait() blocks the audio-callback thread for every
            // buffer; confirm this keeps up with the capture rate.
            streamingCall.WriteAsync(new StreamingRecognizeRequest()
            {
                AudioContent = ByteString.CopyFrom(args.Buffer, 0, args.BytesRecorded)
            }).Wait();
        }
    });
    waveIn.StartRecording();
    Console.WriteLine("Speak now " + (object) seconds);
    // Record for the requested duration, then shut down in order: stop
    // capture, block further audio writes under the lock, half-close the
    // gRPC stream, and wait for the response pump to drain.
    await Task.Delay(TimeSpan.FromSeconds((double) seconds));
    waveIn.StopRecording();
    lock (writeLock)
        writeMore = false;
    await streamingCall.WriteCompleteAsync();
    await printResponses;
    return ((object) 0);
}
// Convenience overload: stores `value` under `key` in the default group.
public static void SetValue(string key, string value)
{
    INISetting.SetValue(INISetting.defaultGroup, key, (object) value);
}
// Convenience overload: fetch `key` from the default group; delegates
// entirely to GetGroupValueWithAdd (which receives `defaultValue`).
public static T GetValueWithAdd<T>(string key, T defaultValue)
{
    return INISetting.GetGroupValueWithAdd<T>(INISetting.defaultGroup, key, defaultValue);
}
// Convenience overload: try-get `key` from the default group, forwarding
// the out parameter and success flag from TryGetGroupValue unchanged.
public static bool TryGetValue<T>(string key, out T val)
{
    return INISetting.TryGetGroupValue<T>(INISetting.defaultGroup, key, out val);
}