/// <summary>
/// Boots every subsystem of the vehicle in strict dependency order: Lightning
/// provider, camera, speed sensor, speech output, accelerometer, motor/servo
/// controllers, distance sensor, automatic drive, speech recognition, gamepad,
/// HTTP server, then system-wide audio volumes and the welcome sound.
/// NOTE(review): method name has a typo ("Initialze") — kept because callers
/// elsewhere reference it by this exact name.
/// </summary>
/// <exception cref="Exception">Thrown when the Lightning drivers are not enabled.</exception>
private async Task Initialze()
{
    // Lightning provider is required for low-level device access (GPIO/I2C/PWM)
    // on Windows IoT; without it nothing below can talk to the hardware.
    if (LightningProvider.IsLightningEnabled)
    {
        LowLevelDevicesController.DefaultProvider = LightningProvider.GetAggregateProvider();
    }
    else
    {
        throw new Exception("Lightning drivers not enabled. Please enable Lightning drivers.");
    }

    // Camera is initialized first but only started later, after the controllers exist.
    _camera = new Camera();
    await _camera.Initialize();

    SpeedSensor.Initialize();
    SpeedSensor.Start();

    // Speech output must be ready before the audio player pre-renders prompts.
    SpeechSynthesis.Initialze();
    await AudioPlayerController.Initialize();

    _accelerometerSensor = new AccelerometerGyroscopeSensor();
    await _accelerometerSensor.Initialize();
    _accelerometerSensor.Start();

    // The speak controller consumes accelerometer data; construct after the sensor runs.
    _automaticSpeakController = new AutomaticSpeakController(_accelerometerSensor);

    _motorController = new MotorController();
    await _motorController.Initialize(_automaticSpeakController);

    _servoController = new ServoController();
    await _servoController.Initialize();

    _distanceMeasurementSensor = new DistanceMeasurementSensor();
    // NOTE(review): the distance sensor is initialized with I2C_ADDRESS_SERVO —
    // presumably the sensor shares the servo board's I2C address; confirm this
    // constant is not a copy/paste mistake.
    await _distanceMeasurementSensor.Initialize(I2C_ADDRESS_SERVO);

    // Automatic drive needs motors, steering and distance sensing.
    _automaticDrive = new AutomaticDrive(_motorController, _servoController, _distanceMeasurementSensor);

    // NOTE(review): "_speechRecognation" (sic) and "Initialze" (sic) — existing
    // spellings preserved for compatibility.
    _speechRecognation = new SpeechRecognition();
    await _speechRecognation.Initialze(_motorController, _servoController, _automaticDrive);
    _speechRecognation.Start();

    _gamepadController = new GamepadController(_motorController, _servoController, _automaticDrive, _accelerometerSensor);

    _camera.Start();

    _httpServerController = new HttpServerController(_motorController, _servoController, _automaticDrive, _camera);

    // Register all components with the central controller before touching audio volumes.
    SystemController.Initialize(_accelerometerSensor, _automaticSpeakController, _motorController, _servoController, _automaticDrive, _camera, _httpServerController, _speechRecognation, _gamepadController);

    await SystemController.SetAudioRenderVolume(AUDIO_RENDER_VOLUME, true);
    await SystemController.SetAudioCaptureVolume(AUDIO_CAPTURE_VOLUME, true);

    // Welcome sound is awaited so automatic speaking cannot talk over it.
    await AudioPlayerController.PlayAndWaitAsync(AudioName.Welcome);
    _automaticSpeakController.Start();
}
/// <summary>
/// Entry point: prints a message to the console, speaks it through the
/// SpeechLib synthesizer, then blocks until Enter so the window stays open.
/// </summary>
/// <param name="args">Command-line arguments (unused).</param>
static void Main(string[] args)
{
    // Idiomatic C#: use the keyword alias 'string' rather than 'String'.
    string msg = "Cystemz is really bad";
    Console.WriteLine(msg);

    // Original fully qualifies the declared type, which suggests another
    // SpeechSynthesis type is in scope — the qualification is kept.
    SpeechLib.Synthesis.SpeechSynthesis speechSynthesis = new SpeechSynthesis();
    speechSynthesis.Speak(msg);

    // Keep the process alive until the user presses Enter.
    Console.ReadLine();
}
/// <summary>
/// Sends <paramref name="message"/> to a TTS device over <c>_transport</c> and
/// waits for its acknowledgement protocol: a first byte 0x41 when synthesis
/// succeeded, then a second byte 0x4F when playback succeeded.
/// </summary>
/// <param name="message">Text to be synthesized and spoken by the device.</param>
/// <returns>true only when both acknowledgement bytes were received; otherwise false.</returns>
public bool Display(string message)
{
    // Encode with the platform default encoding — presumably the device expects
    // a locale-specific encoding (GBK?); TODO confirm against the device manual.
    var sendText = new SpeechSynthesis { Data = Encoding.Default.GetBytes(message).ToList() };
    // Commented-out template substitution retained from the original author:
    //string info = ttsFormat.Replace("{GreenhouseName}", ttsDevice.TtsNode.GreenHouse.GreenHouseName);
    //info = info.Replace("{DeviceName}", device.Name);
    //info = info.Replace("{LatestValue}", device.LatestValue.ToString());
    //info = info.Replace("{LatestProcessedValue}", device.LatestProcessedValue.ToString(CultureInfo.InvariantCulture));
    //info = info.Replace("{LatestTime}", device.LatestTime.ToString(CultureInfo.InvariantCulture));
    //info = info.Replace("{Unit}", device.Sensor.Unit);
    sendText.CalculateExtendData();
    var sendData = sendText.GetStream().ReadBytes();
    try
    {
        _transport.Send(sendData);
        // Immediately read the device's reply confirming speech synthesis.
        var buffer = new byte[1];
        _transport.Receive(buffer, 0, 1);
        if (buffer[0] == 0x41)
        {
            // Synthesis succeeded (0x41); now wait for the playback result byte.
            _transport.Receive(buffer, 0, 1);
            if (buffer[0] == 0x4F)
            {
                // Playback succeeded (0x4F).
                return(true);
            }
            // Playback failed. (Log message text is runtime output — left in Chinese.)
#if DEBUG
            XTrace.WriteLine("语音播报失败:{0} {1}", Name, message);
#endif
        }
        else
        {
            // Synthesis failed (first byte was not 0x41).
#if DEBUG
            XTrace.WriteLine("语音合成失败:{0} {1}", Name, message);
#endif
        }
    }
    catch (Exception ex)
    {
        // NOTE(review): all transport exceptions are logged and swallowed;
        // the caller only ever sees false.
        XTrace.WriteException(ex);
    }
    return(false);
}
/// <summary>
/// Initializes a new TwitchChatBot object, which handles all communications between Twitch and the client.
/// </summary>
/// <param name="authenticator">A pre-initialized WebAuthenticator.</param>
/// <param name="appState">The global AppState.</param>
public TwitchChatBot(WebAuthenticator authenticator, Guid appState)
{
    this.authenticator = authenticator;
    this.appState = appState;
    twitchApi = new TwitchAPI(ClientId);

    // Retrieve access tokens and usernames for logging in.
    // NOTE(review): .Result blocks synchronously on async calls — a deadlock
    // risk on a synchronization-context framework; consider an async factory
    // method instead of doing this work in the constructor.
    userAccessToken = authenticator.GetAccessTokenByStateAsync(appState).Result;
    botAccessToken = authenticator.GetBotAccessTokenByValidStateAsync(appState).Result;
    Username = authenticator.GetUsernameFromOAuthAsync(userAccessToken).Result;
    Botname = authenticator.GetUsernameFromOAuthAsync(botAccessToken).Result;

    // An empty username means the token expired — refresh and re-resolve.
    if (string.IsNullOrEmpty(Username))
    {
        ConsoleHelper.WriteLine("Refreshing user access token...");
        userAccessToken = RefreshAccessToken(appState, ClientId, userAccessToken).Result;
        Username = authenticator.GetUsernameFromOAuthAsync(userAccessToken).Result;
    }
    if (string.IsNullOrEmpty(Botname))
    {
        ConsoleHelper.WriteLine("Refreshing bot access token...");
        // NOTE(review): uses BotState here while the user path uses appState — presumably
        // intentional (separate bot auth state); confirm.
        botAccessToken = RefreshAccessToken(BotState, ClientId, botAccessToken).Result;
        Botname = authenticator.GetUsernameFromOAuthAsync(botAccessToken).Result;
    }

    twitchApi.Settings.AccessToken = botAccessToken;

    // Both clients join the user's channel: the bot client also connects with
    // Username as its channel argument — looks intentional (bot chats in the
    // user's channel), but verify against TwitchClient's constructor contract.
    userClient = new TwitchClient(new ConnectionCredentials(Username, userAccessToken), Username);
    botClient = new TwitchClient(new ConnectionCredentials(Botname, botAccessToken), Username);
    commandFactory = new CommandFactory();
    speechSynthesizer = new SpeechSynthesis();

    // ATCB-made events
    ConsoleHelper.OnConsoleCommand += (sender, e) => { PerformConsoleCommand((e as ConsoleCommandEventArgs).Message); };

    // User client events
    userClient.OnConnected += OnUserConnected;
    userClient.OnBeingHosted += OnUserBeingHosted;

    // Bot client events
    botClient.OnConnected += OnBotConnected;
    botClient.OnConnectionError += OnBotConnectionError;
    botClient.OnMessageReceived += OnMessageReceived;
    botClient.OnMessageSent += OnBotMessageSent;
    botClient.OnChatCommandReceived += OnChatCommandReceived;
    botClient.OnNewSubscriber += OnNewSubscriber;
    botClient.OnReSubscriber += OnReSubscriber;
}
/// <summary>
/// Speaks the base (Polish) term aloud via the browser speech-synthesis API.
/// Does nothing when no base translation is loaded yet.
/// </summary>
private void OnSpeechPlClicked()
{
    // Guard clause: bail out while state is still loading.
    // NOTE(review): the guard inspects BaseTranslation.BaseTerm while the text
    // spoken below is read from Value.BaseTerm — confirm both refer to the same object.
    if (TranslationsState?.Value?.BaseTranslation.BaseTerm is null)
    {
        return;
    }

    var utterance = new SpeechSynthesisUtterance();
    utterance.Text = TranslationsState.Value.BaseTerm.Name;
    utterance.Lang = Const.PlLangSpeechCode; // BCP 47 language tag
    utterance.Pitch = 1.0;                   // 0.0 – 2.0 (default 1.0)
    utterance.Rate = 1.0;                    // 0.1 – 10.0 (default 1.0)
    utterance.Volume = 1.0;                  // 0.0 – 1.0 (default 1.0)

    SpeechSynthesis?.Speak(utterance);
}
/// <summary>
/// Speaks the translated term aloud via the browser speech-synthesis API,
/// using the language resolved from the translation's LanguageId.
/// Does nothing when no translation is loaded yet.
/// </summary>
private void OnSpeechEnClicked()
{
    // Guard clause: bail out while state is still loading.
    // NOTE(review): the guard inspects BaseTranslation.Translation while the text
    // spoken below is read from Value.Translation — confirm both refer to the same object.
    if (TranslationsState?.Value?.BaseTranslation.Translation is null)
    {
        return;
    }

    var translation = TranslationsState.Value.Translation;

    var utterance = new SpeechSynthesisUtterance();
    utterance.Text = translation.Name;
    utterance.Lang = Func.GetLangSpeech(translation.LanguageId); // BCP 47 language tag
    utterance.Pitch = 1.0;                                       // 0.0 – 2.0 (default 1.0)
    utterance.Rate = 1.0;                                        // 0.1 – 10.0 (default 1.0)
    utterance.Volume = 1.0;                                      // 0.0 – 1.0 (default 1.0)

    SpeechSynthesis?.Speak(utterance);
}
/// <summary>
/// Ensures a cached audio file for <paramref name="audioName"/> exists in local
/// storage — synthesizing <paramref name="text"/> into it on first use — and
/// queues the file on both the headset and car speakers.
/// </summary>
/// <param name="audioName">Identifies which prompt the file holds; its enum name is the file name.</param>
/// <param name="text">Text to synthesize when the cached file does not exist yet.</param>
private static async Task AddAudioFile(AudioName audioName, string text)
{
    var localFolder = ApplicationData.Current.LocalFolder;
    // Hoisted: the original computed EnumHelper.GetName(audioName) three times.
    var fileName = EnumHelper.GetName(audioName);

    // Synthesize and persist the audio only when it is not cached yet.
    if (await localFolder.TryGetItemAsync(fileName) == null)
    {
        var headsetStream = await SpeechSynthesis.SpeakAsStream(text);
        // FailIfExists is safe here: we just observed the file is absent.
        var newFile = await localFolder.CreateFileAsync(fileName, CreationCollisionOption.FailIfExists);
        using (var outputStream = await newFile.OpenStreamForWriteAsync())
        {
            await headsetStream.AsStream().CopyToAsync(outputStream);
        }
    }

    // Re-open through the folder API so both speakers receive a fresh handle
    // (the original also discarded the created-file handle and re-fetched).
    var storageFile = await localFolder.GetFileAsync(fileName);
    await _headsetSpeaker.AddFileAsync(storageFile);
    await _carSpeaker.AddFileAsync(storageFile);
}