private async Task<byte[]> DownLoadMovieFromYoutubeAsync(YouTubeVideo y)
{
    try
    {
        if (!y.IsEncrypted)
        {
            Debug.Log("Preparing the video for playback. Please wait a moment.");
            byte[] bytes = await y.GetBytesAsync();
            Debug.Log("Done!");
            return bytes;
        }
        else
        {
            Debug.Log("This video cannot be played.");
            return null;
        }
    }
    catch (Exception e)
    {
        Debug.Log("An error occurred while preparing the video for playback: " + e);
        return null;
    }
}
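For context, here is a minimal sketch of how this method might be called, assuming the VideoLibrary package (which provides YouTubeVideo, IsEncrypted, and GetBytesAsync) and a Unity runtime; the URL and the temp-file handling are placeholders, not part of the original code.

// Hypothetical caller, assuming VideoLibrary's YouTube.Default entry point.
var video = await YouTube.Default.GetVideoAsync("https://www.youtube.com/watch?v=...");
byte[] bytes = await DownLoadMovieFromYoutubeAsync(video);
if (bytes != null)
{
    // e.g. write to Unity's temp cache and hand the path to a VideoPlayer
    var path = Path.Combine(Application.temporaryCachePath, "movie.mp4");
    File.WriteAllBytes(path, bytes);
}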
public async Task TranslateAsync(YouTubeVideo youTubeVideo, string fromLanguage, IEnumerable<string> toLanguages)
{
    // Working directory and file paths for this run
    var outputPath = Path.Combine("Output", Guid.NewGuid().ToString());
    var downloadFilePath = Path.Combine(outputPath, "input.mp4");

    // StringBuilders for the data passed to the event subscribers
    var tsb = new StringBuilder();   // translated text
    var osb = new StringBuilder();   // original (recognized) text
    var info = new StringBuilder();  // status information

    var config = SpeechTranslationConfig.FromSubscription(
        configuration["AzureSpeechTranslation:SubscriptionKey"],
        configuration["AzureSpeechTranslation:Region"]);
    config.SpeechRecognitionLanguage = fromLanguage;
    foreach (var language in toLanguages)
    {
        config.AddTargetLanguage(language);
    }

    var vidBytes = await youTubeVideo.GetBytesAsync();

    // Create the directory before saving the video
    CreateOutputDirectory(outputPath);

    // Save the video
    await File.WriteAllBytesAsync(downloadFilePath, vidBytes);

    // Extract the audio track from the video so the Speech SDK can consume it
    var wavAudioFile = await ExtractingWavAudioAsync(downloadFilePath);

    var stopTranslation = new TaskCompletionSource<int>();
    var lineCount = 1;

    using (var audioInput = AudioConfig.FromWavFileInput(wavAudioFile))
    {
        using (var recognizer = new TranslationRecognizer(config, audioInput))
        {
            // Subscribes to events.
            recognizer.Recognized += (s, e) =>
            {
                if (e.Result.Reason == ResultReason.TranslatedSpeech)
                {
                    // element.Key is the target language code, element.Value the translated text
                    foreach (var element in e.Result.Translations)
                    {
                        var fromTime = TimeSpan.FromTicks(e.Result.OffsetInTicks);
                        var toTime = fromTime.Add(e.Result.Duration);

                        osb.AppendLine($"{lineCount}");
                        osb.AppendLine($"{fromTime.ToString(@"hh\:mm\:ss\.fff")} --> {toTime.ToString(@"hh\:mm\:ss\.fff")}");
                        osb.AppendLine(e.Result.Text);
                        osb.AppendLine();

                        tsb.AppendLine($"{lineCount}");
                        tsb.AppendLine($"{fromTime.ToString(@"hh\:mm\:ss\.fff")} --> {toTime.ToString(@"hh\:mm\:ss\.fff")}");
                        tsb.AppendLine(element.Value);
                        tsb.AppendLine();

                        var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, element.Key, osb.ToString(), tsb.ToString());
                        osb.Clear();
                        tsb.Clear();
                        SpeechRecognized?.Invoke(this, speechServicesEventArgs);
                    }
                    lineCount++;
                }
                else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                {
                    info.AppendLine($"RECOGNIZED: Text={e.Result.Text}");
                    info.AppendLine($"    Speech not translated.");
                    var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                    info.Clear();
                    SpeechRecognized?.Invoke(this, speechServicesEventArgs);
                }
                else if (e.Result.Reason == ResultReason.NoMatch)
                {
                    info.AppendLine($"NOMATCH: Speech could not be recognized.");
                    var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                    info.Clear();
                    SpeechRecognized?.Invoke(this, speechServicesEventArgs);
                }
            };

            recognizer.Canceled += (s, e) =>
            {
                info.AppendLine($"CANCELED: Reason={e.Reason}");
                if (e.Reason == CancellationReason.Error)
                {
                    info.AppendLine($"CANCELED: ErrorCode={e.ErrorCode}");
                    info.AppendLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                    info.AppendLine($"CANCELED: Did you update the subscription info?");
                }
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechCanceled?.Invoke(this, speechServicesEventArgs);
                stopTranslation.TrySetResult(0);
            };

            recognizer.SpeechStartDetected += (s, e) =>
            {
                info.AppendLine("Speech start detected event.");
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechStartDetected?.Invoke(this, speechServicesEventArgs);
            };

            recognizer.SpeechEndDetected += (s, e) =>
            {
                info.AppendLine("Speech end detected event.");
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechEndDetected?.Invoke(this, speechServicesEventArgs);
            };

            recognizer.SessionStarted += (s, e) =>
            {
                info.AppendLine("Start translation...");
                info.AppendLine("Session started event.");
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechSessionStarted?.Invoke(this, speechServicesEventArgs);
            };

            recognizer.SessionStopped += (s, e) =>
            {
                info.AppendLine("Session stopped event.");
                info.AppendLine("Stop translation.");
                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                info.Clear();
                SpeechSessionStopped?.Invoke(this, speechServicesEventArgs);
                stopTranslation.TrySetResult(0);
            };

            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

            // Waits for completion.
            // Use Task.WaitAny to keep the task rooted.
            Task.WaitAny(new[] { stopTranslation.Task });

            // Stops translation.
            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
        }
    }

    // Housekeeping: remove the working directory and everything in it
    Directory.Delete(outputPath, true);
}
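A minimal sketch of how a caller might drive TranslateAsync end to end. The class name SpeechServicesTranslator, its constructor, and the event-args member TranslatedText are assumptions for illustration; only TranslateAsync and the events it raises appear in the code above. Note that the Speech SDK expects a BCP-47 tag (e.g. "ja-JP") for the recognition language and plain language codes (e.g. "en") for translation targets.

// Hypothetical caller; class name, constructor, and e.TranslatedText are assumed.
var translator = new SpeechServicesTranslator(configuration);
translator.SpeechRecognized += (s, e) =>
{
    // Receives the SRT-style subtitle blocks built in TranslateAsync
    Console.WriteLine(e.TranslatedText);
};

var video = await YouTube.Default.GetVideoAsync("https://www.youtube.com/watch?v=...");
await translator.TranslateAsync(video, "ja-JP", new[] { "en" });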