/// <summary>
/// Captures the outcome of a completed speech-to-text interpretation by
/// copying the result text and interpreter name out of the interpreter.
/// </summary>
/// <param name="interpreter">An interpreter that has already produced a result.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="interpreter"/> is null.</exception>
/// <exception cref="InvalidOperationException">Thrown when the interpreter has not yet run.</exception>
public InterpretationResult(SpeechToTextGoogleAPI interpreter)
{
    // Guard before dereferencing: the original code threw a raw
    // NullReferenceException on a null interpreter instead of a
    // descriptive argument error.
    if (interpreter == null)
    {
        throw new ArgumentNullException(nameof(interpreter));
    }

    if (interpreter.InterpretationResult == null)
    {
        throw new InvalidOperationException("interpreter must have run");
    }

    ResultText = interpreter.InterpretationResult;
    InterpreterName = interpreter.InterpreterName;
}
/// <summary>
/// Converts the supplied audio into the interpreter's required format, runs
/// the speech-to-text request, and pairs the interpretation with its source
/// audio.
/// </summary>
/// <param name="audioBinary">The raw audio to transcribe.</param>
/// <param name="interpreter">The speech-to-text interpreter to use.</param>
/// <returns>A <see cref="RecordingText"/> combining the interpretation result and the audio.</returns>
/// <exception cref="InvalidOperationException">Thrown when either argument is null (kept for caller compatibility).</exception>
/// <exception cref="NotImplementedException">Thrown for audio requirements other than FLAC.</exception>
public static RecordingText TranslateSpeechToText(AudioBinary audioBinary, SpeechToTextGoogleAPI interpreter)
{
    if (audioBinary == null || interpreter == null)
    {
        throw new InvalidOperationException("AudioBinary and Interpreter cannot be null.");
    }

    InterpretationResult result;
    switch (interpreter.AudioRequirement)
    {
        case AudioType.Flac:
            // Item1 = sample rate, Item2 = path of the temporary FLAC file.
            Tuple<int, string> sampleRateAndTargetFlac = ConvertFileToFlac(audioBinary);
            try
            {
                interpreter.SpeechRequest(sampleRateAndTargetFlac.Item1, sampleRateAndTargetFlac.Item2);
                result = new InterpretationResult(interpreter);
            }
            finally
            {
                // Always remove the temporary FLAC file — previously a throw
                // from SpeechRequest or the InterpretationResult constructor
                // leaked the converted file on disk.
                File.Delete(sampleRateAndTargetFlac.Item2);
            }
            break;
        default:
            throw new NotImplementedException("This audio type is not yet implemented");
    }

    return new RecordingText(result, audioBinary);
}
/// <summary>
/// Converts the supplied audio into the interpreter's required format, runs
/// the speech-to-text request, and pairs the interpretation with its source
/// audio.
/// </summary>
/// <param name="audioBinary">The raw audio to transcribe.</param>
/// <param name="interpreter">The speech-to-text interpreter to use.</param>
/// <returns>A <see cref="RecordingText"/> combining the interpretation result and the audio.</returns>
/// <exception cref="InvalidOperationException">Thrown when either argument is null (kept for caller compatibility).</exception>
/// <exception cref="NotImplementedException">Thrown for audio requirements other than FLAC.</exception>
public static RecordingText TranslateSpeechToText(AudioBinary audioBinary, SpeechToTextGoogleAPI interpreter)
{
    if (audioBinary == null || interpreter == null)
    {
        throw new InvalidOperationException("AudioBinary and Interpreter cannot be null.");
    }

    InterpretationResult result;
    switch (interpreter.AudioRequirement)
    {
        case AudioType.Flac:
            // Item1 = sample rate, Item2 = path of the temporary FLAC file.
            Tuple<int, string> sampleRateAndTargetFlac = ConvertFileToFlac(audioBinary);
            try
            {
                interpreter.SpeechRequest(sampleRateAndTargetFlac.Item1, sampleRateAndTargetFlac.Item2);
                result = new InterpretationResult(interpreter);
            }
            finally
            {
                // Always remove the temporary FLAC file — previously a throw
                // from SpeechRequest or the InterpretationResult constructor
                // leaked the converted file on disk.
                File.Delete(sampleRateAndTargetFlac.Item2);
            }
            break;
        default:
            throw new NotImplementedException("This audio type is not yet implemented");
    }

    return new RecordingText(result, audioBinary);
}
/// <summary>
/// Receives a posted audio recording, transcribes it with the Google speech
/// API, persists the recording, its audio, and its transcript, then redirects
/// to the detail view for the new recording. Redirects back to the record
/// page when no usable audio file was posted.
/// </summary>
/// <returns>A redirect to the new recording's index page, or to the record-failure route.</returns>
public async Task<ActionResult> Recorded()
{
    SalesforceUser user = Session[helper.UserKey] as SalesforceUser;
    DateTimeOffset recordingTime = DateTimeOffset.Now;
    SpeechToTextGoogleAPI googleInterpreter = new SpeechToTextGoogleAPI();

    if (Request.Files.Count < 1)
    {
        return RedirectToAction("Record", new { id = helper.RecordFailureRouteValue });
    }

    // Only want the one file: default to the last non-null file iterated.
    HttpPostedFileBase file = null;
    foreach (string upload in Request.Files)
    {
        HttpPostedFileBase candidate = Request.Files[upload];
        if (candidate != null)
        {
            file = candidate;
        }
    }

    // Every posted entry may have been null (or empty). The original code
    // dereferenced `file` here and threw a NullReferenceException; treat it
    // as a failed recording instead.
    if (file == null || file.ContentLength <= 0)
    {
        return RedirectToAction("Record", new { id = helper.RecordFailureRouteValue });
    }

    byte[] audioBytes = new byte[file.ContentLength];
    // NOTE(review): Stream.Read may return fewer bytes than requested for
    // large uploads — original single-read behavior kept; verify upstream.
    file.InputStream.Read(audioBytes, 0, file.ContentLength);

    AudioBinary audioBinary = new AudioBinary(audioBytes, AudioType.Wav);
    RecordingText googleText = InterpreterDirector.TranslateSpeechToText(audioBinary, googleInterpreter);
    Recording newRecording = new Recording(user, recordingTime, audioBinary);

    // Saved in three steps, preserving the original insert ordering.
    recordingsDb.Recordings.Add(newRecording);
    await recordingsDb.SaveChangesAsync();
    recordingsDb.AudioBinarys.Add(audioBinary);
    await recordingsDb.SaveChangesAsync();
    recordingsDb.RecordingTexts.Add(googleText);
    await recordingsDb.SaveChangesAsync();

    return RedirectToAction("index", new { id = newRecording.ID });
}