/// <summary>
/// Converts the raw PCM payload of a <see cref="RecognizedAudio"/> into an array of
/// 16-bit samples, keeping channels interleaved in their original stream order.
/// </summary>
/// <param name="inputAudio">The recognized audio to convert; its
/// <see cref="SpeechAudioFormatInfo"/> supplies sample size, block alignment, and
/// channel count used for decoding.</param>
/// <returns>One <see cref="short"/> per sample. For 16-bit audio the little-endian
/// byte pairs are combined; for 8-bit audio each byte is widened unchanged.</returns>
public short[] getArrayFromRecognizedAudio(RecognizedAudio inputAudio)
{
    SpeechAudioFormatInfo speechAudioFormatInfo = inputAudio.Format;

    // Copy the audio into a byte buffer. A 16-bit short holds the result because
    // 16 bits is the maximum sample size we expect from the recognizer.
    // (Fix: the MemoryStream was previously never disposed.)
    byte[] byteArray;
    using (MemoryStream audioStream = new MemoryStream())
    {
        inputAudio.WriteToAudioStream(audioStream);
        byteArray = audioStream.ToArray();
    }

    // bytes / blockAlign = frames; frames * channels = total interleaved samples.
    long numSamplesInAudio = byteArray.Length / speechAudioFormatInfo.BlockAlign * speechAudioFormatInfo.ChannelCount;
    short[] audioArray = new short[numSamplesInAudio];

    // Hoisted loop invariant: blockAlign / channels = bytes occupied by one sample.
    int bytesPerSample = speechAudioFormatInfo.BlockAlign / speechAudioFormatInfo.ChannelCount;

    for (int i = 0; i < byteArray.Length; i += bytesPerSample)
    {
        if (speechAudioFormatInfo.BitsPerSample == 16)
        {
            // Each 16-bit sample is stored little-endian: low byte first, then high.
            // The unchecked cast deliberately wraps high-bit values into negatives.
            audioArray[i / 2] = (short)(byteArray[i] | (byteArray[i + 1] << 8));
        }
        else // assumed 8 bits per sample
        {
            // NOTE(review): 8-bit PCM is conventionally unsigned (0..255, centered at
            // 128); this widens without re-centering, matching the original behavior —
            // confirm whether callers expect signed samples here.
            audioArray[i] = (short)byteArray[i];
        }
    }

    return audioArray;
}
/// <summary>
/// Uploads the recognized audio to the configured server via HTTP PUT and raises a
/// server-connection event describing the outcome (Success on a 2xx status,
/// ServerBusy otherwise). Returns silently when no client is available.
/// </summary>
/// <param name="audio">The recognized audio whose raw stream becomes the request body.</param>
public void SendVoice(RecognizedAudio audio)
{
    using (var client = GetClient())
    using (var audioStream = new MemoryStream())
    {
        if (client == null)
        {
            return;
        }

        audio.WriteToAudioStream(audioStream);

        // BUG FIX: WriteToAudioStream leaves the stream positioned at its end, so
        // StreamContent (which reads from the current position) would upload a
        // zero-byte body. Rewind to the start before sending.
        audioStream.Position = 0;

        // NOTE(review): blocking on .Result preserves the synchronous signature but
        // risks deadlock on a UI/ASP.NET synchronization context — consider adding
        // an async SendVoiceAsync counterpart.
        var response = client.PutAsync(_serverLocation, new StreamContent(audioStream)).Result;

        // The response is intentionally not disposed here: it is handed to event
        // subscribers via ConnectionEventArgs, which may read it after this returns.
        SendServerConnectionEvent(this, new ConnectionEventArgs(
            response.IsSuccessStatusCode ? ConnectionStatus.Success : ConnectionStatus.ServerBusy,
            response));
    }
}
/// <summary>
/// Unpacks the PCM bytes of a <see cref="RecognizedAudio"/> into 16-bit samples,
/// leaving channels interleaved exactly as they appear in the audio stream.
/// </summary>
/// <param name="inputAudio">Audio to decode; its format describes bits per sample,
/// block alignment, and channel count.</param>
/// <returns>One short per sample: little-endian byte pairs combined for 16-bit
/// audio, bytes widened directly for 8-bit audio.</returns>
public short[] getArrayFromRecognizedAudio(RecognizedAudio inputAudio)
{
    SpeechAudioFormatInfo format = inputAudio.Format;

    // Capture the raw audio bytes into a buffer.
    MemoryStream buffer = new MemoryStream();
    inputAudio.WriteToAudioStream(buffer);
    byte[] rawBytes = buffer.ToArray();

    // Total samples = (bytes / bytes-per-frame) * channels-per-frame.
    long sampleCount = rawBytes.Length / format.BlockAlign * format.ChannelCount;
    short[] samples = new short[sampleCount];

    int stride = format.BlockAlign / format.ChannelCount; // bytes per single sample
    bool sixteenBit = format.BitsPerSample == 16;

    for (int offset = 0; offset < rawBytes.Length; offset += stride)
    {
        if (sixteenBit)
        {
            // Samples are little-endian: the low-order byte precedes the high-order byte.
            short sample = (short)rawBytes[offset];
            sample |= (short)(rawBytes[offset + 1] << 8);
            samples[offset / 2] = sample;
        }
        else // 8 bits per sample
        {
            // A single byte per sample widens directly to a short.
            samples[offset] = (short)rawBytes[offset];
        }
    }

    return samples;
}