private void WriteWaveHeader(StreamMarshaler sm)
{
    // Serializes a RIFF/WAVE header for the buffered audio: the outer 'RIFF'
    // chunk, an extensible 'fmt ' chunk (18 fixed bytes plus any
    // format-specific data), and the 'data' chunk header that precedes the
    // raw samples.
    byte[] extra = _audioFormat.FormatSpecificData();

    // Emits a four-character RIFF chunk tag.
    void WriteTag(string tag)
    {
        char[] chars = tag.ToCharArray();
        sm.WriteArray(chars, chars.Length);
    }

    WriteTag("RIFF");
    // RIFF size = 'WAVE' (4) + fmt header (8) + fmt body (18 + extra)
    //           + data header (8) + raw sample bytes = data + 38 + extra.
    sm.WriteStream((uint)(_rawAudioData.Length + 38 + extra.Length));
    WriteTag("WAVE");

    WriteTag("fmt ");
    sm.WriteStream(18 + extra.Length);
    sm.WriteStream((ushort)_audioFormat.EncodingFormat);
    sm.WriteStream((ushort)_audioFormat.ChannelCount);
    sm.WriteStream(_audioFormat.SamplesPerSecond);
    sm.WriteStream(_audioFormat.AverageBytesPerSecond);
    sm.WriteStream((ushort)_audioFormat.BlockAlign);
    sm.WriteStream((ushort)_audioFormat.BitsPerSample);
    sm.WriteStream((ushort)extra.Length);
    if (extra.Length != 0)
    {
        sm.WriteStream(extra);
    }

    WriteTag("data");
    sm.WriteStream(_rawAudioData.Length);
}
/// <summary>
/// Wraps an audio stream for SAPI. When <paramref name="audioFormat"/> is
/// provided, a WAVEFORMATEX blob (<c>_wfx</c>) is built from it; otherwise the
/// wave header is parsed from the stream itself.
/// </summary>
/// <param name="stream">The underlying audio data stream.</param>
/// <param name="audioFormat">Explicit format description, or null to read the format from <paramref name="stream"/>.</param>
/// <exception cref="FormatException">Thrown when no format is supplied and the stream is not a valid wave file.</exception>
internal SpAudioStreamWrapper(Stream stream, SpeechAudioFormatInfo audioFormat) : base(stream)
{
    // Assume a WAVEFORMATEX-described format to start with.
    _formatType = SAPIGuids.SPDFID_WaveFormatEx;
    if (audioFormat != null)
    {
        // Fetch once; the original queried FormatSpecificData() twice.
        byte[] formatSpecificData = audioFormat.FormatSpecificData();
        WAVEFORMATEX wAVEFORMATEX = new WAVEFORMATEX
        {
            wFormatTag = (short)audioFormat.EncodingFormat,
            nChannels = (short)audioFormat.ChannelCount,
            nSamplesPerSec = audioFormat.SamplesPerSecond,
            nAvgBytesPerSec = audioFormat.AverageBytesPerSecond,
            nBlockAlign = (short)audioFormat.BlockAlign,
            wBitsPerSample = (short)audioFormat.BitsPerSample,
            cbSize = (short)formatSpecificData.Length
        };
        _wfx = wAVEFORMATEX.ToBytes();
        // BUGFIX: the condition was 'cbSize == 0', which made this append a
        // no-op (copying zero extra bytes) and silently dropped the
        // format-specific data whenever it actually existed. Append it when
        // cbSize is non-zero, as the WAVEFORMATEX layout requires.
        if (wAVEFORMATEX.cbSize != 0)
        {
            byte[] array = new byte[_wfx.Length + wAVEFORMATEX.cbSize];
            Array.Copy(_wfx, array, _wfx.Length);
            Array.Copy(formatSpecificData, 0, array, _wfx.Length, wAVEFORMATEX.cbSize);
            _wfx = array;
        }
    }
    else
    {
        // No explicit format: locate the fmt/data chunks in the stream.
        try
        {
            GetStreamOffsets(stream);
        }
        catch (IOException)
        {
            // Surface a format error rather than an I/O error to callers.
            throw new FormatException(SR.Get(SRID.SynthesizerInvalidWaveFile));
        }
    }
}
/// <summary>
/// Wraps an audio stream for SAPI. When <paramref name="audioFormat"/> is
/// provided, a WAVEFORMATEX blob (<c>_wfx</c>) is built from it; otherwise the
/// wave header is parsed from the stream itself.
/// </summary>
/// <param name="stream">The underlying audio data stream.</param>
/// <param name="audioFormat">Explicit format description, or null to read the format from <paramref name="stream"/>.</param>
/// <exception cref="FormatException">Thrown when no format is supplied and the stream is not a valid wave file.</exception>
internal SpAudioStreamWrapper(Stream stream, SpeechAudioFormatInfo audioFormat) : base(stream)
{
    // Assume PCM to start with
    _formatType = SAPIGuids.SPDFID_WaveFormatEx;
    if (audioFormat != null)
    {
        // Fetch once; the original queried FormatSpecificData() twice.
        byte[] formatSpecificData = audioFormat.FormatSpecificData();
        WAVEFORMATEX wfx = new();
        wfx.wFormatTag = (short)audioFormat.EncodingFormat;
        wfx.nChannels = (short)audioFormat.ChannelCount;
        wfx.nSamplesPerSec = audioFormat.SamplesPerSecond;
        wfx.nAvgBytesPerSec = audioFormat.AverageBytesPerSecond;
        wfx.nBlockAlign = (short)audioFormat.BlockAlign;
        wfx.wBitsPerSample = (short)audioFormat.BitsPerSample;
        wfx.cbSize = (short)formatSpecificData.Length;
        _wfx = wfx.ToBytes();
        // BUGFIX: the condition was 'cbSize == 0', which made this append a
        // no-op (copying zero extra bytes) and silently dropped the
        // format-specific data whenever it actually existed. Append it when
        // cbSize is non-zero, as the WAVEFORMATEX layout requires.
        if (wfx.cbSize != 0)
        {
            byte[] wfxTemp = new byte[_wfx.Length + wfx.cbSize];
            Array.Copy(_wfx, wfxTemp, _wfx.Length);
            Array.Copy(formatSpecificData, 0, wfxTemp, _wfx.Length, wfx.cbSize);
            _wfx = wfxTemp;
        }
    }
    else
    {
        // No explicit format: locate the fmt/data chunks in the stream.
        try
        {
            GetStreamOffsets(stream);
        }
        catch (IOException)
        {
            // Surface a format error rather than an I/O error to callers.
            throw new FormatException(SR.Get(SRID.SynthesizerInvalidWaveFile));
        }
    }
}