public AudioStream(SampleFormat format, Synthesizer synth)
{
    MMRESULT result;
    var fmt = CreateFormatSpec(format, synth.SampleRate);

    // Create output device
    if ((result = waveOutOpen(ref _ptrOutputDevice, WAVE_MAPPER, ref fmt, WaveOutProc, IntPtr.Zero, WaveOutOpenFlags.CALLBACK_FUNCTION)) != MMRESULT.MMSYSERR_NOERROR)
        throw new ExternalException($"Function 'waveOutOpen' returned error code {result}");

    synth.TTS.AddActiveAudio(this);
    _synth = synth;
}
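// A minimal sketch of the P/Invoke surface the constructor above relies on, based on the
// standard winmm.dll waveOutOpen signature. The project's actual declarations (callback
// delegate type, format struct name, enum layouts) are not shown here and may differ.
private delegate void WaveOutProcDelegate(IntPtr hWaveOut, uint uMsg, IntPtr dwInstance, IntPtr dwParam1, IntPtr dwParam2);

[DllImport("winmm.dll")]
private static extern MMRESULT waveOutOpen(
    ref IntPtr phwo,                    // receives the handle of the opened output device
    uint uDeviceID,                     // WAVE_MAPPER lets Windows choose a suitable device
    ref WAVEFORMATEX pwfx,              // desired sample format (struct name assumed here)
    WaveOutProcDelegate dwCallback,     // receives WOM_OPEN / WOM_DONE / WOM_CLOSE messages
    IntPtr dwInstance,                  // user data handed back to the callback
    WaveOutOpenFlags fdwOpen);          // CALLBACK_FUNCTION: dwCallback is a function/delegate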
public void Speak(string text)
{
    // Actual speaking isn't supported yet. This is debug code for testing vocal properties.
    var synth = new Synthesizer(this)
    {
        Fundamental = 165
    };

    const float amp = .015f;
    const float tilt = -3.00f;

    // Generate 100 harmonics
    for (int i = 0; i < 100; i++)
        synth.AddSampler(new HarmonicSampler(synth, i, amp, .14f * i, tilt));

    synth.AddSampler(new VocalSampler(synth, Phoneme.GetPresetIPA("e")));
    synth.CreateAudioStream();
    ThreadPool.QueueUserWorkItem(PlaySynthFunc, synth);
}
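// A minimal sketch of the worker queued above. ThreadPool.QueueUserWorkItem expects a
// WaitCallback (void, one object parameter); the real PlaySynthFunc body is not shown in
// this excerpt, and synth.Play() below is an assumed method name used only for illustration.
private void PlaySynthFunc(object state)
{
    var synth = (Synthesizer)state;   // the Synthesizer passed to QueueUserWorkItem
    synth.Play();                     // assumed: render and submit audio buffers until playback finishes
}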
public static void GenerateWavHeader(Synthesizer synth, Stream stream, int sampleCount, SampleFormat sampleFormat)
{
    using (var headerStream = new MemoryStream(40))
    using (var header = new BinaryWriter(headerStream, Encoding.Default, false))
    {
        header.Write(CHUNK_RIFF);
        int posFileSize = (int)headerStream.Position;
        header.Write(0); // Placeholder for the RIFF chunk size; patched once the header length is known
        header.Write(ID_WAVE);

        // "fmt " chunk
        header.Write(CHUNK_FMT);
        header.Write(FMT_CHUNK_SIZE);
        header.Write(sampleFormat == SampleFormat.Float32 || sampleFormat == SampleFormat.Float64
            ? WAV_FORMAT_IEEE_FLOAT
            : WAV_FORMAT_PCM);
        header.Write(NUM_CHANNELS);
        header.Write(synth.SampleRate);

        // Data rate (bytes per second), block size, and bits per sample
        int blockSize = 0;
        switch (sampleFormat)
        {
            case SampleFormat.Float64:
                header.Write(synth.SampleRate * sizeof(double) * NUM_CHANNELS);
                header.Write((short)(blockSize = sizeof(double) * NUM_CHANNELS));
                header.Write((short)(sizeof(double) * 8));
                break;
            case SampleFormat.Float32:
                header.Write(synth.SampleRate * sizeof(float) * NUM_CHANNELS);
                header.Write((short)(blockSize = sizeof(float) * NUM_CHANNELS));
                header.Write((short)(sizeof(float) * 8));
                break;
            case SampleFormat.Signed16:
                header.Write(synth.SampleRate * sizeof(short) * NUM_CHANNELS);
                header.Write((short)(blockSize = sizeof(short) * NUM_CHANNELS));
                header.Write((short)(sizeof(short) * 8));
                break;
            case SampleFormat.Unsigned8:
                header.Write(synth.SampleRate * sizeof(byte) * NUM_CHANNELS);
                header.Write((short)(blockSize = sizeof(byte) * NUM_CHANNELS));
                header.Write((short)(sizeof(byte) * 8));
                break;
        }

        header.Write(EXT_SIZE); // cbSize, required for non-PCM formats

        // "fact" chunk (required for non-PCM formats)
        if (sampleFormat == SampleFormat.Float32 || sampleFormat == SampleFormat.Float64)
        {
            header.Write(CHUNK_FACT);
            header.Write(FACT_CHUNK_SIZE);
            header.Write(sampleCount);
        }

        // "data" chunk
        header.Write(CHUNK_DATA);
        int dataSize = blockSize * sampleCount;
        header.Write(dataSize);
        int dataPos = (int)headerStream.Position;

        // Patch the RIFF chunk size: total file size minus the 8 bytes of the "RIFF" ID and the size field itself
        headerStream.Position = posFileSize;
        header.Write((int)headerStream.Length + dataSize - 8);

        // Copy the header over to the audio stream and leave it positioned at the start of the sample data
        header.Flush();
        headerStream.WriteTo(stream);
        stream.Position = dataPos;
    }
}
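// A hypothetical usage sketch: GenerateWavHeader writes the header and leaves the output
// stream positioned at the start of the data chunk, so sample data can be appended right
// after it. RenderSamples is an assumed helper name; only GenerateWavHeader above is real.
using (var file = File.Create("speech.wav"))
using (var writer = new BinaryWriter(file))
{
    float[] samples = RenderSamples(synth);                               // assumed: one float per frame, mono
    GenerateWavHeader(synth, file, samples.Length, SampleFormat.Float32);
    foreach (float s in samples)
        writer.Write(s);                                                  // IEEE float samples, matching the header
}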