Example #1
        /// <summary>
        /// Receiver for the audio data.
        /// </summary>
        /// <param name="audioData">A buffer containing the next chunk of audio data.</param>
        public override void Receive(Message<AudioBuffer> audioData)
        {
            // process only buffers that contain valid data
            if (audioData.Data.HasValidData)
            {
                // reconfigure and restart the renderer only if the format has changed
                if (!WaveFormat.Equals(audioData.Data.Format, this.currentInputFormat))
                {
                    // Make a copy of the new input format (don't just use a direct reference,
                    // as the object graph of the Message.Data will be reclaimed by the runtime).
                    audioData.Data.Format.DeepClone(ref this.currentInputFormat);
                    this.configuration.InputFormat = this.currentInputFormat;

                    // stop and restart the renderer to switch formats
                    this.wasapiRender.StopRendering();
                    this.wasapiRender.StartRendering(
                        this.configuration.BufferLengthSeconds,
                        this.configuration.TargetLatencyInMs,
                        this.configuration.Gain,
                        this.configuration.InputFormat);
                }

                // Append the audio buffer to the audio renderer
                this.wasapiRender.AppendAudio(audioData.Data.Data, false);
            }
        }
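
For orientation, here is a minimal sketch of how a format-switching renderer like this is typically wired into a Psi pipeline. The AudioCapture/AudioPlayer component names and the OutputFormat property follow the Microsoft.Psi.Audio API, but the exact configuration shown is an assumption, not taken from the snippet above.

using Microsoft.Psi;
using Microsoft.Psi.Audio;

class AudioPassthroughExample
{
    static void Main()
    {
        using (var pipeline = Pipeline.Create())
        {
            // capture microphone audio in a known PCM format (assumed configuration)
            var capture = new AudioCapture(pipeline, new AudioCaptureConfiguration
            {
                OutputFormat = WaveFormat.Create16kHz1Channel16BitPcm(),
            });

            // a player whose Receive method, like the one above, absorbs format changes
            var player = new AudioPlayer(pipeline, new AudioPlayerConfiguration());
            capture.PipeTo(player);

            pipeline.Run();
        }
    }
}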
Example #2
        /// <summary>
        /// Receiver for audio data.
        /// </summary>
        /// <param name="audioBuffer">A buffer containing the next chunk of audio data.</param>
        /// <param name="e">The message envelope for the audio data.</param>
        protected override void Receive(AudioBuffer audioBuffer, Envelope e)
        {
            // process only buffers that contain valid data
            if (audioBuffer.HasValidData)
            {
                // update the cached input format only if it has changed
                if (!WaveFormat.Equals(this.currentInputFormat, audioBuffer.Format))
                {
                    this.SetInputFormat(audioBuffer.Format);
                }

                unsafe
                {
                    // pass pointer to audio buffer data directly to MFResampler
                    fixed (void* dataPtr = audioBuffer.Data)
                    {
                        // compute the timestamp at the start of the chunk (originating time is at the end)
                        // Note that timestamp sent to resampler is expressed in ticks and will need to be
                        // converted back to a DateTime when posting the resampled audio.
                        this.resampler.Resample(
                            new IntPtr(dataPtr),
                            audioBuffer.Data.Length,
                            e.OriginatingTime.Ticks - (10000000L * audioBuffer.Length / this.Configuration.InputFormat.AvgBytesPerSec));
                    }
                }
            }
        }
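
The comment above notes that the start-of-chunk timestamp handed to the resampler is expressed in ticks and must be converted back to a DateTime when the resampled audio is posted. Below is a minimal sketch of that conversion, assuming a callback of this shape; the callback name, its signature, and the OutputFormat property are assumptions, and only the tick arithmetic mirrors the receiver above.

        private void OnResampledAudio(IntPtr data, int length, long timestampTicks)
        {
            // copy the resampled bytes out of the native buffer
            byte[] resampled = new byte[length];
            System.Runtime.InteropServices.Marshal.Copy(data, resampled, 0, length);

            // the resampler was given the start-of-chunk time; Psi originating times
            // mark the end of the chunk, so add the chunk duration back on
            long durationTicks = 10000000L * length / this.Configuration.OutputFormat.AvgBytesPerSec;
            var originatingTime = new DateTime(timestampTicks + durationTicks, DateTimeKind.Utc);

            this.Out.Post(new AudioBuffer(resampled, this.Configuration.OutputFormat), originatingTime);
        }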
Example #3
        /// <summary>
        /// Receiver for the audio data.
        /// </summary>
        /// <param name="audioData">A buffer containing the next chunk of audio data.</param>
        public override void Receive(Message<AudioBuffer> audioData)
        {
            // process only buffers that contain valid data
            if (audioData.Data.HasValidData)
            {
                // reconfigure and restart the renderer only if the format has changed
                if (!WaveFormat.Equals(audioData.Data.Format, this.currentInputFormat))
                {
                    // Make a copy of the new input format (don't just use a direct reference,
                    // as the object graph of the Message.Data will be reclaimed by the runtime).
                    audioData.Data.Format.DeepClone(ref this.currentInputFormat);
                    this.configuration.InputFormat = this.currentInputFormat;

                    // stop and restart the renderer to switch formats
                    this.audioRenderDevice.StopRendering();
                    this.audioRenderDevice.StartRendering(
                        this.configuration.BufferLengthSeconds,
                        this.configuration.TargetLatencyInMs,
                        this.configuration.Gain,
                        this.configuration.InputFormat);
                }

                // Append the audio buffer to the audio renderer. The flag selects whether
                // to overwrite unrendered data or to block until the internal rendering
                // queue has room.
                this.audioRenderDevice.AppendAudio(audioData.Data.Data, this.overwrite);
            }
        }
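
The only substantive difference from Example #1 is the configurable second argument to AppendAudio: per the comment above, passing false blocks the receiver until the renderer's internal queue has room (lossless playback at the cost of backpressure on the pipeline), while true overwrites unrendered data, presumably keeping playback latency bounded at the cost of dropped audio.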
Example #4
        public GoogleSpeak(Pipeline pipeline, Microsoft.Psi.Audio.WaveFormat format, string languageCode)
            : base(pipeline)
        {
            this.gClient = TextToSpeechClient.Create();
            this.format = format;
            this.TextLanguageCode = languageCode;

            // Map the Psi wave format to the corresponding Google Cloud audio encoding.
            // Only 16 kHz, mono, 16-bit PCM is handled here; for any other format,
            // this.googleAudioFormat keeps its enum default value.
            if (format.Equals(Microsoft.Psi.Audio.WaveFormat.Create16kHz1Channel16BitPcm()))
            {
                this.googleAudioFormat = AudioEncoding.Linear16;
            }
        }
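
For context, a hedged sketch of how these stored fields might later be used to synthesize speech and post it as audio. The receiver below is not part of the original snippet: its name, the voice gender choice, and the posting logic are assumptions; the client calls follow the Google.Cloud.TextToSpeech.V1 API.

        protected void ReceiveText(string text, Envelope e)
        {
            var response = this.gClient.SynthesizeSpeech(
                new SynthesisInput { Text = text },
                new VoiceSelectionParams
                {
                    LanguageCode = this.TextLanguageCode,
                    SsmlGender = SsmlVoiceGender.Neutral,
                },
                new AudioConfig
                {
                    AudioEncoding = this.googleAudioFormat,
                    SampleRateHertz = (int)this.format.SamplesPerSec,
                });

            // Note: LINEAR16 responses include a WAV header; for raw PCM consumers it may
            // need to be stripped before wrapping the bytes in a Psi AudioBuffer.
            this.Out.Post(new AudioBuffer(response.AudioContent.ToByteArray(), this.format), e.OriginatingTime);
        }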