// Feeds far-end (playback) audio into the native WebRTC audio processor as the
// reverse stream used for echo cancellation. Reuses the preallocated reverseBuf,
// so this overload must only be called from a single (audio output) thread.
public void OnAudioOutFrameFloat(float[] data)
{
    if (disposed)
    {
        return;
    }
    if (proc == IntPtr.Zero)
    {
        return;
    }
    foreach (var reverseBufFloat in reverseFramer.Frame(data))
    {
        // Resample only when the framed buffer does not already match the
        // processor's expected reverse frame size; otherwise plain conversion.
        if (reverseBufFloat.Length != reverseBuf.Length)
        {
            AudioUtil.ResampleAndConvert(reverseBufFloat, reverseBuf, reverseBuf.Length, this.reverseChannels);
        }
        else
        {
            AudioUtil.Convert(reverseBufFloat, reverseBuf, reverseBuf.Length);
        }
        int err = webrtc_audio_processor_process_reverse(proc, reverseBuf, reverseBuf.Length);
        // Log only when the error code changes (avoids per-frame log spam) and
        // only for real errors: previously a recovery to err == 0 was reported
        // as "error 0" through LogError.
        if (lastProcessReverseErr != err)
        {
            lastProcessReverseErr = err;
            if (err != 0)
            {
                this.logger.LogError("WebRTCAudioProcessor OnAudioOutFrameFloat: webrtc_audio_processor_process_reverse() error {0}", err);
            }
        }
    }
}
/// <summary>
/// Creates outgoing audio stream of type automatically assigned and adds procedures (callback or serviceable) for consuming given audio source data.
/// Adds audio specific features (e.g. resampling, level meter) to processing pipeline and to returning stream handler.
/// </summary>
/// <param name="voiceInfo">Outgoing audio stream parameters. Set applicable fields to read them by encoder and by receiving client when voice created.</param>
/// <param name="source">Streaming audio source.</param>
/// <param name="forceShort">For audio sources producing buffers of 'float' type, creates stream of 'short' type and adds converter.</param>
/// <param name="channelId">Transport channel specific to transport. Set to VoiceClient.ChannelAuto to let transport automatically assign channel.</param>
/// <param name="encoder">Audio encoder. Set to null to use default Opus encoder.</param>
/// <returns>Outgoing stream handler.</returns>
/// <remarks>
/// voiceInfo.sourceSamplingRate and voiceInfo.SamplingRate may not match. Automatic resampling will occur in this case.
/// </remarks>
public Voice.LocalVoice CreateLocalVoiceAudioFromSource(Voice.VoiceInfo voiceInfo, Voice.IAudioDesc source, bool forceShort = false, int channelId = ChannelAuto, IEncoder encoder = null)
{
    if (source is Voice.IAudioPusher<float>)
    {
        if (forceShort)
        {
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, channelId, encoder);
            // we can safely reuse the same buffer in callbacks from native code
            var bufferFactory = new FactoryReusableArray<float>(0);
            ((Voice.IAudioPusher<float>)source).SetCallback(buf =>
            {
                // convert each pushed float buffer to short before handing it to the voice
                var shortBuf = localVoice.BufferFactory.New(buf.Length);
                AudioUtil.Convert(buf, shortBuf, buf.Length);
                localVoice.PushDataAsync(shortBuf);
            }, bufferFactory);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, channelId, encoder);
            // sample types match: push source buffers straight through, pooled by the voice's factory
            ((Voice.IAudioPusher<float>)source).SetCallback(buf => localVoice.PushDataAsync(buf), localVoice.BufferFactory);
            return localVoice;
        }
    }
    else if (source is Voice.IAudioPusher<short>)
    {
        var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, channelId, encoder);
        ((Voice.IAudioPusher<short>)source).SetCallback(buf => localVoice.PushDataAsync(buf), localVoice.BufferFactory);
        return localVoice;
    }
    else if (source is Voice.IAudioReader<float>)
    {
        if (forceShort)
        {
            transport.LogInfo("[PV] Creating local voice with source samples type conversion from float to short.");
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, channelId, encoder);
            // reader sources are polled via a serviceable adapter; this one converts float -> short
            localVoice.LocalUserServiceable = new Voice.BufferReaderPushAdapterAsyncPoolFloatToShort(localVoice, source as Voice.IAudioReader<float>);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, channelId, encoder);
            localVoice.LocalUserServiceable = new Voice.BufferReaderPushAdapterAsyncPool<float>(localVoice, source as Voice.IAudioReader<float>);
            return localVoice;
        }
    }
    else if (source is Voice.IAudioReader<short>)
    {
        var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, channelId, encoder);
        localVoice.LocalUserServiceable = new Voice.BufferReaderPushAdapterAsyncPool<short>(localVoice, source as Voice.IAudioReader<short>);
        return localVoice;
    }
    else
    {
        // unknown source type: report and return a no-op voice so callers need no null check
        transport.LogError("[PV] CreateLocalVoiceAudioFromSource does not support Voice.IAudioDesc of type {0}", source.GetType());
        return Voice.LocalVoiceAudioDummy.Dummy;
    }
}
/// <summary>Do the actual data read/push.</summary>
/// <param name="localVoice">LocalVoice instance to push data to. Must be a LocalVoiceFramed of float samples.</param>
public override void Service(LocalVoice localVoice)
{
    var voice = (LocalVoiceFramed<float>)localVoice;
    // take a pooled output buffer for the first read; a fresh one is taken after each push
    float[] outBuf = voice.BufferFactory.New();
    for (; this.reader.Read(buffer); outBuf = voice.BufferFactory.New())
    {
        AudioUtil.Convert(buffer, outBuf, outBuf.Length);
        voice.PushDataAsync(outBuf);
    }
    // the last buffer acquired was never pushed; return it to the pool
    voice.BufferFactory.Free(outBuf, outBuf.Length);
}
// Deserializes an incoming frame and emits it via the output callback, converting
// between float[] and short[] samples when the sender's array type does not match
// this decoder's output type T.
// NOTE(review): BinaryFormatter is unsafe on untrusted input (and removed in .NET 9);
// frames arrive from the network here, so this should move to a safe wire format —
// requires a matching change on the serializing side, so flagged rather than changed.
public void Input(byte[] buf, FrameFlags flags)
{
    // ignore null/empty frames
    if (buf == null)
    {
        return;
    }
    if (buf.Length == 0)
    {
        return;
    }
    BinaryFormatter bf = new BinaryFormatter();
    // NOTE(review): stream is not disposed; harmless for MemoryStream but inconsistent style
    MemoryStream stream = new MemoryStream(buf);
    var obj = bf.Deserialize(stream);
    if (obj.GetType() != outType)
    {
        // sample type mismatch with the sender: convert float <-> short
        var objFloat = obj as float[];
        if (objFloat != null)
        {
            var objShort = new short[objFloat.Length];
            AudioUtil.Convert(objFloat, objShort, objFloat.Length);
            output(new FrameOut<T>((T[])(object)objShort, false));
        }
        else
        {
            var objShort = obj as short[];
            if (objShort != null)
            {
                objFloat = new float[objShort.Length];
                AudioUtil.Convert(objShort, objFloat, objShort.Length);
                output(new FrameOut<T>((T[])(object)objFloat, false));
            }
            // NOTE(review): any other deserialized type is silently dropped here
        }
    }
    else
    {
        // types already match: emit the deserialized array directly
        output(new FrameOut<T>((T[])obj, false));
    }
}
// Far-end (playback) audio callback: frames the data, converts it to the
// processor's reverse-stream format and queues it for consumption by the
// reverse-stream processing thread (which signals via reverseStreamQueueReady).
public void OnAudioOutFrameFloat(float[] data)
{
    if (disposed)
    {
        return;
    }
    if (proc == IntPtr.Zero)
    {
        return;
    }
    foreach (var reverseBufFloat in reverseFramer.Frame(data))
    {
        // pooled buffer; ownership passes to the queue consumer on Enqueue,
        // otherwise it must be freed back to the factory below
        var reverseBuf = reverseBufferFactory.New();
        if (reverseBufFloat.Length != reverseBuf.Length)
        {
            // framed size differs from the processor frame size: resample while converting
            AudioUtil.ResampleAndConvert(reverseBufFloat, reverseBuf, reverseBuf.Length, this.reverseChannels);
        }
        else
        {
            AudioUtil.Convert(reverseBufFloat, reverseBuf, reverseBuf.Length);
        }
        lock (reverseStreamQueue)
        {
            // keep one slot of headroom relative to the buffer pool capacity
            if (reverseStreamQueue.Count < REVERSE_BUFFER_POOL_CAPACITY - 1)
            {
                reverseStreamQueue.Enqueue(reverseBuf);
                reverseStreamQueueReady.Set();
            }
            else
            {
                // consumer is not keeping up: drop this frame and recycle the buffer
                this.logger.LogError("[PV] WebRTCAudioProcessor Reverse stream queue overflow");
                this.reverseBufferFactory.Free(reverseBuf);
            }
        }
    }
}
/// <summary>
/// Creates outgoing audio stream of type automatically assigned and adds procedures (callback or serviceable) for consuming given audio source data.
/// Adds audio specific features (e.g. resampling, level meter) to processing pipeline and to returning stream handler.
/// </summary>
/// <param name="voiceInfo">Outgoing audio stream parameters. Set applicable fields to read them by encoder and by receiving client when voice created.</param>
/// <param name="source">Streaming audio source.</param>
/// <param name="sampleType">Voice's audio sample type. If does not match source audio sample type, conversion will occur.</param>
/// <param name="channelId">Transport channel specific to transport.</param>
/// <param name="encoder">Audio encoder. Set to null to use default Opus encoder.</param>
/// <returns>Outgoing stream handler.</returns>
/// <remarks>
/// audioSourceDesc.SamplingRate and voiceInfo.SamplingRate may not match. Automatic resampling will occur in this case.
/// </remarks>
public LocalVoice CreateLocalVoiceAudioFromSource(VoiceInfo voiceInfo, IAudioDesc source, AudioSampleType sampleType, IEncoder encoder = null, int channelId = 0)
{
    // resolve AudioSampleType.Source to concrete type for encoder creation
    if (sampleType == AudioSampleType.Source)
    {
        if (source is IAudioPusher<float> || source is IAudioReader<float>)
        {
            sampleType = AudioSampleType.Float;
        }
        else if (source is IAudioPusher<short> || source is IAudioReader<short>)
        {
            sampleType = AudioSampleType.Short;
        }
    }
    if (encoder == null)
    {
        switch (sampleType)
        {
            case AudioSampleType.Float:
                encoder = Platform.CreateDefaultAudioEncoder<float>(transport, voiceInfo);
                break;
            case AudioSampleType.Short:
                encoder = Platform.CreateDefaultAudioEncoder<short>(transport, voiceInfo);
                break;
        }
    }
    if (source is IAudioPusher<float>)
    {
        if (sampleType == AudioSampleType.Short)
        {
            transport.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioPusher float to short.");
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
            // we can safely reuse the same buffer in callbacks from native code
            var bufferFactory = new FactoryReusableArray<float>(0);
            ((IAudioPusher<float>)source).SetCallback(buf =>
            {
                // convert each pushed float buffer to short before handing it to the voice
                var shortBuf = localVoice.BufferFactory.New(buf.Length);
                AudioUtil.Convert(buf, shortBuf, buf.Length);
                localVoice.PushDataAsync(shortBuf);
            }, bufferFactory);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
            // sample types match: push buffers straight through, pooled by the voice's factory
            ((IAudioPusher<float>)source).SetCallback(buf => localVoice.PushDataAsync(buf), localVoice.BufferFactory);
            return localVoice;
        }
    }
    else if (source is IAudioPusher<short>)
    {
        if (sampleType == AudioSampleType.Float)
        {
            transport.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioPusher short to float.");
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
            // we can safely reuse the same buffer in callbacks from native code
            var bufferFactory = new FactoryReusableArray<short>(0);
            ((IAudioPusher<short>)source).SetCallback(buf =>
            {
                // convert each pushed short buffer to float before handing it to the voice
                var floatBuf = localVoice.BufferFactory.New(buf.Length);
                AudioUtil.Convert(buf, floatBuf, buf.Length);
                localVoice.PushDataAsync(floatBuf);
            }, bufferFactory);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
            ((IAudioPusher<short>)source).SetCallback(buf => localVoice.PushDataAsync(buf), localVoice.BufferFactory);
            return localVoice;
        }
    }
    else if (source is IAudioReader<float>)
    {
        if (sampleType == AudioSampleType.Short)
        {
            transport.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioReader float to short.");
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
            // reader sources are polled via a serviceable adapter; this one converts float -> short
            localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPoolFloatToShort(localVoice, source as IAudioReader<float>);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
            localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPool<float>(localVoice, source as IAudioReader<float>);
            return localVoice;
        }
    }
    else if (source is IAudioReader<short>)
    {
        if (sampleType == AudioSampleType.Float)
        {
            transport.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioReader short to float.");
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
            localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPoolShortToFloat(localVoice, source as IAudioReader<short>);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
            localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPool<short>(localVoice, source as IAudioReader<short>);
            return localVoice;
        }
    }
    else
    {
        // unknown source type: report and return a no-op voice so callers need no null check
        transport.LogError("[PV] CreateLocalVoiceAudioFromSource does not support Voice.IAudioDesc of type {0}", source.GetType());
        return LocalVoiceAudioDummy.Dummy;
    }
}