public void Input(byte[] buf)
{
    if (buf == null) { return; }
    if (buf.Length == 0) { return; }

    BinaryFormatter bf = new BinaryFormatter();
    MemoryStream stream = new MemoryStream(buf);
    var obj = bf.Deserialize(stream);

    // If the deserialized array type does not match the expected output type,
    // convert between float and short samples before passing the data on.
    if (obj.GetType() != outType)
    {
        var objFloat = obj as float[];
        if (objFloat != null)
        {
            var objShort = new short[objFloat.Length];
            AudioUtil.Convert(objFloat, objShort, objFloat.Length);
            output((T[])(object)objShort);
        }
        else
        {
            var objShort = obj as short[];
            if (objShort != null)
            {
                objFloat = new float[objShort.Length];
                AudioUtil.Convert(objShort, objFloat, objShort.Length);
                output((T[])(object)objFloat);
            }
        }
    }
    else
    {
        output((T[])obj);
    }
}
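// The Convert calls above map between float and short PCM. A minimal sketch of what such a
// conversion typically does (hypothetical helper, not the actual AudioUtil implementation):
// float samples in [-1, 1] are scaled by short.MaxValue, and shorts are scaled back down.
static class PcmConvertSketch
{
    public static void FloatToShort(float[] src, short[] dst, int count)
    {
        for (int i = 0; i < count; i++)
        {
            dst[i] = (short)(src[i] * short.MaxValue); // assumes input is already clamped to [-1, 1]
        }
    }

    public static void ShortToFloat(short[] src, float[] dst, int count)
    {
        for (int i = 0; i < count; i++)
        {
            dst[i] = src[i] / (float)short.MaxValue;
        }
    }
}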
public void OnAudioOutFrameFloat(float[] data)
{
    if (disposed) { return; }
    if (proc == IntPtr.Zero) { return; }

    foreach (var reverseBufFloat in reverseFramer.Frame(data))
    {
        var reverseBuf = reverseBufferFactory.New();
        // Resample when the framed buffer length differs from the target reverse buffer; otherwise just convert.
        if (reverseBufFloat.Length != reverseBuf.Length)
        {
            AudioUtil.ResampleAndConvert(reverseBufFloat, reverseBuf, reverseBuf.Length, this.reverseChannels);
        }
        else
        {
            AudioUtil.Convert(reverseBufFloat, reverseBuf, reverseBuf.Length);
        }

        lock (reverseStreamQueue)
        {
            if (reverseStreamQueue.Count < REVERSE_BUFFER_POOL_CAPACITY - 1)
            {
                reverseStreamQueue.Enqueue(reverseBuf);
                reverseStreamQueueReady.Set();
            }
            else
            {
                this.logger.LogError("[PV] WebRTCAudioProcessor Reverse stream queue overflow");
                this.reverseBufferFactory.Free(reverseBuf);
            }
        }
    }
}
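// The enqueue logic above is a bounded, drop-on-overflow handoff between the audio callback
// thread and a worker thread. A self-contained sketch of that pattern (names are hypothetical,
// not part of the Photon Voice API):
using System.Collections.Generic;
using System.Threading;

class DropOnOverflowQueue<T>
{
    private readonly Queue<T> items = new Queue<T>();
    private readonly AutoResetEvent ready = new AutoResetEvent(false);
    private readonly int capacity;

    public DropOnOverflowQueue(int capacity) { this.capacity = capacity; }

    // Producer: returns false if the item was dropped because the consumer has fallen behind.
    public bool TryEnqueue(T item)
    {
        lock (items)
        {
            if (items.Count >= capacity) { return false; }
            items.Enqueue(item);
        }
        ready.Set();
        return true;
    }

    // Consumer: blocks until an item has likely been enqueued.
    public bool TryDequeue(out T item)
    {
        ready.WaitOne();
        lock (items)
        {
            if (items.Count > 0) { item = items.Dequeue(); return true; }
        }
        item = default(T);
        return false;
    }
}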
public void Read(T[] outBuf, int outChannels, int outSampleRate)
{
    lock (this)
    {
        if (this.started)
        {
            int outPos = 0;
            // enough data in remaining frames to fill entire out buffer
            // framesElemRem / this.sampleRate >= outElemRem / outSampleRate
            while ((this.frameQueue.Count * this.frameSamples - this.curPlayingFrameSamplePos) * this.channels * outSampleRate >= (outBuf.Length - outPos) * this.sampleRate)
            {
                int playingFramePos = this.curPlayingFrameSamplePos * this.channels;
                var frame = frameQueue.Peek();
                int outElemRem = outBuf.Length - outPos;
                int frameElemRem = frame.Length - playingFramePos;

                // enough data in the current frame to fill entire out buffer and some will remain for the next call: keeping this frame
                // frameElemRem / (frCh * frRate) > outElemRem / (outCh * outRate)
                if (frameElemRem * outChannels * outSampleRate > outElemRem * this.channels * this.sampleRate)
                {
                    // frame remainder is large enough to fill outBuf remainder, keep this frame and return
                    //int framePosDelta = this.channels * outChannels * this.sampleRate / (outElemRem * outSampleRate);
                    int framePosDelta = outElemRem * this.channels * this.sampleRate / (outChannels * outSampleRate);
                    if (this.sampleRate == outSampleRate && this.channels == outChannels)
                    {
                        System.Buffer.BlockCopy(frame, playingFramePos * elementSize, outBuf, outPos * elementSize, outElemRem * elementSize);
                    }
                    else
                    {
                        AudioUtil.Resample(frame, playingFramePos, framePosDelta, this.channels, outBuf, outPos, outElemRem, outChannels);
                    }
                    this.curPlayingFrameSamplePos += framePosDelta / this.channels;
                    return;
                }
                // discarding current frame because it fills exactly out buffer or next frame required to do so
                else
                {
                    int outPosDelta = frameElemRem * outChannels * outSampleRate / (this.channels * this.sampleRate);
                    if (this.sampleRate == outSampleRate && this.channels == outChannels)
                    {
                        System.Buffer.BlockCopy(frame, playingFramePos * elementSize, outBuf, outPos * elementSize, frameElemRem * elementSize);
                    }
                    else
                    {
                        AudioUtil.Resample(frame, playingFramePos, frameElemRem, this.channels, outBuf, outPos, outPosDelta, outChannels);
                    }
                    outPos += outPosDelta;
                    this.curPlayingFrameSamplePos = 0;
                    dequeueFrameQueue();
                    if (outPosDelta == outElemRem)
                    {
                        return;
                    }
                }
            }
        }
    }
}
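// Read() above keeps frame and output positions in sync via the ratio
// srcCount / (srcChannels * srcRate) == dstCount / (dstChannels * dstRate).
// For example, 960 source elements at 48 kHz mono cover the same 20 ms as
// 1920 destination elements at 48 kHz stereo, or 480 elements at 24 kHz mono.
// A minimal nearest-neighbor sketch of what a Resample overload with these parameters
// could do (hypothetical code, not the actual AudioUtil implementation):
static void ResampleSketch<T>(T[] src, int srcOffset, int srcCount, int srcChannels,
                              T[] dst, int dstOffset, int dstCount, int dstChannels)
{
    int srcFrames = srcCount / srcChannels;
    int dstFrames = dstCount / dstChannels;
    for (int i = 0; i < dstFrames; i++)
    {
        int srcFrame = i * srcFrames / dstFrames;                    // nearest (floor) source frame
        for (int ch = 0; ch < dstChannels; ch++)
        {
            int srcCh = ch < srcChannels ? ch : srcChannels - 1;     // duplicate last channel when upmixing
            dst[dstOffset + i * dstChannels + ch] = src[srcOffset + srcFrame * srcChannels + srcCh];
        }
    }
}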
public T[] Process(T[] buf)
{
    AudioUtil.Resample(buf, this.frameResampled, this.frameResampled.Length, channels);
    return this.frameResampled;
}
/// <summary>
/// Creates an outgoing audio stream of automatically assigned type and adds procedures (callback or serviceable) for consuming the given audio source data.
/// Adds audio-specific features (e.g. resampling, level meter) to the processing pipeline and to the returned stream handler.
/// </summary>
/// <param name="voiceInfo">Outgoing audio stream parameters. Set applicable fields so they can be read by the encoder and by the receiving client when the voice is created.</param>
/// <param name="source">Streaming audio source.</param>
/// <param name="sampleType">Voice's audio sample type. If it does not match the source audio sample type, conversion will occur.</param>
/// <param name="channelId">Transport channel specific to the transport.</param>
/// <param name="encoder">Audio encoder. Set to null to use the default Opus encoder.</param>
/// <returns>Outgoing stream handler.</returns>
/// <remarks>
/// audioSourceDesc.SamplingRate and voiceInfo.SamplingRate may not match. Automatic resampling will occur in this case.
/// </remarks>
public LocalVoice CreateLocalVoiceAudioFromSource(VoiceInfo voiceInfo, IAudioDesc source, AudioSampleType sampleType, IEncoder encoder = null, int channelId = 0)
{
    // resolve AudioSampleType.Source to concrete type for encoder creation
    if (sampleType == AudioSampleType.Source)
    {
        if (source is IAudioPusher<float> || source is IAudioReader<float>)
        {
            sampleType = AudioSampleType.Float;
        }
        else if (source is IAudioPusher<short> || source is IAudioReader<short>)
        {
            sampleType = AudioSampleType.Short;
        }
    }

    if (encoder == null)
    {
        switch (sampleType)
        {
            case AudioSampleType.Float:
                encoder = Platform.CreateDefaultAudioEncoder<float>(transport, voiceInfo);
                break;
            case AudioSampleType.Short:
                encoder = Platform.CreateDefaultAudioEncoder<short>(transport, voiceInfo);
                break;
        }
    }

    if (source is IAudioPusher<float>)
    {
        if (sampleType == AudioSampleType.Short)
        {
            transport.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioPusher float to short.");
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
            // we can safely reuse the same buffer in callbacks from native code
            var bufferFactory = new FactoryReusableArray<float>(0);
            ((IAudioPusher<float>)source).SetCallback(buf =>
            {
                var shortBuf = localVoice.BufferFactory.New(buf.Length);
                AudioUtil.Convert(buf, shortBuf, buf.Length);
                localVoice.PushDataAsync(shortBuf);
            }, bufferFactory);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
            ((IAudioPusher<float>)source).SetCallback(buf => localVoice.PushDataAsync(buf), localVoice.BufferFactory);
            return localVoice;
        }
    }
    else if (source is IAudioPusher<short>)
    {
        if (sampleType == AudioSampleType.Float)
        {
            transport.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioPusher short to float.");
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
            // we can safely reuse the same buffer in callbacks from native code
            var bufferFactory = new FactoryReusableArray<short>(0);
            ((IAudioPusher<short>)source).SetCallback(buf =>
            {
                var floatBuf = localVoice.BufferFactory.New(buf.Length);
                AudioUtil.Convert(buf, floatBuf, buf.Length);
                localVoice.PushDataAsync(floatBuf);
            }, bufferFactory);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
            ((IAudioPusher<short>)source).SetCallback(buf => localVoice.PushDataAsync(buf), localVoice.BufferFactory);
            return localVoice;
        }
    }
    else
    if (source is IAudioReader<float>)
    {
        if (sampleType == AudioSampleType.Short)
        {
            transport.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioReader float to short.");
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
            localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPoolFloatToShort(localVoice, source as IAudioReader<float>);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
            localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPool<float>(localVoice, source as IAudioReader<float>);
            return localVoice;
        }
    }
    else if (source is IAudioReader<short>)
    {
        if (sampleType == AudioSampleType.Float)
        {
            transport.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioReader short to float.");
            var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
            localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPoolShortToFloat(localVoice, source as IAudioReader<short>);
            return localVoice;
        }
        else
        {
            var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
            localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPool<short>(localVoice, source as IAudioReader<short>);
            return localVoice;
        }
    }
    else
    {
        transport.LogError("[PV] CreateLocalVoiceAudioFromSource does not support Voice.IAudioDesc of type {0}", source.GetType());
        return LocalVoiceAudioDummy.Dummy;
    }
}
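// A usage sketch for the factory method above: "voiceClient" is assumed to be the object exposing
// CreateLocalVoiceAudioFromSource, "voiceInfo" an already configured VoiceInfo, and "micSource"
// some IAudioPusher<float> or IAudioReader<float> implementation (all hypothetical names).
// AudioSampleType.Source keeps the source's own sample format, and the default Opus encoder is
// used because no encoder argument is passed.
var voice = voiceClient.CreateLocalVoiceAudioFromSource(voiceInfo, micSource, AudioSampleType.Source);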