/// <summary>
/// Creates a framed local voice stream. A framed stream splits incoming data into
/// fixed-size frames before encoding, so a non-zero frame size is required.
/// </summary>
/// <param name="frameSize">Samples per frame; must be non-zero.</param>
/// <exception cref="ArgumentException">Thrown when <paramref name="frameSize"/> is 0.</exception>
internal LocalVoiceFramed(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, int channelId, int frameSize)
    : base(voiceClient, encoder, id, voiceInfo, channelId, frameSize)
{
    if (frameSize == 0)
    {
        // ArgumentException instead of bare Exception: callers can handle argument
        // errors specifically, and existing catch (Exception) handlers still match.
        throw new ArgumentException(LogPrefix + ": non 0 frame size required for framed stream", nameof(frameSize));
    }
    this.framer = new Framer<T>(FrameSize);
    this.bufferFactory = new FactoryPrimitiveArrayPool<T>(DATA_POOL_CAPACITY, Name + " Data", FrameSize);
}
/// <summary>
/// Creates a framed local voice stream. A framed stream splits incoming data into
/// fixed-size frames before encoding, so a non-zero frame size is required.
/// When compiled with DUMP_TO_FILE, raw captured data is also written to a
/// sequentially numbered "dump-N.raw" file for debugging.
/// </summary>
/// <param name="frameSize">Samples per frame; must be non-zero.</param>
/// <exception cref="ArgumentException">Thrown when <paramref name="frameSize"/> is 0.</exception>
internal LocalVoiceFramed(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, int channelId, int frameSize)
    : base(voiceClient, encoder, id, voiceInfo, channelId, frameSize)
{
#if DUMP_TO_FILE
    file = File.Open("dump-" + fileCnt++ + ".raw", FileMode.Create);
#endif
    if (frameSize == 0)
    {
        // ArgumentException instead of bare Exception: callers can handle argument
        // errors specifically, and existing catch (Exception) handlers still match.
        throw new ArgumentException(LogPrefix + ": non 0 frame size required for framed stream", nameof(frameSize));
    }
    this.framer = new Framer<T>(FrameSize);
    this.bufferFactory = new FactoryPrimitiveArrayPool<T>(DATA_POOL_CAPACITY, Name + " Data", FrameSize);
}
/// <summary>
/// Lazily initializes the AEC (acoustic echo cancellation) reverse stream:
/// allocates the framer and buffer used for output ("reverse") audio fed back
/// into the processor. Runs at most once and is a no-op after disposal.
/// </summary>
private void InitReverseStream()
{
    // NOTE(review): lock (this) is fragile — external code locking the same
    // instance can deadlock or contend with us. A private readonly gate object
    // would be safer, but that field lives outside this method; flagged only.
    lock (this)
    {
        if (aecInited || disposed)
        {
            return;
        }
        // Reverse frame size: processing frame size scaled by the output/capture
        // sampling-rate ratio, times the output channel count.
        int size = processFrameSize * reverseSamplingRate / samplingRate * reverseChannels;
        reverseFramer = new Framer<float>(size);
        reverseBuf = new short[processFrameSize * reverseChannels / channels]; // should match direct stream
        if (reverseSamplingRate != samplingRate)
        {
            // Fixed typo in user-visible warning: "samping" -> "sampling".
            logger.LogWarning("WebRTCAudioProcessor AEC: output sampling rate {0} != {1} capture sampling rate. For better AEC, set audio source (microphone) and audio output sampling rates to the same value.", reverseSamplingRate, samplingRate);
        }
        aecInited = true;
    }
}
/// <summary>
/// Lazily initializes the AEC (acoustic echo cancellation) reverse stream:
/// allocates the reverse framer and buffer pool and starts the background
/// thread that feeds output ("reverse") audio into the processor.
/// Runs at most once and is a no-op after disposal.
/// </summary>
private void InitReverseStream()
{
    // NOTE(review): lock (this) is fragile — external code locking the same
    // instance can contend with us. A private readonly gate object would be
    // safer, but that field lives outside this method; flagged only.
    lock (this)
    {
        if (aecInited || disposed)
        {
            return;
        }
        // Reverse frame size: processing frame size scaled by the output/capture
        // sampling-rate ratio, times the output channel count.
        int size = processFrameSize * reverseSamplingRate / samplingRate * reverseChannels;
        reverseFramer = new Framer<float>(size);
        reverseBufferFactory = new FactoryPrimitiveArrayPool<short>(REVERSE_BUFFER_POOL_CAPACITY, "WebRTCAudioProcessor Reverse Buffers", this.inFrameSize);
        logger.LogInfo("[PV] WebRTCAudioProcessor Init reverse stream: frame size {0}, reverseSamplingRate {1}, reverseChannels {2}", size, reverseSamplingRate, reverseChannels);
        // reverseStreamThreadRunning is presumably set by ReverseStreamThread
        // itself — TODO confirm against that method (not visible here).
        if (!reverseStreamThreadRunning)
        {
#if NETFX_CORE
            Windows.System.Threading.ThreadPool.RunAsync((x) => { ReverseStreamThread(); });
#else
            // Name the thread BEFORE starting it: Thread.Name can only be set
            // once, and assigning it after Start() races with the running thread.
            var t = new Thread(ReverseStreamThread);
            t.Name = "WebRTCAudioProcessor reverse stream";
            t.Start();
#endif
        }
        if (reverseSamplingRate != samplingRate)
        {
            // Fixed typo in user-visible warning: "samping" -> "sampling".
            logger.LogWarning("[PV] WebRTCAudioProcessor AEC: output sampling rate {0} != {1} capture sampling rate. For better AEC, set audio source (microphone) and audio output sampling rates to the same value.", reverseSamplingRate, samplingRate);
        }
        aecInited = true;
    }
}
/// <summary>
/// Creates a framed local voice stream. A framed stream splits incoming data into
/// fixed-size frames before encoding, so a non-zero frame size is required.
/// </summary>
/// <param name="frameSize">Samples per frame; must be non-zero.</param>
/// <exception cref="ArgumentException">Thrown when <paramref name="frameSize"/> is 0.</exception>
internal LocalVoiceFramed(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, int channelId, int frameSize)
    : base(voiceClient, encoder, id, voiceInfo, channelId, frameSize)
{
    // Validation added for consistency with the sibling framed-voice constructors:
    // a zero frame size cannot be framed and would misconfigure the Framer.
    if (frameSize == 0)
    {
        throw new ArgumentException(LogPrefix + ": non 0 frame size required for framed stream", nameof(frameSize));
    }
    this.framer = new Framer<T>(FrameSize);
    this.bufferFactory = new FactoryPrimitiveArrayPool<T>(DATA_POOL_CAPACITY, Name + " Data", FrameSize);
}