        public DecoderPipeline([NotNull] IVoiceDecoder decoder, uint inputFrameSize, [NotNull] Action<DecoderPipeline> completionHandler, bool softClip = true)
        {
            if (decoder == null) throw new ArgumentNullException("decoder");
            if (completionHandler == null) throw new ArgumentNullException("completionHandler");

            _completionHandler = completionHandler;
            _inputBuffer = new TransferBuffer<VoicePacket>(32);

            // we need buffers to hold the encoded frames, but we have no idea how large an encoded frame is! These buffers are large enough...
            // ...to hold a frame with no compression whatsoever, so they should be large enough to hold the frame when it's opus compressed.
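            // (The "* 4" presumably allows 4 bytes per sample, i.e. an uncompressed frame of 32-bit float PCM.)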
            _bytePool = new ConcurrentPool<byte[]>(12, () => new byte[inputFrameSize * decoder.Format.Channels * 4]);
            _channelListPool = new ConcurrentPool<List<RemoteChannel>>(12, () => new List<RemoteChannel>());

            _frameDuration = TimeSpan.FromSeconds((double)inputFrameSize / decoder.Format.SampleRate);
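            // (e.g. a 960-sample frame at a 48 kHz sample rate works out to a 20 ms frame duration)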
            _firstFrameArrival = null;
            _firstFrameSeq = 0;

            var source = new BufferedDecoder(decoder, inputFrameSize, decoder.Format, RecycleFrame);
            var ramped = new VolumeRampedFrameSource(source, this);
            var samples = new FrameToSampleConverter(ramped);

            ISampleSource toResampler = samples;
            if (softClip)
                toResampler = new SoftClipSampleSource(samples);

            var resampled = new Resampler(toResampler);

            _source = source;
            _output = resampled;
        }
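
For illustration, a minimal sketch of how this constructor might be called; CreateOpusDecoder and ReturnPipelineToPool are hypothetical stand-ins for whatever decoder factory and recycling logic the surrounding code provides, and the 960-sample frame size is just an example value:

        // Hypothetical setup: any IVoiceDecoder implementation will do.
        IVoiceDecoder decoder = CreateOpusDecoder(sampleRate: 48000, channels: 1);

        var pipeline = new DecoderPipeline(
            decoder,
            inputFrameSize: 960,                             // e.g. 20 ms of audio at 48 kHz
            completionHandler: p => ReturnPipelineToPool(p), // hypothetical: recycle the pipeline when playback finishes
            softClip: true);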
Example #2
        public DecoderPipeline(IVoiceDecoder decoder, uint frameSize, Action <DecoderPipeline> completionHandler, bool softClip = true)
        {
            _completionHandler = completionHandler;
            _inputBuffer       = new TransferBuffer <EncodedAudio>();
            _bytePool          = new ConcurrentPool <byte[]>(12, () => new byte[frameSize * decoder.Format.Channels * 4]); // todo wrong frame size (although it should still be large enough)

            _frameDuration     = TimeSpan.FromSeconds((double)frameSize / decoder.Format.SampleRate);
            _firstFrameArrival = null;
            _firstFrameSeq     = 0;

            var source  = new BufferedDecoder(decoder, frameSize, decoder.Format, frame => _bytePool.Put(frame.Data.Array));
            var ramped  = new VolumeRampedFrameSource(source, this);
            var samples = new FrameToSampleConverter(ramped);

            ISampleSource toResampler = samples;

            if (softClip)
            {
                toResampler = new SoftClipSampleSource(samples);
            }

            var resampled = new Resampler(toResampler);

            _source = source;
            _output = resampled;
        }
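
Note that this overload recycles decoded frames with an inline lambda, while the other two examples pass a RecycleFrame method instead. A plausible sketch of such a method, assuming the VoicePacket type exposes its encoded buffer and channel list under the (guessed) names EncodedAudioFrame and Channels:

        private void RecycleFrame(VoicePacket packet)
        {
            // Return the encoded-frame buffer to the byte pool so a later packet can reuse it.
            _bytePool.Put(packet.EncodedAudioFrame.Array);

            // The channel list is pooled too (see _channelListPool above), so clear and recycle it as well.
            if (packet.Channels != null)
            {
                packet.Channels.Clear();
                _channelListPool.Put(packet.Channels);
            }
        }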
Example #3
        public DecoderPipeline([NotNull] IVoiceDecoder decoder, uint inputFrameSize, [NotNull] Action <DecoderPipeline> completionHandler, string id, bool softClip = true)
        {
            if (decoder == null)
            {
                throw new ArgumentNullException("decoder");
            }
            if (completionHandler == null)
            {
                throw new ArgumentNullException("completionHandler");
            }

            _id = id;

            _completionHandler = completionHandler;
            _inputBuffer       = new TransferBuffer <VoicePacket>(32);

            // we need buffers to hold the encoded frames, but we have no idea how large an encoded frame is! These buffers are large enough...
            // ...to hold a frame with no compression whatsoever, so they should be large enough to hold the frame when it's opus compressed.
            _bytePool        = new ConcurrentPool <byte[]>(12, () => new byte[inputFrameSize * decoder.Format.Channels * 4]);
            _channelListPool = new ConcurrentPool <List <RemoteChannel> >(12, () => new List <RemoteChannel>());

            _frameDuration     = TimeSpan.FromSeconds((double)inputFrameSize / decoder.Format.SampleRate);
            _firstFrameArrival = null;
            _firstFrameSeq     = 0;

            // Buffer decoded packets and decode them on demand
            var source = new BufferedDecoder(decoder, inputFrameSize, decoder.Format, RecycleFrame);

            _source = source;

            // Every time the volume changes, change it smoothly over the course of a single frame
            var ramped = new VolumeRampedFrameSource(source, this);

            // Convert stream of fixed size frames into a stream of samples taken in arbitrary chunks
            var samples = new FrameToSampleConverter(ramped);

            // Monitor stream sync and adjust playback rate to stay closely in sync
            _synchronizer = new SynchronizerSampleSource(samples, TimeSpan.FromSeconds(1));

            // Resample the data to the output rate
            var resampled = new Resampler(_synchronizer, this);

            // Chain a soft clip stage if necessary
            _output = softClip ? (ISampleSource) new SoftClipSampleSource(resampled) : resampled;
        }