Code example #1
0
        /// <summary>
        /// Copies up to <paramref name="maxSamples"/> frames of decoded audio from the
        /// current wave buffer into a new array, advancing playback state.
        /// </summary>
        /// <param name="memory">Guest memory used to (re)decode the buffer when needed.</param>
        /// <param name="maxSamples">Maximum number of frames to return.</param>
        /// <param name="samplesCount">Number of frames actually copied.</param>
        /// <returns>Interleaved host samples, or null when the voice is not playing.</returns>
        public int[] GetBufferData(MemoryManager memory, int maxSamples, out int samplesCount)
        {
            // Not playing: produce nothing (callers treat null as "no data").
            if (!Playing)
            {
                samplesCount = 0;

                return null;
            }

            // Lazily re-decode the current wave buffer when it was flagged dirty.
            if (_bufferReload)
            {
                _bufferReload = false;

                UpdateBuffer(memory);
            }

            WaveBuffer currentBuffer = WaveBuffers[_bufferIndex];

            // Clamp the request to the samples remaining in the decoded buffer.
            int remaining = _samples.Length - _offset;
            int copySize  = Math.Min(maxSamples * AudioRendererConsts.HostChannelsCount, remaining);

            int[] output = new int[copySize];

            Array.Copy(_samples, _offset, output, 0, copySize);

            samplesCount = copySize / AudioRendererConsts.HostChannelsCount;

            _outStatus.PlayedSamplesCount += samplesCount;

            _offset += copySize;

            // Buffer fully consumed: rewind, advance to the next buffer unless
            // looping, and pause after the last buffer.
            if (_offset == _samples.Length)
            {
                _offset = 0;

                if (currentBuffer.Looping == 0)
                {
                    SetBufferIndex(_bufferIndex + 1);
                }

                _outStatus.PlayedWaveBuffersCount++;

                if (currentBuffer.LastBuffer != 0)
                {
                    PlayState = PlayState.Paused;
                }
            }

            return output;
        }
Code example #2
0
        /// <summary>
        /// Decodes the current wave buffer from guest memory into <c>_samples</c>
        /// as interleaved host samples, resampling to the host rate when needed.
        /// </summary>
        /// <param name="memory">Guest memory manager to read source samples from.</param>
        /// <exception cref="InvalidOperationException">Thrown when the sample format is unsupported.</exception>
        private void UpdateBuffer(MemoryManager memory)
        {
            // TODO: Implement conversion for formats other
            // than interleaved stereo (2 channels).
            // As of now, it assumes that HostChannelsCount == 2.
            WaveBuffer wb = WaveBuffers[_bufferIndex];

            if (wb.Position == 0)
            {
                // No source data mapped: use the shared empty array instead of
                // allocating a fresh zero-length array on every call.
                _samples = Array.Empty<int>();

                return;
            }

            if (SampleFormat == SampleFormat.PcmInt16)
            {
                int samplesCount = (int)(wb.Size / (sizeof(short) * ChannelsCount));

                _samples = new int[samplesCount * AudioRendererConsts.HostChannelsCount];

                if (ChannelsCount == 1)
                {
                    // Mono source: duplicate each sample into both host channels.
                    for (int index = 0; index < samplesCount; index++)
                    {
                        short sample = memory.Read<short>((ulong)(wb.Position + index * 2));

                        _samples[index * 2 + 0] = sample;
                        _samples[index * 2 + 1] = sample;
                    }
                }
                else
                {
                    // Interleaved stereo source (assumed): widen each 16-bit
                    // sample to a 32-bit host sample in place.
                    for (int index = 0; index < samplesCount * 2; index++)
                    {
                        _samples[index] = memory.Read<short>((ulong)(wb.Position + index * 2));
                    }
                }
            }
            else if (SampleFormat == SampleFormat.Adpcm)
            {
                byte[] buffer = new byte[wb.Size];

                memory.Read((ulong)wb.Position, buffer);

                _samples = AdpcmDecoder.Decode(buffer, AdpcmCtx);
            }
            else
            {
                // Include the offending format in the message for diagnostics.
                throw new InvalidOperationException($"Unsupported sample format {SampleFormat}.");
            }

            if (SampleRate != AudioRendererConsts.HostSampleRate)
            {
                // TODO: We should keep the frames being discarded (see the 4 below)
                // on a buffer and include it on the next samples buffer, to allow
                // the resampler to do seamless interpolation between wave buffers.
                int samplesCount = _samples.Length / AudioRendererConsts.HostChannelsCount;

                samplesCount = Math.Max(samplesCount - 4, 0);

                _samples = Resampler.Resample2Ch(
                    _samples,
                    SampleRate,
                    AudioRendererConsts.HostSampleRate,
                    samplesCount,
                    ref _resamplerFracPart);
            }
        }