Code example #1
File: IAudioRenderer.cs Project: reuniware/Ryujinx
        private void AppendMixedBuffer(long tag)
        {
            int[] mixBuffer = new int[MixBufferSamplesCount * AudioConsts.HostChannelsCount];

            foreach (VoiceContext voice in _voices)
            {
                if (!voice.Playing)
                {
                    continue;
                }

                int   outOffset      = 0;
                int   pendingSamples = MixBufferSamplesCount;
                float volume         = voice.Volume;

                while (pendingSamples > 0)
                {
                    int[] samples = voice.GetBufferData(_memory, pendingSamples, out int returnedSamples);

                    if (returnedSamples == 0)
                    {
                        break;
                    }

                    pendingSamples -= returnedSamples;

                    for (int offset = 0; offset < samples.Length; offset++)
                    {
                        mixBuffer[outOffset++] += (int)(samples[offset] * volume);
                    }
                }
            }

            _audioOut.AppendBuffer(_track, tag, GetFinalBuffer(mixBuffer));
        }
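
The mixing variants above all accumulate voice samples into a 32-bit intermediate buffer and then hand it to GetFinalBuffer before appending. The conversion step itself is not part of these excerpts; the sketch below is an assumed stand-in (the name GetFinalBufferSketch and the use of System.Math.Clamp are mine, not Ryujinx's code) showing the clamp-to-PCM16 behavior such a helper would typically need so that clipping voices saturate instead of wrapping.

        // Hypothetical stand-in for GetFinalBuffer: saturates each 32-bit mixed
        // sample into the signed 16-bit range before it is queued for output.
        private static short[] GetFinalBufferSketch(int[] mixBuffer)
        {
            short[] output = new short[mixBuffer.Length];

            for (int index = 0; index < mixBuffer.Length; index++)
            {
                output[index] = (short)Math.Clamp(mixBuffer[index], short.MinValue, short.MaxValue);
            }

            return output;
        }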
Code example #2
        private void AppendMixedBuffer(long tag)
        {
            int[] mixBuffer = new int[MixBufferSamplesCount * AudioRendererConsts.HostChannelsCount];

            foreach (VoiceContext voice in _voices)
            {
                if (!voice.Playing || voice.CurrentWaveBuffer.Size == 0)
                {
                    continue;
                }

                // channelinfo is assumed to be declared elsewhere in this class as a
                // per-channel state array; only its population from _channelState is
                // shown in this excerpt.
                for (int channel = 0; channel < voice.ChannelsCount; channel++)
                {
                    int channelResourceId = voice.ChannelresourceID[channel];

                    channelinfo[channel] = _channelState[channelResourceId];
                }

                int outOffset      = 0;
                int pendingSamples = MixBufferSamplesCount;

                while (pendingSamples > 0)
                {
                    int[] samples = voice.GetBufferData(_memory, pendingSamples, out int returnedSamples);

                    if (returnedSamples == 0)
                    {
                        break;
                    }

                    pendingSamples -= returnedSamples;

                    for (int offset = 0; offset < samples.Length; offset++)
                    {
                        // The same source sample feeds both channels; per-channel mix
                        // volumes are applied when the corresponding channel state is in use.
                        float sampleL = samples[offset] * voice.Volume;
                        float sampleR = samples[offset] * voice.Volume;

                        if (channelinfo[0].is_used)
                        {
                            sampleL *= channelinfo[0].mix_volume.vol1;
                        }

                        if (channelinfo[1].is_used)
                        {
                            sampleR *= channelinfo[1].mix_volume.vol2;
                        }

                        // The mix buffer is interleaved: even offsets hold the left
                        // channel, odd offsets the right; mono voices contribute to
                        // every slot.
                        if (outOffset % 2 == 0 || voice.ChannelsCount == 1)
                        {
                            mixBuffer[outOffset++] += (int)sampleL;
                        }
                        else
                        {
                            mixBuffer[outOffset++] += (int)sampleR;
                        }
                    }
                }
            }

            _audioOut.AppendBuffer(_track, tag, GetFinalBuffer(mixBuffer));
        }
Code example #3
File: IAudioRenderer.cs Project: zhubaojian/Ryujinx
        private unsafe void AppendMixedBuffer(long Tag)
        {
            int[] MixBuffer = new int[MixBufferSamplesCount * AudioConsts.HostChannelsCount];

            foreach (VoiceContext Voice in Voices)
            {
                if (!Voice.Playing)
                {
                    continue;
                }

                int   OutOffset      = 0;
                int   PendingSamples = MixBufferSamplesCount;
                float Volume         = Voice.Volume;

                while (PendingSamples > 0)
                {
                    int[] Samples = Voice.GetBufferData(Memory, PendingSamples, out int ReturnedSamples);

                    if (ReturnedSamples == 0)
                    {
                        break;
                    }

                    PendingSamples -= ReturnedSamples;

                    for (int Offset = 0; Offset < Samples.Length; Offset++)
                    {
                        MixBuffer[OutOffset++] += (int)(Samples[Offset] * Volume);
                    }
                }
            }

            AudioOut.AppendBuffer(Track, Tag, GetFinalBuffer(MixBuffer));
        }
Code example #4
        public ResultCode AppendAudioOutBufferImpl(ServiceCtx context, long position)
        {
            long tag = context.RequestData.ReadInt64();

            AudioOutData data = MemoryHelper.Read<AudioOutData>(
                context.Memory,
                position);

            ReadOnlySpan<byte> buffer = context.Memory.GetSpan((ulong)data.SampleBufferPtr, (int)data.SampleBufferSize);

            _audioOut.AppendBuffer(_track, tag, MemoryMarshal.Cast<byte, short>(buffer));

            return ResultCode.Success;
        }
Code example #5
File: IAudioOut.cs Project: Phukndeeveesss/Ryujinx
        public ResultCode AppendAudioOutBufferImpl(ServiceCtx context, long position)
        {
            long tag = context.RequestData.ReadInt64();

            AudioOutData data = MemoryHelper.Read<AudioOutData>(context.Memory, position);

            // NOTE: Assume PCM16 all the time; change if new formats are found.
            short[] buffer = new short[data.SampleBufferSize / sizeof(short)];

            context.Memory.Read((ulong)data.SampleBufferPtr, MemoryMarshal.Cast<short, byte>(buffer));

            _audioOut.AppendBuffer(_track, tag, buffer);

            return ResultCode.Success;
        }
Code example #6
File: IAudioOut.cs Project: amaron2/Ryujinx
        public long AppendAudioOutBufferImpl(ServiceCtx context, long position)
        {
            long tag = context.RequestData.ReadInt64();

            AudioOutData data = MemoryHelper.Read<AudioOutData>(
                context.Memory,
                position);

            byte[] buffer = context.Memory.ReadBytes(
                data.SampleBufferPtr,
                data.SampleBufferSize);

            _audioOut.AppendBuffer(_track, tag, buffer);

            return 0;
        }
Code example #7
File: IAudioOut.cs Project: cin619/Ryujinx
        public long AppendAudioOutBufferImpl(ServiceCtx Context, long Position)
        {
            long Tag = Context.RequestData.ReadInt64();

            AudioOutData Data = AMemoryHelper.Read<AudioOutData>(
                Context.Memory,
                Position);

            byte[] Buffer = Context.Memory.ReadBytes(
                Data.SampleBufferPtr,
                Data.SampleBufferSize);

            AudioOut.AppendBuffer(Track, Tag, Buffer);

            return 0;
        }
Code example #8
        public ResultCode AppendAudioOutBufferImpl(ServiceCtx context, long position)
        {
            long tag = context.RequestData.ReadInt64();

            AudioOutData data = MemoryHelper.Read<AudioOutData>(
                context.Memory,
                position);

            byte[] buffer = new byte[data.SampleBufferSize];

            context.Memory.Read((ulong)data.SampleBufferPtr, buffer);

            _audioOut.AppendBuffer(_track, tag, buffer);

            return ResultCode.Success;
        }
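
The AppendAudioOutBufferImpl variants above all read an AudioOutData descriptor out of guest memory at the given position and then copy SampleBufferSize bytes starting at SampleBufferPtr. The full layout of AudioOutData is not shown in these excerpts; the stand-in below is hypothetical (the name AudioOutDataSketch is mine, and the real structure carries additional fields) and covers only the two members the snippets actually touch. It assumes System.Runtime.InteropServices for the layout attribute.

        // Hypothetical, trimmed-down descriptor: only the fields referenced by the
        // snippets above are sketched here.
        [StructLayout(LayoutKind.Sequential)]
        private struct AudioOutDataSketch
        {
            public long SampleBufferPtr;  // guest address of the PCM sample data
            public long SampleBufferSize; // size of the sample buffer, in bytes
        }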
Code example #9
File: AalHardwareDevice.cs Project: ski982/Ryujinx-1
        public void AppendBuffer(ReadOnlySpan<short> data, uint channelCount)
        {
            data.CopyTo(_buffer.AsSpan());

            _output.AppendBuffer(_trackId, GetReleasedTag(), _buffer);
        }