/// <summary>
/// Copies buffer data from a given address to another.
/// </summary>
/// <remarks>
/// This does a GPU side copy.
/// </remarks>
/// <param name="memoryManager">GPU memory manager where the buffer is mapped</param>
/// <param name="srcVa">GPU virtual address of the copy source</param>
/// <param name="dstVa">GPU virtual address of the copy destination</param>
/// <param name="size">Size in bytes of the copy</param>
public void CopyBuffer(MemoryManager memoryManager, GpuVa srcVa, GpuVa dstVa, ulong size)
{
    ulong srcAddress = TranslateAndCreateBuffer(memoryManager, srcVa.Pack(), size);
    ulong dstAddress = TranslateAndCreateBuffer(memoryManager, dstVa.Pack(), size);

    Buffer srcBuffer = GetBuffer(srcAddress, size);
    Buffer dstBuffer = GetBuffer(dstAddress, size);

    int srcOffset = (int)(srcAddress - srcBuffer.Address);
    int dstOffset = (int)(dstAddress - dstBuffer.Address);

    _context.Renderer.Pipeline.CopyBuffer(
        srcBuffer.Handle,
        dstBuffer.Handle,
        srcOffset,
        dstOffset,
        (int)size);

    if (srcBuffer.IsModified(srcAddress, size))
    {
        dstBuffer.SignalModified(dstAddress, size);
    }
    else
    {
        // Optimization: If the data being copied is already in guest memory,
        // copy it there directly instead of flushing it back from the GPU.
        dstBuffer.ClearModified(dstAddress, size);

        memoryManager.Physical.WriteUntracked(dstAddress, memoryManager.Physical.GetSpan(srcAddress, (int)size));
    }
}
/// <summary>
/// Clears a buffer at a given address with the specified value.
/// </summary>
/// <remarks>
/// Both the address and size must be aligned to 4 bytes.
/// </remarks>
/// <param name="memoryManager">GPU memory manager where the buffer is mapped</param>
/// <param name="gpuVa">GPU virtual address of the region to clear</param>
/// <param name="size">Number of bytes to clear</param>
/// <param name="value">Value to be written into the buffer</param>
public void ClearBuffer(MemoryManager memoryManager, GpuVa gpuVa, ulong size, uint value)
{
    ulong address = TranslateAndCreateBuffer(memoryManager, gpuVa.Pack(), size);

    Buffer buffer = GetBuffer(address, size);

    int offset = (int)(address - buffer.Address);

    _context.Renderer.Pipeline.ClearBuffer(buffer.Handle, offset, (int)size, value);

    buffer.SignalModified(address, size);
}
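// The 4-byte alignment requirement above is consistent with the clear being expressed in
// 32-bit words: the host clear presumably writes the uint value once per word of the
// target range. Below is a minimal CPU-side sketch of that fill semantic, for illustration
// only (not the host pipeline path; it assumes the usual MemoryMarshal/Span usings).
static void FillWords(Span<byte> destination, uint value)
{
    // Reinterpret the byte range as 32-bit words; valid because address and size are 4-byte aligned.
    Span<uint> words = MemoryMarshal.Cast<byte, uint>(destination);

    words.Fill(value);
}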
/// <summary>
/// Updates host vertex buffer bindings based on guest GPU state.
/// </summary>
/// <param name="state">Current GPU state</param>
private void UpdateVertexBufferState(GpuState state)
{
    _isAnyVbInstanced = false;

    for (int index = 0; index < Constants.TotalVertexBuffers; index++)
    {
        var vertexBuffer = state.Get<VertexBufferState>(MethodOffset.VertexBufferState, index);

        if (!vertexBuffer.UnpackEnable())
        {
            BufferManager.SetVertexBuffer(index, 0, 0, 0, 0);

            continue;
        }

        GpuVa endAddress = state.Get<GpuVa>(MethodOffset.VertexBufferEndAddress, index);

        ulong address = vertexBuffer.Address.Pack();

        int stride = vertexBuffer.UnpackStride();

        bool instanced = state.Get<Boolean32>(MethodOffset.VertexBufferInstanced + index);

        int divisor = instanced ? vertexBuffer.Divisor : 0;

        _isAnyVbInstanced |= divisor != 0;

        ulong size;

        if (_ibStreamer.HasInlineIndexData || _drawIndexed || stride == 0 || instanced)
        {
            // This size may be (much) larger than the real vertex buffer size.
            // Avoid calculating it this way, unless we don't have any other option.
            size = endAddress.Pack() - address + 1;
        }
        else
        {
            // For non-indexed draws, we can guess the size from the vertex count
            // and stride.
            int firstInstance = state.Get<int>(MethodOffset.FirstInstance);

            var drawState = state.Get<VertexBufferDrawState>(MethodOffset.VertexBufferDrawState);

            size = (ulong)((firstInstance + drawState.First + drawState.Count) * stride);
        }

        BufferManager.SetVertexBuffer(index, address, size, stride, divisor);
    }
}
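// Worked example of the non-indexed size estimate above (hypothetical numbers, not from
// any real capture): with firstInstance = 0, drawState.First = 4, drawState.Count = 100
// and stride = 16 bytes, the draw can reference vertices [0, 4 + 100), so the binding
// must cover (0 + 4 + 100) * 16 = 1664 bytes from the vertex buffer start address.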
/// <summary>
/// Updates host vertex buffer bindings based on guest GPU state.
/// </summary>
private void UpdateVertexBufferState()
{
    _drawState.IsAnyVbInstanced = false;

    for (int index = 0; index < Constants.TotalVertexBuffers; index++)
    {
        var vertexBuffer = _state.State.VertexBufferState[index];

        if (!vertexBuffer.UnpackEnable())
        {
            _channel.BufferManager.SetVertexBuffer(index, 0, 0, 0, 0);

            continue;
        }

        GpuVa endAddress = _state.State.VertexBufferEndAddress[index];

        ulong address = vertexBuffer.Address.Pack();

        int stride = vertexBuffer.UnpackStride();

        bool instanced = _state.State.VertexBufferInstanced[index];

        int divisor = instanced ? vertexBuffer.Divisor : 0;

        _drawState.IsAnyVbInstanced |= divisor != 0;

        ulong size;

        if (_drawState.IbStreamer.HasInlineIndexData || _drawState.DrawIndexed || stride == 0 || instanced)
        {
            // This size may be (much) larger than the real vertex buffer size.
            // Avoid calculating it this way, unless we don't have any other option.
            size = endAddress.Pack() - address + 1;
        }
        else
        {
            // For non-indexed draws, we can guess the size from the vertex count
            // and stride.
            int firstInstance = (int)_state.State.FirstInstance;

            var drawState = _state.State.VertexBufferDrawState;

            size = (ulong)((firstInstance + drawState.First + drawState.Count) * stride);
        }

        _channel.BufferManager.SetVertexBuffer(index, address, size, stride, divisor);
    }
}
/// <summary>
/// Checks if draws and clears should be performed, according
/// to currently set conditional rendering conditions.
/// </summary>
/// <param name="context">GPU context</param>
/// <param name="memoryManager">Memory manager bound to the channel currently executing</param>
/// <param name="address">Conditional rendering buffer address</param>
/// <param name="condition">Conditional rendering condition</param>
/// <returns>True if rendering is enabled, false otherwise</returns>
public static ConditionalRenderEnabled GetRenderEnable(GpuContext context, MemoryManager memoryManager, GpuVa address, Condition condition)
{
    switch (condition)
    {
        case Condition.Always:
            return ConditionalRenderEnabled.True;
        case Condition.Never:
            return ConditionalRenderEnabled.False;
        case Condition.ResultNonZero:
            return CounterNonZero(context, memoryManager, address.Pack());
        case Condition.Equal:
            return CounterCompare(context, memoryManager, address.Pack(), true);
        case Condition.NotEqual:
            return CounterCompare(context, memoryManager, address.Pack(), false);
    }

    Logger.Warning?.Print(LogClass.Gpu, $"Invalid conditional render condition \"{condition}\".");

    return ConditionalRenderEnabled.True;
}
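// Hypothetical caller sketch showing how the result above might gate a draw or clear.
// The renderEnableAddress and renderEnableCondition variables are placeholders for
// wherever the guest state stores the conditional rendering registers; only a False
// result skips the work, any other result lets it proceed.
ConditionalRenderEnabled renderEnable = GetRenderEnable(context, memoryManager, renderEnableAddress, renderEnableCondition);

if (renderEnable == ConditionalRenderEnabled.False)
{
    // The counter comparison was resolved on the CPU and evaluated to false: skip the work.
    return;
}

// Otherwise issue the draw or clear normally (the comparison may instead have been
// deferred to the host, in which case the host discards the work itself).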
/// <summary>
/// Copies buffer data from a given address to another.
/// </summary>
/// <remarks>
/// This does a GPU side copy.
/// </remarks>
/// <param name="srcVa">GPU virtual address of the copy source</param>
/// <param name="dstVa">GPU virtual address of the copy destination</param>
/// <param name="size">Size in bytes of the copy</param>
public void CopyBuffer(GpuVa srcVa, GpuVa dstVa, ulong size)
{
    ulong srcAddress = TranslateAndCreateBuffer(srcVa.Pack(), size);
    ulong dstAddress = TranslateAndCreateBuffer(dstVa.Pack(), size);

    Buffer srcBuffer = GetBuffer(srcAddress, size);
    Buffer dstBuffer = GetBuffer(dstAddress, size);

    int srcOffset = (int)(srcAddress - srcBuffer.Address);
    int dstOffset = (int)(dstAddress - dstBuffer.Address);

    srcBuffer.HostBuffer.CopyTo(
        dstBuffer.HostBuffer,
        srcOffset,
        dstOffset,
        (int)size);

    dstBuffer.Flush(dstAddress, size);
}
/// <summary>
/// Copies buffer data from a given address to another.
/// </summary>
/// <remarks>
/// This does a GPU side copy.
/// </remarks>
/// <param name="srcVa">GPU virtual address of the copy source</param>
/// <param name="dstVa">GPU virtual address of the copy destination</param>
/// <param name="size">Size in bytes of the copy</param>
public void CopyBuffer(GpuVa srcVa, GpuVa dstVa, ulong size)
{
    ulong srcAddress = TranslateAndCreateBuffer(srcVa.Pack(), size);
    ulong dstAddress = TranslateAndCreateBuffer(dstVa.Pack(), size);

    Buffer srcBuffer = GetBuffer(srcAddress, size);
    Buffer dstBuffer = GetBuffer(dstAddress, size);

    int srcOffset = (int)(srcAddress - srcBuffer.Address);
    int dstOffset = (int)(dstAddress - dstBuffer.Address);

    _context.Renderer.Pipeline.CopyBuffer(
        srcBuffer.Handle,
        dstBuffer.Handle,
        srcOffset,
        dstOffset,
        (int)size);

    dstBuffer.Flush(dstAddress, size);
}
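// Worked example of the offset math shared by the copy methods above (hypothetical
// addresses): if GetBuffer returns a buffer whose Address is 0x10000 and srcAddress is
// 0x10040, then srcOffset = (int)(0x10040 - 0x10000) = 0x40, so the host copy starts
// 64 bytes into that buffer's storage; the destination offset is computed the same way.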
/// <summary>
/// Dispatches compute work.
/// </summary>
/// <param name="state">Current GPU state</param>
/// <param name="argument">Method call argument</param>
public void Dispatch(GpuState state, int argument)
{
    FlushUboDirty();

    uint qmdAddress = (uint)state.Get<int>(MethodOffset.DispatchParamsAddress);

    var qmd = _context.MemoryManager.Read<ComputeQmd>((ulong)qmdAddress << 8);

    GpuVa shaderBaseAddress = state.Get<GpuVa>(MethodOffset.ShaderBaseAddress);

    ulong shaderGpuVa = shaderBaseAddress.Pack() + (uint)qmd.ProgramOffset;

    int localMemorySize = qmd.ShaderLocalMemoryLowSize + qmd.ShaderLocalMemoryHighSize;

    int sharedMemorySize = Math.Min(qmd.SharedMemorySize, _context.Capabilities.MaximumComputeSharedMemorySize);

    for (int index = 0; index < Constants.TotalCpUniformBuffers; index++)
    {
        if (!qmd.ConstantBufferValid(index))
        {
            continue;
        }

        ulong gpuVa = (uint)qmd.ConstantBufferAddrLower(index) | (ulong)qmd.ConstantBufferAddrUpper(index) << 32;
        ulong size = (ulong)qmd.ConstantBufferSize(index);

        BufferManager.SetComputeUniformBuffer(index, gpuVa, size);
    }

    ShaderBundle cs = ShaderCache.GetComputeShader(
        state,
        shaderGpuVa,
        qmd.CtaThreadDimension0,
        qmd.CtaThreadDimension1,
        qmd.CtaThreadDimension2,
        localMemorySize,
        sharedMemorySize);

    _context.Renderer.Pipeline.SetProgram(cs.HostProgram);

    var samplerPool = state.Get<PoolState>(MethodOffset.SamplerPoolState);
    var texturePool = state.Get<PoolState>(MethodOffset.TexturePoolState);

    TextureManager.SetComputeSamplerPool(samplerPool.Address.Pack(), samplerPool.MaximumId, qmd.SamplerIndex);
    TextureManager.SetComputeTexturePool(texturePool.Address.Pack(), texturePool.MaximumId);

    TextureManager.SetComputeTextureBufferIndex(state.Get<int>(MethodOffset.TextureBufferIndex));

    ShaderProgramInfo info = cs.Shaders[0].Info;

    for (int index = 0; index < info.CBuffers.Count; index++)
    {
        BufferDescriptor cb = info.CBuffers[index];

        // NVN uses the "hardware" constant buffer for anything that is less than 8,
        // and those are already bound above.
        // Anything greater than or equal to 8 uses the emulated constant buffers.
        // They are emulated using global memory loads.
        if (cb.Slot < 8)
        {
            continue;
        }

        ulong cbDescAddress = BufferManager.GetComputeUniformBufferAddress(0);

        int cbDescOffset = 0x260 + (cb.Slot - 8) * 0x10;

        cbDescAddress += (ulong)cbDescOffset;

        SbDescriptor cbDescriptor = _context.PhysicalMemory.Read<SbDescriptor>(cbDescAddress);

        BufferManager.SetComputeUniformBuffer(cb.Slot, cbDescriptor.PackAddress(), (uint)cbDescriptor.Size);
    }

    for (int index = 0; index < info.SBuffers.Count; index++)
    {
        BufferDescriptor sb = info.SBuffers[index];

        ulong sbDescAddress = BufferManager.GetComputeUniformBufferAddress(0);

        int sbDescOffset = 0x310 + sb.Slot * 0x10;

        sbDescAddress += (ulong)sbDescOffset;

        SbDescriptor sbDescriptor = _context.PhysicalMemory.Read<SbDescriptor>(sbDescAddress);

        BufferManager.SetComputeStorageBuffer(sb.Slot, sbDescriptor.PackAddress(), (uint)sbDescriptor.Size, sb.Flags);
    }

    BufferManager.SetComputeStorageBufferBindings(info.SBuffers);
    BufferManager.SetComputeUniformBufferBindings(info.CBuffers);

    var textureBindings = new TextureBindingInfo[info.Textures.Count];

    for (int index = 0; index < info.Textures.Count; index++)
    {
        var descriptor = info.Textures[index];

        Target target = ShaderTexture.GetTarget(descriptor.Type);

        textureBindings[index] = new TextureBindingInfo(
            target,
            descriptor.Binding,
            descriptor.CbufSlot,
            descriptor.HandleIndex,
            descriptor.Flags);
    }

    TextureManager.SetComputeTextures(textureBindings);

    var imageBindings = new TextureBindingInfo[info.Images.Count];

    for (int index = 0; index < info.Images.Count; index++)
    {
        var descriptor = info.Images[index];

        Target target = ShaderTexture.GetTarget(descriptor.Type);
        Format format = ShaderTexture.GetFormat(descriptor.Format);

        imageBindings[index] = new TextureBindingInfo(
            target,
            format,
            descriptor.Binding,
            descriptor.CbufSlot,
            descriptor.HandleIndex,
            descriptor.Flags);
    }

    TextureManager.SetComputeImages(imageBindings);

    TextureManager.CommitComputeBindings();
    BufferManager.CommitComputeBindings();

    _context.Renderer.Pipeline.DispatchCompute(
        qmd.CtaRasterWidth,
        qmd.CtaRasterHeight,
        qmd.CtaRasterDepth);

    _forceShaderUpdate = true;
}
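// Both descriptor loops above read 16-byte records out of the driver-reserved uniform
// buffer 0: constant buffer descriptors starting at offset 0x260 and storage buffer
// descriptors starting at offset 0x310, one 0x10-byte entry per slot. A minimal sketch
// of what such a record could look like (SbDescriptorSketch is a stand-in inferred from
// how PackAddress and Size are used above, not the engine's actual SbDescriptor definition):
struct SbDescriptorSketch
{
    public uint AddressLow;   // bits 0-31 of the buffer's GPU virtual address
    public uint AddressHigh;  // bits 32-63 of the buffer's GPU virtual address
    public int Size;          // buffer size in bytes
    public int Padding;       // fourth word of the 16-byte record, unused here

    public ulong PackAddress()
    {
        return AddressLow | ((ulong)AddressHigh << 32);
    }
}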
/// <summary>
/// Dispatches compute work.
/// </summary>
/// <param name="state">Current GPU state</param>
/// <param name="argument">Method call argument</param>
public void Dispatch(GpuState state, int argument)
{
    uint qmdAddress = (uint)state.Get<int>(MethodOffset.DispatchParamsAddress);

    var qmd = _context.MemoryAccessor.Read<ComputeQmd>((ulong)qmdAddress << 8);

    GpuVa shaderBaseAddress = state.Get<GpuVa>(MethodOffset.ShaderBaseAddress);

    ulong shaderGpuVa = shaderBaseAddress.Pack() + (uint)qmd.ProgramOffset;

    int localMemorySize = qmd.ShaderLocalMemoryLowSize + qmd.ShaderLocalMemoryHighSize;

    int sharedMemorySize = Math.Min(qmd.SharedMemorySize, _context.Capabilities.MaximumComputeSharedMemorySize);

    Shader.Shader cs = ShaderCache.GetComputeShader(
        shaderGpuVa,
        qmd.CtaThreadDimension0,
        qmd.CtaThreadDimension1,
        qmd.CtaThreadDimension2,
        localMemorySize,
        sharedMemorySize);

    CurrentCpMeta = cs.Meta;

    _context.Renderer.Pipeline.SetProgram(cs.HostProgram);

    var samplerPool = state.Get<PoolState>(MethodOffset.SamplerPoolState);

    TextureManager.SetComputeSamplerPool(samplerPool.Address.Pack(), samplerPool.MaximumId, qmd.SamplerIndex);

    var texturePool = state.Get<PoolState>(MethodOffset.TexturePoolState);

    TextureManager.SetComputeTexturePool(texturePool.Address.Pack(), texturePool.MaximumId);

    TextureManager.SetComputeTextureBufferIndex(state.Get<int>(MethodOffset.TextureBufferIndex));

    ShaderProgramInfo info = cs.Meta.Info[0];

    uint sbEnableMask = 0;
    uint ubEnableMask = 0;

    for (int index = 0; index < Constants.TotalCpUniformBuffers; index++)
    {
        if (!qmd.ConstantBufferValid(index))
        {
            continue;
        }

        ubEnableMask |= 1u << index;

        ulong gpuVa = (uint)qmd.ConstantBufferAddrLower(index) | (ulong)qmd.ConstantBufferAddrUpper(index) << 32;
        ulong size = (ulong)qmd.ConstantBufferSize(index);

        BufferManager.SetComputeUniformBuffer(index, gpuVa, size);
    }

    for (int index = 0; index < info.CBuffers.Count; index++)
    {
        BufferDescriptor cb = info.CBuffers[index];

        // NVN uses the "hardware" constant buffer for anything that is less than 8,
        // and those are already bound above.
        // Anything greater than or equal to 8 uses the emulated constant buffers.
        // They are emulated using global memory loads.
        if (cb.Slot < 8)
        {
            continue;
        }

        ubEnableMask |= 1u << cb.Slot;

        ulong cbDescAddress = BufferManager.GetComputeUniformBufferAddress(0);

        int cbDescOffset = 0x260 + cb.Slot * 0x10;

        cbDescAddress += (ulong)cbDescOffset;

        ReadOnlySpan<byte> cbDescriptorData = _context.PhysicalMemory.GetSpan(cbDescAddress, 0x10);

        SbDescriptor cbDescriptor = MemoryMarshal.Cast<byte, SbDescriptor>(cbDescriptorData)[0];

        BufferManager.SetComputeUniformBuffer(cb.Slot, cbDescriptor.PackAddress(), (uint)cbDescriptor.Size);
    }

    for (int index = 0; index < info.SBuffers.Count; index++)
    {
        BufferDescriptor sb = info.SBuffers[index];

        sbEnableMask |= 1u << sb.Slot;

        ulong sbDescAddress = BufferManager.GetComputeUniformBufferAddress(0);

        int sbDescOffset = 0x310 + sb.Slot * 0x10;

        sbDescAddress += (ulong)sbDescOffset;

        ReadOnlySpan<byte> sbDescriptorData = _context.PhysicalMemory.GetSpan(sbDescAddress, 0x10);

        SbDescriptor sbDescriptor = MemoryMarshal.Cast<byte, SbDescriptor>(sbDescriptorData)[0];

        BufferManager.SetComputeStorageBuffer(sb.Slot, sbDescriptor.PackAddress(), (uint)sbDescriptor.Size);
    }

    ubEnableMask = 0;

    for (int index = 0; index < info.CBuffers.Count; index++)
    {
        ubEnableMask |= 1u << info.CBuffers[index].Slot;
    }

    BufferManager.SetComputeStorageBufferEnableMask(sbEnableMask);
    BufferManager.SetComputeUniformBufferEnableMask(ubEnableMask);

    var textureBindings = new TextureBindingInfo[info.Textures.Count];

    for (int index = 0; index < info.Textures.Count; index++)
    {
        var descriptor = info.Textures[index];

        Target target = GetTarget(descriptor.Type);

        if (descriptor.IsBindless)
        {
            textureBindings[index] = new TextureBindingInfo(target, descriptor.CbufOffset, descriptor.CbufSlot);
        }
        else
        {
            textureBindings[index] = new TextureBindingInfo(target, descriptor.HandleIndex);
        }
    }

    TextureManager.SetComputeTextures(textureBindings);

    var imageBindings = new TextureBindingInfo[info.Images.Count];

    for (int index = 0; index < info.Images.Count; index++)
    {
        var descriptor = info.Images[index];

        Target target = GetTarget(descriptor.Type);

        imageBindings[index] = new TextureBindingInfo(target, descriptor.HandleIndex);
    }

    TextureManager.SetComputeImages(imageBindings);

    BufferManager.CommitComputeBindings();
    TextureManager.CommitComputeBindings();

    _context.Renderer.Pipeline.DispatchCompute(
        qmd.CtaRasterWidth,
        qmd.CtaRasterHeight,
        qmd.CtaRasterDepth);

    UpdateShaderState(state);
}
/// <summary>
/// Dispatches compute work.
/// </summary>
/// <param name="state">Current GPU state</param>
/// <param name="argument">Method call argument</param>
public void Dispatch(GpuState state, int argument)
{
    uint dispatchParamsAddress = (uint)state.Get<int>(MethodOffset.DispatchParamsAddress);

    var dispatchParams = _context.MemoryAccessor.Read<ComputeParams>((ulong)dispatchParamsAddress << 8);

    GpuVa shaderBaseAddress = state.Get<GpuVa>(MethodOffset.ShaderBaseAddress);

    ulong shaderGpuVa = shaderBaseAddress.Pack() + (uint)dispatchParams.ShaderOffset;

    // Note: A size of 0 is also invalid, the size must be at least 1.
    int sharedMemorySize = Math.Clamp(dispatchParams.SharedMemorySize & 0xffff, 1, _context.Capabilities.MaximumComputeSharedMemorySize);

    ComputeShader cs = ShaderCache.GetComputeShader(
        shaderGpuVa,
        sharedMemorySize,
        dispatchParams.UnpackBlockSizeX(),
        dispatchParams.UnpackBlockSizeY(),
        dispatchParams.UnpackBlockSizeZ());

    _context.Renderer.Pipeline.SetProgram(cs.HostProgram);

    var samplerPool = state.Get<PoolState>(MethodOffset.SamplerPoolState);

    TextureManager.SetComputeSamplerPool(samplerPool.Address.Pack(), samplerPool.MaximumId, dispatchParams.SamplerIndex);

    var texturePool = state.Get<PoolState>(MethodOffset.TexturePoolState);

    TextureManager.SetComputeTexturePool(texturePool.Address.Pack(), texturePool.MaximumId);

    TextureManager.SetComputeTextureBufferIndex(state.Get<int>(MethodOffset.TextureBufferIndex));

    ShaderProgramInfo info = cs.Shader.Program.Info;

    uint sbEnableMask = 0;
    uint ubEnableMask = dispatchParams.UnpackUniformBuffersEnableMask();

    for (int index = 0; index < dispatchParams.UniformBuffers.Length; index++)
    {
        if ((ubEnableMask & (1 << index)) == 0)
        {
            continue;
        }

        ulong gpuVa = dispatchParams.UniformBuffers[index].PackAddress();
        ulong size = dispatchParams.UniformBuffers[index].UnpackSize();

        BufferManager.SetComputeUniformBuffer(index, gpuVa, size);
    }

    for (int index = 0; index < info.SBuffers.Count; index++)
    {
        BufferDescriptor sb = info.SBuffers[index];

        sbEnableMask |= 1u << sb.Slot;

        ulong sbDescAddress = BufferManager.GetComputeUniformBufferAddress(0);

        int sbDescOffset = 0x310 + sb.Slot * 0x10;

        sbDescAddress += (ulong)sbDescOffset;

        ReadOnlySpan<byte> sbDescriptorData = _context.PhysicalMemory.GetSpan(sbDescAddress, 0x10);

        SbDescriptor sbDescriptor = MemoryMarshal.Cast<byte, SbDescriptor>(sbDescriptorData)[0];

        BufferManager.SetComputeStorageBuffer(sb.Slot, sbDescriptor.PackAddress(), (uint)sbDescriptor.Size);
    }

    ubEnableMask = 0;

    for (int index = 0; index < info.CBuffers.Count; index++)
    {
        ubEnableMask |= 1u << info.CBuffers[index].Slot;
    }

    BufferManager.SetComputeStorageBufferEnableMask(sbEnableMask);
    BufferManager.SetComputeUniformBufferEnableMask(ubEnableMask);

    var textureBindings = new TextureBindingInfo[info.Textures.Count];

    for (int index = 0; index < info.Textures.Count; index++)
    {
        var descriptor = info.Textures[index];

        Target target = GetTarget(descriptor.Type);

        if (descriptor.IsBindless)
        {
            textureBindings[index] = new TextureBindingInfo(target, descriptor.CbufOffset, descriptor.CbufSlot);
        }
        else
        {
            textureBindings[index] = new TextureBindingInfo(target, descriptor.HandleIndex);
        }
    }

    TextureManager.SetComputeTextures(textureBindings);

    var imageBindings = new TextureBindingInfo[info.Images.Count];

    for (int index = 0; index < info.Images.Count; index++)
    {
        var descriptor = info.Images[index];

        Target target = GetTarget(descriptor.Type);

        imageBindings[index] = new TextureBindingInfo(target, descriptor.HandleIndex);
    }

    TextureManager.SetComputeImages(imageBindings);

    BufferManager.CommitComputeBindings();
    TextureManager.CommitComputeBindings();

    _context.Renderer.Pipeline.DispatchCompute(
        dispatchParams.UnpackGridSizeX(),
        dispatchParams.UnpackGridSizeY(),
        dispatchParams.UnpackGridSizeZ());

    UpdateShaderState(state);
}
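// Note on the address computation shared by the Dispatch variants above: the
// DispatchParamsAddress register value is shifted left by 8 bits before the parameter
// structure is read, i.e. the register appears to hold the GPU address in 256-byte units:
//
//     ulong paramsGpuVa = (ulong)dispatchParamsAddress << 8; // register value * 0x100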
/// <summary>
/// Updates host vertex buffer bindings based on guest GPU state.
/// </summary>
private void UpdateVertexBufferState()
{
    _drawState.IsAnyVbInstanced = false;

    for (int index = 0; index < Constants.TotalVertexBuffers; index++)
    {
        var vertexBuffer = _state.State.VertexBufferState[index];

        if (!vertexBuffer.UnpackEnable())
        {
            _channel.BufferManager.SetVertexBuffer(index, 0, 0, 0, 0);

            continue;
        }

        GpuVa endAddress = _state.State.VertexBufferEndAddress[index];

        ulong address = vertexBuffer.Address.Pack();

        int stride = vertexBuffer.UnpackStride();

        bool instanced = _state.State.VertexBufferInstanced[index];

        int divisor = instanced ? vertexBuffer.Divisor : 0;

        _drawState.IsAnyVbInstanced |= divisor != 0;

        ulong size;

        if (_drawState.IbStreamer.HasInlineIndexData || _drawState.DrawIndexed || stride == 0 || instanced)
        {
            // This size may be (much) larger than the real vertex buffer size.
            // Avoid calculating it this way, unless we don't have any other option.
            ulong vbSizeMax = endAddress.Pack() - address + 1;

            int firstIndex = _drawState.FirstIndex;
            int indexCount = _drawState.IndexCount;

            bool ibCountingProfitable = GraphicsConfig.EnableIndexedVbSizeDetection &&
                IbUtils.IsIbCountingProfitable(vbSizeMax, indexCount);

            if (ibCountingProfitable && !_drawState.IbStreamer.HasInlineIndexData && _drawState.DrawIndexed && stride != 0)
            {
                IndexType ibType = _state.State.IndexBufferState.Type;
                ulong ibGpuVa = _state.State.IndexBufferState.Address.Pack();

                ulong vertexCount = IbUtils.GetVertexCount(_channel.MemoryManager, ibType, ibGpuVa, firstIndex, indexCount);

                size = Math.Min(vertexCount * (ulong)stride, vbSizeMax);
            }
            else
            {
                size = vbSizeMax;
            }
        }
        else
        {
            // For non-indexed draws, we can guess the size from the vertex count
            // and stride.
            int firstInstance = (int)_state.State.FirstInstance;

            var drawState = _state.State.VertexBufferDrawState;

            size = (ulong)((firstInstance + drawState.First + drawState.Count) * stride);
        }

        _channel.BufferManager.SetVertexBuffer(index, address, size, stride, divisor);
    }
}
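// The GraphicsConfig.EnableIndexedVbSizeDetection path above bounds the vertex buffer
// size by the highest vertex actually referenced by the draw's index range instead of
// trusting the (often huge) start/end address range. A minimal sketch of that idea,
// assuming the indices for the draw range have already been read into a span
// (IbUtils.GetVertexCount itself presumably also handles the different index formats and
// reads from guest memory; this is an illustration, not the real helper):
static ulong GetVertexCountSketch(ReadOnlySpan<uint> indices)
{
    uint maxIndex = 0;

    foreach (uint index in indices)
    {
        maxIndex = Math.Max(maxIndex, index);
    }

    // The vertex buffer must cover vertices [0, maxIndex], that is, maxIndex + 1 vertices.
    return (ulong)maxIndex + 1;
}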