Ryujinx/Ryujinx.Graphics.Gpu/Engine/Compute.cs

using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Gpu.Image;
using Ryujinx.Graphics.Gpu.Shader;
using Ryujinx.Graphics.Gpu.State;
using Ryujinx.Graphics.Shader;
using System;
using System.Runtime.InteropServices;

namespace Ryujinx.Graphics.Gpu.Engine
{
    partial class Methods
    {
        /// <summary>
        /// Dispatches compute work.
        /// </summary>
        /// <param name="state">Current GPU state</param>
        /// <param name="argument">Method call argument</param>
        public void Dispatch(GpuState state, int argument)
        {
            uint qmdAddress = (uint)state.Get<int>(MethodOffset.DispatchParamsAddress);
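
            // The dispatch parameters (QMD) are read from GPU memory; the register holds
            // the address in 256-byte units, hence the shift left by 8 below.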
            var qmd = _context.MemoryAccessor.Read<ComputeQmd>((ulong)qmdAddress << 8);

            GpuVa shaderBaseAddress = state.Get<GpuVa>(MethodOffset.ShaderBaseAddress);

            ulong shaderGpuVa = shaderBaseAddress.Pack() + (uint)qmd.ProgramOffset;

            int localMemorySize = qmd.ShaderLocalMemoryLowSize + qmd.ShaderLocalMemoryHighSize;

            int sharedMemorySize = Math.Min(qmd.SharedMemorySize, _context.Capabilities.MaximumComputeSharedMemorySize);
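
            // Fetch the compute shader from the cache (translating it if needed), keyed on
            // its GPU address, workgroup dimensions and local/shared memory sizes.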
            ComputeShader cs = ShaderCache.GetComputeShader(
                shaderGpuVa,
                qmd.CtaThreadDimension0,
                qmd.CtaThreadDimension1,
                qmd.CtaThreadDimension2,
                localMemorySize,
                sharedMemorySize);

            _context.Renderer.Pipeline.SetProgram(cs.HostProgram);
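
            // Point the texture manager at the sampler and texture pools specified by the
            // current GPU state for this compute dispatch.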
            var samplerPool = state.Get<PoolState>(MethodOffset.SamplerPoolState);

            TextureManager.SetComputeSamplerPool(samplerPool.Address.Pack(), samplerPool.MaximumId, qmd.SamplerIndex);

            var texturePool = state.Get<PoolState>(MethodOffset.TexturePoolState);

            TextureManager.SetComputeTexturePool(texturePool.Address.Pack(), texturePool.MaximumId);

            TextureManager.SetComputeTextureBufferIndex(state.Get<int>(MethodOffset.TextureBufferIndex));

            ShaderProgramInfo info = cs.Shader.Program.Info;
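
            // Track which storage and uniform (constant) buffer slots get bound, so the
            // corresponding enable masks can be set before dispatching.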
            uint sbEnableMask = 0;
            uint ubEnableMask = 0;
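
            // Bind the constant buffers that the QMD marks as valid; their addresses and
            // sizes come directly from the QMD.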
            for (int index = 0; index < Constants.TotalCpUniformBuffers; index++)
            {
                if (!qmd.ConstantBufferValid(index))
                {
                    continue;
                }

                ubEnableMask |= 1u << index;

                ulong gpuVa = (uint)qmd.ConstantBufferAddrLower(index) | (ulong)qmd.ConstantBufferAddrUpper(index) << 32;
                ulong size = (ulong)qmd.ConstantBufferSize(index);

                BufferManager.SetComputeUniformBuffer(index, gpuVa, size);
            }

            for (int index = 0; index < info.CBuffers.Count; index++)
            {
                BufferDescriptor cb = info.CBuffers[index];

                // NVN uses the "hardware" constant buffer for anything that is less than 8,
                // and those are already bound above.
                // Anything greater than or equal to 8 uses the emulated constant buffers.
                // They are emulated using global memory loads.
                if (cb.Slot < 8)
                {
                    continue;
                }

                ubEnableMask |= 1u << cb.Slot;
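
                // The emulated buffer's address and size are read from a descriptor stored
                // inside constant buffer 0, at offset 0x260 + slot * 0x10.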
                ulong cbDescAddress = BufferManager.GetComputeUniformBufferAddress(0);

                int cbDescOffset = 0x260 + cb.Slot * 0x10;

                cbDescAddress += (ulong)cbDescOffset;

                ReadOnlySpan<byte> cbDescriptorData = _context.PhysicalMemory.GetSpan(cbDescAddress, 0x10);

                SbDescriptor cbDescriptor = MemoryMarshal.Cast<byte, SbDescriptor>(cbDescriptorData)[0];

                BufferManager.SetComputeUniformBuffer(cb.Slot, cbDescriptor.PackAddress(), (uint)cbDescriptor.Size);
            }
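
            // Storage buffers are handled the same way: each slot's address and size come
            // from a descriptor in constant buffer 0, at offset 0x310 + slot * 0x10.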
            for (int index = 0; index < info.SBuffers.Count; index++)
            {
                BufferDescriptor sb = info.SBuffers[index];

                sbEnableMask |= 1u << sb.Slot;

                ulong sbDescAddress = BufferManager.GetComputeUniformBufferAddress(0);

                int sbDescOffset = 0x310 + sb.Slot * 0x10;

                sbDescAddress += (ulong)sbDescOffset;

                ReadOnlySpan<byte> sbDescriptorData = _context.PhysicalMemory.GetSpan(sbDescAddress, 0x10);

                SbDescriptor sbDescriptor = MemoryMarshal.Cast<byte, SbDescriptor>(sbDescriptorData)[0];

                BufferManager.SetComputeStorageBuffer(sb.Slot, sbDescriptor.PackAddress(), (uint)sbDescriptor.Size);
            }
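
            // The uniform buffer enable mask is rebuilt from the shader's reflected constant
            // buffer list, so every slot the shader may read is enabled even if the QMD did
            // not mark it as valid.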
            ubEnableMask = 0;

            for (int index = 0; index < info.CBuffers.Count; index++)
            {
                ubEnableMask |= 1u << info.CBuffers[index].Slot;
            }

            BufferManager.SetComputeStorageBufferEnableMask(sbEnableMask);
            BufferManager.SetComputeUniformBufferEnableMask(ubEnableMask);
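
            // Build texture bindings from the shader's reflected texture descriptors.
            // Bindless textures take their handle from a constant buffer (slot and offset),
            // while regular textures use a fixed handle index.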
            var textureBindings = new TextureBindingInfo[info.Textures.Count];

            for (int index = 0; index < info.Textures.Count; index++)
            {
                var descriptor = info.Textures[index];

                Target target = GetTarget(descriptor.Type);

                if (descriptor.IsBindless)
                {
                    textureBindings[index] = new TextureBindingInfo(target, descriptor.CbufOffset, descriptor.CbufSlot);
                }
                else
                {
                    textureBindings[index] = new TextureBindingInfo(target, descriptor.HandleIndex);
                }
            }

            TextureManager.SetComputeTextures(textureBindings);
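
            // Storage image bindings are built the same way, but always use a fixed handle index.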
            var imageBindings = new TextureBindingInfo[info.Images.Count];

            for (int index = 0; index < info.Images.Count; index++)
            {
                var descriptor = info.Images[index];

                Target target = GetTarget(descriptor.Type);

                imageBindings[index] = new TextureBindingInfo(target, descriptor.HandleIndex);
            }

            TextureManager.SetComputeImages(imageBindings);
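
            // Flush all pending buffer and texture binding changes to the host renderer.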
            BufferManager.CommitComputeBindings();
            TextureManager.CommitComputeBindings();
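
            // Dispatch using the grid (CTA raster) dimensions taken from the QMD.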
            _context.Renderer.Pipeline.DispatchCompute(
                qmd.CtaRasterWidth,
                qmd.CtaRasterHeight,
                qmd.CtaRasterDepth);

            UpdateShaderState(state);
        }
    }
}