Mirror of https://git.suyu.dev/suyu/suyu, synced 2024-11-01 12:57:52 +00:00

Merge pull request #3506 from namkazt/patch-9
shader_decode: Implement partial ATOM/ATOMS instr
Commit c19425ed69
6 changed files with 120 additions and 37 deletions
@@ -231,18 +231,6 @@ enum class AtomicOp : u64 {
     Or = 6,
     Xor = 7,
     Exch = 8,
-};
-
-enum class GlobalAtomicOp : u64 {
-    Add = 0,
-    Min = 1,
-    Max = 2,
-    Inc = 3,
-    Dec = 4,
-    And = 5,
-    Or = 6,
-    Xor = 7,
-    Exch = 8,
     SafeAdd = 10,
 };
 
@@ -1001,7 +989,7 @@ union Instruction {
     } stg;
 
     union {
-        BitField<52, 4, GlobalAtomicOp> operation;
+        BitField<52, 4, AtomicOp> operation;
         BitField<49, 3, GlobalAtomicType> type;
         BitField<28, 20, s64> offset;
     } atom;

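The two hunks above (presumably the shader bytecode header) fold the old GlobalAtomicOp encoding into AtomicOp, so the `atom` field of the instruction word now decodes directly to AtomicOp. A minimal, self-contained sketch of that decoding, using plain shifts instead of the project's BitField template; the bit positions are taken from the diff, everything else (names, signatures) is illustrative only:

    #include <cstdint>

    // Values as listed in the merged AtomicOp enum above.
    enum class AtomicOp : std::uint64_t {
        Add = 0, Min = 1, Max = 2, Inc = 3, Dec = 4,
        And = 5, Or = 6, Xor = 7, Exch = 8, SafeAdd = 10,
    };

    // BitField<52, 4, AtomicOp>: four bits starting at bit 52 select the operation.
    constexpr AtomicOp DecodeAtomOperation(std::uint64_t insn) {
        return static_cast<AtomicOp>((insn >> 52) & 0xF);
    }
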
@@ -2114,6 +2114,10 @@ private:
 
     template <const std::string_view& opname, Type type>
     Expression Atomic(Operation operation) {
+        if ((opname == Func::Min || opname == Func::Max) && type == Type::Int) {
+            UNIMPLEMENTED_MSG("Unimplemented Min & Max for atomic operations");
+            return {};
+        }
         return {fmt::format("atomic{}({}, {})", opname, Visit(operation[0]).GetCode(),
                             Visit(operation[1]).As(type)),
                 type};

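For context, the GLSL path above assembles the builtin call name from the Func string_view constants, so Func::Add becomes an atomicAdd(...) call in the emitted shader. A hedged, standalone reproduction of just that formatting step; mem_expr and value_expr are placeholder strings, not real decompiler output:

    #include <fmt/format.h>

    #include <string>
    #include <string_view>

    std::string BuildAtomicCall(std::string_view opname, std::string_view mem_expr,
                                std::string_view value_expr) {
        // Same pattern as the fmt::format call in the hunk above.
        return fmt::format("atomic{}({}, {})", opname, mem_expr, value_expr);
    }

    // BuildAtomicCall("Add", "smem[0]", "value") == "atomicAdd(smem[0], value)"
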
@@ -2307,6 +2311,8 @@ private:
        ~Func() = delete;

        static constexpr std::string_view Add = "Add";
+       static constexpr std::string_view Min = "Min";
+       static constexpr std::string_view Max = "Max";
        static constexpr std::string_view And = "And";
        static constexpr std::string_view Or = "Or";
        static constexpr std::string_view Xor = "Xor";

@@ -2457,7 +2463,21 @@ private:
         &GLSLDecompiler::AtomicImage<Func::Xor>,
         &GLSLDecompiler::AtomicImage<Func::Exchange>,
 
+        &GLSLDecompiler::Atomic<Func::Exchange, Type::Uint>,
         &GLSLDecompiler::Atomic<Func::Add, Type::Uint>,
+        &GLSLDecompiler::Atomic<Func::Min, Type::Uint>,
+        &GLSLDecompiler::Atomic<Func::Max, Type::Uint>,
+        &GLSLDecompiler::Atomic<Func::And, Type::Uint>,
+        &GLSLDecompiler::Atomic<Func::Or, Type::Uint>,
+        &GLSLDecompiler::Atomic<Func::Xor, Type::Uint>,
+
+        &GLSLDecompiler::Atomic<Func::Exchange, Type::Int>,
+        &GLSLDecompiler::Atomic<Func::Add, Type::Int>,
+        &GLSLDecompiler::Atomic<Func::Min, Type::Int>,
+        &GLSLDecompiler::Atomic<Func::Max, Type::Int>,
+        &GLSLDecompiler::Atomic<Func::And, Type::Int>,
+        &GLSLDecompiler::Atomic<Func::Or, Type::Int>,
+        &GLSLDecompiler::Atomic<Func::Xor, Type::Int>,
 
         &GLSLDecompiler::Branch,
         &GLSLDecompiler::BranchIndirect,

@@ -1941,7 +1941,11 @@ private:
         return {};
     }
 
-    Expression AtomicAdd(Operation operation) {
+    template <Id (Module::*func)(Id, Id, Id, Id, Id), Type result_type,
+              Type value_type = result_type>
+    Expression Atomic(Operation operation) {
+        const Id type_def = GetTypeDefinition(result_type);
+
         Id pointer;
         if (const auto smem = std::get_if<SmemNode>(&*operation[0])) {
             pointer = GetSharedMemoryPointer(*smem);

@@ -1949,14 +1953,15 @@ private:
             pointer = GetGlobalMemoryPointer(*gmem);
         } else {
             UNREACHABLE();
-            return {Constant(t_uint, 0), Type::Uint};
+            return {Constant(type_def, 0), result_type};
         }
 
-        const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
-        const Id semantics = Constant(t_uint, 0U);
+        const Id value = As(Visit(operation[1]), value_type);
 
-        const Id value = AsUint(Visit(operation[1]));
-        return {OpAtomicIAdd(t_uint, pointer, scope, semantics, value), Type::Uint};
+        const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
+        const Id semantics = Constant(type_def, 0);
+
+        return {(this->*func)(type_def, pointer, scope, semantics, value), result_type};
     }
 
     Expression Branch(Operation operation) {

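The Vulkan path above selects the SPIR-V atomic instruction through a non-type template parameter that is a pointer to a Module member function and then calls it via (this->*func)(...). A self-contained sketch of that dispatch pattern follows; Emitter and its Emit* methods are stand-ins, not the real sirit::Module interface, and the int(int, int) signature is simplified from the Id-based one in the diff:

    #include <cstdio>

    struct Emitter {
        int EmitIAdd(int a, int b) { return a + b; }
        int EmitAnd(int a, int b) { return a & b; }
    };

    // The member function to call is fixed at compile time, one instantiation per table entry.
    template <int (Emitter::*func)(int, int)>
    int Atomic(Emitter& emitter, int a, int b) {
        return (emitter.*func)(a, b);
    }

    int main() {
        Emitter emitter;
        std::printf("%d\n", Atomic<&Emitter::EmitIAdd>(emitter, 2, 3)); // 5
        std::printf("%d\n", Atomic<&Emitter::EmitAnd>(emitter, 6, 3));  // 2
    }
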
@@ -2545,7 +2550,21 @@ private:
         &SPIRVDecompiler::AtomicImageXor,
         &SPIRVDecompiler::AtomicImageExchange,
 
-        &SPIRVDecompiler::AtomicAdd,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange, Type::Uint>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd, Type::Uint>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicUMin, Type::Uint>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicUMax, Type::Uint>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd, Type::Uint>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicOr, Type::Uint>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicXor, Type::Uint>,
+
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange, Type::Int>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd, Type::Int>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicSMin, Type::Int>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicSMax, Type::Int>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd, Type::Int>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicOr, Type::Int>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicXor, Type::Int>,
 
         &SPIRVDecompiler::Branch,
         &SPIRVDecompiler::BranchIndirect,

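A note on the Uint/Int split in both dispatch tables: for Add, And, Or, Xor and Exchange the resulting bit pattern is the same either way, but Min and Max genuinely depend on signedness, which is why SPIR-V has separate OpAtomicUMin/UMax and OpAtomicSMin/SMax. A tiny illustration of why:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::int32_t a = -1;
        const std::int32_t b = 1;
        const auto ua = static_cast<std::uint32_t>(a); // 0xFFFFFFFF
        const std::uint32_t ub = 1;

        std::printf("signed   min(-1, 1) = %d\n", std::min(a, b));   // -1
        std::printf("unsigned min(-1, 1) = %u\n", std::min(ua, ub)); // 1
    }
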
@@ -19,7 +19,6 @@ namespace VideoCommon::Shader {
 using Tegra::Shader::AtomicOp;
 using Tegra::Shader::AtomicType;
 using Tegra::Shader::Attribute;
-using Tegra::Shader::GlobalAtomicOp;
 using Tegra::Shader::GlobalAtomicType;
 using Tegra::Shader::Instruction;
 using Tegra::Shader::OpCode;

@@ -28,6 +27,28 @@ using Tegra::Shader::StoreType;
 
 namespace {
 
+Node GetAtomOperation(AtomicOp op, bool is_signed, Node memory, Node data) {
+    const OperationCode operation_code = [op] {
+        switch (op) {
+        case AtomicOp::Add:
+            return OperationCode::AtomicIAdd;
+        case AtomicOp::Min:
+            return OperationCode::AtomicIMin;
+        case AtomicOp::Max:
+            return OperationCode::AtomicIMax;
+        case AtomicOp::And:
+            return OperationCode::AtomicIAnd;
+        case AtomicOp::Or:
+            return OperationCode::AtomicIOr;
+        case AtomicOp::Xor:
+            return OperationCode::AtomicIXor;
+        case AtomicOp::Exch:
+            return OperationCode::AtomicIExchange;
+        }
+    }();
+    return SignedOperation(operation_code, is_signed, std::move(memory), std::move(data));
+}
+
 bool IsUnaligned(Tegra::Shader::UniformType uniform_type) {
     return uniform_type == Tegra::Shader::UniformType::UnsignedByte ||
            uniform_type == Tegra::Shader::UniformType::UnsignedShort;

@@ -363,10 +384,13 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
         break;
     }
     case OpCode::Id::ATOM: {
-        UNIMPLEMENTED_IF_MSG(instr.atom.operation != GlobalAtomicOp::Add, "operation={}",
-                             static_cast<int>(instr.atom.operation.Value()));
-        UNIMPLEMENTED_IF_MSG(instr.atom.type != GlobalAtomicType::S32, "type={}",
-                             static_cast<int>(instr.atom.type.Value()));
+        UNIMPLEMENTED_IF_MSG(instr.atom.operation == AtomicOp::Inc ||
+                                 instr.atom.operation == AtomicOp::Dec ||
+                                 instr.atom.operation == AtomicOp::SafeAdd,
+                             "operation={}", static_cast<int>(instr.atom.operation.Value()));
+        UNIMPLEMENTED_IF_MSG(instr.atom.type == GlobalAtomicType::S64 ||
+                                 instr.atom.type == GlobalAtomicType::U64,
+                             "type={}", static_cast<int>(instr.atom.type.Value()));
 
         const auto [real_address, base_address, descriptor] =
             TrackGlobalMemory(bb, instr, true, true);

@@ -375,25 +399,29 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
             break;
         }
 
+        const bool is_signed =
+            instr.atoms.type == AtomicType::S32 || instr.atoms.type == AtomicType::S64;
         Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor);
-        Node value = Operation(OperationCode::AtomicAdd, std::move(gmem), GetRegister(instr.gpr20));
+        Node value = GetAtomOperation(static_cast<AtomicOp>(instr.atom.operation), is_signed, gmem,
+                                      GetRegister(instr.gpr20));
         SetRegister(bb, instr.gpr0, std::move(value));
         break;
     }
     case OpCode::Id::ATOMS: {
-        UNIMPLEMENTED_IF_MSG(instr.atoms.operation != AtomicOp::Add, "operation={}",
-                             static_cast<int>(instr.atoms.operation.Value()));
-        UNIMPLEMENTED_IF_MSG(instr.atoms.type != AtomicType::U32, "type={}",
-                             static_cast<int>(instr.atoms.type.Value()));
-
+        UNIMPLEMENTED_IF_MSG(instr.atoms.operation == AtomicOp::Inc ||
+                                 instr.atoms.operation == AtomicOp::Dec,
+                             "operation={}", static_cast<int>(instr.atoms.operation.Value()));
+        UNIMPLEMENTED_IF_MSG(instr.atoms.type == AtomicType::S64 ||
+                                 instr.atoms.type == AtomicType::U64,
+                             "type={}", static_cast<int>(instr.atoms.type.Value()));
+        const bool is_signed =
+            instr.atoms.type == AtomicType::S32 || instr.atoms.type == AtomicType::S64;
         const s32 offset = instr.atoms.GetImmediateOffset();
         Node address = GetRegister(instr.gpr8);
        address = Operation(OperationCode::IAdd, std::move(address), Immediate(offset));
-
-        Node memory = GetSharedMemory(std::move(address));
-        Node data = GetRegister(instr.gpr20);
-
-        Node value = Operation(OperationCode::AtomicAdd, std::move(memory), std::move(data));
+        Node value =
+            GetAtomOperation(static_cast<AtomicOp>(instr.atoms.operation), is_signed,
+                             GetSharedMemory(std::move(address)), GetRegister(instr.gpr20));
         SetRegister(bb, instr.gpr0, std::move(value));
         break;
     }

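One convention the ATOM/ATOMS decode above relies on, and worth spelling out: an atomic read-modify-write yields the value the memory held before the operation, and that previous value is what the destination register receives via SetRegister(bb, instr.gpr0, ...). A quick standalone illustration with std::atomic (the std::atomic call is only an analogy for atomicAdd/OpAtomicIAdd, not project code):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    int main() {
        std::atomic<std::uint32_t> memory{5};
        const std::uint32_t previous = memory.fetch_add(3); // returns the old value
        assert(previous == 5);
        assert(memory.load() == 8);
    }
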
@@ -162,7 +162,21 @@ enum class OperationCode {
     AtomicImageXor,        /// (MetaImage, int[N] coords) -> void
     AtomicImageExchange,   /// (MetaImage, int[N] coords) -> void
 
-    AtomicAdd,             /// (memory, {u}int) -> {u}int
+    AtomicUExchange,       /// (memory, uint) -> uint
+    AtomicUAdd,            /// (memory, uint) -> uint
+    AtomicUMin,            /// (memory, uint) -> uint
+    AtomicUMax,            /// (memory, uint) -> uint
+    AtomicUAnd,            /// (memory, uint) -> uint
+    AtomicUOr,             /// (memory, uint) -> uint
+    AtomicUXor,            /// (memory, uint) -> uint
+
+    AtomicIExchange,       /// (memory, int) -> int
+    AtomicIAdd,            /// (memory, int) -> int
+    AtomicIMin,            /// (memory, int) -> int
+    AtomicIMax,            /// (memory, int) -> int
+    AtomicIAnd,            /// (memory, int) -> int
+    AtomicIOr,             /// (memory, int) -> int
+    AtomicIXor,            /// (memory, int) -> int
 
     Branch,                /// (uint branch_target) -> void
     BranchIndirect,        /// (uint branch_target) -> void

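The OperationCode entries above line up one-to-one, and in the same order, with the Atomic<> rows added to the GLSL and SPIR-V tables earlier in the diff; the decompilers index those tables by operation code, so the ordering is load-bearing. A reduced sketch of that convention (Op, Handler and the lambdas below are illustrative only, not project types):

    #include <array>
    #include <cstddef>

    enum class Op : std::size_t { AtomicUAdd, AtomicUMin, Count };

    using Handler = int (*)(int, int);

    int Dispatch(Op op, int memory, int value) {
        // One handler per enum value, in enum order, like the dispatch arrays above.
        static const std::array<Handler, static_cast<std::size_t>(Op::Count)> handlers{
            +[](int m, int v) { return m + v; },              // AtomicUAdd
            +[](int m, int v) { return v < m ? v : m; },      // AtomicUMin
        };
        return handlers[static_cast<std::size_t>(op)](memory, value);
    }
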
@@ -86,6 +86,20 @@ OperationCode SignedToUnsignedCode(OperationCode operation_code, bool is_signed)
         return OperationCode::LogicalUNotEqual;
     case OperationCode::LogicalIGreaterEqual:
         return OperationCode::LogicalUGreaterEqual;
+    case OperationCode::AtomicIExchange:
+        return OperationCode::AtomicUExchange;
+    case OperationCode::AtomicIAdd:
+        return OperationCode::AtomicUAdd;
+    case OperationCode::AtomicIMin:
+        return OperationCode::AtomicUMin;
+    case OperationCode::AtomicIMax:
+        return OperationCode::AtomicUMax;
+    case OperationCode::AtomicIAnd:
+        return OperationCode::AtomicUAnd;
+    case OperationCode::AtomicIOr:
+        return OperationCode::AtomicUOr;
+    case OperationCode::AtomicIXor:
+        return OperationCode::AtomicUXor;
     case OperationCode::INegate:
         UNREACHABLE_MSG("Can't negate an unsigned integer");
         return {};

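Tying the pieces together: GetAtomOperation always produces a signed AtomicI* code plus an is_signed flag, and the switch above rewrites it to the AtomicU* twin when the operand is unsigned (presumably passing signed codes through unchanged). A reduced model of that routing; Code, ToUnsigned and PickCode are stand-ins for the real node helpers, not their actual signatures:

    enum class Code { AtomicIAdd, AtomicUAdd, AtomicIMin, AtomicUMin };

    // Mirrors the switch in SignedToUnsignedCode above.
    Code ToUnsigned(Code code) {
        switch (code) {
        case Code::AtomicIAdd:
            return Code::AtomicUAdd;
        case Code::AtomicIMin:
            return Code::AtomicUMin;
        default:
            return code;
        }
    }

    // Mirrors SignedOperation(code, is_signed, ...): signed requests keep the I-variant,
    // unsigned requests are mapped to their AtomicU* counterpart.
    Code PickCode(Code signed_code, bool is_signed) {
        return is_signed ? signed_code : ToUnsigned(signed_code);
    }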