Mirror of https://git.suyu.dev/suyu/suyu (synced 2024-10-31 20:37:52 +00:00)
shader: FMUL, select, RRO, and MUFU fixes
Parent: 18a766b362
Commit: e44752ddc8
18 changed files with 507 additions and 119 deletions
@@ -65,6 +65,7 @@ add_library(shader_recompiler STATIC
     frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp
     frontend/maxwell/translate/impl/floating_point_multi_function.cpp
     frontend/maxwell/translate/impl/floating_point_multiply.cpp
+    frontend/maxwell/translate/impl/floating_point_range_reduction.cpp
     frontend/maxwell/translate/impl/impl.cpp
     frontend/maxwell/translate/impl/impl.h
     frontend/maxwell/translate/impl/integer_add.cpp

@@ -108,10 +108,12 @@ void EmitCompositeConstructF64x4(EmitContext& ctx);
 void EmitCompositeExtractF64x2(EmitContext& ctx);
 void EmitCompositeExtractF64x3(EmitContext& ctx);
 void EmitCompositeExtractF64x4(EmitContext& ctx);
-void EmitSelect8(EmitContext& ctx);
-void EmitSelect16(EmitContext& ctx);
-Id EmitSelect32(EmitContext& ctx, Id cond, Id true_value, Id false_value);
-void EmitSelect64(EmitContext& ctx);
+Id EmitSelectU8(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectU16(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectU32(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectU64(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectF16(EmitContext& ctx, Id cond, Id true_value, Id false_value);
+Id EmitSelectF32(EmitContext& ctx, Id cond, Id true_value, Id false_value);
 void EmitBitCastU16F16(EmitContext& ctx);
 Id EmitBitCastU32F32(EmitContext& ctx, Id value);
 void EmitBitCastU64F64(EmitContext& ctx);

@@ -149,18 +151,15 @@ Id EmitFPMul64(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
 Id EmitFPNeg16(EmitContext& ctx, Id value);
 Id EmitFPNeg32(EmitContext& ctx, Id value);
 Id EmitFPNeg64(EmitContext& ctx, Id value);
-void EmitFPRecip32(EmitContext& ctx);
-void EmitFPRecip64(EmitContext& ctx);
-void EmitFPRecipSqrt32(EmitContext& ctx);
-void EmitFPRecipSqrt64(EmitContext& ctx);
-void EmitFPSqrt(EmitContext& ctx);
-void EmitFPSin(EmitContext& ctx);
-void EmitFPSinNotReduced(EmitContext& ctx);
-void EmitFPExp2(EmitContext& ctx);
-void EmitFPExp2NotReduced(EmitContext& ctx);
-void EmitFPCos(EmitContext& ctx);
-void EmitFPCosNotReduced(EmitContext& ctx);
-void EmitFPLog2(EmitContext& ctx);
+Id EmitFPSin(EmitContext& ctx, Id value);
+Id EmitFPCos(EmitContext& ctx, Id value);
+Id EmitFPExp2(EmitContext& ctx, Id value);
+Id EmitFPLog2(EmitContext& ctx, Id value);
+Id EmitFPRecip32(EmitContext& ctx, Id value);
+Id EmitFPRecip64(EmitContext& ctx, Id value);
+Id EmitFPRecipSqrt32(EmitContext& ctx, Id value);
+Id EmitFPRecipSqrt64(EmitContext& ctx, Id value);
+Id EmitFPSqrt(EmitContext& ctx, Id value);
 Id EmitFPSaturate16(EmitContext& ctx, Id value);
 Id EmitFPSaturate32(EmitContext& ctx, Id value);
 Id EmitFPSaturate64(EmitContext& ctx, Id value);

@@ -176,6 +175,42 @@ Id EmitFPCeil64(EmitContext& ctx, Id value);
 Id EmitFPTrunc16(EmitContext& ctx, Id value);
 Id EmitFPTrunc32(EmitContext& ctx, Id value);
 Id EmitFPTrunc64(EmitContext& ctx, Id value);
+Id EmitFPOrdEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdNotEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdNotEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdNotEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordNotEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordNotEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordNotEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThan16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThan32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThan64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThan16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThan32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThan64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThan16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThan32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThan64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThan16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThan32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThan64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPOrdGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs);
+Id EmitFPUnordGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs);
 Id EmitIAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b);
 void EmitIAdd64(EmitContext& ctx);
 Id EmitISub32(EmitContext& ctx, Id a, Id b);

@@ -100,52 +100,40 @@ Id EmitFPNeg64(EmitContext& ctx, Id value) {
     return ctx.OpFNegate(ctx.F64[1], value);
 }

-void EmitFPRecip32(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPRecip64(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPRecipSqrt32(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPRecipSqrt64(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPSqrt(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPSin(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPSinNotReduced(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPExp2(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPExp2NotReduced(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPCos(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPCosNotReduced(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitFPLog2(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
+Id EmitFPSin(EmitContext& ctx, Id value) {
+    return ctx.OpSin(ctx.F32[1], value);
+}
+
+Id EmitFPCos(EmitContext& ctx, Id value) {
+    return ctx.OpCos(ctx.F32[1], value);
+}
+
+Id EmitFPExp2(EmitContext& ctx, Id value) {
+    return ctx.OpExp2(ctx.F32[1], value);
+}
+
+Id EmitFPLog2(EmitContext& ctx, Id value) {
+    return ctx.OpLog2(ctx.F32[1], value);
+}
+
+Id EmitFPRecip32(EmitContext& ctx, Id value) {
+    return ctx.OpFDiv(ctx.F32[1], ctx.Constant(ctx.F32[1], 1.0f), value);
+}
+
+Id EmitFPRecip64(EmitContext& ctx, Id value) {
+    return ctx.OpFDiv(ctx.F64[1], ctx.Constant(ctx.F64[1], 1.0f), value);
+}
+
+Id EmitFPRecipSqrt32(EmitContext& ctx, Id value) {
+    return ctx.OpInverseSqrt(ctx.F32[1], value);
+}
+
+Id EmitFPRecipSqrt64(EmitContext& ctx, Id value) {
+    return ctx.OpInverseSqrt(ctx.F64[1], value);
+}
+
+Id EmitFPSqrt(EmitContext& ctx, Id value) {
+    return ctx.OpSqrt(ctx.F32[1], value);
+}

 Id EmitFPSaturate16(EmitContext& ctx, Id value) {

@@ -214,4 +202,148 @@ Id EmitFPTrunc64(EmitContext& ctx, Id value) {
     return ctx.OpTrunc(ctx.F64[1], value);
 }

+Id EmitFPOrdEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdNotEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdNotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdNotEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdNotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdNotEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdNotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordNotEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordNotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordNotEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordNotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordNotEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordNotEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThan16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThan32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThan64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordLessThan16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordLessThan32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordLessThan64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordLessThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThan16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThan32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThan64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThan16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThan32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThan64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordGreaterThan(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordLessThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordLessThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordLessThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordLessThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPOrdGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFOrdGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThanEqual16(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThanEqual32(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
+Id EmitFPUnordGreaterThanEqual64(EmitContext& ctx, Id lhs, Id rhs) {
+    return ctx.OpFUnordGreaterThanEqual(ctx.U1, lhs, rhs);
+}
+
 } // namespace Shader::Backend::SPIRV

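A note on the ordered/unordered pairs above: in SPIR-V (and IEEE-754) terms, an ordered comparison yields false whenever either operand is NaN, while the unordered variant yields true in that case. A minimal stand-alone C++ sketch of the semantics only, with plain floats and hypothetical helper names rather than the emitter API:

#include <cmath>
#include <cstdio>

// Ordered: both operands must be non-NaN and the relation must hold.
bool OrdLessThan(float lhs, float rhs) {
    return !std::isnan(lhs) && !std::isnan(rhs) && lhs < rhs;
}

// Unordered: true if either operand is NaN or the relation holds.
bool UnordLessThan(float lhs, float rhs) {
    return std::isnan(lhs) || std::isnan(rhs) || lhs < rhs;
}

int main() {
    const float nan = std::nanf("");
    std::printf("%d %d\n", OrdLessThan(nan, 1.0f), UnordLessThan(nan, 1.0f)); // prints: 0 1
}
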
@@ -6,20 +6,29 @@

 namespace Shader::Backend::SPIRV {

-void EmitSelect8(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-void EmitSelect16(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
-
-Id EmitSelect32(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
-    return ctx.OpSelect(ctx.U32[1], cond, true_value, false_value);
-}
-
-void EmitSelect64(EmitContext&) {
-    throw NotImplementedException("SPIR-V Instruction");
-}
+Id EmitSelectU8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Id cond,
+                [[maybe_unused]] Id true_value, [[maybe_unused]] Id false_value) {
+    throw NotImplementedException("SPIR-V Instruction");
+}
+
+Id EmitSelectU16(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+    return ctx.OpSelect(ctx.U16, cond, true_value, false_value);
+}
+
+Id EmitSelectU32(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+    return ctx.OpSelect(ctx.U32[1], cond, true_value, false_value);
+}
+
+Id EmitSelectU64(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+    return ctx.OpSelect(ctx.U64, cond, true_value, false_value);
+}
+
+Id EmitSelectF16(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+    return ctx.OpSelect(ctx.F16[1], cond, true_value, false_value);
+}
+
+Id EmitSelectF32(EmitContext& ctx, Id cond, Id true_value, Id false_value) {
+    return ctx.OpSelect(ctx.F32[1], cond, true_value, false_value);
+}

 } // namespace Shader::Backend::SPIRV

@@ -361,19 +361,21 @@ Value IREmitter::CompositeExtract(const Value& vector, size_t element) {
     }
 }

-UAny IREmitter::Select(const U1& condition, const UAny& true_value, const UAny& false_value) {
+Value IREmitter::Select(const U1& condition, const Value& true_value, const Value& false_value) {
     if (true_value.Type() != false_value.Type()) {
         throw InvalidArgument("Mismatching types {} and {}", true_value.Type(), false_value.Type());
     }
     switch (true_value.Type()) {
     case Type::U8:
-        return Inst<UAny>(Opcode::Select8, condition, true_value, false_value);
+        return Inst(Opcode::SelectU8, condition, true_value, false_value);
     case Type::U16:
-        return Inst<UAny>(Opcode::Select16, condition, true_value, false_value);
+        return Inst(Opcode::SelectU16, condition, true_value, false_value);
     case Type::U32:
-        return Inst<UAny>(Opcode::Select32, condition, true_value, false_value);
+        return Inst(Opcode::SelectU32, condition, true_value, false_value);
     case Type::U64:
-        return Inst<UAny>(Opcode::Select64, condition, true_value, false_value);
+        return Inst(Opcode::SelectU64, condition, true_value, false_value);
+    case Type::F32:
+        return Inst(Opcode::SelectF32, condition, true_value, false_value);
     default:
         throw InvalidArgument("Invalid type {}", true_value.Type());
     }

@@ -503,12 +505,16 @@ F16F32F64 IREmitter::FPAbsNeg(const F16F32F64& value, bool abs, bool neg) {
     return result;
 }

-F32 IREmitter::FPCosNotReduced(const F32& value) {
-    return Inst<F32>(Opcode::FPCosNotReduced, value);
+F32 IREmitter::FPCos(const F32& value) {
+    return Inst<F32>(Opcode::FPCos, value);
 }

-F32 IREmitter::FPExp2NotReduced(const F32& value) {
-    return Inst<F32>(Opcode::FPExp2NotReduced, value);
+F32 IREmitter::FPSin(const F32& value) {
+    return Inst<F32>(Opcode::FPSin, value);
+}
+
+F32 IREmitter::FPExp2(const F32& value) {
+    return Inst<F32>(Opcode::FPExp2, value);
 }

 F32 IREmitter::FPLog2(const F32& value) {

@@ -517,9 +523,9 @@ F32 IREmitter::FPLog2(const F32& value) {

 F32F64 IREmitter::FPRecip(const F32F64& value) {
     switch (value.Type()) {
-    case Type::U32:
+    case Type::F32:
         return Inst<F32>(Opcode::FPRecip32, value);
-    case Type::U64:
+    case Type::F64:
         return Inst<F64>(Opcode::FPRecip64, value);
     default:
         ThrowInvalidType(value.Type());

@@ -528,19 +534,15 @@ F32F64 IREmitter::FPRecip(const F32F64& value) {

 F32F64 IREmitter::FPRecipSqrt(const F32F64& value) {
     switch (value.Type()) {
-    case Type::U32:
+    case Type::F32:
         return Inst<F32>(Opcode::FPRecipSqrt32, value);
-    case Type::U64:
+    case Type::F64:
         return Inst<F64>(Opcode::FPRecipSqrt64, value);
     default:
         ThrowInvalidType(value.Type());
     }
 }

-F32 IREmitter::FPSinNotReduced(const F32& value) {
-    return Inst<F32>(Opcode::FPSinNotReduced, value);
-}
-
 F32 IREmitter::FPSqrt(const F32& value) {
     return Inst<F32>(Opcode::FPSqrt, value);
 }

@@ -610,6 +612,114 @@ F16F32F64 IREmitter::FPTrunc(const F16F32F64& value, FpControl control) {
     }
 }

+U1 IREmitter::FPEqual(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered) {
+    if (lhs.Type() != rhs.Type()) {
+        throw InvalidArgument("Mismatching types {} and {}", lhs.Type(), rhs.Type());
+    }
+    switch (lhs.Type()) {
+    case Type::F16:
+        return Inst<U1>(ordered ? Opcode::FPOrdEqual16 : Opcode::FPUnordEqual16, lhs, rhs);
+    case Type::F32:
+        return Inst<U1>(ordered ? Opcode::FPOrdEqual32 : Opcode::FPUnordEqual32, lhs, rhs);
+    case Type::F64:
+        return Inst<U1>(ordered ? Opcode::FPOrdEqual64 : Opcode::FPUnordEqual64, lhs, rhs);
+    default:
+        ThrowInvalidType(lhs.Type());
+    }
+}
+
+U1 IREmitter::FPNotEqual(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered) {
+    if (lhs.Type() != rhs.Type()) {
+        throw InvalidArgument("Mismatching types {} and {}", lhs.Type(), rhs.Type());
+    }
+    switch (lhs.Type()) {
+    case Type::F16:
+        return Inst<U1>(ordered ? Opcode::FPOrdNotEqual16 : Opcode::FPUnordNotEqual16, lhs, rhs);
+    case Type::F32:
+        return Inst<U1>(ordered ? Opcode::FPOrdNotEqual32 : Opcode::FPUnordNotEqual32, lhs, rhs);
+    case Type::F64:
+        return Inst<U1>(ordered ? Opcode::FPOrdNotEqual64 : Opcode::FPUnordNotEqual64, lhs, rhs);
+    default:
+        ThrowInvalidType(lhs.Type());
+    }
+}
+
+U1 IREmitter::FPLessThan(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered) {
+    if (lhs.Type() != rhs.Type()) {
+        throw InvalidArgument("Mismatching types {} and {}", lhs.Type(), rhs.Type());
+    }
+    switch (lhs.Type()) {
+    case Type::F16:
+        return Inst<U1>(ordered ? Opcode::FPOrdLessThan16 : Opcode::FPUnordLessThan16, lhs, rhs);
+    case Type::F32:
+        return Inst<U1>(ordered ? Opcode::FPOrdLessThan32 : Opcode::FPUnordLessThan32, lhs, rhs);
+    case Type::F64:
+        return Inst<U1>(ordered ? Opcode::FPOrdLessThan64 : Opcode::FPUnordLessThan64, lhs, rhs);
+    default:
+        ThrowInvalidType(lhs.Type());
+    }
+}
+
+U1 IREmitter::FPGreaterThan(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered) {
+    if (lhs.Type() != rhs.Type()) {
+        throw InvalidArgument("Mismatching types {} and {}", lhs.Type(), rhs.Type());
+    }
+    switch (lhs.Type()) {
+    case Type::F16:
+        return Inst<U1>(ordered ? Opcode::FPOrdGreaterThan16 : Opcode::FPUnordGreaterThan16, lhs,
+                        rhs);
+    case Type::F32:
+        return Inst<U1>(ordered ? Opcode::FPOrdGreaterThan32 : Opcode::FPUnordGreaterThan32, lhs,
+                        rhs);
+    case Type::F64:
+        return Inst<U1>(ordered ? Opcode::FPOrdGreaterThan64 : Opcode::FPUnordGreaterThan64, lhs,
+                        rhs);
+    default:
+        ThrowInvalidType(lhs.Type());
+    }
+}
+
+U1 IREmitter::FPLessThanEqual(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered) {
+    if (lhs.Type() != rhs.Type()) {
+        throw InvalidArgument("Mismatching types {} and {}", lhs.Type(), rhs.Type());
+    }
+    switch (lhs.Type()) {
+    case Type::F16:
+        return Inst<U1>(ordered ? Opcode::FPOrdLessThanEqual16 : Opcode::FPUnordLessThanEqual16,
+                        lhs, rhs);
+    case Type::F32:
+        return Inst<U1>(ordered ? Opcode::FPOrdLessThanEqual32 : Opcode::FPUnordLessThanEqual32,
+                        lhs, rhs);
+    case Type::F64:
+        return Inst<U1>(ordered ? Opcode::FPOrdLessThanEqual64 : Opcode::FPUnordLessThanEqual64,
+                        lhs, rhs);
+    default:
+        ThrowInvalidType(lhs.Type());
+    }
+}
+
+U1 IREmitter::FPGreaterThanEqual(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered) {
+    if (lhs.Type() != rhs.Type()) {
+        throw InvalidArgument("Mismatching types {} and {}", lhs.Type(), rhs.Type());
+    }
+    switch (lhs.Type()) {
+    case Type::F16:
+        return Inst<U1>(ordered ? Opcode::FPOrdGreaterThanEqual16
+                                : Opcode::FPUnordGreaterThanEqual16,
+                        lhs, rhs);
+    case Type::F32:
+        return Inst<U1>(ordered ? Opcode::FPOrdGreaterThanEqual32
+                                : Opcode::FPUnordGreaterThanEqual32,
+                        lhs, rhs);
+    case Type::F64:
+        return Inst<U1>(ordered ? Opcode::FPOrdGreaterThanEqual64
+                                : Opcode::FPUnordGreaterThanEqual64,
+                        lhs, rhs);
+    default:
+        ThrowInvalidType(lhs.Type());
+    }
+}
+
 U32U64 IREmitter::IAdd(const U32U64& a, const U32U64& b) {
     if (a.Type() != b.Type()) {
         throw InvalidArgument("Mismatching types {} and {}", a.Type(), b.Type());

@@ -98,7 +98,8 @@ public:
                                          const Value& e4);
     [[nodiscard]] Value CompositeExtract(const Value& vector, size_t element);

-    [[nodiscard]] UAny Select(const U1& condition, const UAny& true_value, const UAny& false_value);
+    [[nodiscard]] Value Select(const U1& condition, const Value& true_value,
+                               const Value& false_value);

     template <typename Dest, typename Source>
     [[nodiscard]] Dest BitCast(const Source& value);

@@ -121,12 +122,12 @@ public:
     [[nodiscard]] F16F32F64 FPNeg(const F16F32F64& value);
     [[nodiscard]] F16F32F64 FPAbsNeg(const F16F32F64& value, bool abs, bool neg);

-    [[nodiscard]] F32 FPCosNotReduced(const F32& value);
-    [[nodiscard]] F32 FPExp2NotReduced(const F32& value);
+    [[nodiscard]] F32 FPCos(const F32& value);
+    [[nodiscard]] F32 FPSin(const F32& value);
+    [[nodiscard]] F32 FPExp2(const F32& value);
     [[nodiscard]] F32 FPLog2(const F32& value);
     [[nodiscard]] F32F64 FPRecip(const F32F64& value);
     [[nodiscard]] F32F64 FPRecipSqrt(const F32F64& value);
-    [[nodiscard]] F32 FPSinNotReduced(const F32& value);
     [[nodiscard]] F32 FPSqrt(const F32& value);
     [[nodiscard]] F16F32F64 FPSaturate(const F16F32F64& value);
     [[nodiscard]] F16F32F64 FPRoundEven(const F16F32F64& value, FpControl control = {});

@@ -134,6 +135,15 @@ public:
     [[nodiscard]] F16F32F64 FPCeil(const F16F32F64& value, FpControl control = {});
     [[nodiscard]] F16F32F64 FPTrunc(const F16F32F64& value, FpControl control = {});

+    [[nodiscard]] U1 FPEqual(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered = true);
+    [[nodiscard]] U1 FPNotEqual(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered = true);
+    [[nodiscard]] U1 FPLessThan(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered = true);
+    [[nodiscard]] U1 FPGreaterThan(const F16F32F64& lhs, const F16F32F64& rhs, bool ordered = true);
+    [[nodiscard]] U1 FPLessThanEqual(const F16F32F64& lhs, const F16F32F64& rhs,
+                                     bool ordered = true);
+    [[nodiscard]] U1 FPGreaterThanEqual(const F16F32F64& lhs, const F16F32F64& rhs,
+                                        bool ordered = true);
+
     [[nodiscard]] U32U64 IAdd(const U32U64& a, const U32U64& b);
     [[nodiscard]] U32U64 ISub(const U32U64& a, const U32U64& b);
     [[nodiscard]] U32 IMul(const U32& a, const U32& b);

@@ -103,10 +103,12 @@ OPCODE(CompositeExtractF64x3, F64, F64x
 OPCODE(CompositeExtractF64x4, F64, F64x4, U32, )

 // Select operations
-OPCODE(Select8, U8, U1, U8, U8, )
-OPCODE(Select16, U16, U1, U16, U16, )
-OPCODE(Select32, U32, U1, U32, U32, )
-OPCODE(Select64, U64, U1, U64, U64, )
+OPCODE(SelectU8, U8, U1, U8, U8, )
+OPCODE(SelectU16, U16, U1, U16, U16, )
+OPCODE(SelectU32, U32, U1, U32, U32, )
+OPCODE(SelectU64, U64, U1, U64, U64, )
+OPCODE(SelectF16, F16, U1, F16, F16, )
+OPCODE(SelectF32, F32, U1, F32, F32, )

 // Bitwise conversions
 OPCODE(BitCastU16F16, U16, F16, )

@@ -156,11 +158,8 @@ OPCODE(FPRecipSqrt32, F32, F32, )
 OPCODE(FPRecipSqrt64, F64, F64, )
 OPCODE(FPSqrt, F32, F32, )
 OPCODE(FPSin, F32, F32, )
-OPCODE(FPSinNotReduced, F32, F32, )
 OPCODE(FPExp2, F32, F32, )
-OPCODE(FPExp2NotReduced, F32, F32, )
 OPCODE(FPCos, F32, F32, )
-OPCODE(FPCosNotReduced, F32, F32, )
 OPCODE(FPLog2, F32, F32, )
 OPCODE(FPSaturate16, F16, F16, )
 OPCODE(FPSaturate32, F32, F32, )

@@ -178,6 +177,43 @@ OPCODE(FPTrunc16, F16, F16, )
 OPCODE(FPTrunc32, F32, F32, )
 OPCODE(FPTrunc64, F64, F64, )

+OPCODE(FPOrdEqual16, U1, F16, F16, )
+OPCODE(FPOrdEqual32, U1, F32, F32, )
+OPCODE(FPOrdEqual64, U1, F64, F64, )
+OPCODE(FPUnordEqual16, U1, F16, F16, )
+OPCODE(FPUnordEqual32, U1, F32, F32, )
+OPCODE(FPUnordEqual64, U1, F64, F64, )
+OPCODE(FPOrdNotEqual16, U1, F16, F16, )
+OPCODE(FPOrdNotEqual32, U1, F32, F32, )
+OPCODE(FPOrdNotEqual64, U1, F64, F64, )
+OPCODE(FPUnordNotEqual16, U1, F16, F16, )
+OPCODE(FPUnordNotEqual32, U1, F32, F32, )
+OPCODE(FPUnordNotEqual64, U1, F64, F64, )
+OPCODE(FPOrdLessThan16, U1, F16, F16, )
+OPCODE(FPOrdLessThan32, U1, F32, F32, )
+OPCODE(FPOrdLessThan64, U1, F64, F64, )
+OPCODE(FPUnordLessThan16, U1, F16, F16, )
+OPCODE(FPUnordLessThan32, U1, F32, F32, )
+OPCODE(FPUnordLessThan64, U1, F64, F64, )
+OPCODE(FPOrdGreaterThan16, U1, F16, F16, )
+OPCODE(FPOrdGreaterThan32, U1, F32, F32, )
+OPCODE(FPOrdGreaterThan64, U1, F64, F64, )
+OPCODE(FPUnordGreaterThan16, U1, F16, F16, )
+OPCODE(FPUnordGreaterThan32, U1, F32, F32, )
+OPCODE(FPUnordGreaterThan64, U1, F64, F64, )
+OPCODE(FPOrdLessThanEqual16, U1, F16, F16, )
+OPCODE(FPOrdLessThanEqual32, U1, F32, F32, )
+OPCODE(FPOrdLessThanEqual64, U1, F64, F64, )
+OPCODE(FPUnordLessThanEqual16, U1, F16, F16, )
+OPCODE(FPUnordLessThanEqual32, U1, F32, F32, )
+OPCODE(FPUnordLessThanEqual64, U1, F64, F64, )
+OPCODE(FPOrdGreaterThanEqual16, U1, F16, F16, )
+OPCODE(FPOrdGreaterThanEqual32, U1, F32, F32, )
+OPCODE(FPOrdGreaterThanEqual64, U1, F64, F64, )
+OPCODE(FPUnordGreaterThanEqual16, U1, F16, F16, )
+OPCODE(FPUnordGreaterThanEqual32, U1, F32, F32, )
+OPCODE(FPUnordGreaterThanEqual64, U1, F64, F64, )
+
 // Integer operations
 OPCODE(IAdd32, U32, U32, U32, )
 OPCODE(IAdd64, U64, U64, U64, )

@@ -46,7 +46,8 @@ inline IR::FmzMode CastFmzMode(FmzMode fmz_mode) {
     case FmzMode::FTZ:
         return IR::FmzMode::FTZ;
     case FmzMode::FMZ:
-        return IR::FmzMode::FMZ;
+        // FMZ is manually handled in the instruction
+        return IR::FmzMode::FTZ;
     case FmzMode::INVALIDFMZ3:
         break;
     }

@@ -53,7 +53,7 @@ void FADD(TranslatorVisitor& v, u64 insn, const IR::F32& src_b) {
 } // Anonymous namespace

 void TranslatorVisitor::FADD_reg(u64 insn) {
-    FADD(*this, insn, GetRegFloat20(insn));
+    FADD(*this, insn, GetFloatReg20(insn));
 }

 void TranslatorVisitor::FADD_cbuf(u64 insn) {

@@ -51,7 +51,7 @@ void FFMA(TranslatorVisitor& v, u64 insn, const IR::F32& src_b, const IR::F32& s
 } // Anonymous namespace

 void TranslatorVisitor::FFMA_reg(u64 insn) {
-    FFMA(*this, insn, GetRegFloat20(insn), GetRegFloat39(insn));
+    FFMA(*this, insn, GetFloatReg20(insn), GetFloatReg39(insn));
 }

 void TranslatorVisitor::FFMA_rc(u64) {

@@ -59,7 +59,7 @@ void TranslatorVisitor::FFMA_rc(u64) {
 }

 void TranslatorVisitor::FFMA_cr(u64 insn) {
-    FFMA(*this, insn, GetFloatCbuf(insn), GetRegFloat39(insn));
+    FFMA(*this, insn, GetFloatCbuf(insn), GetFloatReg39(insn));
 }

 void TranslatorVisitor::FFMA_imm(u64) {

@@ -10,7 +10,7 @@

 namespace Shader::Maxwell {
 namespace {
-enum class Operation {
+enum class Operation : u64 {
     Cos = 0,
     Sin = 1,
     Ex2 = 2, // Base 2 exponent

@@ -39,11 +39,11 @@ void TranslatorVisitor::MUFU(u64 insn) {
     IR::F32 value{[&]() -> IR::F32 {
         switch (mufu.operation) {
         case Operation::Cos:
-            return ir.FPCosNotReduced(op_a);
+            return ir.FPCos(op_a);
         case Operation::Sin:
-            return ir.FPSinNotReduced(op_a);
+            return ir.FPSin(op_a);
         case Operation::Ex2:
-            return ir.FPExp2NotReduced(op_a);
+            return ir.FPExp2(op_a);
         case Operation::Lg2:
             return ir.FPLog2(op_a);
         case Operation::Rcp:

@@ -55,9 +55,6 @@ void FMUL(TranslatorVisitor& v, u64 insn, const IR::F32& src_b, FmzMode fmz_mode
     if (cc) {
         throw NotImplementedException("FMUL CC");
     }
-    if (sat) {
-        throw NotImplementedException("FMUL SAT");
-    }
     IR::F32 op_a{v.F(fmul.src_a)};
     if (scale != Scale::None) {
         if (fmz_mode != FmzMode::FTZ || fp_rounding != FpRounding::RN) {

@@ -71,7 +68,20 @@ void FMUL(TranslatorVisitor& v, u64 insn, const IR::F32& src_b, FmzMode fmz_mode
         .rounding{CastFpRounding(fp_rounding)},
         .fmz_mode{CastFmzMode(fmz_mode)},
     };
-    v.F(fmul.dest_reg, v.ir.FPMul(op_a, op_b, fp_control));
+    IR::F32 value{v.ir.FPMul(op_a, op_b, fp_control)};
+    if (fmz_mode == FmzMode::FMZ && !sat) {
+        // Do not implement FMZ if SAT is enabled, as it does the logic for us.
+        // On D3D9 mode, anything * 0 is zero, even NAN and infinity
+        const IR::F32 zero{v.ir.Imm32(0.0f)};
+        const IR::U1 zero_a{v.ir.FPEqual(op_a, zero)};
+        const IR::U1 zero_b{v.ir.FPEqual(op_b, zero)};
+        const IR::U1 any_zero{v.ir.LogicalOr(zero_a, zero_b)};
+        value = IR::F32{v.ir.Select(any_zero, zero, value)};
+    }
+    if (sat) {
+        value = v.ir.FPSaturate(value);
+    }
+    v.F(fmul.dest_reg, value);
 }

 void FMUL(TranslatorVisitor& v, u64 insn, const IR::F32& src_b) {

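For intuition, here is a stand-alone sketch of the D3D9-style FMZ multiply that the IR sequence above assembles, using plain floats and hypothetical helper names rather than the translator's API. Saturating through fmax/fmin also maps NaN to 0, which is one reading of the "SAT does the logic for us" comment:

#include <cmath>

// FMZ multiply: any factor equal to 0.0f forces a 0.0f result,
// even when the other factor is NaN or infinity.
float FmzMul(float a, float b) {
    if (a == 0.0f || b == 0.0f) {
        return 0.0f;
    }
    return a * b;
}

// SAT modifier: clamp to [0, 1]; std::fmax(NaN, 0.0f) yields 0.0f, so NaN saturates to 0.
float Saturate(float value) {
    return std::fmin(std::fmax(value, 0.0f), 1.0f);
}
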
@@ -83,27 +93,33 @@ void FMUL(TranslatorVisitor& v, u64 insn, const IR::F32& src_b) {
         BitField<47, 1, u64> cc;
         BitField<48, 1, u64> neg_b;
         BitField<50, 1, u64> sat;
-    } fmul{insn};
-
+    } const fmul{insn};
     FMUL(v, insn, src_b, fmul.fmz, fmul.fp_rounding, fmul.scale, fmul.sat != 0, fmul.cc != 0,
          fmul.neg_b != 0);
 }
 } // Anonymous namespace

 void TranslatorVisitor::FMUL_reg(u64 insn) {
-    return FMUL(*this, insn, GetRegFloat20(insn));
+    return FMUL(*this, insn, GetFloatReg20(insn));
 }

-void TranslatorVisitor::FMUL_cbuf(u64) {
-    throw NotImplementedException("FMUL (cbuf)");
+void TranslatorVisitor::FMUL_cbuf(u64 insn) {
+    return FMUL(*this, insn, GetFloatCbuf(insn));
 }

-void TranslatorVisitor::FMUL_imm(u64) {
-    throw NotImplementedException("FMUL (imm)");
+void TranslatorVisitor::FMUL_imm(u64 insn) {
+    return FMUL(*this, insn, GetFloatImm20(insn));
 }

-void TranslatorVisitor::FMUL32I(u64) {
-    throw NotImplementedException("FMUL32I");
+void TranslatorVisitor::FMUL32I(u64 insn) {
+    union {
+        u64 raw;
+        BitField<52, 1, u64> cc;
+        BitField<53, 2, FmzMode> fmz;
+        BitField<55, 1, u64> sat;
+    } const fmul32i{insn};
+    FMUL(*this, insn, GetFloatImm32(insn), fmul32i.fmz, FpRounding::RN, Scale::None,
+         fmul32i.sat != 0, fmul32i.cc != 0, false);
 }

 } // namespace Shader::Maxwell

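As an aside, the BitField<position, size, T> members used in these unions decode fixed-width fields out of the 64-bit instruction word. A rough stand-alone equivalent with shifts and masks, illustrative only (not the project's Common::BitField; field positions taken from the FMUL32I union above):

#include <cstdint>

// Extract `size` bits starting at bit `pos` of a 64-bit instruction word.
constexpr uint64_t ExtractBits(uint64_t insn, unsigned pos, unsigned size) {
    return (insn >> pos) & ((uint64_t{1} << size) - 1);
}

struct Fmul32iFields {
    bool cc;       // bit 52
    uint64_t fmz;  // bits 53-54, FMZ mode selector
    bool sat;      // bit 55
};

constexpr Fmul32iFields DecodeFmul32i(uint64_t insn) {
    return Fmul32iFields{
        ExtractBits(insn, 52, 1) != 0,
        ExtractBits(insn, 53, 2),
        ExtractBits(insn, 55, 1) != 0,
    };
}
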
@@ -0,0 +1,41 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+#include "shader_recompiler/frontend/maxwell/translate/impl/impl.h"
+
+namespace Shader::Maxwell {
+namespace {
+enum class Mode : u64 {
+    SINCOS,
+    EX2,
+};
+
+void RRO(TranslatorVisitor& v, u64 insn, const IR::F32& src) {
+    union {
+        u64 raw;
+        BitField<0, 8, IR::Reg> dest_reg;
+        BitField<39, 1, Mode> mode;
+        BitField<45, 1, u64> neg;
+        BitField<49, 1, u64> abs;
+    } const rro{insn};
+
+    v.F(rro.dest_reg, v.ir.FPAbsNeg(src, rro.abs != 0, rro.neg != 0));
+}
+} // Anonymous namespace
+
+void TranslatorVisitor::RRO_reg(u64 insn) {
+    RRO(*this, insn, GetFloatReg20(insn));
+}
+
+void TranslatorVisitor::RRO_cbuf(u64 insn) {
+    RRO(*this, insn, GetFloatCbuf(insn));
+}
+
+void TranslatorVisitor::RRO_imm(u64) {
+    throw NotImplementedException("RRO (imm)");
+}
+
+} // namespace Shader::Maxwell

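Context for the new file, as far as the diff shows: RRO (the range reduction operation) conventionally pre-conditions an operand for MUFU, but since MUFU above is now emitted directly as SPIR-V sin/cos/exp2, this translation reduces RRO to applying the abs/neg modifiers to the source. A trivial stand-alone sketch of that behaviour on plain floats, with a hypothetical name rather than the IR helper itself:

#include <cmath>

// Mirrors v.ir.FPAbsNeg(src, abs, neg): optional absolute value, then optional negation.
float AbsNeg(float value, bool abs, bool neg) {
    if (abs) {
        value = std::fabs(value);
    }
    if (neg) {
        value = -value;
    }
    return value;
}
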
@@ -48,11 +48,11 @@ IR::U32 TranslatorVisitor::GetReg39(u64 insn) {
     return X(reg.index);
 }

-IR::F32 TranslatorVisitor::GetRegFloat20(u64 insn) {
+IR::F32 TranslatorVisitor::GetFloatReg20(u64 insn) {
     return ir.BitCast<IR::F32>(GetReg20(insn));
 }

-IR::F32 TranslatorVisitor::GetRegFloat39(u64 insn) {
+IR::F32 TranslatorVisitor::GetFloatReg39(u64 insn) {
     return ir.BitCast<IR::F32>(GetReg39(insn));
 }

@@ -110,6 +110,14 @@ IR::U32 TranslatorVisitor::GetImm32(u64 insn) {
     return ir.Imm32(static_cast<u32>(imm.value));
 }

+IR::F32 TranslatorVisitor::GetFloatImm32(u64 insn) {
+    union {
+        u64 raw;
+        BitField<20, 32, u64> value;
+    } const imm{insn};
+    return ir.Imm32(Common::BitCast<f32>(static_cast<u32>(imm.value)));
+}
+
 void TranslatorVisitor::SetZFlag(const IR::U1& value) {
     ir.SetZFlag(value);
 }

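For illustration, GetFloatImm32 reinterprets the raw bits of the 32-bit immediate at bits 20..51 of the instruction word as an IEEE-754 float rather than converting it numerically. A stand-alone sketch using std::bit_cast (C++20) in place of the project's Common::BitCast, with a hypothetical function name:

#include <bit>
#include <cstdint>

// Take the 32-bit field starting at bit 20 and reinterpret its bits as a float.
float DecodeFloatImm32(uint64_t insn) {
    const uint32_t bits = static_cast<uint32_t>(insn >> 20); // keeps bits 20..51
    return std::bit_cast<float>(bits);
}
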
@@ -304,8 +304,8 @@ public:
     [[nodiscard]] IR::U32 GetReg8(u64 insn);
     [[nodiscard]] IR::U32 GetReg20(u64 insn);
     [[nodiscard]] IR::U32 GetReg39(u64 insn);
-    [[nodiscard]] IR::F32 GetRegFloat20(u64 insn);
-    [[nodiscard]] IR::F32 GetRegFloat39(u64 insn);
+    [[nodiscard]] IR::F32 GetFloatReg20(u64 insn);
+    [[nodiscard]] IR::F32 GetFloatReg39(u64 insn);

     [[nodiscard]] IR::U32 GetCbuf(u64 insn);
     [[nodiscard]] IR::F32 GetFloatCbuf(u64 insn);

@@ -314,6 +314,7 @@ public:
     [[nodiscard]] IR::F32 GetFloatImm20(u64 insn);

     [[nodiscard]] IR::U32 GetImm32(u64 insn);
+    [[nodiscard]] IR::F32 GetFloatImm32(u64 insn);

     void SetZFlag(const IR::U1& value);
     void SetSFlag(const IR::U1& value);

@@ -50,7 +50,7 @@ void SHL(TranslatorVisitor& v, u64 insn, const IR::U32& unsafe_shift) {
         //
         const IR::U1 is_safe{v.ir.ILessThan(unsafe_shift, v.ir.Imm32(32), false)};
         const IR::U32 unsafe_result{v.ir.ShiftLeftLogical(base, unsafe_shift)};
-        result = v.ir.Select(is_safe, unsafe_result, v.ir.Imm32(0));
+        result = IR::U32{v.ir.Select(is_safe, unsafe_result, v.ir.Imm32(0))};
     }
     v.X(shl.dest_reg, result);
 }

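The guard adjusted here exists because, as the translation encodes it, a shift amount of 32 or more must produce zero, whereas a plain 32-bit shift by that amount is undefined in C++ and in many IRs. A stand-alone sketch of the same pattern on plain integers, with a hypothetical helper name:

#include <cstdint>

// Shift left, forcing the result to zero once the shift amount reaches 32,
// mirroring the is_safe/Select sequence in the SHL translation above.
uint32_t SafeShiftLeft(uint32_t base, uint32_t shift) {
    if (shift >= 32) {
        return 0; // base << shift would be undefined behaviour in C++ here
    }
    return base << shift;
}
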
@@ -721,18 +721,6 @@ void TranslatorVisitor::RET(u64) {
     ThrowNotImplemented(Opcode::RET);
 }

-void TranslatorVisitor::RRO_reg(u64) {
-    ThrowNotImplemented(Opcode::RRO_reg);
-}
-
-void TranslatorVisitor::RRO_cbuf(u64) {
-    ThrowNotImplemented(Opcode::RRO_cbuf);
-}
-
-void TranslatorVisitor::RRO_imm(u64) {
-    ThrowNotImplemented(Opcode::RRO_imm);
-}
-
 void TranslatorVisitor::RTT(u64) {
     ThrowNotImplemented(Opcode::RTT);
 }

@@ -330,7 +330,7 @@ void ConstantPropagation(IR::Block& block, IR::Inst& inst) {
         return FoldBitCast<u32, f32>(inst, IR::Opcode::BitCastF32U32);
     case IR::Opcode::IAdd64:
         return FoldAdd<u64>(block, inst);
-    case IR::Opcode::Select32:
+    case IR::Opcode::SelectU32:
         return FoldSelect<u32>(inst);
     case IR::Opcode::LogicalAnd:
         return FoldLogicalAnd(inst);