diff --git a/README.md b/README.md
index db898e56..5cf307e3 100644
--- a/README.md
+++ b/README.md
@@ -159,6 +159,20 @@ Open a PR and we'll check it :)
+
+# Special Thanks
+
+A few noteworthy teams/projects who've helped us along the way are:
+
+- [**Panda3DS**](https://github.com/wheremyfoodat/Panda3DS): A multiplatform 3DS emulator from our co-author wheremyfoodat. They have been incredibly helpful in understanding and solving problems that arose from natively executing the x64 code of PS4 binaries.
+
+- [**fpPS4**](https://github.com/red-prig/fpPS4): The fpPS4 team has assisted massively with understanding some of the more complex parts of the PS4 operating system and libraries through their reverse engineering work and research.
+
+- **yuzu**: Our shader compiler has been designed with yuzu's Hades compiler as a blueprint. This allowed us to focus on the challenges of emulating a modern AMD GPU while having a high-quality optimizing shader compiler implementation as a base.
+
+- [**hydra**](https://github.com/hydra-emu/hydra): A multisystem, multiplatform emulator (CHIP-8, GB, NES, N64) from Paris.
+
+
# Sister Projects
- [**Panda3DS**](https://github.com/wheremyfoodat/Panda3DS): A multiplatform 3DS emulator from our co-author wheremyfoodat.
diff --git a/src/emulator.cpp b/src/emulator.cpp
index 9d1bb00d..a469a31c 100644
--- a/src/emulator.cpp
+++ b/src/emulator.cpp
@@ -195,7 +195,7 @@ void Emulator::Run(const std::filesystem::path& file) {
}
void Emulator::LoadSystemModules(const std::filesystem::path& file) {
- constexpr std::array ModulesToLoad{
+ constexpr std::array ModulesToLoad{
{{"libSceNgs2.sprx", &Libraries::Ngs2::RegisterlibSceNgs2},
{"libSceFiber.sprx", nullptr},
{"libSceUlt.sprx", nullptr},
@@ -204,7 +204,8 @@ void Emulator::LoadSystemModules(const std::filesystem::path& file) {
{"libSceLibcInternal.sprx", &Libraries::LibcInternal::RegisterlibSceLibcInternal},
{"libSceDiscMap.sprx", &Libraries::DiscMap::RegisterlibSceDiscMap},
{"libSceRtc.sprx", &Libraries::Rtc::RegisterlibSceRtc},
- {"libSceJpegEnc.sprx", nullptr}},
+ {"libSceJpegEnc.sprx", nullptr},
+ {"libSceFont.sprx", nullptr}},
};
std::vector found_modules;
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
index 1d553dc5..a58b2778 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
@@ -152,4 +152,20 @@ Id EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id co
return ImageAtomicU32(ctx, inst, handle, coords, value, &Sirit::Module::OpAtomicExchange);
}
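+// GDS append/consume counters are emulated with an atomic increment/decrement on the
+// GDS-backed storage buffer; both return the previous counter value.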
+Id EmitDataAppend(EmitContext& ctx, u32 gds_addr, u32 binding) {
+ auto& buffer = ctx.buffers[binding];
+ const Id ptr = ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value,
+ ctx.ConstU32(gds_addr));
+ const auto [scope, semantics]{AtomicArgs(ctx)};
+ return ctx.OpAtomicIIncrement(ctx.U32[1], ptr, scope, semantics);
+}
+
+Id EmitDataConsume(EmitContext& ctx, u32 gds_addr, u32 binding) {
+ auto& buffer = ctx.buffers[binding];
+ const Id ptr = ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value,
+ ctx.ConstU32(gds_addr));
+ const auto [scope, semantics]{AtomicArgs(ctx)};
+ return ctx.OpAtomicIDecrement(ctx.U32[1], ptr, scope, semantics);
+}
+
} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index 39a214fa..64ce532b 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -133,10 +133,6 @@ Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index) {
return ctx.OpLoad(buffer.data_types->Get(1), ptr);
}
-Id EmitReadConstBufferU32(EmitContext& ctx, u32 handle, Id index) {
- return ctx.OpBitcast(ctx.U32[1], EmitReadConstBuffer(ctx, handle, index));
-}
-
Id EmitReadStepRate(EmitContext& ctx, int rate_idx) {
return ctx.OpLoad(
ctx.U32[1], ctx.OpAccessChain(ctx.TypePointer(spv::StorageClass::PushConstant, ctx.U32[1]),
@@ -222,12 +218,8 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, u32 elemen
ctx.OpStore(pointer, ctx.OpBitcast(ctx.F32[1], value));
}
-Id EmitLoadBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
- return EmitLoadBufferF32(ctx, inst, handle, address);
-}
-
template <u32 N>
-static Id EmitLoadBufferF32xN(EmitContext& ctx, u32 handle, Id address) {
+static Id EmitLoadBufferU32xN(EmitContext& ctx, u32 handle, Id address) {
auto& buffer = ctx.buffers[handle];
address = ctx.OpIAdd(ctx.U32[1], address, buffer.offset);
const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(2u));
@@ -246,20 +238,20 @@ static Id EmitLoadBufferF32xN(EmitContext& ctx, u32 handle, Id address) {
}
}
-Id EmitLoadBufferF32(EmitContext& ctx, IR::Inst*, u32 handle, Id address) {
- return EmitLoadBufferF32xN<1>(ctx, handle, address);
+Id EmitLoadBufferU32(EmitContext& ctx, IR::Inst*, u32 handle, Id address) {
+ return EmitLoadBufferU32xN<1>(ctx, handle, address);
}
-Id EmitLoadBufferF32x2(EmitContext& ctx, IR::Inst*, u32 handle, Id address) {
- return EmitLoadBufferF32xN<2>(ctx, handle, address);
+Id EmitLoadBufferU32x2(EmitContext& ctx, IR::Inst*, u32 handle, Id address) {
+ return EmitLoadBufferU32xN<2>(ctx, handle, address);
}
-Id EmitLoadBufferF32x3(EmitContext& ctx, IR::Inst*, u32 handle, Id address) {
- return EmitLoadBufferF32xN<3>(ctx, handle, address);
+Id EmitLoadBufferU32x3(EmitContext& ctx, IR::Inst*, u32 handle, Id address) {
+ return EmitLoadBufferU32xN<3>(ctx, handle, address);
}
-Id EmitLoadBufferF32x4(EmitContext& ctx, IR::Inst*, u32 handle, Id address) {
- return EmitLoadBufferF32xN<4>(ctx, handle, address);
+Id EmitLoadBufferU32x4(EmitContext& ctx, IR::Inst*, u32 handle, Id address) {
+ return EmitLoadBufferU32xN<4>(ctx, handle, address);
}
Id EmitLoadBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address) {
@@ -275,7 +267,7 @@ Id EmitLoadBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id addr
}
template <u32 N>
-static void EmitStoreBufferF32xN(EmitContext& ctx, u32 handle, Id address, Id value) {
+static void EmitStoreBufferU32xN(EmitContext& ctx, u32 handle, Id address, Id value) {
auto& buffer = ctx.buffers[handle];
address = ctx.OpIAdd(ctx.U32[1], address, buffer.offset);
const Id index = ctx.OpShiftRightLogical(ctx.U32[1], address, ctx.ConstU32(2u));
@@ -287,29 +279,25 @@ static void EmitStoreBufferF32xN(EmitContext& ctx, u32 handle, Id address, Id va
const Id index_i = ctx.OpIAdd(ctx.U32[1], index, ctx.ConstU32(i));
const Id ptr =
ctx.OpAccessChain(buffer.pointer_type, buffer.id, ctx.u32_zero_value, index_i);
- ctx.OpStore(ptr, ctx.OpCompositeExtract(ctx.F32[1], value, i));
+ ctx.OpStore(ptr, ctx.OpCompositeExtract(buffer.data_types->Get(1), value, i));
}
}
}
-void EmitStoreBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
- EmitStoreBufferF32xN<1>(ctx, handle, address, value);
-}
-
-void EmitStoreBufferF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
- EmitStoreBufferF32xN<2>(ctx, handle, address, value);
-}
-
-void EmitStoreBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
- EmitStoreBufferF32xN<3>(ctx, handle, address, value);
-}
-
-void EmitStoreBufferF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
- EmitStoreBufferF32xN<4>(ctx, handle, address, value);
-}
-
void EmitStoreBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
- EmitStoreBufferF32xN<1>(ctx, handle, address, value);
+ EmitStoreBufferU32xN<1>(ctx, handle, address, value);
+}
+
+void EmitStoreBufferU32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
+ EmitStoreBufferU32xN<2>(ctx, handle, address, value);
+}
+
+void EmitStoreBufferU32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
+ EmitStoreBufferU32xN<3>(ctx, handle, address, value);
+}
+
+void EmitStoreBufferU32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
+ EmitStoreBufferU32xN<4>(ctx, handle, address, value);
}
void EmitStoreBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value) {
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
index 0cd59175..e506ced3 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
@@ -64,25 +64,16 @@ void EmitGetGotoVariable(EmitContext& ctx);
void EmitSetScc(EmitContext& ctx);
Id EmitReadConst(EmitContext& ctx);
Id EmitReadConstBuffer(EmitContext& ctx, u32 handle, Id index);
-Id EmitReadConstBufferU32(EmitContext& ctx, u32 handle, Id index);
-Id EmitLoadBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
-Id EmitLoadBufferF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
-Id EmitLoadBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
-Id EmitLoadBufferF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
-Id EmitLoadBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
-Id EmitLoadBufferFormatF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
-Id EmitLoadBufferFormatF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
-Id EmitLoadBufferFormatF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
Id EmitLoadBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
-void EmitStoreBufferF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
-void EmitStoreBufferF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
-void EmitStoreBufferF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
-void EmitStoreBufferF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
-void EmitStoreBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
-void EmitStoreBufferFormatF32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
-void EmitStoreBufferFormatF32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
-void EmitStoreBufferFormatF32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+Id EmitLoadBufferU32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
+Id EmitLoadBufferU32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
+Id EmitLoadBufferU32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
+Id EmitLoadBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address);
void EmitStoreBufferU32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+void EmitStoreBufferU32x2(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+void EmitStoreBufferU32x3(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+void EmitStoreBufferU32x4(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
+void EmitStoreBufferFormatF32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
Id EmitBufferAtomicIAdd32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
Id EmitBufferAtomicSMin32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
Id EmitBufferAtomicUMin32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id address, Id value);
@@ -406,12 +397,13 @@ Id EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id coords,
Id EmitImageAtomicOr32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id coords, Id value);
Id EmitImageAtomicXor32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id coords, Id value);
Id EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst* inst, u32 handle, Id coords, Id value);
-
Id EmitLaneId(EmitContext& ctx);
Id EmitWarpId(EmitContext& ctx);
Id EmitQuadShuffle(EmitContext& ctx, Id value, Id index);
Id EmitReadFirstLane(EmitContext& ctx, Id value);
Id EmitReadLane(EmitContext& ctx, Id value, u32 lane);
Id EmitWriteLane(EmitContext& ctx, Id value, Id write_value, u32 lane);
+Id EmitDataAppend(EmitContext& ctx, u32 gds_addr, u32 binding);
+Id EmitDataConsume(EmitContext& ctx, u32 gds_addr, u32 binding);
} // namespace Shader::Backend::SPIRV
diff --git a/src/shader_recompiler/frontend/translate/data_share.cpp b/src/shader_recompiler/frontend/translate/data_share.cpp
index c0f0fa27..d01c1977 100644
--- a/src/shader_recompiler/frontend/translate/data_share.cpp
+++ b/src/shader_recompiler/frontend/translate/data_share.cpp
@@ -43,6 +43,10 @@ void Translator::EmitDataShare(const GcnInst& inst) {
return DS_MIN_U32(inst, false, true);
case Opcode::DS_MAX_RTN_U32:
return DS_MAX_U32(inst, false, true);
+ case Opcode::DS_APPEND:
+ return DS_APPEND(inst);
+ case Opcode::DS_CONSUME:
+ return DS_CONSUME(inst);
default:
LogMissingOpcode(inst);
}
@@ -192,4 +196,18 @@ void Translator::V_WRITELANE_B32(const GcnInst& inst) {
ir.SetVectorReg(dst, ir.WriteLane(old_value, value, lane));
}
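+// DS_APPEND/DS_CONSUME atomically bump the GDS counter addressed by M0 plus the
+// instruction offset and return the pre-operation value in the destination register.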
+void Translator::DS_APPEND(const GcnInst& inst) {
+ const u32 inst_offset = inst.control.ds.offset0;
+ const IR::U32 gds_offset = ir.IAdd(ir.GetM0(), ir.Imm32(inst_offset));
+ const IR::U32 prev = ir.DataAppend(gds_offset);
+ SetDst(inst.dst[0], prev);
+}
+
+void Translator::DS_CONSUME(const GcnInst& inst) {
+ const u32 inst_offset = inst.control.ds.offset0;
+ const IR::U32 gds_offset = ir.IAdd(ir.GetM0(), ir.Imm32(inst_offset));
+ const IR::U32 prev = ir.DataConsume(gds_offset);
+ SetDst(inst.dst[0], prev);
+}
+
} // namespace Shader::Gcn
diff --git a/src/shader_recompiler/frontend/translate/export.cpp b/src/shader_recompiler/frontend/translate/export.cpp
index d4db09a6..18e830f7 100644
--- a/src/shader_recompiler/frontend/translate/export.cpp
+++ b/src/shader_recompiler/frontend/translate/export.cpp
@@ -31,6 +31,12 @@ void Translator::EmitExport(const GcnInst& inst) {
case MrtSwizzle::Alt:
static constexpr std::array AltSwizzle = {2, 1, 0, 3};
return AltSwizzle[comp];
+ case MrtSwizzle::Reverse:
+ static constexpr std::array RevSwizzle = {3, 2, 1, 0};
+ return RevSwizzle[comp];
+ case MrtSwizzle::ReverseAlt:
+ static constexpr std::array AltRevSwizzle = {3, 0, 1, 2};
+ return AltRevSwizzle[comp];
default:
UNREACHABLE();
}
diff --git a/src/shader_recompiler/frontend/translate/scalar_alu.cpp b/src/shader_recompiler/frontend/translate/scalar_alu.cpp
index af258cd1..adc127f1 100644
--- a/src/shader_recompiler/frontend/translate/scalar_alu.cpp
+++ b/src/shader_recompiler/frontend/translate/scalar_alu.cpp
@@ -73,9 +73,13 @@ void Translator::EmitScalarAlu(const GcnInst& inst) {
case Opcode::S_SUB_I32:
return S_SUB_U32(inst);
case Opcode::S_MIN_U32:
- return S_MIN_U32(inst);
+ return S_MIN_U32(false, inst);
+ case Opcode::S_MIN_I32:
+ return S_MIN_U32(true, inst);
case Opcode::S_MAX_U32:
- return S_MAX_U32(inst);
+ return S_MAX_U32(false, inst);
+ case Opcode::S_MAX_I32:
+ return S_MAX_U32(true, inst);
case Opcode::S_WQM_B64:
break;
default:
@@ -533,18 +537,18 @@ void Translator::S_ADDC_U32(const GcnInst& inst) {
SetDst(inst.dst[0], ir.IAdd(ir.IAdd(src0, src1), carry));
}
-void Translator::S_MAX_U32(const GcnInst& inst) {
+void Translator::S_MAX_U32(bool is_signed, const GcnInst& inst) {
const IR::U32 src0{GetSrc(inst.src[0])};
const IR::U32 src1{GetSrc(inst.src[1])};
- const IR::U32 result = ir.UMax(src0, src1);
+ const IR::U32 result = ir.IMax(src0, src1, is_signed);
SetDst(inst.dst[0], result);
ir.SetScc(ir.IEqual(result, src0));
}
-void Translator::S_MIN_U32(const GcnInst& inst) {
+void Translator::S_MIN_U32(bool is_signed, const GcnInst& inst) {
const IR::U32 src0{GetSrc(inst.src[0])};
const IR::U32 src1{GetSrc(inst.src[1])};
- const IR::U32 result = ir.UMin(src0, src1);
+ const IR::U32 result = ir.IMin(src0, src1, is_signed);
SetDst(inst.dst[0], result);
ir.SetScc(ir.IEqual(result, src0));
}
diff --git a/src/shader_recompiler/frontend/translate/translate.h b/src/shader_recompiler/frontend/translate/translate.h
index d6887818..e4be298e 100644
--- a/src/shader_recompiler/frontend/translate/translate.h
+++ b/src/shader_recompiler/frontend/translate/translate.h
@@ -101,8 +101,8 @@ public:
void S_ADDC_U32(const GcnInst& inst);
void S_MULK_I32(const GcnInst& inst);
void S_ADDK_I32(const GcnInst& inst);
- void S_MAX_U32(const GcnInst& inst);
- void S_MIN_U32(const GcnInst& inst);
+ void S_MAX_U32(bool is_signed, const GcnInst& inst);
+ void S_MIN_U32(bool is_signed, const GcnInst& inst);
void S_CMPK(ConditionOp cond, bool is_signed, const GcnInst& inst);
// Scalar Memory
@@ -173,7 +173,7 @@ public:
void V_BCNT_U32_B32(const GcnInst& inst);
void V_COS_F32(const GcnInst& inst);
void V_MAX3_F32(const GcnInst& inst);
- void V_MAX3_U32(const GcnInst& inst);
+ void V_MAX3_U32(bool is_signed, const GcnInst& inst);
void V_CVT_I32_F32(const GcnInst& inst);
void V_MIN_I32(const GcnInst& inst);
void V_MUL_LO_U32(const GcnInst& inst);
@@ -217,6 +217,8 @@ public:
void V_READFIRSTLANE_B32(const GcnInst& inst);
void V_READLANE_B32(const GcnInst& inst);
void V_WRITELANE_B32(const GcnInst& inst);
+ void DS_APPEND(const GcnInst& inst);
+ void DS_CONSUME(const GcnInst& inst);
void S_BARRIER();
// MIMG
diff --git a/src/shader_recompiler/frontend/translate/vector_alu.cpp b/src/shader_recompiler/frontend/translate/vector_alu.cpp
index 2024b706..b4470ee3 100644
--- a/src/shader_recompiler/frontend/translate/vector_alu.cpp
+++ b/src/shader_recompiler/frontend/translate/vector_alu.cpp
@@ -227,7 +227,9 @@ void Translator::EmitVectorAlu(const GcnInst& inst) {
case Opcode::V_MAX3_F32:
return V_MAX3_F32(inst);
case Opcode::V_MAX3_U32:
- return V_MAX3_U32(inst);
+ return V_MAX3_U32(false, inst);
+ case Opcode::V_MAX3_I32:
+ return V_MAX3_U32(true, inst);
case Opcode::V_TRUNC_F32:
return V_TRUNC_F32(inst);
case Opcode::V_CEIL_F32:
@@ -831,11 +833,11 @@ void Translator::V_MAX3_F32(const GcnInst& inst) {
SetDst(inst.dst[0], ir.FPMax(src0, ir.FPMax(src1, src2)));
}
-void Translator::V_MAX3_U32(const GcnInst& inst) {
+void Translator::V_MAX3_U32(bool is_signed, const GcnInst& inst) {
const IR::U32 src0{GetSrc(inst.src[0])};
const IR::U32 src1{GetSrc(inst.src[1])};
const IR::U32 src2{GetSrc(inst.src[2])};
- SetDst(inst.dst[0], ir.UMax(src0, ir.UMax(src1, src2)));
+ SetDst(inst.dst[0], ir.IMax(src0, ir.IMax(src1, src2, is_signed), is_signed));
}
void Translator::V_CVT_I32_F32(const GcnInst& inst) {
@@ -967,14 +969,29 @@ void Translator::V_FFBL_B32(const GcnInst& inst) {
}
void Translator::V_MBCNT_U32_B32(bool is_low, const GcnInst& inst) {
- const IR::U32 src0{GetSrc(inst.src[0])};
- const IR::U32 src1{GetSrc(inst.src[1])};
if (!is_low) {
- ASSERT(src0.IsImmediate() && src0.U32() == ~0U && src1.IsImmediate() && src1.U32() == 0U);
- return;
+ // v_mbcnt_hi_u32_b32 v2, -1, 0
+ if (inst.src[0].field == OperandField::SignedConstIntNeg && inst.src[0].code == 193 &&
+ inst.src[1].field == OperandField::ConstZero) {
+ return;
+ }
+ // v_mbcnt_hi_u32_b32 vX, exec_hi, 0
+ if (inst.src[0].field == OperandField::ExecHi &&
+ inst.src[1].field == OperandField::ConstZero) {
+ return;
+ }
+ } else {
+ // v_mbcnt_lo_u32_b32 v2, -1, vX
+ // Used in combination with the pattern above to fetch the lane id in non-compute stages.
+ if (inst.src[0].field == OperandField::SignedConstIntNeg && inst.src[0].code == 193) {
+ SetDst(inst.dst[0], ir.LaneId());
+ }
+ // v_mbcnt_lo_u32_b32 v20, exec_lo, vX
+ // Used in combination with the pattern above for append-buffer indexing.
+ if (inst.src[0].field == OperandField::ExecLo) {
+ SetDst(inst.dst[0], ir.Imm32(0));
+ }
}
- ASSERT(src0.IsImmediate() && src0.U32() == ~0U);
- SetDst(inst.dst[0], ir.LaneId());
}
void Translator::V_BFM_B32(const GcnInst& inst) {
diff --git a/src/shader_recompiler/frontend/translate/vector_memory.cpp b/src/shader_recompiler/frontend/translate/vector_memory.cpp
index 04b9b50d..5af28336 100644
--- a/src/shader_recompiler/frontend/translate/vector_memory.cpp
+++ b/src/shader_recompiler/frontend/translate/vector_memory.cpp
@@ -147,10 +147,6 @@ void Translator::IMAGE_GET_RESINFO(const GcnInst& inst) {
void Translator::IMAGE_SAMPLE(const GcnInst& inst) {
const auto& mimg = inst.control.mimg;
- if (mimg.da) {
- LOG_WARNING(Render_Vulkan, "Image instruction declares an array");
- }
-
IR::VectorReg addr_reg{inst.src[0].code};
IR::VectorReg dest_reg{inst.dst[0].code};
const IR::ScalarReg tsharp_reg{inst.src[2].code * 4};
@@ -388,11 +384,11 @@ void Translator::BUFFER_LOAD(u32 num_dwords, bool is_typed, const GcnInst& inst)
const IR::Value value = ir.LoadBuffer(num_dwords, handle, address, info);
const IR::VectorReg dst_reg{inst.src[1].code};
if (num_dwords == 1) {
- ir.SetVectorReg(dst_reg, IR::F32{value});
+ ir.SetVectorReg(dst_reg, IR::U32{value});
return;
}
for (u32 i = 0; i < num_dwords; i++) {
- ir.SetVectorReg(dst_reg + i, IR::F32{ir.CompositeExtract(value, i)});
+ ir.SetVectorReg(dst_reg + i, IR::U32{ir.CompositeExtract(value, i)});
}
}
@@ -456,21 +452,18 @@ void Translator::BUFFER_STORE(u32 num_dwords, bool is_typed, const GcnInst& inst
const IR::VectorReg src_reg{inst.src[1].code};
switch (num_dwords) {
case 1:
- value = ir.GetVectorReg(src_reg);
+ value = ir.GetVectorReg(src_reg);
break;
case 2:
- value = ir.CompositeConstruct(ir.GetVectorReg(src_reg),
- ir.GetVectorReg(src_reg + 1));
+ value = ir.CompositeConstruct(ir.GetVectorReg(src_reg), ir.GetVectorReg(src_reg + 1));
break;
case 3:
- value = ir.CompositeConstruct(ir.GetVectorReg(src_reg),
- ir.GetVectorReg(src_reg + 1),
- ir.GetVectorReg(src_reg + 2));
+ value = ir.CompositeConstruct(ir.GetVectorReg(src_reg), ir.GetVectorReg(src_reg + 1),
+ ir.GetVectorReg(src_reg + 2));
break;
case 4:
- value = ir.CompositeConstruct(
- ir.GetVectorReg(src_reg), ir.GetVectorReg(src_reg + 1),
- ir.GetVectorReg(src_reg + 2), ir.GetVectorReg(src_reg + 3));
+ value = ir.CompositeConstruct(ir.GetVectorReg(src_reg), ir.GetVectorReg(src_reg + 1),
+ ir.GetVectorReg(src_reg + 2), ir.GetVectorReg(src_reg + 3));
break;
}
const IR::Value handle =
@@ -518,6 +511,15 @@ void Translator::BUFFER_ATOMIC(AtomicOp op, const GcnInst& inst) {
const IR::VectorReg vaddr{inst.src[0].code};
const IR::VectorReg vdata{inst.src[1].code};
const IR::ScalarReg srsrc{inst.src[2].code * 4};
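+ // Build the address operand from the index/offset VGPRs according to idxen/offen.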
+ const IR::Value address = [&] -> IR::Value {
+ if (mubuf.idxen && mubuf.offen) {
+ return ir.CompositeConstruct(ir.GetVectorReg(vaddr), ir.GetVectorReg(vaddr + 1));
+ }
+ if (mubuf.idxen || mubuf.offen) {
+ return ir.GetVectorReg(vaddr);
+ }
+ return {};
+ }();
const IR::U32 soffset{GetSrc(inst.src[3])};
ASSERT_MSG(soffset.IsImmediate() && soffset.U32() == 0, "Non immediate offset not supported");
@@ -527,7 +529,6 @@ void Translator::BUFFER_ATOMIC(AtomicOp op, const GcnInst& inst) {
info.offset_enable.Assign(mubuf.offen);
IR::Value vdata_val = ir.GetVectorReg(vdata);
- const IR::U32 address = ir.GetVectorReg(vaddr);
const IR::Value handle =
ir.CompositeConstruct(ir.GetScalarReg(srsrc), ir.GetScalarReg(srsrc + 1),
ir.GetScalarReg(srsrc + 2), ir.GetScalarReg(srsrc + 3));
diff --git a/src/shader_recompiler/info.h b/src/shader_recompiler/info.h
index cdc17304..0184a7f6 100644
--- a/src/shader_recompiler/info.h
+++ b/src/shader_recompiler/info.h
@@ -37,12 +37,13 @@ struct BufferResource {
u32 dword_offset;
IR::Type used_types;
AmdGpu::Buffer inline_cbuf;
+ bool is_gds_buffer{};
bool is_instance_data{};
bool is_written{};
bool IsStorage(AmdGpu::Buffer buffer) const noexcept {
static constexpr size_t MaxUboSize = 65536;
- return buffer.GetSize() > MaxUboSize || is_written;
+ return buffer.GetSize() > MaxUboSize || is_written || is_gds_buffer;
}
constexpr AmdGpu::Buffer GetSharp(const Info& info) const noexcept;
diff --git a/src/shader_recompiler/ir/basic_block.h b/src/shader_recompiler/ir/basic_block.h
index 1eb11469..11ae969b 100644
--- a/src/shader_recompiler/ir/basic_block.h
+++ b/src/shader_recompiler/ir/basic_block.h
@@ -147,6 +147,7 @@ public:
/// Intrusively store the value of a register in the block.
std::array ssa_sreg_values;
+ std::array ssa_sbit_values;
std::array ssa_vreg_values;
bool has_multiple_predecessors{false};
diff --git a/src/shader_recompiler/ir/ir_emitter.cpp b/src/shader_recompiler/ir/ir_emitter.cpp
index 2be0c1ac..7e52cfb5 100644
--- a/src/shader_recompiler/ir/ir_emitter.cpp
+++ b/src/shader_recompiler/ir/ir_emitter.cpp
@@ -313,21 +313,21 @@ U32 IREmitter::ReadConst(const Value& base, const U32& offset) {
return Inst(Opcode::ReadConst, base, offset);
}
-F32 IREmitter::ReadConstBuffer(const Value& handle, const U32& index) {
- return Inst<F32>(Opcode::ReadConstBuffer, handle, index);
+U32 IREmitter::ReadConstBuffer(const Value& handle, const U32& index) {
+ return Inst<U32>(Opcode::ReadConstBuffer, handle, index);
}
Value IREmitter::LoadBuffer(int num_dwords, const Value& handle, const Value& address,
BufferInstInfo info) {
switch (num_dwords) {
case 1:
- return Inst(Opcode::LoadBufferF32, Flags{info}, handle, address);
+ return Inst(Opcode::LoadBufferU32, Flags{info}, handle, address);
case 2:
- return Inst(Opcode::LoadBufferF32x2, Flags{info}, handle, address);
+ return Inst(Opcode::LoadBufferU32x2, Flags{info}, handle, address);
case 3:
- return Inst(Opcode::LoadBufferF32x3, Flags{info}, handle, address);
+ return Inst(Opcode::LoadBufferU32x3, Flags{info}, handle, address);
case 4:
- return Inst(Opcode::LoadBufferF32x4, Flags{info}, handle, address);
+ return Inst(Opcode::LoadBufferU32x4, Flags{info}, handle, address);
default:
UNREACHABLE_MSG("Invalid number of dwords {}", num_dwords);
}
@@ -341,17 +341,16 @@ void IREmitter::StoreBuffer(int num_dwords, const Value& handle, const Value& ad
const Value& data, BufferInstInfo info) {
switch (num_dwords) {
case 1:
- Inst(data.Type() == Type::F32 ? Opcode::StoreBufferF32 : Opcode::StoreBufferU32,
- Flags{info}, handle, address, data);
+ Inst(Opcode::StoreBufferU32, Flags{info}, handle, address, data);
break;
case 2:
- Inst(Opcode::StoreBufferF32x2, Flags{info}, handle, address, data);
+ Inst(Opcode::StoreBufferU32x2, Flags{info}, handle, address, data);
break;
case 3:
- Inst(Opcode::StoreBufferF32x3, Flags{info}, handle, address, data);
+ Inst(Opcode::StoreBufferU32x3, Flags{info}, handle, address, data);
break;
case 4:
- Inst(Opcode::StoreBufferF32x4, Flags{info}, handle, address, data);
+ Inst(Opcode::StoreBufferU32x4, Flags{info}, handle, address, data);
break;
default:
UNREACHABLE_MSG("Invalid number of dwords {}", num_dwords);
@@ -410,6 +409,14 @@ void IREmitter::StoreBufferFormat(const Value& handle, const Value& address, con
Inst(Opcode::StoreBufferFormatF32, Flags{info}, handle, address, data);
}
+U32 IREmitter::DataAppend(const U32& counter) {
+ return Inst(Opcode::DataAppend, counter, Imm32(0));
+}
+
+U32 IREmitter::DataConsume(const U32& counter) {
+ return Inst(Opcode::DataConsume, counter, Imm32(0));
+}
+
U32 IREmitter::LaneId() {
return Inst(Opcode::LaneId);
}
diff --git a/src/shader_recompiler/ir/ir_emitter.h b/src/shader_recompiler/ir/ir_emitter.h
index 22d524fb..01e71893 100644
--- a/src/shader_recompiler/ir/ir_emitter.h
+++ b/src/shader_recompiler/ir/ir_emitter.h
@@ -90,7 +90,7 @@ public:
[[nodiscard]] U32 SharedAtomicIMax(const U32& address, const U32& data, bool is_signed);
[[nodiscard]] U32 ReadConst(const Value& base, const U32& offset);
- [[nodiscard]] F32 ReadConstBuffer(const Value& handle, const U32& index);
+ [[nodiscard]] U32 ReadConstBuffer(const Value& handle, const U32& index);
[[nodiscard]] Value LoadBuffer(int num_dwords, const Value& handle, const Value& address,
BufferInstInfo info);
@@ -120,6 +120,8 @@ public:
[[nodiscard]] Value BufferAtomicSwap(const Value& handle, const Value& address,
const Value& value, BufferInstInfo info);
+ [[nodiscard]] U32 DataAppend(const U32& counter);
+ [[nodiscard]] U32 DataConsume(const U32& counter);
[[nodiscard]] U32 LaneId();
[[nodiscard]] U32 WarpId();
[[nodiscard]] U32 QuadShuffle(const U32& value, const U32& index);
diff --git a/src/shader_recompiler/ir/microinstruction.cpp b/src/shader_recompiler/ir/microinstruction.cpp
index d6ef49cf..601c453d 100644
--- a/src/shader_recompiler/ir/microinstruction.cpp
+++ b/src/shader_recompiler/ir/microinstruction.cpp
@@ -51,12 +51,11 @@ bool Inst::MayHaveSideEffects() const noexcept {
case Opcode::Discard:
case Opcode::DiscardCond:
case Opcode::SetAttribute:
- case Opcode::StoreBufferF32:
- case Opcode::StoreBufferF32x2:
- case Opcode::StoreBufferF32x3:
- case Opcode::StoreBufferF32x4:
- case Opcode::StoreBufferFormatF32:
case Opcode::StoreBufferU32:
+ case Opcode::StoreBufferU32x2:
+ case Opcode::StoreBufferU32x3:
+ case Opcode::StoreBufferU32x4:
+ case Opcode::StoreBufferFormatF32:
case Opcode::BufferAtomicIAdd32:
case Opcode::BufferAtomicSMin32:
case Opcode::BufferAtomicUMin32:
@@ -68,6 +67,8 @@ bool Inst::MayHaveSideEffects() const noexcept {
case Opcode::BufferAtomicOr32:
case Opcode::BufferAtomicXor32:
case Opcode::BufferAtomicSwap32:
+ case Opcode::DataAppend:
+ case Opcode::DataConsume:
case Opcode::WriteSharedU128:
case Opcode::WriteSharedU64:
case Opcode::WriteSharedU32:
diff --git a/src/shader_recompiler/ir/opcodes.inc b/src/shader_recompiler/ir/opcodes.inc
index 4df8d13d..4b922d55 100644
--- a/src/shader_recompiler/ir/opcodes.inc
+++ b/src/shader_recompiler/ir/opcodes.inc
@@ -17,8 +17,7 @@ OPCODE(DiscardCond, Void, U1,
// Constant memory operations
OPCODE(ReadConst, U32, U32x2, U32, )
-OPCODE(ReadConstBuffer, F32, Opaque, U32, )
-OPCODE(ReadConstBufferU32, U32, Opaque, U32, )
+OPCODE(ReadConstBuffer, U32, Opaque, U32, )
// Barriers
OPCODE(Barrier, Void, )
@@ -77,21 +76,19 @@ OPCODE(UndefU32, U32,
OPCODE(UndefU64, U64, )
// Buffer operations
-OPCODE(LoadBufferF32, F32, Opaque, Opaque, )
-OPCODE(LoadBufferF32x2, F32x2, Opaque, Opaque, )
-OPCODE(LoadBufferF32x3, F32x3, Opaque, Opaque, )
-OPCODE(LoadBufferF32x4, F32x4, Opaque, Opaque, )
-OPCODE(LoadBufferFormatF32, F32x4, Opaque, Opaque, )
OPCODE(LoadBufferU32, U32, Opaque, Opaque, )
-OPCODE(StoreBufferF32, Void, Opaque, Opaque, F32, )
-OPCODE(StoreBufferF32x2, Void, Opaque, Opaque, F32x2, )
-OPCODE(StoreBufferF32x3, Void, Opaque, Opaque, F32x3, )
-OPCODE(StoreBufferF32x4, Void, Opaque, Opaque, F32x4, )
-OPCODE(StoreBufferFormatF32, Void, Opaque, Opaque, F32x4, )
+OPCODE(LoadBufferU32x2, U32x2, Opaque, Opaque, )
+OPCODE(LoadBufferU32x3, U32x3, Opaque, Opaque, )
+OPCODE(LoadBufferU32x4, U32x4, Opaque, Opaque, )
+OPCODE(LoadBufferFormatF32, F32x4, Opaque, Opaque, )
OPCODE(StoreBufferU32, Void, Opaque, Opaque, U32, )
+OPCODE(StoreBufferU32x2, Void, Opaque, Opaque, U32x2, )
+OPCODE(StoreBufferU32x3, Void, Opaque, Opaque, U32x3, )
+OPCODE(StoreBufferU32x4, Void, Opaque, Opaque, U32x4, )
+OPCODE(StoreBufferFormatF32, Void, Opaque, Opaque, U32x4, )
// Buffer atomic operations
-OPCODE(BufferAtomicIAdd32, U32, Opaque, Opaque, U32 )
+OPCODE(BufferAtomicIAdd32, U32, Opaque, Opaque, U32 )
OPCODE(BufferAtomicSMin32, U32, Opaque, Opaque, U32 )
OPCODE(BufferAtomicUMin32, U32, Opaque, Opaque, U32 )
OPCODE(BufferAtomicSMax32, U32, Opaque, Opaque, U32 )
@@ -101,7 +98,7 @@ OPCODE(BufferAtomicDec32, U32, Opaq
OPCODE(BufferAtomicAnd32, U32, Opaque, Opaque, U32, )
OPCODE(BufferAtomicOr32, U32, Opaque, Opaque, U32, )
OPCODE(BufferAtomicXor32, U32, Opaque, Opaque, U32, )
-OPCODE(BufferAtomicSwap32, U32, Opaque, Opaque, U32, )
+OPCODE(BufferAtomicSwap32, U32, Opaque, Opaque, U32, )
// Vector utility
OPCODE(CompositeConstructU32x2, U32x2, U32, U32, )
@@ -345,3 +342,5 @@ OPCODE(QuadShuffle, U32, U32,
OPCODE(ReadFirstLane, U32, U32, )
OPCODE(ReadLane, U32, U32, U32 )
OPCODE(WriteLane, U32, U32, U32, U32 )
+OPCODE(DataAppend, U32, U32, U32 )
+OPCODE(DataConsume, U32, U32, U32 )
diff --git a/src/shader_recompiler/ir/passes/resource_tracking_pass.cpp b/src/shader_recompiler/ir/passes/resource_tracking_pass.cpp
index 025bb98c..aa5d39ae 100644
--- a/src/shader_recompiler/ir/passes/resource_tracking_pass.cpp
+++ b/src/shader_recompiler/ir/passes/resource_tracking_pass.cpp
@@ -3,7 +3,6 @@
#include
#include
-#include "common/alignment.h"
#include "shader_recompiler/info.h"
#include "shader_recompiler/ir/basic_block.h"
#include "shader_recompiler/ir/breadth_first_search.h"
@@ -42,11 +41,10 @@ bool IsBufferAtomic(const IR::Inst& inst) {
bool IsBufferStore(const IR::Inst& inst) {
switch (inst.GetOpcode()) {
- case IR::Opcode::StoreBufferF32:
- case IR::Opcode::StoreBufferF32x2:
- case IR::Opcode::StoreBufferF32x3:
- case IR::Opcode::StoreBufferF32x4:
case IR::Opcode::StoreBufferU32:
+ case IR::Opcode::StoreBufferU32x2:
+ case IR::Opcode::StoreBufferU32x3:
+ case IR::Opcode::StoreBufferU32x4:
return true;
default:
return IsBufferAtomic(inst);
@@ -55,25 +53,28 @@ bool IsBufferStore(const IR::Inst& inst) {
bool IsBufferInstruction(const IR::Inst& inst) {
switch (inst.GetOpcode()) {
- case IR::Opcode::LoadBufferF32:
- case IR::Opcode::LoadBufferF32x2:
- case IR::Opcode::LoadBufferF32x3:
- case IR::Opcode::LoadBufferF32x4:
case IR::Opcode::LoadBufferU32:
+ case IR::Opcode::LoadBufferU32x2:
+ case IR::Opcode::LoadBufferU32x3:
+ case IR::Opcode::LoadBufferU32x4:
case IR::Opcode::ReadConstBuffer:
- case IR::Opcode::ReadConstBufferU32:
return true;
default:
return IsBufferStore(inst);
}
}
+bool IsDataRingInstruction(const IR::Inst& inst) {
+ return inst.GetOpcode() == IR::Opcode::DataAppend ||
+ inst.GetOpcode() == IR::Opcode::DataConsume;
+}
+
bool IsTextureBufferInstruction(const IR::Inst& inst) {
return inst.GetOpcode() == IR::Opcode::LoadBufferFormatF32 ||
inst.GetOpcode() == IR::Opcode::StoreBufferFormatF32;
}
-static bool UseFP16(AmdGpu::DataFormat data_format, AmdGpu::NumberFormat num_format) {
+bool UseFP16(AmdGpu::DataFormat data_format, AmdGpu::NumberFormat num_format) {
switch (num_format) {
case AmdGpu::NumberFormat::Float:
switch (data_format) {
@@ -98,19 +99,15 @@ static bool UseFP16(AmdGpu::DataFormat data_format, AmdGpu::NumberFormat num_for
IR::Type BufferDataType(const IR::Inst& inst, AmdGpu::NumberFormat num_format) {
switch (inst.GetOpcode()) {
- case IR::Opcode::LoadBufferF32:
- case IR::Opcode::LoadBufferF32x2:
- case IR::Opcode::LoadBufferF32x3:
- case IR::Opcode::LoadBufferF32x4:
- case IR::Opcode::ReadConstBuffer:
- case IR::Opcode::StoreBufferF32:
- case IR::Opcode::StoreBufferF32x2:
- case IR::Opcode::StoreBufferF32x3:
- case IR::Opcode::StoreBufferF32x4:
- return IR::Type::F32;
case IR::Opcode::LoadBufferU32:
- case IR::Opcode::ReadConstBufferU32:
+ case IR::Opcode::LoadBufferU32x2:
+ case IR::Opcode::LoadBufferU32x3:
+ case IR::Opcode::LoadBufferU32x4:
case IR::Opcode::StoreBufferU32:
+ case IR::Opcode::StoreBufferU32x2:
+ case IR::Opcode::StoreBufferU32x3:
+ case IR::Opcode::StoreBufferU32x4:
+ case IR::Opcode::ReadConstBuffer:
case IR::Opcode::BufferAtomicIAdd32:
case IR::Opcode::BufferAtomicSwap32:
return IR::Type::U32;
@@ -191,6 +188,10 @@ public:
u32 Add(const BufferResource& desc) {
const u32 index{Add(buffer_resources, desc, [&desc](const auto& existing) {
+ // Only one GDS binding can exist.
+ if (desc.is_gds_buffer && existing.is_gds_buffer) {
+ return true;
+ }
return desc.sgpr_base == existing.sgpr_base &&
desc.dword_offset == existing.dword_offset &&
desc.inline_cbuf == existing.inline_cbuf;
@@ -399,8 +400,7 @@ void PatchBufferInstruction(IR::Block& block, IR::Inst& inst, Info& info,
ASSERT(!buffer.swizzle_enable && !buffer.add_tid_enable);
// Address of constant buffer reads can be calculated at IR emission time.
- if (inst.GetOpcode() == IR::Opcode::ReadConstBuffer ||
- inst.GetOpcode() == IR::Opcode::ReadConstBufferU32) {
+ if (inst.GetOpcode() == IR::Opcode::ReadConstBuffer) {
return;
}
@@ -609,6 +609,51 @@ void PatchImageInstruction(IR::Block& block, IR::Inst& inst, Info& info, Descrip
}
}
+void PatchDataRingInstruction(IR::Block& block, IR::Inst& inst, Info& info,
+ Descriptors& descriptors) {
+ // Insert a GDS binding into the shader if it doesn't exist already.
+ // The buffer is used for append/consume counters.
+ constexpr static AmdGpu::Buffer GdsSharp{.base_address = 1};
+ const u32 binding = descriptors.Add(BufferResource{
+ .used_types = IR::Type::U32,
+ .inline_cbuf = GdsSharp,
+ .is_gds_buffer = true,
+ .is_written = true,
+ });
+
+ const auto pred = [](const IR::Inst* inst) -> std::optional<const IR::Inst*> {
+ if (inst->GetOpcode() == IR::Opcode::GetUserData) {
+ return inst;
+ }
+ return std::nullopt;
+ };
+
+ // Attempt to deduce the GDS address of the counter at compile time.
+ const u32 gds_addr = [&] {
+ const IR::Value& gds_offset = inst.Arg(0);
+ if (gds_offset.IsImmediate()) {
+ // Nothing to do, offset is known.
+ return gds_offset.U32() & 0xFFFF;
+ }
+ const auto result = IR::BreadthFirstSearch(&inst, pred);
+ ASSERT_MSG(result, "Unable to track M0 source");
+
+ // M0 must be set by some user data register.
+ const IR::Inst* prod = gds_offset.InstRecursive();
+ const u32 ud_reg = u32(result.value()->Arg(0).ScalarReg());
+ u32 m0_val = info.user_data[ud_reg] >> 16;
+ if (prod->GetOpcode() == IR::Opcode::IAdd32) {
+ m0_val += prod->Arg(1).U32();
+ }
+ return m0_val & 0xFFFF;
+ }();
+
+ // Patch instruction.
+ IR::IREmitter ir{block, IR::Block::InstructionList::s_iterator_to(inst)};
+ inst.SetArg(0, ir.Imm32(gds_addr >> 2));
+ inst.SetArg(1, ir.Imm32(binding));
+}
+
void ResourceTrackingPass(IR::Program& program) {
// Iterate resource instructions and patch them after finding the sharp.
auto& info = program.info;
@@ -625,6 +670,10 @@ void ResourceTrackingPass(IR::Program& program) {
}
if (IsImageInstruction(inst)) {
PatchImageInstruction(*block, inst, info, descriptors);
+ continue;
+ }
+ if (IsDataRingInstruction(inst)) {
+ PatchDataRingInstruction(*block, inst, info, descriptors);
}
}
}
diff --git a/src/shader_recompiler/ir/passes/ssa_rewrite_pass.cpp b/src/shader_recompiler/ir/passes/ssa_rewrite_pass.cpp
index ea27c64f..54dce035 100644
--- a/src/shader_recompiler/ir/passes/ssa_rewrite_pass.cpp
+++ b/src/shader_recompiler/ir/passes/ssa_rewrite_pass.cpp
@@ -44,8 +44,17 @@ struct GotoVariable : FlagTag {
u32 index;
};
-using Variant = std::variant;
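+// Tag for EXEC-style thread bit values held in a scalar register, tracked separately
+// from ordinary scalar register defs.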
+struct ThreadBitScalar : FlagTag {
+ ThreadBitScalar() = default;
+ explicit ThreadBitScalar(IR::ScalarReg sgpr_) : sgpr{sgpr_} {}
+
+ auto operator<=>(const ThreadBitScalar&) const noexcept = default;
+
+ IR::ScalarReg sgpr;
+};
+
+using Variant = std::variant;
using ValueMap = std::unordered_map;
struct DefTable {
@@ -70,6 +79,13 @@ struct DefTable {
goto_vars[variable.index].insert_or_assign(block, value);
}
+ const IR::Value& Def(IR::Block* block, ThreadBitScalar variable) {
+ return block->ssa_sbit_values[RegIndex(variable.sgpr)];
+ }
+ void SetDef(IR::Block* block, ThreadBitScalar variable, const IR::Value& value) {
+ block->ssa_sbit_values[RegIndex(variable.sgpr)] = value;
+ }
+
const IR::Value& Def(IR::Block* block, SccFlagTag) {
return scc_flag[block];
}
@@ -173,7 +189,7 @@ public:
}
template <typename Type>
- IR::Value ReadVariable(Type variable, IR::Block* root_block, bool is_thread_bit = false) {
+ IR::Value ReadVariable(Type variable, IR::Block* root_block) {
boost::container::small_vector<ReadState<Type>, 64> stack{
    ReadState<Type>(nullptr),
    ReadState<Type>(root_block),
@@ -201,7 +217,7 @@ public:
} else if (!block->IsSsaSealed()) {
// Incomplete CFG
IR::Inst* phi{&*block->PrependNewInst(block->begin(), IR::Opcode::Phi)};
- phi->SetFlags(is_thread_bit ? IR::Type::U1 : IR::TypeOf(UndefOpcode(variable)));
+ phi->SetFlags(IR::TypeOf(UndefOpcode(variable)));
incomplete_phis[block].insert_or_assign(variable, phi);
stack.back().result = IR::Value{&*phi};
@@ -214,7 +230,7 @@ public:
} else {
// Break potential cycles with operandless phi
IR::Inst* const phi{&*block->PrependNewInst(block->begin(), IR::Opcode::Phi)};
- phi->SetFlags(is_thread_bit ? IR::Type::U1 : IR::TypeOf(UndefOpcode(variable)));
+ phi->SetFlags(IR::TypeOf(UndefOpcode(variable)));
WriteVariable(variable, block, IR::Value{phi});
@@ -263,9 +279,7 @@ private:
template <typename Type>
IR::Value AddPhiOperands(Type variable, IR::Inst& phi, IR::Block* block) {
for (IR::Block* const imm_pred : block->ImmPredecessors()) {
- const bool is_thread_bit =
- std::is_same_v<Type, IR::ScalarReg> && phi.Flags<IR::Type>() == IR::Type::U1;
- phi.AddPhiOperand(imm_pred, ReadVariable(variable, imm_pred, is_thread_bit));
+ phi.AddPhiOperand(imm_pred, ReadVariable(variable, imm_pred));
}
return TryRemoveTrivialPhi(phi, block, UndefOpcode(variable));
}
@@ -313,7 +327,11 @@ private:
void VisitInst(Pass& pass, IR::Block* block, IR::Inst& inst) {
const IR::Opcode opcode{inst.GetOpcode()};
switch (opcode) {
- case IR::Opcode::SetThreadBitScalarReg:
+ case IR::Opcode::SetThreadBitScalarReg: {
+ const IR::ScalarReg reg{inst.Arg(0).ScalarReg()};
+ pass.WriteVariable(ThreadBitScalar{reg}, block, inst.Arg(1));
+ break;
+ }
case IR::Opcode::SetScalarRegister: {
const IR::ScalarReg reg{inst.Arg(0).ScalarReg()};
pass.WriteVariable(reg, block, inst.Arg(1));
@@ -345,11 +363,15 @@ void VisitInst(Pass& pass, IR::Block* block, IR::Inst& inst) {
case IR::Opcode::SetM0:
pass.WriteVariable(M0Tag{}, block, inst.Arg(0));
break;
- case IR::Opcode::GetThreadBitScalarReg:
+ case IR::Opcode::GetThreadBitScalarReg: {
+ const IR::ScalarReg reg{inst.Arg(0).ScalarReg()};
+ const IR::Value value = pass.ReadVariable(ThreadBitScalar{reg}, block);
+ inst.ReplaceUsesWith(value);
+ break;
+ }
case IR::Opcode::GetScalarRegister: {
const IR::ScalarReg reg{inst.Arg(0).ScalarReg()};
- const bool thread_bit = opcode == IR::Opcode::GetThreadBitScalarReg;
- const IR::Value value = pass.ReadVariable(reg, block, thread_bit);
+ const IR::Value value = pass.ReadVariable(reg, block);
inst.ReplaceUsesWith(value);
break;
}
diff --git a/src/video_core/amdgpu/liverpool.cpp b/src/video_core/amdgpu/liverpool.cpp
index cee30f75..cbc18aa4 100644
--- a/src/video_core/amdgpu/liverpool.cpp
+++ b/src/video_core/amdgpu/liverpool.cpp
@@ -465,6 +465,14 @@ Liverpool::Task Liverpool::ProcessGraphics(std::span<const u32> dcb, std::span<const u32> ccb) {
            const auto* event_eos = reinterpret_cast<const PM4CmdEventWriteEos*>(header);
event_eos->SignalFence();
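+ // GdsStore events read the counter value back from the GDS buffer and write it to
+ // the address supplied by the packet.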
+ if (event_eos->command == PM4CmdEventWriteEos::Command::GdsStore) {
+ ASSERT(event_eos->size == 1);
+ if (rasterizer) {
+ rasterizer->Finish();
+ const u32 value = rasterizer->ReadDataFromGds(event_eos->gds_index);
+ *event_eos->Address() = value;
+ }
+ }
break;
}
case PM4ItOpcode::EventWriteEop: {
@@ -474,6 +482,9 @@ Liverpool::Task Liverpool::ProcessGraphics(std::span<const u32> dcb, std::span<const u32> ccb) {
            const auto* dma_data = reinterpret_cast<const PM4DmaData*>(header);
+ if (dma_data->src_sel == DmaDataSrc::Data && dma_data->dst_sel == DmaDataDst::Gds) {
+ rasterizer->InlineDataToGds(dma_data->dst_addr_lo, dma_data->data);
+ }
break;
}
case PM4ItOpcode::WriteData: {
diff --git a/src/video_core/amdgpu/pm4_cmds.h b/src/video_core/amdgpu/pm4_cmds.h
index 58ade221..fd7980c1 100644
--- a/src/video_core/amdgpu/pm4_cmds.h
+++ b/src/video_core/amdgpu/pm4_cmds.h
@@ -350,6 +350,17 @@ struct PM4CmdEventWriteEop {
}
};
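+// Destination and source selectors for the DMA_DATA packet's dst_sel/src_sel fields.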
+enum class DmaDataDst : u32 {
+ Memory = 0,
+ Gds = 1,
+};
+
+enum class DmaDataSrc : u32 {
+ Memory = 0,
+ Gds = 1,
+ Data = 2,
+};
+
struct PM4DmaData {
PM4Type3Header header;
union {
@@ -357,11 +368,11 @@ struct PM4DmaData {
BitField<12, 1, u32> src_atc;
BitField<13, 2, u32> src_cache_policy;
BitField<15, 1, u32> src_volatile;
- BitField<20, 2, u32> dst_sel;
+ BitField<20, 2, DmaDataDst> dst_sel;
BitField<24, 1, u32> dst_atc;
BitField<25, 2, u32> dst_cache_policy;
BitField<27, 1, u32> dst_volatile;
- BitField<29, 2, u32> src_sel;
+ BitField<29, 2, DmaDataSrc> src_sel;
BitField<31, 1, u32> cp_sync;
};
union {
@@ -502,13 +513,17 @@ struct PM4CmdEventWriteEos {
}
void SignalFence() const {
- switch (command.Value()) {
+ const auto cmd = command.Value();
+ switch (cmd) {
case Command::SingalFence: {
*Address() = DataDWord();
break;
}
+ case Command::GdsStore: {
+ break;
+ }
default: {
- UNREACHABLE();
+ UNREACHABLE_MSG("Unknown command {}", u32(cmd));
}
}
}
diff --git a/src/video_core/buffer_cache/buffer_cache.cpp b/src/video_core/buffer_cache/buffer_cache.cpp
index 89032e99..86af05bf 100644
--- a/src/video_core/buffer_cache/buffer_cache.cpp
+++ b/src/video_core/buffer_cache/buffer_cache.cpp
@@ -15,8 +15,9 @@
namespace VideoCore {
static constexpr size_t NumVertexBuffers = 32;
-static constexpr size_t StagingBufferSize = 512_MB;
-static constexpr size_t UboStreamBufferSize = 64_MB;
+static constexpr size_t GdsBufferSize = 64_KB;
+static constexpr size_t StagingBufferSize = 1_GB;
+static constexpr size_t UboStreamBufferSize = 128_MB;
BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& scheduler_,
const AmdGpu::Liverpool* liverpool_, TextureCache& texture_cache_,
@@ -25,7 +26,10 @@ BufferCache::BufferCache(const Vulkan::Instance& instance_, Vulkan::Scheduler& s
texture_cache{texture_cache_}, tracker{tracker_},
staging_buffer{instance, scheduler, MemoryUsage::Upload, StagingBufferSize},
stream_buffer{instance, scheduler, MemoryUsage::Stream, UboStreamBufferSize},
+ gds_buffer{instance, scheduler, MemoryUsage::Stream, 0, AllFlags, GdsBufferSize},
memory_tracker{&tracker} {
+ Vulkan::SetObjectName(instance.GetDevice(), gds_buffer.Handle(), "GDS Buffer");
+
// Ensure the first slot is used for the null buffer
void(slot_buffers.insert(instance, scheduler, MemoryUsage::DeviceLocal, 0, ReadFlags, 1));
}
@@ -232,6 +236,27 @@ u32 BufferCache::BindIndexBuffer(bool& is_indexed, u32 index_offset) {
return regs.num_indices;
}
+void BufferCache::InlineDataToGds(u32 gds_offset, u32 value) {
+ ASSERT_MSG(gds_offset % 4 == 0, "GDS offset must be dword aligned");
+ scheduler.EndRendering();
+ const auto cmdbuf = scheduler.CommandBuffer();
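+ // Order any prior transfer writes to the counter against subsequent reads before recording the update.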
+ const vk::BufferMemoryBarrier2 buf_barrier = {
+ .srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
+ .srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
+ .dstStageMask = vk::PipelineStageFlagBits2::eAllCommands,
+ .dstAccessMask = vk::AccessFlagBits2::eMemoryRead,
+ .buffer = gds_buffer.Handle(),
+ .offset = gds_offset,
+ .size = sizeof(u32),
+ };
+ cmdbuf.pipelineBarrier2(vk::DependencyInfo{
+ .dependencyFlags = vk::DependencyFlagBits::eByRegion,
+ .bufferMemoryBarrierCount = 1,
+ .pBufferMemoryBarriers = &buf_barrier,
+ });
+ cmdbuf.updateBuffer(gds_buffer.Handle(), gds_offset, sizeof(u32), &value);
+}
+
std::pair BufferCache::ObtainBuffer(VAddr device_addr, u32 size, bool is_written,
bool is_texel_buffer) {
static constexpr u64 StreamThreshold = CACHING_PAGESIZE;
@@ -258,6 +283,7 @@ std::pair BufferCache::ObtainTempBuffer(VAddr gpu_addr, u32 size)
if (buffer_id) {
Buffer& buffer = slot_buffers[buffer_id];
if (buffer.IsInBounds(gpu_addr, size)) {
+ SynchronizeBuffer(buffer, gpu_addr, size, false);
return {&buffer, buffer.Offset(gpu_addr)};
}
}
@@ -541,64 +567,48 @@ void BufferCache::SynchronizeBuffer(Buffer& buffer, VAddr device_addr, u32 size,
}
bool BufferCache::SynchronizeBufferFromImage(Buffer& buffer, VAddr device_addr, u32 size) {
- boost::container::small_vector image_ids;
- const u32 inv_size = std::min(size, MaxInvalidateDist);
- texture_cache.ForEachImageInRegion(device_addr, inv_size, [&](ImageId image_id, Image& image) {
- // Only consider GPU modified images, i.e render targets or storage images.
- // Also avoid any CPU modified images as the image data is likely to be stale.
- if (True(image.flags & ImageFlagBits::CpuModified) ||
- False(image.flags & ImageFlagBits::GpuModified)) {
- return;
- }
- // Image must fully overlap with the provided buffer range.
- if (image.cpu_addr < device_addr || image.cpu_addr_end > device_addr + size) {
- return;
- }
- image_ids.push_back(image_id);
- });
- if (image_ids.empty()) {
+ static constexpr FindFlags find_flags =
+ FindFlags::NoCreate | FindFlags::RelaxDim | FindFlags::RelaxFmt | FindFlags::RelaxSize;
+ ImageInfo info{};
+ info.guest_address = device_addr;
+ info.guest_size_bytes = size;
+ const ImageId image_id = texture_cache.FindImage(info, find_flags);
+ if (!image_id) {
return false;
}
- // Sort images by modification tick. If there are overlaps we want to
- // copy from least to most recently modified.
- std::ranges::sort(image_ids, [&](ImageId lhs_id, ImageId rhs_id) {
- const Image& lhs = texture_cache.GetImage(lhs_id);
- const Image& rhs = texture_cache.GetImage(rhs_id);
- return lhs.tick_accessed_last < rhs.tick_accessed_last;
- });
- boost::container::small_vector copies;
- for (const ImageId image_id : image_ids) {
- copies.clear();
- Image& image = texture_cache.GetImage(image_id);
- u32 offset = buffer.Offset(image.cpu_addr);
- const u32 num_layers = image.info.resources.layers;
- for (u32 m = 0; m < image.info.resources.levels; m++) {
- const u32 width = std::max(image.info.size.width >> m, 1u);
- const u32 height = std::max(image.info.size.height >> m, 1u);
- const u32 depth =
- image.info.props.is_volume ? std::max(image.info.size.depth >> m, 1u) : 1u;
- const auto& [mip_size, mip_pitch, mip_height, mip_ofs] = image.info.mips_layout[m];
- copies.push_back({
- .bufferOffset = offset,
- .bufferRowLength = static_cast(mip_pitch),
- .bufferImageHeight = static_cast(mip_height),
- .imageSubresource{
- .aspectMask = image.aspect_mask & ~vk::ImageAspectFlagBits::eStencil,
- .mipLevel = m,
- .baseArrayLayer = 0,
- .layerCount = num_layers,
- },
- .imageOffset = {0, 0, 0},
- .imageExtent = {width, height, depth},
- });
- offset += mip_ofs * num_layers;
- }
- scheduler.EndRendering();
- image.Transit(vk::ImageLayout::eTransferSrcOptimal, vk::AccessFlagBits::eTransferRead);
- const auto cmdbuf = scheduler.CommandBuffer();
- cmdbuf.copyImageToBuffer(image.image, vk::ImageLayout::eTransferSrcOptimal, buffer.buffer,
- copies);
+ Image& image = texture_cache.GetImage(image_id);
+ if (image.info.guest_size_bytes > size) {
+ return false;
}
+ boost::container::small_vector copies;
+ u32 offset = buffer.Offset(image.cpu_addr);
+ const u32 num_layers = image.info.resources.layers;
+ for (u32 m = 0; m < image.info.resources.levels; m++) {
+ const u32 width = std::max(image.info.size.width >> m, 1u);
+ const u32 height = std::max(image.info.size.height >> m, 1u);
+ const u32 depth =
+ image.info.props.is_volume ? std::max(image.info.size.depth >> m, 1u) : 1u;
+ const auto& [mip_size, mip_pitch, mip_height, mip_ofs] = image.info.mips_layout[m];
+ copies.push_back({
+ .bufferOffset = offset,
+ .bufferRowLength = static_cast<u32>(mip_pitch),
+ .bufferImageHeight = static_cast<u32>(mip_height),
+ .imageSubresource{
+ .aspectMask = image.aspect_mask & ~vk::ImageAspectFlagBits::eStencil,
+ .mipLevel = m,
+ .baseArrayLayer = 0,
+ .layerCount = num_layers,
+ },
+ .imageOffset = {0, 0, 0},
+ .imageExtent = {width, height, depth},
+ });
+ offset += mip_ofs * num_layers;
+ }
+ scheduler.EndRendering();
+ image.Transit(vk::ImageLayout::eTransferSrcOptimal, vk::AccessFlagBits::eTransferRead);
+ const auto cmdbuf = scheduler.CommandBuffer();
+ cmdbuf.copyImageToBuffer(image.image, vk::ImageLayout::eTransferSrcOptimal, buffer.buffer,
+ copies);
return true;
}
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index b38b00f0..cd6ea28f 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -57,6 +57,11 @@ public:
PageManager& tracker);
~BufferCache();
+ /// Returns a pointer to the GDS device-local buffer.
+ [[nodiscard]] const Buffer* GetGdsBuffer() const noexcept {
+ return &gds_buffer;
+ }
+
/// Invalidates any buffer in the logical page range.
void InvalidateMemory(VAddr device_addr, u64 size);
@@ -66,6 +71,9 @@ public:
/// Bind host index buffer for the current draw.
u32 BindIndexBuffer(bool& is_indexed, u32 index_offset);
+ /// Writes a value to the GDS buffer.
+ void InlineDataToGds(u32 gds_offset, u32 value);
+
/// Obtains a buffer for the specified region.
[[nodiscard]] std::pair ObtainBuffer(VAddr gpu_addr, u32 size, bool is_written,
bool is_texel_buffer = false);
@@ -130,6 +138,7 @@ private:
PageManager& tracker;
StreamBuffer staging_buffer;
StreamBuffer stream_buffer;
+ Buffer gds_buffer;
std::mutex mutex;
Common::SlotVector slot_buffers;
MemoryTracker memory_tracker;
diff --git a/src/video_core/renderer_vulkan/liverpool_to_vk.cpp b/src/video_core/renderer_vulkan/liverpool_to_vk.cpp
index 40a1124a..430fb9ed 100644
--- a/src/video_core/renderer_vulkan/liverpool_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/liverpool_to_vk.cpp
@@ -585,11 +585,10 @@ vk::Format SurfaceFormat(AmdGpu::DataFormat data_format, AmdGpu::NumberFormat nu
vk::Format AdjustColorBufferFormat(vk::Format base_format,
Liverpool::ColorBuffer::SwapMode comp_swap, bool is_vo_surface) {
- ASSERT_MSG(comp_swap == Liverpool::ColorBuffer::SwapMode::Standard ||
- comp_swap == Liverpool::ColorBuffer::SwapMode::Alternate,
- "Unsupported component swap mode {}", static_cast(comp_swap));
-
const bool comp_swap_alt = comp_swap == Liverpool::ColorBuffer::SwapMode::Alternate;
+ const bool comp_swap_reverse = comp_swap == Liverpool::ColorBuffer::SwapMode::StandardReverse;
+ const bool comp_swap_alt_reverse =
+ comp_swap == Liverpool::ColorBuffer::SwapMode::AlternateReverse;
if (comp_swap_alt) {
switch (base_format) {
case vk::Format::eR8G8B8A8Unorm:
@@ -605,6 +604,18 @@ vk::Format AdjustColorBufferFormat(vk::Format base_format,
default:
break;
}
+ } else if (comp_swap_reverse) {
+ switch (base_format) {
+ case vk::Format::eR8G8B8A8Unorm:
+ return vk::Format::eA8B8G8R8UnormPack32;
+ case vk::Format::eR8G8B8A8Srgb:
+ return is_vo_surface ? vk::Format::eA8B8G8R8UnormPack32
+ : vk::Format::eA8B8G8R8SrgbPack32;
+ default:
+ break;
+ }
+ } else if (comp_swap_alt_reverse) {
+ return base_format;
} else {
if (is_vo_surface && base_format == vk::Format::eR8G8B8A8Srgb) {
return vk::Format::eR8G8B8A8Unorm;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index b87d3c91..aeae0813 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -109,37 +109,42 @@ bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
u32 binding{};
for (const auto& desc : info->buffers) {
- const auto vsharp = desc.GetSharp(*info);
- const bool is_storage = desc.IsStorage(vsharp);
- const VAddr address = vsharp.base_address;
- // Most of the time when a metadata is updated with a shader it gets cleared. It means we
- // can skip the whole dispatch and update the tracked state instead. Also, it is not
- // intended to be consumed and in such rare cases (e.g. HTile introspection, CRAA) we will
- // need its full emulation anyways. For cases of metadata read a warning will be logged.
- if (desc.is_written) {
- if (texture_cache.TouchMeta(address, true)) {
- LOG_TRACE(Render_Vulkan, "Metadata update skipped");
- return false;
- }
+ bool is_storage = true;
+ if (desc.is_gds_buffer) {
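+ // The append/consume counters live in the dedicated GDS buffer, which is always bound as a storage buffer.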
+ auto* vk_buffer = buffer_cache.GetGdsBuffer();
+ buffer_infos.emplace_back(vk_buffer->Handle(), 0, vk_buffer->SizeBytes());
} else {
- if (texture_cache.IsMeta(address)) {
- LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a CS shader (buffer)");
+ const auto vsharp = desc.GetSharp(*info);
+ is_storage = desc.IsStorage(vsharp);
+ const VAddr address = vsharp.base_address;
+ // Most of the time when a metadata is updated with a shader it gets cleared. It means
+ // we can skip the whole dispatch and update the tracked state instead. Also, it is not
+ // intended to be consumed and in such rare cases (e.g. HTile introspection, CRAA) we
+ // will need its full emulation anyways. For cases of metadata read a warning will be
+ // logged.
+ if (desc.is_written) {
+ if (texture_cache.TouchMeta(address, true)) {
+ LOG_TRACE(Render_Vulkan, "Metadata update skipped");
+ return false;
+ }
+ } else {
+ if (texture_cache.IsMeta(address)) {
+ LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a CS shader (buffer)");
+ }
}
+ const u32 size = vsharp.GetSize();
+ const u32 alignment =
+ is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment();
+ const auto [vk_buffer, offset] =
+ buffer_cache.ObtainBuffer(address, size, desc.is_written);
+ const u32 offset_aligned = Common::AlignDown(offset, alignment);
+ const u32 adjust = offset - offset_aligned;
+ if (adjust != 0) {
+ ASSERT(adjust % 4 == 0);
+ push_data.AddOffset(binding, adjust);
+ }
+ buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned, size + adjust);
}
- const u32 size = vsharp.GetSize();
- if (desc.is_written) {
- texture_cache.InvalidateMemory(address, size);
- }
- const u32 alignment =
- is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment();
- const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer(address, size, desc.is_written);
- const u32 offset_aligned = Common::AlignDown(offset, alignment);
- const u32 adjust = offset - offset_aligned;
- if (adjust != 0) {
- ASSERT(adjust % 4 == 0);
- push_data.AddOffset(binding, adjust);
- }
- buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned, size + adjust);
set_writes.push_back({
.dstSet = VK_NULL_HANDLE,
.dstBinding = binding++,
@@ -188,7 +193,7 @@ bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
buffer_barriers.emplace_back(*barrier);
}
if (desc.is_written) {
- texture_cache.InvalidateMemory(address, size);
+ texture_cache.MarkWritten(address, size);
}
}
set_writes.push_back({
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 6ac4dcf1..a548b70a 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -432,7 +432,7 @@ void GraphicsPipeline::BindResources(const Liverpool::Regs& regs,
buffer_barriers.emplace_back(*barrier);
}
if (desc.is_written) {
- texture_cache.InvalidateMemory(address, size);
+ texture_cache.MarkWritten(address, size);
}
}
set_writes.push_back({
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 4419b0f8..b4b256bb 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -298,6 +298,16 @@ bool PipelineCache::RefreshGraphicsKey() {
return false;
}
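+ // Descriptive comment (added): pipelines using the tessellation stage are skipped for now; warn only on the first occurrence.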
+ static bool TessMissingLogged = false;
+ if (auto* pgm = regs.ProgramForStage(3);
+ regs.stage_enable.IsStageEnabled(3) && pgm->Address() != 0) {
+ if (!TessMissingLogged) {
+ LOG_WARNING(Render_Vulkan, "Tess pipeline compilation skipped");
+ TessMissingLogged = true;
+ }
+ return false;
+ }
+
std::tie(infos[i], modules[i], key.stage_hashes[i]) = GetProgram(stage, params, binding);
}
return true;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 9f72d044..6344315a 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -175,6 +175,10 @@ u64 Rasterizer::Flush() {
return current_tick;
}
+void Rasterizer::Finish() {
+ scheduler.Finish();
+}
+
void Rasterizer::BeginRendering() {
const auto& regs = liverpool->regs;
RenderState state;
@@ -251,6 +255,17 @@ void Rasterizer::BeginRendering() {
scheduler.BeginRendering(state);
}
+void Rasterizer::InlineDataToGds(u32 gds_offset, u32 value) {
+ buffer_cache.InlineDataToGds(gds_offset, value);
+}
+
+u32 Rasterizer::ReadDataFromGds(u32 gds_offset) {
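+ // Descriptive comment (added): the GDS buffer is persistently mapped, so the value can be read back directly on the CPU.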
+ auto* gds_buf = buffer_cache.GetGdsBuffer();
+ u32 value;
+ std::memcpy(&value, gds_buf->mapped_data.data() + gds_offset, sizeof(u32));
+ return value;
+}
+
void Rasterizer::InvalidateMemory(VAddr addr, u64 size) {
buffer_cache.InvalidateMemory(addr, size);
texture_cache.InvalidateMemory(addr, size);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 43ab4756..5aa90c5c 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -41,12 +41,15 @@ public:
void ScopeMarkerEnd();
void ScopedMarkerInsert(const std::string_view& str);
+ void InlineDataToGds(u32 gds_offset, u32 value);
+ u32 ReadDataFromGds(u32 gds_offset);
void InvalidateMemory(VAddr addr, u64 size);
void MapMemory(VAddr addr, u64 size);
void UnmapMemory(VAddr addr, u64 size);
void CpSync();
u64 Flush();
+ void Finish();
private:
void BeginRendering();
diff --git a/src/video_core/texture_cache/image.h b/src/video_core/texture_cache/image.h
index f932b25a..1bbb975b 100644
--- a/src/video_core/texture_cache/image.h
+++ b/src/video_core/texture_cache/image.h
@@ -32,7 +32,6 @@ enum ImageFlagBits : u32 {
Registered = 1 << 6, ///< True when the image is registered
Picked = 1 << 7, ///< Temporary flag to mark the image as picked
MetaRegistered = 1 << 8, ///< True when metadata for this surface is known and registered
- Deleted = 1 << 9, ///< Indicates that images was marked for deletion once frame is done
};
DECLARE_ENUM_FLAG_OPERATORS(ImageFlagBits)
diff --git a/src/video_core/texture_cache/image_info.cpp b/src/video_core/texture_cache/image_info.cpp
index 66fde5c8..7d87fb66 100644
--- a/src/video_core/texture_cache/image_info.cpp
+++ b/src/video_core/texture_cache/image_info.cpp
@@ -205,7 +205,7 @@ ImageInfo::ImageInfo(const AmdGpu::Image& image, bool force_depth /*= false*/) n
pixel_format = LiverpoolToVK::SurfaceFormat(image.GetDataFmt(), image.GetNumberFmt());
// Override format if image is forced to be a depth target
if (force_depth || tiling_mode == AmdGpu::TilingMode::Depth_MacroTiled) {
- if (pixel_format == vk::Format::eR32Sfloat) {
+ if (pixel_format == vk::Format::eR32Sfloat || pixel_format == vk::Format::eR8Unorm) {
pixel_format = vk::Format::eD32SfloatS8Uint;
} else if (pixel_format == vk::Format::eR16Unorm) {
pixel_format = vk::Format::eD16UnormS8Uint;
diff --git a/src/video_core/texture_cache/image_view.cpp b/src/video_core/texture_cache/image_view.cpp
index e554bad7..bb2d9053 100644
--- a/src/video_core/texture_cache/image_view.cpp
+++ b/src/video_core/texture_cache/image_view.cpp
@@ -128,6 +128,10 @@ ImageView::ImageView(const Vulkan::Instance& instance, const ImageViewInfo& info
format = image.info.pixel_format;
aspect = vk::ImageAspectFlagBits::eDepth;
}
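+ // Descriptive comment (added): an R8Unorm view of a stencil-capable image selects the stencil plane, so use the image's own format.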
+ if (image.aspect_mask & vk::ImageAspectFlagBits::eStencil && format == vk::Format::eR8Unorm) {
+ format = image.info.pixel_format;
+ aspect = vk::ImageAspectFlagBits::eStencil;
+ }
const vk::ImageViewCreateInfo image_view_ci = {
.pNext = usage_override ? &usage_ci : nullptr,
diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp
index 996fcad0..37bb5da1 100644
--- a/src/video_core/texture_cache/texture_cache.cpp
+++ b/src/video_core/texture_cache/texture_cache.cpp
@@ -40,17 +40,27 @@ TextureCache::~TextureCache() = default;
void TextureCache::InvalidateMemory(VAddr address, size_t size) {
std::scoped_lock lock{mutex};
ForEachImageInRegion(address, size, [&](ImageId image_id, Image& image) {
- const size_t image_dist =
- image.cpu_addr > address ? image.cpu_addr - address : address - image.cpu_addr;
- if (image_dist < MaxInvalidateDist) {
- // Ensure image is reuploaded when accessed again.
- image.flags |= ImageFlagBits::CpuModified;
- }
+ // Ensure image is reuploaded when accessed again.
+ image.flags |= ImageFlagBits::CpuModified;
// Untrack image, so the range is unprotected and the guest can write freely.
UntrackImage(image_id);
});
}
+void TextureCache::MarkWritten(VAddr address, size_t max_size) {
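+ // Descriptive comment (added): only look up an existing image; a GPU write to untracked memory should not create one.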
+ static constexpr FindFlags find_flags =
+ FindFlags::NoCreate | FindFlags::RelaxDim | FindFlags::RelaxFmt | FindFlags::RelaxSize;
+ ImageInfo info{};
+ info.guest_address = address;
+ info.guest_size_bytes = max_size;
+ const ImageId image_id = FindImage(info, find_flags);
+ if (!image_id) {
+ return;
+ }
+ // Ensure image is copied when accessed again.
+ slot_images[image_id].flags |= ImageFlagBits::CpuModified;
+}
+
void TextureCache::UnmapMemory(VAddr cpu_addr, size_t size) {
std::scoped_lock lk{mutex};
@@ -199,10 +209,14 @@ ImageId TextureCache::FindImage(const ImageInfo& info, FindFlags flags) {
!IsVulkanFormatCompatible(info.pixel_format, cache_image.info.pixel_format)) {
continue;
}
- ASSERT(cache_image.info.type == info.type);
+ ASSERT(cache_image.info.type == info.type || True(flags & FindFlags::RelaxFmt));
image_id = cache_id;
}
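+ // Descriptive comment (added): with NoCreate, give up before attempting overlap resolution or creating a new image.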
+ if (True(flags & FindFlags::NoCreate) && !image_id) {
+ return {};
+ }
+
// Try to resolve overlaps (if any)
if (!image_id) {
for (const auto& cache_id : image_ids) {
@@ -211,10 +225,6 @@ ImageId TextureCache::FindImage(const ImageInfo& info, FindFlags flags) {
}
}
- if (True(flags & FindFlags::NoCreate) && !image_id) {
- return {};
- }
-
// Create and register a new image
if (!image_id) {
image_id = slot_images.insert(instance, scheduler, info);
@@ -251,9 +261,6 @@ ImageView& TextureCache::RegisterImageView(ImageId image_id, const ImageViewInfo
ImageView& TextureCache::FindTexture(const ImageInfo& info, const ImageViewInfo& view_info) {
const ImageId image_id = FindImage(info);
Image& image = slot_images[image_id];
- if (view_info.is_storage) {
- image.flags |= ImageFlagBits::GpuModified;
- }
UpdateImage(image_id);
auto& usage = image.info.usage;
@@ -351,7 +358,6 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
if (False(image.flags & ImageFlagBits::CpuModified)) {
return;
}
-
// Mark image as validated.
image.flags &= ~ImageFlagBits::CpuModified;
@@ -485,8 +491,6 @@ void TextureCache::DeleteImage(ImageId image_id) {
ASSERT_MSG(False(image.flags & ImageFlagBits::Tracked), "Image was not untracked");
ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered");
- image.flags |= ImageFlagBits::Deleted;
-
// Remove any registered meta areas.
const auto& meta_info = image.info.meta_info;
if (meta_info.cmask_addr) {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 44bc2b43..cc19ac4a 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -50,6 +50,9 @@ public:
/// Invalidates any image in the logical page range.
void InvalidateMemory(VAddr address, size_t size);
+ /// Marks an image as dirty if it exists at the provided address.
+ void MarkWritten(VAddr address, size_t max_size);
+
/// Evicts any images that overlap the unmapped range.
void UnmapMemory(VAddr cpu_addr, size_t size);