Diffstat (limited to 'src/video_core')
-rw-r--r--   src/video_core/buffer_cache/buffer_cache.h | 3
-rw-r--r--   src/video_core/macro/macro_jit_x64.cpp | 55
-rw-r--r--   src/video_core/macro/macro_jit_x64.h | 1
-rw-r--r--   src/video_core/memory_manager.cpp | 40
-rw-r--r--   src/video_core/memory_manager.h | 12
-rw-r--r--   src/video_core/query_cache.h | 10
-rw-r--r--   src/video_core/renderer_opengl/gl_arb_decompiler.cpp | 63
-rw-r--r--   src/video_core/renderer_vulkan/renderer_vulkan.cpp | 4
-rw-r--r--   src/video_core/renderer_vulkan/vk_rasterizer.cpp | 6
-rw-r--r--   src/video_core/renderer_vulkan/vk_update_descriptor.cpp | 36
-rw-r--r--   src/video_core/renderer_vulkan/vk_update_descriptor.h | 32
-rw-r--r--   src/video_core/renderer_vulkan/wrapper.cpp | 3
-rw-r--r--   src/video_core/renderer_vulkan/wrapper.h | 2
-rw-r--r--   src/video_core/shader/memory_util.cpp | 4
-rw-r--r--   src/video_core/shader_cache.h | 10
-rw-r--r--   src/video_core/texture_cache/texture_cache.h | 2
16 files changed, 141 insertions(+), 142 deletions(-)
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 308d8b55f..bae1d527c 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -47,7 +47,7 @@ public: bool is_written = false, bool use_fast_cbuf = false) { std::lock_guard lock{mutex}; - const auto& memory_manager = system.GPU().MemoryManager(); + auto& memory_manager = system.GPU().MemoryManager(); const std::optional<VAddr> cpu_addr_opt = memory_manager.GpuToCpuAddress(gpu_addr); if (!cpu_addr_opt) { return {GetEmptyBuffer(size), 0}; @@ -59,7 +59,6 @@ public: constexpr std::size_t max_stream_size = 0x800; if (use_fast_cbuf || size < max_stream_size) { if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) { - auto& memory_manager = system.GPU().MemoryManager(); const bool is_granular = memory_manager.IsGranularRange(gpu_addr, size); if (use_fast_cbuf) { u8* dest; diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp index bee34a7c0..30abb66e5 100644 --- a/src/video_core/macro/macro_jit_x64.cpp +++ b/src/video_core/macro/macro_jit_x64.cpp @@ -54,7 +54,7 @@ void MacroJITx64Impl::Compile_ALU(Macro::Opcode opcode) { const bool is_a_zero = opcode.src_a == 0; const bool is_b_zero = opcode.src_b == 0; const bool valid_operation = !is_a_zero && !is_b_zero; - const bool is_move_operation = !is_a_zero && is_b_zero; + [[maybe_unused]] const bool is_move_operation = !is_a_zero && is_b_zero; const bool has_zero_register = is_a_zero || is_b_zero; const bool no_zero_reg_skip = opcode.alu_operation == Macro::ALUOperation::AddWithCarry || opcode.alu_operation == Macro::ALUOperation::SubtractWithBorrow; @@ -73,7 +73,6 @@ void MacroJITx64Impl::Compile_ALU(Macro::Opcode opcode) { src_b = Compile_GetRegister(opcode.src_b, eax); } } - Xbyak::Label skip_carry{}; bool has_emitted = false; @@ -240,10 +239,10 @@ void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) { } void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) { - auto dst = Compile_GetRegister(opcode.src_a, eax); - auto src = Compile_GetRegister(opcode.src_b, RESULT); + const auto dst = Compile_GetRegister(opcode.src_a, ecx); + const auto src = Compile_GetRegister(opcode.src_b, RESULT); - shr(src, al); + shr(src, dst.cvt8()); if (opcode.bf_size != 0 && opcode.bf_size != 31) { and_(src, opcode.GetBitfieldMask()); } else if (opcode.bf_size == 0) { @@ -259,8 +258,8 @@ void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) { } void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) { - auto dst = Compile_GetRegister(opcode.src_a, eax); - auto src = Compile_GetRegister(opcode.src_b, RESULT); + const auto dst = Compile_GetRegister(opcode.src_a, ecx); + const auto src = Compile_GetRegister(opcode.src_b, RESULT); if (opcode.bf_src_bit != 0) { shr(src, opcode.bf_src_bit); @@ -269,16 +268,9 @@ void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) { if (opcode.bf_size != 31) { and_(src, opcode.GetBitfieldMask()); } - shl(src, al); - Compile_ProcessResult(opcode.result_operation, opcode.dst); -} + shl(src, dst.cvt8()); -static u32 Read(Engines::Maxwell3D* maxwell3d, u32 method) { - return maxwell3d->GetRegisterValue(method); -} - -static void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) { - maxwell3d->CallMethodFromMME(method_address.address, value); + Compile_ProcessResult(opcode.result_operation, opcode.dst); } 
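In the Compile_ExtractShiftLeftImmediate/Register hunks above, the shift amount is now allocated in ecx instead of eax because the classic x86 variable-shift encodings (SHR/SHL r/m32, cl) only take their count from CL; dst.cvt8() therefore resolves to cl. A minimal, standalone Xbyak sketch of that constraint follows; the helper name and the registers the operands arrive in are illustrative assumptions, not code from this change:

    #include <xbyak/xbyak.h>

    // Sketch only: x86 variable shifts encode their count exclusively in CL, so a JIT
    // must materialize the shift amount in ECX before emitting SHR/SHL reg, cl.
    void EmitVariableShiftRight(Xbyak::CodeGenerator& code) {
        using namespace Xbyak::util; // eax, ecx, edx, cl, ...
        code.mov(ecx, edx); // assumed: the shift count arrives in edx; its low byte becomes CL
        code.shr(eax, cl);  // assumed: the value being shifted lives in eax; eax >>= (count & 31)
    }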
void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) { @@ -298,15 +290,27 @@ void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) { sub(result, opcode.immediate * -1); } } - Common::X64::ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0); - mov(Common::X64::ABI_PARAM1, qword[STATE]); - mov(Common::X64::ABI_PARAM2, RESULT); - Common::X64::CallFarFunction(*this, &Read); - Common::X64::ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0); - mov(RESULT, Common::X64::ABI_RETURN.cvt32()); + + // Equivalent to Engines::Maxwell3D::GetRegisterValue: + if (optimizer.enable_asserts) { + Xbyak::Label pass_range_check; + cmp(RESULT, static_cast<u32>(Engines::Maxwell3D::Regs::NUM_REGS)); + jb(pass_range_check); + int3(); + L(pass_range_check); + } + mov(rax, qword[STATE]); + mov(RESULT, + dword[rax + offsetof(Engines::Maxwell3D, regs) + + offsetof(Engines::Maxwell3D::Regs, reg_array) + RESULT.cvt64() * sizeof(u32)]); + Compile_ProcessResult(opcode.result_operation, opcode.dst); } +static void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) { + maxwell3d->CallMethodFromMME(method_address.address, value); +} + void Tegra::MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) { Common::X64::ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0); mov(Common::X64::ABI_PARAM1, qword[STATE]); @@ -438,6 +442,9 @@ void MacroJITx64Impl::Compile() { // one if our register isn't "dirty" optimizer.optimize_for_method_move = true; + // Enable run-time assertions in JITted code + optimizer.enable_asserts = false; + // Check to see if we can skip emitting certain instructions Optimizer_ScanFlags(); @@ -546,7 +553,7 @@ Xbyak::Reg32 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg32 dst) { } void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u32 reg) { - auto SetRegister = [=](u32 reg, Xbyak::Reg32 result) { + const auto SetRegister = [this](u32 reg, const Xbyak::Reg32& result) { // Register 0 is supposed to always return 0. NOP is implemented as a store to the zero // register. 
if (reg == 0) { @@ -554,7 +561,7 @@ void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u3 } mov(dword[STATE + offsetof(JITState, registers) + reg * sizeof(u32)], result); }; - auto SetMethodAddress = [=](Xbyak::Reg32 reg) { mov(METHOD_ADDRESS, reg); }; + const auto SetMethodAddress = [this](const Xbyak::Reg32& reg) { mov(METHOD_ADDRESS, reg); }; switch (operation) { case Macro::ResultOperation::IgnoreAndFetch: diff --git a/src/video_core/macro/macro_jit_x64.h b/src/video_core/macro/macro_jit_x64.h index 51ec090b8..a180e7428 100644 --- a/src/video_core/macro/macro_jit_x64.h +++ b/src/video_core/macro/macro_jit_x64.h @@ -76,6 +76,7 @@ private: bool zero_reg_skip{}; bool skip_dummy_addimmediate{}; bool optimize_for_method_move{}; + bool enable_asserts{}; }; OptimizerState optimizer{}; diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index dbee9f634..ff5505d12 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -210,10 +210,11 @@ bool MemoryManager::IsBlockContinuous(const GPUVAddr start, const std::size_t si return range == inner_size; } -void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const { +void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, + const std::size_t size) const { std::size_t remaining_size{size}; - std::size_t page_index{src_addr >> page_bits}; - std::size_t page_offset{src_addr & page_mask}; + std::size_t page_index{gpu_src_addr >> page_bits}; + std::size_t page_offset{gpu_src_addr & page_mask}; auto& memory = system.Memory(); @@ -234,11 +235,11 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s } } -void MemoryManager::ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer, +void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, const std::size_t size) const { std::size_t remaining_size{size}; - std::size_t page_index{src_addr >> page_bits}; - std::size_t page_offset{src_addr & page_mask}; + std::size_t page_index{gpu_src_addr >> page_bits}; + std::size_t page_offset{gpu_src_addr & page_mask}; auto& memory = system.Memory(); @@ -259,10 +260,11 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer, } } -void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const std::size_t size) { +void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, + const std::size_t size) { std::size_t remaining_size{size}; - std::size_t page_index{dest_addr >> page_bits}; - std::size_t page_offset{dest_addr & page_mask}; + std::size_t page_index{gpu_dest_addr >> page_bits}; + std::size_t page_offset{gpu_dest_addr & page_mask}; auto& memory = system.Memory(); @@ -283,11 +285,11 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const } } -void MemoryManager::WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, +void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, const std::size_t size) { std::size_t remaining_size{size}; - std::size_t page_index{dest_addr >> page_bits}; - std::size_t page_offset{dest_addr & page_mask}; + std::size_t page_index{gpu_dest_addr >> page_bits}; + std::size_t page_offset{gpu_dest_addr & page_mask}; auto& memory = system.Memory(); @@ -306,16 +308,18 @@ void MemoryManager::WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, } } -void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) { 
+void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, + const std::size_t size) { std::vector<u8> tmp_buffer(size); - ReadBlock(src_addr, tmp_buffer.data(), size); - WriteBlock(dest_addr, tmp_buffer.data(), size); + ReadBlock(gpu_src_addr, tmp_buffer.data(), size); + WriteBlock(gpu_dest_addr, tmp_buffer.data(), size); } -void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) { +void MemoryManager::CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, + const std::size_t size) { std::vector<u8> tmp_buffer(size); - ReadBlockUnsafe(src_addr, tmp_buffer.data(), size); - WriteBlockUnsafe(dest_addr, tmp_buffer.data(), size); + ReadBlockUnsafe(gpu_src_addr, tmp_buffer.data(), size); + WriteBlockUnsafe(gpu_dest_addr, tmp_buffer.data(), size); } bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) { diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 0ddd52d5a..87658e87a 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -79,9 +79,9 @@ public: * in the Host Memory counterpart. Note: This functions cause Host GPU Memory * Flushes and Invalidations, respectively to each operation. */ - void ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const; - void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size); - void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size); + void ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const; + void WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size); + void CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size); /** * ReadBlockUnsafe and WriteBlockUnsafe are special versions of ReadBlock and @@ -93,9 +93,9 @@ public: * WriteBlockUnsafe instead of WriteBlock since it shouldn't invalidate the texture * being flushed. */ - void ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const; - void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, std::size_t size); - void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size); + void ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const; + void WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size); + void CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size); /** * IsGranularRange checks if a gpu region can be simply read with a pointer diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 2f75f8801..e12dab899 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -220,8 +220,8 @@ private: return cache_begin < addr_end && addr_begin < cache_end; }; - const u64 page_end = addr_end >> PAGE_SHIFT; - for (u64 page = addr_begin >> PAGE_SHIFT; page <= page_end; ++page) { + const u64 page_end = addr_end >> PAGE_BITS; + for (u64 page = addr_begin >> PAGE_BITS; page <= page_end; ++page) { const auto& it = cached_queries.find(page); if (it == std::end(cached_queries)) { continue; @@ -242,14 +242,14 @@ private: /// Registers the passed parameters as cached and returns a pointer to the stored cached query. 
CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) { rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1); - const u64 page = static_cast<u64>(cpu_addr) >> PAGE_SHIFT; + const u64 page = static_cast<u64>(cpu_addr) >> PAGE_BITS; return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr, host_ptr); } /// Tries to a get a cached query. Returns nullptr on failure. CachedQuery* TryGet(VAddr addr) { - const u64 page = static_cast<u64>(addr) >> PAGE_SHIFT; + const u64 page = static_cast<u64>(addr) >> PAGE_BITS; const auto it = cached_queries.find(page); if (it == std::end(cached_queries)) { return nullptr; @@ -268,7 +268,7 @@ private: } static constexpr std::uintptr_t PAGE_SIZE = 4096; - static constexpr unsigned PAGE_SHIFT = 12; + static constexpr unsigned PAGE_BITS = 12; Core::System& system; VideoCore::RasterizerInterface& rasterizer; diff --git a/src/video_core/renderer_opengl/gl_arb_decompiler.cpp b/src/video_core/renderer_opengl/gl_arb_decompiler.cpp index 1e96b0310..eb5158407 100644 --- a/src/video_core/renderer_opengl/gl_arb_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_arb_decompiler.cpp @@ -281,14 +281,14 @@ private: template <const std::string_view& op> std::string Unary(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("{}{} {}, {};", op, Modifiers(operation), temporary, Visit(operation[0])); return temporary; } template <const std::string_view& op> std::string Binary(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("{}{} {}, {}, {};", op, Modifiers(operation), temporary, Visit(operation[0]), Visit(operation[1])); return temporary; @@ -296,7 +296,7 @@ private: template <const std::string_view& op> std::string Trinary(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("{}{} {}, {}, {}, {};", op, Modifiers(operation), temporary, Visit(operation[0]), Visit(operation[1]), Visit(operation[2])); return temporary; @@ -304,7 +304,7 @@ private: template <const std::string_view& op, bool unordered> std::string FloatComparison(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("TRUNC.U.CC RC.x, {};", Binary<op>(operation)); AddLine("MOV.S {}, 0;", temporary); AddLine("MOV.S {} (NE.x), -1;", temporary); @@ -331,7 +331,7 @@ private: template <const std::string_view& op, bool is_nan> std::string HalfComparison(Operation operation) { - const std::string tmp1 = AllocVectorTemporary(); + std::string tmp1 = AllocVectorTemporary(); const std::string tmp2 = AllocVectorTemporary(); const std::string op_a = Visit(operation[0]); const std::string op_b = Visit(operation[1]); @@ -367,15 +367,14 @@ private: AddLine("MOV.F {}.{}, {};", value, Swizzle(i), Visit(meta.values[i])); } - const std::string result = coord; - AddLine("ATOMIM.{}.{} {}.x, {}, {}, image[{}], {};", op, type, result, value, coord, + AddLine("ATOMIM.{}.{} {}.x, {}, {}, image[{}], {};", op, type, coord, value, coord, image_id, ImageType(meta.image.type)); - return fmt::format("{}.x", result); + return fmt::format("{}.x", coord); } template <const std::string_view& op, const std::string_view& type> std::string Atomic(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); 
std::string address; std::string_view opname; if (const auto gmem = std::get_if<GmemNode>(&*operation[0])) { @@ -396,7 +395,7 @@ private: template <char type> std::string Negate(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); if constexpr (type == 'F') { AddLine("MOV.F32 {}, -{};", temporary, Visit(operation[0])); } else { @@ -407,7 +406,7 @@ private: template <char type> std::string Absolute(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("MOV.{} {}, |{}|;", type, temporary, Visit(operation[0])); return temporary; } @@ -1156,20 +1155,20 @@ void ARBDecompiler::VisitAST(const ASTNode& node) { } std::string ARBDecompiler::VisitExpression(const Expr& node) { - const std::string result = AllocTemporary(); if (const auto expr = std::get_if<ExprAnd>(&*node)) { + std::string result = AllocTemporary(); AddLine("AND.U {}, {}, {};", result, VisitExpression(expr->operand1), VisitExpression(expr->operand2)); return result; } if (const auto expr = std::get_if<ExprOr>(&*node)) { - const std::string result = AllocTemporary(); + std::string result = AllocTemporary(); AddLine("OR.U {}, {}, {};", result, VisitExpression(expr->operand1), VisitExpression(expr->operand2)); return result; } if (const auto expr = std::get_if<ExprNot>(&*node)) { - const std::string result = AllocTemporary(); + std::string result = AllocTemporary(); AddLine("CMP.S {}, {}, 0, -1;", result, VisitExpression(expr->operand1)); return result; } @@ -1186,7 +1185,7 @@ std::string ARBDecompiler::VisitExpression(const Expr& node) { return expr->value ? "0xffffffff" : "0"; } if (const auto expr = std::get_if<ExprGprEqual>(&*node)) { - const std::string result = AllocTemporary(); + std::string result = AllocTemporary(); AddLine("SEQ.U {}, R{}.x, {};", result, expr->gpr, expr->value); return result; } @@ -1231,13 +1230,13 @@ std::string ARBDecompiler::Visit(const Node& node) { } if (const auto immediate = std::get_if<ImmediateNode>(&*node)) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("MOV.U {}, {};", temporary, immediate->GetValue()); return temporary; } if (const auto predicate = std::get_if<PredicateNode>(&*node)) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); switch (const auto index = predicate->GetIndex(); index) { case Tegra::Shader::Pred::UnusedIndex: AddLine("MOV.S {}, -1;", temporary); @@ -1333,13 +1332,13 @@ std::string ARBDecompiler::Visit(const Node& node) { } else { offset_string = Visit(offset); } - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("LDC.F32 {}, cbuf{}[{}];", temporary, cbuf->GetIndex(), offset_string); return temporary; } if (const auto gmem = std::get_if<GmemNode>(&*node)) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("SUB.U {}, {}, {};", temporary, Visit(gmem->GetRealAddress()), Visit(gmem->GetBaseAddress())); AddLine("LDB.U32 {}, {}[{}];", temporary, GlobalMemoryName(gmem->GetDescriptor()), @@ -1348,14 +1347,14 @@ std::string ARBDecompiler::Visit(const Node& node) { } if (const auto lmem = std::get_if<LmemNode>(&*node)) { - const std::string temporary = Visit(lmem->GetAddress()); + std::string temporary = Visit(lmem->GetAddress()); AddLine("SHR.U {}, {}, 2;", temporary, temporary); AddLine("MOV.U {}, lmem[{}].x;", temporary, temporary); return temporary; 
} if (const auto smem = std::get_if<SmemNode>(&*node)) { - const std::string temporary = Visit(smem->GetAddress()); + std::string temporary = Visit(smem->GetAddress()); AddLine("LDS.U32 {}, shared_mem[{}];", temporary, temporary); return temporary; } @@ -1535,7 +1534,7 @@ std::string ARBDecompiler::Assign(Operation operation) { } std::string ARBDecompiler::Select(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("CMP.S {}, {}, {}, {};", temporary, Visit(operation[0]), Visit(operation[1]), Visit(operation[2])); return temporary; @@ -1545,12 +1544,12 @@ std::string ARBDecompiler::FClamp(Operation operation) { // 1.0f in hex, replace with std::bit_cast on C++20 static constexpr u32 POSITIVE_ONE = 0x3f800000; - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); const Node& value = operation[0]; const Node& low = operation[1]; const Node& high = operation[2]; - const auto imm_low = std::get_if<ImmediateNode>(&*low); - const auto imm_high = std::get_if<ImmediateNode>(&*high); + const auto* const imm_low = std::get_if<ImmediateNode>(&*low); + const auto* const imm_high = std::get_if<ImmediateNode>(&*high); if (imm_low && imm_high && imm_low->GetValue() == 0 && imm_high->GetValue() == POSITIVE_ONE) { AddLine("MOV.F32.SAT {}, {};", temporary, Visit(value)); } else { @@ -1574,7 +1573,7 @@ std::string ARBDecompiler::FCastHalf1(Operation operation) { } std::string ARBDecompiler::FSqrt(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("RSQ.F32 {}, {};", temporary, Visit(operation[0])); AddLine("RCP.F32 {}, {};", temporary, temporary); return temporary; @@ -1588,7 +1587,7 @@ std::string ARBDecompiler::FSwizzleAdd(Operation operation) { AddLine("ADD.F {}.x, {}, {};", temporary, Visit(operation[0]), Visit(operation[1])); return fmt::format("{}.x", temporary); } - const std::string lut = AllocVectorTemporary(); + AddLine("AND.U {}.z, {}.threadid, 3;", temporary, StageInputName(stage)); AddLine("SHL.U {}.z, {}.z, 1;", temporary, temporary); AddLine("SHR.U {}.z, {}, {}.z;", temporary, Visit(operation[2]), temporary); @@ -1766,21 +1765,21 @@ std::string ARBDecompiler::LogicalAssign(Operation operation) { } std::string ARBDecompiler::LogicalPick2(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); const u32 index = std::get<ImmediateNode>(*operation[1]).GetValue(); AddLine("MOV.U {}, {}.{};", temporary, Visit(operation[0]), Swizzle(index)); return temporary; } std::string ARBDecompiler::LogicalAnd2(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); const std::string op = Visit(operation[0]); AddLine("AND.U {}, {}.x, {}.y;", temporary, op, op); return temporary; } std::string ARBDecompiler::FloatOrdered(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("MOVC.F32 RC.x, {};", Visit(operation[0])); AddLine("MOVC.F32 RC.y, {};", Visit(operation[1])); AddLine("MOV.S {}, -1;", temporary); @@ -1790,7 +1789,7 @@ std::string ARBDecompiler::FloatOrdered(Operation operation) { } std::string ARBDecompiler::FloatUnordered(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("MOVC.F32 RC.x, {};", Visit(operation[0])); AddLine("MOVC.F32 RC.y, {};", 
Visit(operation[1])); AddLine("MOV.S {}, 0;", temporary); @@ -1800,7 +1799,7 @@ std::string ARBDecompiler::FloatUnordered(Operation operation) { } std::string ARBDecompiler::LogicalAddCarry(Operation operation) { - const std::string temporary = AllocTemporary(); + std::string temporary = AllocTemporary(); AddLine("ADDC.U RC, {}, {};", Visit(operation[0]), Visit(operation[1])); AddLine("MOV.S {}, 0;", temporary); AddLine("IF CF.x;"); diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 59b441943..cd9673d1f 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -13,6 +13,7 @@ #include <fmt/format.h> #include "common/dynamic_library.h" +#include "common/file_util.h" #include "common/logging/log.h" #include "common/telemetry.h" #include "core/core.h" @@ -76,7 +77,8 @@ Common::DynamicLibrary OpenVulkanLibrary() { char* libvulkan_env = getenv("LIBVULKAN_PATH"); if (!libvulkan_env || !library.Open(libvulkan_env)) { // Use the libvulkan.dylib from the application bundle. - std::string filename = File::GetBundleDirectory() + "/Contents/Frameworks/libvulkan.dylib"; + const std::string filename = + FileUtil::GetBundleDirectory() + "/Contents/Frameworks/libvulkan.dylib"; library.Open(filename.c_str()); } #else diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 184b2238a..29001953c 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -870,7 +870,7 @@ void RasterizerVulkan::BeginTransformFeedback() { UNIMPLEMENTED_IF(binding.buffer_offset != 0); const GPUVAddr gpu_addr = binding.Address(); - const std::size_t size = binding.buffer_size; + const auto size = static_cast<VkDeviceSize>(binding.buffer_size); const auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true); scheduler.Record([buffer = buffer, offset = offset, size](vk::CommandBuffer cmdbuf) { @@ -1154,7 +1154,7 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu const auto sampler = sampler_cache.GetSampler(texture.tsc); update_descriptor_queue.AddSampledImage(sampler, image_view); - const auto image_layout = update_descriptor_queue.GetLastImageLayout(); + VkImageLayout* const image_layout = update_descriptor_queue.LastImageLayout(); *image_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; sampled_views.push_back(ImageView{std::move(view), image_layout}); } @@ -1180,7 +1180,7 @@ void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const Ima view->GetImageView(tic.x_source, tic.y_source, tic.z_source, tic.w_source); update_descriptor_queue.AddImage(image_view); - const auto image_layout = update_descriptor_queue.GetLastImageLayout(); + VkImageLayout* const image_layout = update_descriptor_queue.LastImageLayout(); *image_layout = VK_IMAGE_LAYOUT_GENERAL; image_views.push_back(ImageView{std::move(view), image_layout}); } diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp index 681ecde98..351c048d2 100644 --- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp +++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp @@ -24,35 +24,25 @@ void VKUpdateDescriptorQueue::TickFrame() { } void VKUpdateDescriptorQueue::Acquire() { - entries.clear(); -} + // Minimum number of entries required. 
+    // This is the maximum number of entries a single draw call might use.
+    static constexpr std::size_t MIN_ENTRIES = 0x400;
-void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template,
-                                   VkDescriptorSet set) {
-    if (payload.size() + entries.size() >= payload.max_size()) {
+    if (payload.size() + MIN_ENTRIES >= payload.max_size()) {
         LOG_WARNING(Render_Vulkan, "Payload overflow, waiting for worker thread");
         scheduler.WaitWorker();
         payload.clear();
     }
+    upload_start = &*payload.end();
+}
-    // TODO(Rodrigo): Rework to write the payload directly
-    const auto payload_start = payload.data() + payload.size();
-    for (const auto& entry : entries) {
-        if (const auto image = std::get_if<VkDescriptorImageInfo>(&entry)) {
-            payload.push_back(*image);
-        } else if (const auto buffer = std::get_if<VkDescriptorBufferInfo>(&entry)) {
-            payload.push_back(*buffer);
-        } else if (const auto texel = std::get_if<VkBufferView>(&entry)) {
-            payload.push_back(*texel);
-        } else {
-            UNREACHABLE();
-        }
-    }
-
-    scheduler.Record(
-        [payload_start, set, update_template, logical = &device.GetLogical()](vk::CommandBuffer) {
-            logical->UpdateDescriptorSet(set, update_template, payload_start);
-        });
+void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template,
+                                   VkDescriptorSet set) {
+    const void* const data = upload_start;
+    const vk::Device* const logical = &device.GetLogical();
+    scheduler.Record([data, logical, set, update_template](vk::CommandBuffer) {
+        logical->UpdateDescriptorSet(set, update_template, data);
+    });
 }
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index cc7e3dff4..945320c72 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -15,17 +15,13 @@ namespace Vulkan {
 class VKDevice;
 class VKScheduler;
 
-class DescriptorUpdateEntry {
-public:
-    explicit DescriptorUpdateEntry() {}
-
-    DescriptorUpdateEntry(VkDescriptorImageInfo image) : image{image} {}
+struct DescriptorUpdateEntry {
+    DescriptorUpdateEntry(VkDescriptorImageInfo image_) : image{image_} {}
 
-    DescriptorUpdateEntry(VkDescriptorBufferInfo buffer) : buffer{buffer} {}
+    DescriptorUpdateEntry(VkDescriptorBufferInfo buffer_) : buffer{buffer_} {}
 
-    DescriptorUpdateEntry(VkBufferView texel_buffer) : texel_buffer{texel_buffer} {}
+    DescriptorUpdateEntry(VkBufferView texel_buffer_) : texel_buffer{texel_buffer_} {}
 
-private:
     union {
         VkDescriptorImageInfo image;
         VkDescriptorBufferInfo buffer;
@@ -45,32 +41,34 @@ public:
     void Send(VkDescriptorUpdateTemplateKHR update_template, VkDescriptorSet set);
 
     void AddSampledImage(VkSampler sampler, VkImageView image_view) {
-        entries.emplace_back(VkDescriptorImageInfo{sampler, image_view, {}});
+        payload.emplace_back(VkDescriptorImageInfo{sampler, image_view, {}});
     }
 
     void AddImage(VkImageView image_view) {
-        entries.emplace_back(VkDescriptorImageInfo{{}, image_view, {}});
+        payload.emplace_back(VkDescriptorImageInfo{{}, image_view, {}});
    }
 
     void AddBuffer(VkBuffer buffer, u64 offset, std::size_t size) {
-        entries.emplace_back(VkDescriptorBufferInfo{buffer, offset, size});
+        payload.emplace_back(VkDescriptorBufferInfo{buffer, offset, size});
    }
 
     void AddTexelBuffer(VkBufferView texel_buffer) {
-        entries.emplace_back(texel_buffer);
+        payload.emplace_back(texel_buffer);
    }
 
-    VkImageLayout* GetLastImageLayout() {
-        return &std::get<VkDescriptorImageInfo>(entries.back()).imageLayout;
+    VkImageLayout* LastImageLayout() {
+        return &payload.back().image.imageLayout;
     }
 
-private:
-    using Variant = std::variant<VkDescriptorImageInfo, VkDescriptorBufferInfo, VkBufferView>;
+    const VkImageLayout* LastImageLayout() const {
+        return &payload.back().image.imageLayout;
+    }
 
+private:
     const VKDevice& device;
     VKScheduler& scheduler;
 
-    boost::container::static_vector<Variant, 0x400> entries;
+    const DescriptorUpdateEntry* upload_start = nullptr;
     boost::container::static_vector<DescriptorUpdateEntry, 0x10000> payload;
 };
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 2ce9b0626..42eff85d3 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -725,8 +725,7 @@ bool PhysicalDevice::GetSurfaceSupportKHR(u32 queue_family_index, VkSurfaceKHR s
     return supported == VK_TRUE;
 }
 
-VkSurfaceCapabilitiesKHR PhysicalDevice::GetSurfaceCapabilitiesKHR(VkSurfaceKHR surface) const
-    noexcept {
+VkSurfaceCapabilitiesKHR PhysicalDevice::GetSurfaceCapabilitiesKHR(VkSurfaceKHR surface) const {
     VkSurfaceCapabilitiesKHR capabilities;
     Check(dld->vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, surface, &capabilities));
     return capabilities;
diff --git a/src/video_core/renderer_vulkan/wrapper.h b/src/video_core/renderer_vulkan/wrapper.h
index 98937a77a..da42ca88e 100644
--- a/src/video_core/renderer_vulkan/wrapper.h
+++ b/src/video_core/renderer_vulkan/wrapper.h
@@ -779,7 +779,7 @@ public:
 
     bool GetSurfaceSupportKHR(u32 queue_family_index, VkSurfaceKHR) const;
 
-    VkSurfaceCapabilitiesKHR GetSurfaceCapabilitiesKHR(VkSurfaceKHR) const noexcept;
+    VkSurfaceCapabilitiesKHR GetSurfaceCapabilitiesKHR(VkSurfaceKHR) const;
 
     std::vector<VkSurfaceFormatKHR> GetSurfaceFormatsKHR(VkSurfaceKHR) const;
 
diff --git a/src/video_core/shader/memory_util.cpp b/src/video_core/shader/memory_util.cpp
index 074f21691..5071c83ca 100644
--- a/src/video_core/shader/memory_util.cpp
+++ b/src/video_core/shader/memory_util.cpp
@@ -66,12 +66,12 @@ ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_add
 
 u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
                         const ProgramCode& code_b) {
-    u64 unique_identifier = boost::hash_value(code);
+    size_t unique_identifier = boost::hash_value(code);
     if (is_a) {
         // VertexA programs include two programs
         boost::hash_combine(unique_identifier, boost::hash_value(code_b));
     }
-    return unique_identifier;
+    return static_cast<u64>(unique_identifier);
 }
 
 } // namespace VideoCommon::Shader
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
index a23c23886..2dd270e99 100644
--- a/src/video_core/shader_cache.h
+++ b/src/video_core/shader_cache.h
@@ -19,7 +19,7 @@ namespace VideoCommon {
 
 template <class T>
 class ShaderCache {
-    static constexpr u64 PAGE_SHIFT = 14;
+    static constexpr u64 PAGE_BITS = 14;
 
     struct Entry {
         VAddr addr_start;
@@ -87,8 +87,8 @@ protected:
         const VAddr addr_end = addr + size;
         Entry* const entry = NewEntry(addr, addr_end, data.get());
 
-        const u64 page_end = addr_end >> PAGE_SHIFT;
-        for (u64 page = addr >> PAGE_SHIFT; page <= page_end; ++page) {
+        const u64 page_end = addr_end >> PAGE_BITS;
+        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
             invalidation_cache[page].push_back(entry);
         }
 
@@ -108,8 +108,8 @@ private:
     /// @pre invalidation_mutex is locked
     void InvalidatePagesInRegion(VAddr addr, std::size_t size) {
         const VAddr addr_end = addr + size;
-        const u64 page_end = addr_end >> PAGE_SHIFT;
-        for (u64 page = addr >> PAGE_SHIFT; page <= page_end; ++page) {
+        const u64 page_end = addr_end >> PAGE_BITS;
+        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
            const auto it = invalidation_cache.find(page);
            if (it == invalidation_cache.end()) {
                continue;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index b543fc8c0..85075e868 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1053,7 +1053,7 @@ private:
     void DeduceBestBlit(SurfaceParams& src_params, SurfaceParams& dst_params,
                         const GPUVAddr src_gpu_addr, const GPUVAddr dst_gpu_addr) {
         auto deduced_src = DeduceSurface(src_gpu_addr, src_params);
-        auto deduced_dst = DeduceSurface(src_gpu_addr, src_params);
+        auto deduced_dst = DeduceSurface(dst_gpu_addr, dst_params);
         if (deduced_src.Failed() || deduced_dst.Failed()) {
             return;
         }
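The vk_update_descriptor rework above removes the intermediate entries vector: Acquire() now waits on the worker thread whenever fewer than MIN_ENTRIES (0x400) payload slots remain and remembers upload_start, the Add* helpers write DescriptorUpdateEntry values directly into payload, and Send() only records a callback that hands upload_start to the templated descriptor-set update. Below is a simplified sketch of that scheme using placeholder types rather than yuzu's classes; the std::vector stand-in only works because its capacity is reserved once and never exceeded, mirroring the fixed-capacity boost static_vector:

    #include <cstddef>
    #include <vector>

    struct Entry {}; // placeholder for the image/buffer/texel-buffer descriptor union

    class UpdateQueueSketch {
    public:
        UpdateQueueSketch() { payload.reserve(MAX_ENTRIES); } // capacity fixed up front

        void Acquire() {
            // Guarantee head-room for the worst case of one draw; flush the payload first
            // if it could overflow (yuzu additionally waits for its worker thread here).
            if (payload.size() + MIN_ENTRIES >= MAX_ENTRIES) {
                payload.clear();
            }
            upload_start = payload.data() + payload.size();
        }

        void Add(const Entry& entry) {
            payload.push_back(entry); // written straight into the payload, no staging vector
        }

        const Entry* UploadStart() const {
            return upload_start; // consumed later by the deferred descriptor update
        }

    private:
        static constexpr std::size_t MIN_ENTRIES = 0x400;  // worst case for a single draw
        static constexpr std::size_t MAX_ENTRIES = 0x10000; // total payload capacity
        std::vector<Entry> payload;
        const Entry* upload_start = nullptr;
    };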

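A smaller recurring change in gl_arb_decompiler.cpp drops const from local std::string temporaries that are returned by value (the AllocTemporary() results). A const local can never be moved from, so whenever named return value optimisation does not apply the return degrades to a copy. A short illustration, independent of the decompiler itself:

    #include <string>

    // Returning a const-qualified local forbids the implicit move on return;
    // if NRVO is not performed, the caller receives a copy of the string.
    std::string MakeLabelCopying(int id) {
        const std::string name = "temp" + std::to_string(id);
        return name; // copies whenever the compiler does not elide
    }

    // Dropping const (as the patch does) lets the return value be moved instead.
    std::string MakeLabelMoving(int id) {
        std::string name = "temp" + std::to_string(id);
        return name; // NRVO or, failing that, an implicit move
    }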