Diffstat (limited to 'src/video_core')
24 files changed, 1068 insertions, 296 deletions
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 597b279b9..74e44c7fe 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -47,9 +47,12 @@ void Fermi2D::HandleSurfaceCopy() {
     u32 dst_bytes_per_pixel = RenderTargetBytesPerPixel(regs.dst.format);
 
     if (!rasterizer.AccelerateSurfaceCopy(regs.src, regs.dst)) {
-        // TODO(bunnei): The below implementation currently will not get hit, as
-        // AccelerateSurfaceCopy tries to always copy and will always return success. This should be
-        // changed once we properly support flushing.
+        rasterizer.FlushRegion(source_cpu, src_bytes_per_pixel * regs.src.width * regs.src.height);
+        // We have to invalidate the destination region to evict any outdated surfaces from the
+        // cache. We do this before actually writing the new data because the destination address
+        // might contain a dirty surface that will have to be written back to memory.
+        rasterizer.InvalidateRegion(dest_cpu,
+                                    dst_bytes_per_pixel * regs.dst.width * regs.dst.height);
 
         if (regs.src.linear == regs.dst.linear) {
            // If the input layout and the output layout are the same, just perform a raw copy.
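The ordering above is the important part: the source region is flushed so guest RAM holds the GPU's latest pixels, while the destination region is invalidated *before* the write so a dirty cached surface cannot later be written back over the new data. A minimal sketch of the pattern shared by the engine changes in this commit (`Rasterizer` and `ToVAddr` are hypothetical stand-ins for VideoCore::RasterizerInterface and the address translation):

    #include <cstring>

    // Hypothetical sketch of the flush-then-invalidate pattern used by Fermi2D,
    // KeplerMemory and MaxwellDMA in this commit.
    void WriteThroughCpu(Rasterizer& rasterizer, u8* dst, const u8* src, u64 src_size,
                         u64 dst_size) {
        // 1. Write back any GPU-cached data covering the source, so the CPU copy
        //    reads up-to-date pixels.
        rasterizer.FlushRegion(ToVAddr(src), src_size);
        // 2. Evict cached surfaces covering the destination *before* writing;
        //    otherwise a dirty surface could later be flushed on top of the new data.
        rasterizer.InvalidateRegion(ToVAddr(dst), dst_size);
        // 3. Only now perform the CPU-side write.
        std::memcpy(dst, src, dst_size);
    }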
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 66ae6332d..585290d9f 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -5,10 +5,14 @@
 #include "common/logging/log.h"
 #include "core/memory.h"
 #include "video_core/engines/kepler_memory.h"
+#include "video_core/rasterizer_interface.h"
 
 namespace Tegra::Engines {
 
-KeplerMemory::KeplerMemory(MemoryManager& memory_manager) : memory_manager(memory_manager) {}
+KeplerMemory::KeplerMemory(VideoCore::RasterizerInterface& rasterizer,
+                           MemoryManager& memory_manager)
+    : memory_manager(memory_manager), rasterizer{rasterizer} {}
+
 KeplerMemory::~KeplerMemory() = default;
 
 void KeplerMemory::WriteReg(u32 method, u32 value) {
@@ -37,6 +41,11 @@ void KeplerMemory::ProcessData(u32 data) {
     VAddr dest_address =
         *memory_manager.GpuToCpuAddress(address + state.write_offset * sizeof(u32));
 
+    // We have to invalidate the destination region to evict any outdated surfaces from the cache.
+    // We do this before actually writing the new data because the destination address might contain
+    // a dirty surface that will have to be written back to memory.
+    rasterizer.InvalidateRegion(dest_address, sizeof(u32));
+
     Memory::Write32(dest_address, data);
 
     state.write_offset++;
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index b0d0078cf..bf4a13cff 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -11,6 +11,10 @@
 #include "common/common_types.h"
 #include "video_core/memory_manager.h"
 
+namespace VideoCore {
+class RasterizerInterface;
+}
+
 namespace Tegra::Engines {
 
 #define KEPLERMEMORY_REG_INDEX(field_name)                                                         \
@@ -18,7 +22,7 @@ namespace Tegra::Engines {
 
 class KeplerMemory final {
 public:
-    KeplerMemory(MemoryManager& memory_manager);
+    KeplerMemory(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
     ~KeplerMemory();
 
     /// Write the value to the register identified by method.
@@ -72,6 +76,7 @@ public:
 
 private:
     MemoryManager& memory_manager;
+    VideoCore::RasterizerInterface& rasterizer;
 
     void ProcessData(u32 data);
 };
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 8afd26fe9..27ef865a2 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 
 #include <cinttypes>
+#include <cstring>
 #include "common/assert.h"
 #include "core/core.h"
 #include "core/core_timing.h"
@@ -13,14 +14,30 @@
 #include "video_core/renderer_base.h"
 #include "video_core/textures/texture.h"
 
-namespace Tegra {
-namespace Engines {
+namespace Tegra::Engines {
 
 /// First register id that is actually a Macro call.
 constexpr u32 MacroRegistersStart = 0xE00;
 
 Maxwell3D::Maxwell3D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
-    : memory_manager(memory_manager), rasterizer{rasterizer}, macro_interpreter(*this) {}
+    : memory_manager(memory_manager), rasterizer{rasterizer}, macro_interpreter(*this) {
+    InitializeRegisterDefaults();
+}
+
+void Maxwell3D::InitializeRegisterDefaults() {
+    // Initializes registers to their default values - what games expect them to be at boot. This is
+    // for certain registers that may not be explicitly set by games.
+
+    // Reset all registers to zero
+    std::memset(&regs, 0, sizeof(regs));
+
+    // Depth range near/far is not always set, but is expected to be the default 0.0f, 1.0f. This is
+    // needed for ARMS.
+    for (std::size_t viewport{}; viewport < Regs::NumViewports; ++viewport) {
+        regs.viewport[viewport].depth_range_near = 0.0f;
+        regs.viewport[viewport].depth_range_far = 1.0f;
+    }
+}
 
 void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) {
     // Reset the current macro.
@@ -156,7 +173,6 @@ void Maxwell3D::ProcessQueryGet() {
     ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop,
                "Units other than CROP are unimplemented");
 
-    u32 value = Memory::Read32(*address);
     u64 result = 0;
 
     // TODO(Subv): Support the other query variables
@@ -408,5 +424,4 @@ void Maxwell3D::ProcessClearBuffers() {
     rasterizer.Clear();
 }
 
-} // namespace Engines
-} // namespace Tegra
+} // namespace Tegra::Engines
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index c8af1c6b6..754a149fa 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -643,8 +643,10 @@ public:
                 u32 d3d_cull_mode;
 
                 ComparisonOp depth_test_func;
+                float alpha_test_ref;
+                ComparisonOp alpha_test_func;
 
-                INSERT_PADDING_WORDS(0xB);
+                INSERT_PADDING_WORDS(0x9);
 
                 struct {
                     u32 separate_alpha;
@@ -982,6 +984,8 @@ public:
     Texture::FullTextureInfo GetStageTexture(Regs::ShaderStage stage, std::size_t offset) const;
 
 private:
+    void InitializeRegisterDefaults();
+
     VideoCore::RasterizerInterface& rasterizer;
 
     std::unordered_map<u32, std::vector<u32>> uploaded_macros;
diff --git a/src/video_core/engines/maxwell_compute.cpp b/src/video_core/engines/maxwell_compute.cpp
index 59e28b22d..8b5f08351 100644
--- a/src/video_core/engines/maxwell_compute.cpp
+++ b/src/video_core/engines/maxwell_compute.cpp
@@ -6,8 +6,7 @@
 #include "core/core.h"
 #include "video_core/engines/maxwell_compute.h"
 
-namespace Tegra {
-namespace Engines {
+namespace Tegra::Engines {
 
 void MaxwellCompute::WriteReg(u32 method, u32 value) {
     ASSERT_MSG(method < Regs::NUM_REGS,
@@ -26,5 +25,4 @@ void MaxwellCompute::WriteReg(u32 method, u32 value) {
     }
 }
 
-} // namespace Engines
-} // namespace Tegra
+} // namespace Tegra::Engines
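Note how the new `alpha_test_ref`/`alpha_test_func` registers are carved out of the padding in maxwell_3d.h: the register struct is a fixed memory map, so the two new 32-bit entries must be compensated by shrinking INSERT_PADDING_WORDS from 0xB to 0x9 to keep every later register at the same offset. A sketch of the invariant (offset checks of this kind exist in the header; the exact index here is illustrative):

    // Two named registers replace two padding words; everything after them keeps
    // its offset in the register file.
    static_assert(0xB == 2 + 0x9, "alpha test registers must consume exactly two padding words");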
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index bf2a21bb6..b8a78cf82 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -4,12 +4,13 @@
 
 #include "core/memory.h"
 #include "video_core/engines/maxwell_dma.h"
+#include "video_core/rasterizer_interface.h"
 #include "video_core/textures/decoders.h"
 
-namespace Tegra {
-namespace Engines {
+namespace Tegra::Engines {
 
-MaxwellDMA::MaxwellDMA(MemoryManager& memory_manager) : memory_manager(memory_manager) {}
+MaxwellDMA::MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
+    : memory_manager(memory_manager), rasterizer{rasterizer} {}
 
 void MaxwellDMA::WriteReg(u32 method, u32 value) {
     ASSERT_MSG(method < Regs::NUM_REGS,
@@ -44,40 +45,77 @@ void MaxwellDMA::HandleCopy() {
     ASSERT(regs.exec.query_mode == Regs::QueryMode::None);
     ASSERT(regs.exec.query_intr == Regs::QueryIntr::None);
     ASSERT(regs.exec.copy_mode == Regs::CopyMode::Unk2);
-    ASSERT(regs.src_params.pos_x == 0);
-    ASSERT(regs.src_params.pos_y == 0);
     ASSERT(regs.dst_params.pos_x == 0);
     ASSERT(regs.dst_params.pos_y == 0);
 
-    if (regs.exec.is_dst_linear == regs.exec.is_src_linear) {
-        std::size_t copy_size = regs.x_count;
+    if (!regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
+        // If both the source and the destination are in block layout, assert.
+        UNREACHABLE_MSG("Tiled->Tiled DMA transfers are not yet implemented");
+        return;
+    }
 
+    if (regs.exec.is_dst_linear && regs.exec.is_src_linear) {
         // When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D
-        // buffer of length `x_count`, otherwise we copy a 2D buffer of size (x_count, y_count).
-        if (regs.exec.enable_2d) {
-            copy_size = copy_size * regs.y_count;
+        // buffer of length `x_count`, otherwise we copy a 2D image of dimensions (x_count,
+        // y_count).
+        if (!regs.exec.enable_2d) {
+            Memory::CopyBlock(dest_cpu, source_cpu, regs.x_count);
+            return;
         }
 
-        Memory::CopyBlock(dest_cpu, source_cpu, copy_size);
+        // If both the source and the destination are in linear layout, perform a line-by-line
+        // copy. We're going to take a subrect of size (x_count, y_count) from the source
+        // rectangle. There is no need to manually flush/invalidate the regions because
+        // CopyBlock does that for us.
+        for (u32 line = 0; line < regs.y_count; ++line) {
+            const VAddr source_line = source_cpu + line * regs.src_pitch;
+            const VAddr dest_line = dest_cpu + line * regs.dst_pitch;
+            Memory::CopyBlock(dest_line, source_line, regs.x_count);
+        }
         return;
     }
 
     ASSERT(regs.exec.enable_2d == 1);
-    u8* src_buffer = Memory::GetPointer(source_cpu);
-    u8* dst_buffer = Memory::GetPointer(dest_cpu);
+
+    const std::size_t copy_size = regs.x_count * regs.y_count;
+
+    const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) {
+        // TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
+        // copying.
+        rasterizer.FlushRegion(source_cpu, src_size);
+
+        // We have to invalidate the destination region to evict any outdated surfaces from the
+        // cache. We do this before actually writing the new data because the destination address
+        // might contain a dirty surface that will have to be written back to memory.
+        rasterizer.InvalidateRegion(dest_cpu, dst_size);
+    };
 
     if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
+        ASSERT(regs.src_params.size_z == 1);
         // If the input is tiled and the output is linear, deswizzle the input and copy it over.
-        Texture::CopySwizzledData(regs.src_params.size_x, regs.src_params.size_y,
-                                  regs.src_params.size_z, 1, 1, src_buffer, dst_buffer, true,
-                                  regs.src_params.BlockHeight(), regs.src_params.BlockDepth());
+
+        const u32 src_bytes_per_pixel = regs.src_pitch / regs.src_params.size_x;
+
+        FlushAndInvalidate(regs.src_pitch * regs.src_params.size_y,
+                           copy_size * src_bytes_per_pixel);
+
+        Texture::UnswizzleSubrect(regs.x_count, regs.y_count, regs.dst_pitch,
+                                  regs.src_params.size_x, src_bytes_per_pixel, source_cpu, dest_cpu,
+                                  regs.src_params.BlockHeight(), regs.src_params.pos_x,
+                                  regs.src_params.pos_y);
     } else {
+        ASSERT(regs.dst_params.size_z == 1);
+        ASSERT(regs.src_pitch == regs.x_count);
+
+        const u32 src_bpp = regs.src_pitch / regs.x_count;
+
+        FlushAndInvalidate(regs.src_pitch * regs.y_count,
+                           regs.dst_params.size_x * regs.dst_params.size_y * src_bpp);
+
         // If the input is linear and the output is tiled, swizzle the input and copy it over.
-        Texture::CopySwizzledData(regs.dst_params.size_x, regs.dst_params.size_y,
-                                  regs.dst_params.size_z, 1, 1, dst_buffer, src_buffer, false,
-                                  regs.dst_params.BlockHeight(), regs.dst_params.BlockDepth());
+        Texture::SwizzleSubrect(regs.x_count, regs.y_count, regs.src_pitch, regs.dst_params.size_x,
+                                src_bpp, dest_cpu, source_cpu, regs.dst_params.BlockHeight());
     }
 }
 
-} // namespace Engines
-} // namespace Tegra
+} // namespace Tegra::Engines
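The new linear-to-linear 2D path is a classic pitched copy: `src_pitch`/`dst_pitch` give the byte distance between consecutive rows, which may exceed the `x_count` bytes actually copied per row, so rows must be copied one at a time. A self-contained sketch of the addressing with plain pointers:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Line-by-line pitched 2D copy, mirroring the linear->linear loop in
    // MaxwellDMA::HandleCopy. Pitches larger than x_count leave the padding
    // between rows untouched.
    void CopyPitched2D(std::uint8_t* dst, const std::uint8_t* src, std::size_t x_count,
                       std::size_t y_count, std::size_t src_pitch, std::size_t dst_pitch) {
        for (std::size_t line = 0; line < y_count; ++line) {
            std::memcpy(dst + line * dst_pitch, src + line * src_pitch, x_count);
        }
    }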
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index df19e02e2..5f3704f05 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -12,11 +12,15 @@
 #include "video_core/gpu.h"
 #include "video_core/memory_manager.h"
 
+namespace VideoCore {
+class RasterizerInterface;
+}
+
 namespace Tegra::Engines {
 
 class MaxwellDMA final {
 public:
-    explicit MaxwellDMA(MemoryManager& memory_manager);
+    explicit MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
     ~MaxwellDMA() = default;
 
     /// Write the value to the register identified by method.
@@ -133,6 +137,8 @@ public:
     MemoryManager& memory_manager;
 
 private:
+    VideoCore::RasterizerInterface& rasterizer;
+
     /// Performs the copy from the source buffer to the destination buffer as configured in the
     /// registers.
     void HandleCopy();
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index f356f9a03..af7756266 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -79,6 +79,7 @@ union Attribute {
     constexpr explicit Attribute(u64 value) : value(value) {}
 
     enum class Index : u64 {
+        PointSize = 6,
         Position = 7,
         Attribute_0 = 8,
         Attribute_31 = 39,
@@ -214,7 +215,7 @@ enum class IMinMaxExchange : u64 {
     XHi = 3,
 };
 
-enum class VmadType : u64 {
+enum class VideoType : u64 {
     Size16_Low = 0,
     Size16_High = 1,
     Size32 = 2,
@@ -335,6 +336,26 @@ enum class IsberdMode : u64 {
 
 enum class IsberdShift : u64 { None = 0, U16 = 1, B32 = 2 };
 
+enum class HalfType : u64 {
+    H0_H1 = 0,
+    F32 = 1,
+    H0_H0 = 2,
+    H1_H1 = 3,
+};
+
+enum class HalfMerge : u64 {
+    H0_H1 = 0,
+    F32 = 1,
+    Mrg_H0 = 2,
+    Mrg_H1 = 3,
+};
+
+enum class HalfPrecision : u64 {
+    None = 0,
+    FTZ = 1,
+    FMZ = 2,
+};
+
 enum class IpaInterpMode : u64 {
     Linear = 0,
     Perspective = 1,
@@ -544,6 +565,10 @@ union Instruction {
     } fmul;
 
     union {
+        BitField<55, 1, u64> saturate;
+    } fmul32;
+
+    union {
         BitField<48, 1, u64> is_signed;
     } shift;
 
@@ -554,6 +579,70 @@ union Instruction {
     } alu_integer;
 
     union {
+        BitField<39, 1, u64> ftz;
+        BitField<32, 1, u64> saturate;
+        BitField<49, 2, HalfMerge> merge;
+
+        BitField<43, 1, u64> negate_a;
+        BitField<44, 1, u64> abs_a;
+        BitField<47, 2, HalfType> type_a;
+
+        BitField<31, 1, u64> negate_b;
+        BitField<30, 1, u64> abs_b;
+        BitField<47, 2, HalfType> type_b;
+
+        BitField<35, 2, HalfType> type_c;
+    } alu_half;
+
+    union {
+        BitField<39, 2, HalfPrecision> precision;
+        BitField<39, 1, u64> ftz;
+        BitField<52, 1, u64> saturate;
+        BitField<49, 2, HalfMerge> merge;
+
+        BitField<43, 1, u64> negate_a;
+        BitField<44, 1, u64> abs_a;
+        BitField<47, 2, HalfType> type_a;
+    } alu_half_imm;
+
+    union {
+        BitField<29, 1, u64> first_negate;
+        BitField<20, 9, u64> first;
+
+        BitField<56, 1, u64> second_negate;
+        BitField<30, 9, u64> second;
+
+        u32 PackImmediates() const {
+            // Immediates are half floats shifted.
+            constexpr u32 imm_shift = 6;
+            return static_cast<u32>((first << imm_shift) | (second << (16 + imm_shift)));
+        }
+    } half_imm;
+
+    union {
+        union {
+            BitField<37, 2, HalfPrecision> precision;
+            BitField<32, 1, u64> saturate;
+
+            BitField<30, 1, u64> negate_c;
+            BitField<35, 2, HalfType> type_c;
+        } rr;
+
+        BitField<57, 2, HalfPrecision> precision;
+        BitField<52, 1, u64> saturate;
+
+        BitField<49, 2, HalfMerge> merge;
+
+        BitField<47, 2, HalfType> type_a;
+
+        BitField<56, 1, u64> negate_b;
+        BitField<28, 2, HalfType> type_b;
+
+        BitField<51, 1, u64> negate_c;
+        BitField<53, 2, HalfType> type_reg39;
+    } hfma2;
+
+    union {
         BitField<40, 1, u64> invert;
     } popc;
 
@@ -669,7 +758,6 @@ union Instruction {
         BitField<45, 2, PredOperation> op;
         BitField<47, 1, u64> ftz;
         BitField<48, 4, PredCondition> cond;
-        BitField<56, 1, u64> neg_b;
     } fsetp;
 
     union {
@@ -696,6 +784,14 @@ union Instruction {
     } psetp;
 
     union {
+        BitField<43, 4, PredCondition> cond;
+        BitField<45, 2, PredOperation> op;
+        BitField<3, 3, u64> pred3;
+        BitField<0, 3, u64> pred0;
+        BitField<39, 3, u64> pred39;
+    } vsetp;
+
+    union {
         BitField<12, 3, u64> pred12;
         BitField<15, 1, u64> neg_pred12;
         BitField<24, 2, PredOperation> cond;
@@ -717,6 +813,23 @@ union Instruction {
     } csetp;
 
     union {
+        BitField<35, 4, PredCondition> cond;
+        BitField<49, 1, u64> h_and;
+        BitField<6, 1, u64> ftz;
+        BitField<45, 2, PredOperation> op;
+        BitField<3, 3, u64> pred3;
+        BitField<0, 3, u64> pred0;
+        BitField<43, 1, u64> negate_a;
+        BitField<44, 1, u64> abs_a;
+        BitField<47, 2, HalfType> type_a;
+        BitField<31, 1, u64> negate_b;
+        BitField<30, 1, u64> abs_b;
+        BitField<28, 2, HalfType> type_b;
+        BitField<42, 1, u64> neg_pred;
+        BitField<39, 3, u64> pred39;
+    } hsetp2;
+
+    union {
         BitField<39, 3, u64> pred39;
         BitField<42, 1, u64> neg_pred;
         BitField<43, 1, u64> neg_a;
@@ -727,10 +840,24 @@ union Instruction {
         BitField<53, 1, u64> neg_b;
         BitField<54, 1, u64> abs_a;
         BitField<55, 1, u64> ftz;
-        BitField<56, 1, u64> neg_imm;
     } fset;
 
     union {
+        BitField<49, 1, u64> bf;
+        BitField<35, 3, PredCondition> cond;
+        BitField<50, 1, u64> ftz;
+        BitField<45, 2, PredOperation> op;
+        BitField<43, 1, u64> negate_a;
+        BitField<44, 1, u64> abs_a;
+        BitField<47, 2, HalfType> type_a;
+        BitField<31, 1, u64> negate_b;
+        BitField<30, 1, u64> abs_b;
+        BitField<28, 2, HalfType> type_b;
+        BitField<42, 1, u64> neg_pred;
+        BitField<39, 3, u64> pred39;
+    } hset2;
+
+    union {
         BitField<39, 3, u64> pred39;
         BitField<42, 1, u64> neg_pred;
         BitField<44, 1, u64> bf;
@@ -1036,15 +1163,17 @@ union Instruction {
     union {
         BitField<48, 1, u64> signed_a;
         BitField<38, 1, u64> is_byte_chunk_a;
-        BitField<36, 2, VmadType> type_a;
+        BitField<36, 2, VideoType> type_a;
         BitField<36, 2, u64> byte_height_a;
 
         BitField<49, 1, u64> signed_b;
         BitField<50, 1, u64> use_register_b;
         BitField<30, 1, u64> is_byte_chunk_b;
-        BitField<28, 2, VmadType> type_b;
+        BitField<28, 2, VideoType> type_b;
         BitField<28, 2, u64> byte_height_b;
+    } video;
 
+    union {
         BitField<51, 2, VmadShr> shr;
         BitField<55, 1, u64> saturate; // Saturates the result (a * b + c)
         BitField<47, 1, u64> cc;
@@ -1095,11 +1224,13 @@ public:
         KIL,
         SSY,
         SYNC,
+        BRK,
         DEPBAR,
         BFE_C,
         BFE_R,
         BFE_IMM,
         BRA,
+        PBK,
         LD_A,
         LD_C,
         ST_A,
@@ -1118,6 +1249,7 @@ public:
         OUT_R, // Emit vertex/primitive
         ISBERD,
         VMAD,
+        VSETP,
         FFMA_IMM, // Fused Multiply and Add
         FFMA_CR,
         FFMA_RC,
@@ -1145,6 +1277,18 @@ public:
         LEA_RZ,
         LEA_IMM,
         LEA_HI,
+        HADD2_C,
+        HADD2_R,
+        HADD2_IMM,
+        HMUL2_C,
+        HMUL2_R,
+        HMUL2_IMM,
+        HFMA2_CR,
+        HFMA2_RC,
+        HFMA2_RR,
+        HFMA2_IMM_R,
+        HSETP2_R,
+        HSET2_R,
         POPC_C,
         POPC_R,
        POPC_IMM,
@@ -1218,9 +1362,12 @@ public:
        ArithmeticImmediate,
        ArithmeticInteger,
        ArithmeticIntegerImmediate,
+        ArithmeticHalf,
+        ArithmeticHalfImmediate,
        Bfe,
        Shift,
        Ffma,
+        Hfma2,
        Flow,
        Synch,
        Memory,
@@ -1228,6 +1375,8 @@ public:
        FloatSetPredicate,
        IntegerSet,
        IntegerSetPredicate,
+        HalfSet,
+        HalfSetPredicate,
        PredicateSetPredicate,
        PredicateSetRegister,
        Conversion,
@@ -1239,7 +1388,7 @@ public:
     /// conditionally executed).
     static bool IsPredicatedInstruction(Id opcode) {
         // TODO(Subv): Add the rest of unpredicated instructions.
-        return opcode != Id::SSY;
+        return opcode != Id::SSY && opcode != Id::PBK;
     }
 
     class Matcher {
@@ -1335,9 +1484,11 @@ private:
 #define INST(bitstring, op, type, name) Detail::GetMatcher(bitstring, op, type, name)
         INST("111000110011----", Id::KIL, Type::Flow, "KIL"),
         INST("111000101001----", Id::SSY, Type::Flow, "SSY"),
+        INST("111000101010----", Id::PBK, Type::Flow, "PBK"),
         INST("111000100100----", Id::BRA, Type::Flow, "BRA"),
+        INST("1111000011111---", Id::SYNC, Type::Flow, "SYNC"),
+        INST("111000110100---", Id::BRK, Type::Flow, "BRK"),
         INST("1111000011110---", Id::DEPBAR, Type::Synch, "DEPBAR"),
-        INST("1111000011111---", Id::SYNC, Type::Synch, "SYNC"),
         INST("1110111111011---", Id::LD_A, Type::Memory, "LD_A"),
         INST("1110111110010---", Id::LD_C, Type::Memory, "LD_C"),
         INST("1110111111110---", Id::ST_A, Type::Memory, "ST_A"),
@@ -1356,6 +1507,7 @@ private:
         INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
         INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"),
         INST("01011111--------", Id::VMAD, Type::Trivial, "VMAD"),
+        INST("0101000011110---", Id::VSETP, Type::Trivial, "VSETP"),
         INST("0011001-1-------", Id::FFMA_IMM, Type::Ffma, "FFMA_IMM"),
         INST("010010011-------", Id::FFMA_CR, Type::Ffma, "FFMA_CR"),
         INST("010100011-------", Id::FFMA_RC, Type::Ffma, "FFMA_RC"),
@@ -1389,6 +1541,18 @@ private:
         INST("001101101101----", Id::LEA_IMM, Type::ArithmeticInteger, "LEA_IMM"),
         INST("010010111101----", Id::LEA_RZ, Type::ArithmeticInteger, "LEA_RZ"),
         INST("00011000--------", Id::LEA_HI, Type::ArithmeticInteger, "LEA_HI"),
+        INST("0111101-1-------", Id::HADD2_C, Type::ArithmeticHalf, "HADD2_C"),
+        INST("0101110100010---", Id::HADD2_R, Type::ArithmeticHalf, "HADD2_R"),
+        INST("0111101-0-------", Id::HADD2_IMM, Type::ArithmeticHalfImmediate, "HADD2_IMM"),
+        INST("0111100-1-------", Id::HMUL2_C, Type::ArithmeticHalf, "HMUL2_C"),
+        INST("0101110100001---", Id::HMUL2_R, Type::ArithmeticHalf, "HMUL2_R"),
+        INST("0111100-0-------", Id::HMUL2_IMM, Type::ArithmeticHalfImmediate, "HMUL2_IMM"),
+        INST("01110---1-------", Id::HFMA2_CR, Type::Hfma2, "HFMA2_CR"),
+        INST("01100---1-------", Id::HFMA2_RC, Type::Hfma2, "HFMA2_RC"),
+        INST("0101110100000---", Id::HFMA2_RR, Type::Hfma2, "HFMA2_RR"),
+        INST("01110---0-------", Id::HFMA2_IMM_R, Type::Hfma2, "HFMA2_R_IMM"),
+        INST("0101110100100---", Id::HSETP2_R, Type::HalfSetPredicate, "HSETP_R"),
+        INST("0101110100011---", Id::HSET2_R, Type::HalfSet, "HSET2_R"),
         INST("0101000010000---", Id::MUFU, Type::Arithmetic, "MUFU"),
         INST("0100110010010---", Id::RRO_C, Type::Arithmetic, "RRO_C"),
         INST("0101110010010---", Id::RRO_R, Type::Arithmetic, "RRO_R"),
@@ -1463,4 +1627,4 @@ private:
     }
 };
 
-} // namespace Tegra::Shader
+} // namespace Tegra::Shader
\ No newline at end of file
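A note on `half_imm::PackImmediates` above: each 9-bit immediate field stores the top bits of an fp16 value, so shifting left by 6 restores a full 16-bit half, and the second half lands in the upper 16 bits of the result. The signs travel separately in `first_negate`/`second_negate`, which is why the decompiler's UnpackHalfImmediate (later in this diff) multiplies by ±1. A worked sketch:

    #include <cstdint>

    // Mirrors Instruction::half_imm::PackImmediates(): rebuild a packed fp16 pair
    // from the two 9-bit immediate fields.
    std::uint32_t PackHalfImmediates(std::uint32_t first, std::uint32_t second) {
        constexpr std::uint32_t imm_shift = 6;
        return (first << imm_shift) | (second << (16 + imm_shift));
    }
    // Example: fp16 1.0 is 0x3C00; its stored top bits are 0x3C00 >> 6 == 0x0F0,
    // so PackHalfImmediates(0x0F0, 0x0F0) == 0x3C003C00, i.e. the pair (1.0, 1.0).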
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 9ba7e3533..83c7e5b0b 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -27,8 +27,8 @@ GPU::GPU(VideoCore::RasterizerInterface& rasterizer) {
     maxwell_3d = std::make_unique<Engines::Maxwell3D>(rasterizer, *memory_manager);
     fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager);
     maxwell_compute = std::make_unique<Engines::MaxwellCompute>();
-    maxwell_dma = std::make_unique<Engines::MaxwellDMA>(*memory_manager);
-    kepler_memory = std::make_unique<Engines::KeplerMemory>(*memory_manager);
+    maxwell_dma = std::make_unique<Engines::MaxwellDMA>(rasterizer, *memory_manager);
+    kepler_memory = std::make_unique<Engines::KeplerMemory>(rasterizer, *memory_manager);
 }
 
 GPU::~GPU() = default;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 468253033..b472f421f 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -570,10 +570,12 @@ void RasterizerOpenGL::DrawArrays() {
     SyncBlendState();
     SyncLogicOpState();
     SyncCullMode();
-    SyncAlphaTest();
+    SyncDepthRange();
     SyncScissorTest();
+    // Alpha Testing is synced on shaders.
     SyncTransformFeedback();
     SyncPointState();
+    CheckAlphaTests();
 
     // TODO(bunnei): Sync framebuffer_scale uniform here
     // TODO(bunnei): Sync scissorbox uniform(s) here
@@ -659,6 +661,12 @@ void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
 bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
                                              const Tegra::Engines::Fermi2D::Regs::Surface& dst) {
     MICROPROFILE_SCOPE(OpenGL_Blits);
+
+    if (Settings::values.use_accurate_gpu_emulation) {
+        // Skip the accelerated copy and perform a slow but more accurate copy
+        return false;
+    }
+
     res_cache.FermiCopySurface(src, dst);
     return true;
 }
@@ -916,12 +924,11 @@ void RasterizerOpenGL::SyncCullMode() {
     }
 }
 
-void RasterizerOpenGL::SyncDepthScale() {
-    UNREACHABLE();
-}
+void RasterizerOpenGL::SyncDepthRange() {
+    const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
 
-void RasterizerOpenGL::SyncDepthOffset() {
-    UNREACHABLE();
+    state.depth.depth_range_near = regs.viewport->depth_range_near;
+    state.depth.depth_range_far = regs.viewport->depth_range_far;
 }
 
 void RasterizerOpenGL::SyncDepthTestState() {
@@ -1001,17 +1008,6 @@ void RasterizerOpenGL::SyncLogicOpState() {
     state.logic_op.operation = MaxwellToGL::LogicOp(regs.logic_op.operation);
 }
 
-void RasterizerOpenGL::SyncAlphaTest() {
-    const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
-
-    // TODO(Rodrigo): Alpha testing is a legacy OpenGL feature, but it can be
-    // implemented with a test+discard in fragment shaders.
-    if (regs.alpha_test_enabled != 0) {
-        LOG_CRITICAL(Render_OpenGL, "Alpha testing is not implemented");
-        UNREACHABLE();
-    }
-}
-
 void RasterizerOpenGL::SyncScissorTest() {
     const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
 
@@ -1046,4 +1042,15 @@ void RasterizerOpenGL::SyncPointState() {
     state.point.size = regs.point_size == 0 ? 1 : regs.point_size;
 }
 
+void RasterizerOpenGL::CheckAlphaTests() {
+    const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+
+    if (regs.alpha_test_enabled != 0 && regs.rt_control.count > 1) {
+        LOG_CRITICAL(
+            Render_OpenGL,
+            "Alpha Testing is enabled with Multiple Render Targets, this behavior is undefined.");
+        UNREACHABLE();
+    }
+}
+
 } // namespace OpenGL
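With SyncAlphaTest removed, alpha testing is implemented where modern GL expects it: the shader decompiler (later in this diff) emits an `AlphaFunc(...)` comparison followed by `discard` for the alpha output of each enabled render target. A CPU-side model of the per-fragment test the generated shader performs (the enum below is a local stand-in; Maxwell's actual ComparisonOp encodings differ):

    // Hypothetical local enum standing in for Maxwell3D::Regs::ComparisonOp.
    enum class AlphaFunc { Never, Less, Equal, LessEqual, Greater, NotEqual, GreaterEqual, Always };

    // The generated shader discards the fragment when this returns false for the
    // alpha register of an enabled render target.
    bool AlphaTestPasses(AlphaFunc func, float alpha, float ref) {
        switch (func) {
        case AlphaFunc::Never:        return false;
        case AlphaFunc::Less:         return alpha < ref;
        case AlphaFunc::Equal:        return alpha == ref;
        case AlphaFunc::LessEqual:    return alpha <= ref;
        case AlphaFunc::Greater:      return alpha > ref;
        case AlphaFunc::NotEqual:     return alpha != ref;
        case AlphaFunc::GreaterEqual: return alpha >= ref;
        case AlphaFunc::Always:       return true;
        }
        return true;
    }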
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index b1f7ccc7e..731a336d5 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -144,11 +144,8 @@ private:
     /// Syncs the cull mode to match the guest state
     void SyncCullMode();
 
-    /// Syncs the depth scale to match the guest state
-    void SyncDepthScale();
-
-    /// Syncs the depth offset to match the guest state
-    void SyncDepthOffset();
+    /// Syncs the depth range to match the guest state
+    void SyncDepthRange();
 
     /// Syncs the depth test state to match the guest state
     void SyncDepthTestState();
@@ -162,9 +159,6 @@ private:
     /// Syncs the LogicOp state to match the guest state
     void SyncLogicOpState();
 
-    /// Syncs the alpha test state to match the guest state
-    void SyncAlphaTest();
-
     /// Syncs the scissor test state to match the guest state
     void SyncScissorTest();
 
@@ -174,6 +168,9 @@ private:
     /// Syncs the point state to match the guest state
     void SyncPointState();
 
+    /// Check asserts for alpha testing.
+    void CheckAlphaTests();
+
     bool has_ARB_direct_state_access = false;
     bool has_ARB_multi_bind = false;
     bool has_ARB_separate_shader_objects = false;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
index 9c8925383..591ec7998 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
@@ -78,6 +78,29 @@ void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr_) {
     }
 }
 
+std::size_t SurfaceParams::InnerMemorySize(bool layer_only) const {
+    const u32 compression_factor{GetCompressionFactor(pixel_format)};
+    const u32 bytes_per_pixel{GetBytesPerPixel(pixel_format)};
+    u32 m_depth = (layer_only ? 1U : depth);
+    u32 m_width = std::max(1U, width / compression_factor);
+    u32 m_height = std::max(1U, height / compression_factor);
+    std::size_t size = Tegra::Texture::CalculateSize(is_tiled, bytes_per_pixel, m_width, m_height,
+                                                     m_depth, block_height, block_depth);
+    u32 m_block_height = block_height;
+    u32 m_block_depth = block_depth;
+    std::size_t block_size_bytes = 512 * block_height * block_depth; // 512 is GOB size
+    for (u32 i = 1; i < max_mip_level; i++) {
+        m_width = std::max(1U, m_width / 2);
+        m_height = std::max(1U, m_height / 2);
+        m_depth = std::max(1U, m_depth / 2);
+        m_block_height = std::max(1U, m_block_height / 2);
+        m_block_depth = std::max(1U, m_block_depth / 2);
+        size += Tegra::Texture::CalculateSize(is_tiled, bytes_per_pixel, m_width, m_height, m_depth,
+                                              m_block_height, m_block_depth);
+    }
+    return is_tiled ? Common::AlignUp(size, block_size_bytes) : size;
+}
+
 /*static*/ SurfaceParams SurfaceParams::CreateForTexture(
     const Tegra::Texture::FullTextureInfo& config, const GLShader::SamplerEntry& entry) {
     SurfaceParams params{};
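InnerMemorySize sums the whole mip chain: every level halves width, height, depth and the tiled block dimensions (each clamped to 1), and a tiled total is finally aligned up to a whole block, where one GOB is 512 bytes. The same walk for a linear layout, stripped of the tiling and compression terms:

    #include <algorithm>
    #include <cstddef>

    // Mip-chain size for a linear (untiled) texture: each level halves every
    // dimension, clamped at 1, like SurfaceParams::InnerMemorySize above but
    // without the tiling/alignment terms.
    std::size_t LinearMipChainSize(std::size_t width, std::size_t height, std::size_t depth,
                                   std::size_t bytes_per_pixel, std::size_t mip_levels) {
        std::size_t total = 0;
        for (std::size_t level = 0; level < mip_levels; ++level) {
            total += width * height * depth * bytes_per_pixel;
            width = std::max<std::size_t>(1, width / 2);
            height = std::max<std::size_t>(1, height / 2);
            depth = std::max<std::size_t>(1, depth / 2);
        }
        return total;
    }
    // e.g. a 256x256 RGBA8 2D texture with full mips: 256*256*4 + 128*128*4 + ... + 1*1*4.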
@@ -124,6 +147,7 @@ void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr_) {
         break;
     }
 
+    params.is_layered = SurfaceTargetIsLayered(params.target);
     params.max_mip_level = config.tic.max_mip_level + 1;
     params.rt = {};
 
@@ -150,6 +174,7 @@ void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr_) {
     params.target = SurfaceTarget::Texture2D;
     params.depth = 1;
     params.max_mip_level = 0;
+    params.is_layered = false;
 
     // Render target specific parameters, not used for caching
     params.rt.index = static_cast<u32>(index);
@@ -182,6 +207,7 @@ void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr_) {
     params.target = SurfaceTarget::Texture2D;
     params.depth = 1;
     params.max_mip_level = 0;
+    params.is_layered = false;
     params.rt = {};
 
     params.InitCacheParameters(zeta_address);
@@ -361,10 +387,11 @@ void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 d
     }
 }
 
-static constexpr std::array<void (*)(u32, u32, u32, u32, u32, u8*, std::size_t, VAddr),
-                            SurfaceParams::MaxPixelFormat>
-    morton_to_gl_fns = {
-        // clang-format off
+using GLConversionArray = std::array<void (*)(u32, u32, u32, u32, u32, u8*, std::size_t, VAddr),
+                                     SurfaceParams::MaxPixelFormat>;
+
+static constexpr GLConversionArray morton_to_gl_fns = {
+    // clang-format off
         MortonCopy<true, PixelFormat::ABGR8U>,
         MortonCopy<true, PixelFormat::ABGR8S>,
         MortonCopy<true, PixelFormat::ABGR8UI>,
@@ -418,13 +445,11 @@ static constexpr std::array<void (*)(u32, u32, u32, u32, u32, u8*, std::size_t,
         MortonCopy<true, PixelFormat::Z24S8>,
         MortonCopy<true, PixelFormat::S8Z24>,
         MortonCopy<true, PixelFormat::Z32FS8>,
-        // clang-format on
+    // clang-format on
 };
 
-static constexpr std::array<void (*)(u32, u32, u32, u32, u32, u8*, std::size_t, VAddr),
-                            SurfaceParams::MaxPixelFormat>
-    gl_to_morton_fns = {
-        // clang-format off
+static constexpr GLConversionArray gl_to_morton_fns = {
+    // clang-format off
         MortonCopy<false, PixelFormat::ABGR8U>,
         MortonCopy<false, PixelFormat::ABGR8S>,
         MortonCopy<false, PixelFormat::ABGR8UI>,
@@ -479,9 +504,35 @@ static constexpr std::array<void (*)(u32, u32, u32, u32, u32, u8*, std::size_t,
         MortonCopy<false, PixelFormat::Z24S8>,
         MortonCopy<false, PixelFormat::S8Z24>,
         MortonCopy<false, PixelFormat::Z32FS8>,
-        // clang-format on
+    // clang-format on
 };
 
+void SwizzleFunc(const GLConversionArray& functions, const SurfaceParams& params,
+                 std::vector<u8>& gl_buffer) {
+    u32 depth = params.depth;
+    if (params.target == SurfaceParams::SurfaceTarget::Texture2D) {
+        // TODO(Blinkhawk): Eliminate this condition once all texture types are implemented.
+        depth = 1U;
+    }
+    if (params.is_layered) {
+        u64 offset = 0;
+        u64 offset_gl = 0;
+        u64 layer_size = params.LayerMemorySize();
+        u64 gl_size = params.LayerSizeGL();
+        for (u32 i = 0; i < depth; i++) {
+            functions[static_cast<std::size_t>(params.pixel_format)](
+                params.width, params.block_height, params.height, params.block_depth, 1,
+                gl_buffer.data() + offset_gl, gl_size, params.addr + offset);
+            offset += layer_size;
+            offset_gl += gl_size;
+        }
+    } else {
+        functions[static_cast<std::size_t>(params.pixel_format)](
+            params.width, params.block_height, params.height, params.block_depth, depth,
+            gl_buffer.data(), gl_buffer.size(), params.addr);
+    }
+}
+
 static bool BlitSurface(const Surface& src_surface, const Surface& dst_surface,
                         GLuint read_fb_handle, GLuint draw_fb_handle, GLenum src_attachment = 0,
                         GLenum dst_attachment = 0, std::size_t cubemap_face = 0) {
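For layered targets (arrays, cubemaps) SwizzleFunc advances two independent cursors, because a layer's guest-memory footprint (LayerMemorySize: tiled and mip-aligned) generally differs from its size in the linear GL staging buffer (LayerSizeGL). A sketch of the cursor stepping, with `convert_layer` as a hypothetical stand-in for the morton function pointers:

    #include <cstdint>
    #include <functional>

    void ForEachLayer(std::uint32_t layer_count, std::uint64_t guest_layer_size,
                      std::uint64_t gl_layer_size,
                      const std::function<void(std::uint64_t guest_offset,
                                               std::uint64_t gl_offset)>& convert_layer) {
        std::uint64_t guest_offset = 0;
        std::uint64_t gl_offset = 0;
        for (std::uint32_t layer = 0; layer < layer_count; ++layer) {
            convert_layer(guest_offset, gl_offset);
            guest_offset += guest_layer_size; // stride between layers in guest memory
            gl_offset += gl_layer_size;       // stride between layers in the GL buffer
        }
    }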
@@ -881,21 +932,10 @@ void CachedSurface::LoadGLBuffer() {
     gl_buffer.resize(params.size_in_bytes_gl);
 
     if (params.is_tiled) {
-        u32 depth = params.depth;
-        u32 block_depth = params.block_depth;
-
         ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}",
                    params.block_width, static_cast<u32>(params.target));
 
-        if (params.target == SurfaceParams::SurfaceTarget::Texture2D) {
-            // TODO(Blinkhawk): Eliminate this condition once all texture types are implemented.
-            depth = 1U;
-            block_depth = 1U;
-        }
-
-        morton_to_gl_fns[static_cast<std::size_t>(params.pixel_format)](
-            params.width, params.block_height, params.height, block_depth, depth, gl_buffer.data(),
-            gl_buffer.size(), params.addr);
+        SwizzleFunc(morton_to_gl_fns, params, gl_buffer);
     } else {
         const auto texture_src_data{Memory::GetPointer(params.addr)};
         const auto texture_src_data_end{texture_src_data + params.size_in_bytes_gl};
@@ -929,19 +969,10 @@ void CachedSurface::FlushGLBuffer() {
     const u8* const texture_src_data = Memory::GetPointer(params.addr);
     ASSERT(texture_src_data);
 
     if (params.is_tiled) {
-        u32 depth = params.depth;
-        u32 block_depth = params.block_depth;
-
         ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}",
                    params.block_width, static_cast<u32>(params.target));
 
-        if (params.target == SurfaceParams::SurfaceTarget::Texture2D) {
-            // TODO(Blinkhawk): Eliminate this condition once all texture types are implemented.
-            depth = 1U;
-        }
-
-        gl_to_morton_fns[static_cast<size_t>(params.pixel_format)](
-            params.width, params.block_height, params.height, block_depth, depth, gl_buffer.data(),
-            gl_buffer.size(), GetAddr());
+        SwizzleFunc(gl_to_morton_fns, params, gl_buffer);
     } else {
         std::memcpy(Memory::GetPointer(GetAddr()), gl_buffer.data(), GetSizeInBytes());
     }
@@ -1179,7 +1210,7 @@ void RasterizerCacheOpenGL::AccurateCopySurface(const Surface& src_surface,
                                                 const Surface& dst_surface) {
     const auto& src_params{src_surface->GetSurfaceParams()};
     const auto& dst_params{dst_surface->GetSurfaceParams()};
-    FlushRegion(src_params.addr, dst_params.size_in_bytes);
+    FlushRegion(src_params.addr, dst_params.MemorySize());
     LoadSurface(dst_surface);
 }
 
@@ -1221,44 +1252,10 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface,
             CopySurface(old_surface, new_surface, copy_pbo.handle);
         }
         break;
+    case SurfaceParams::SurfaceTarget::TextureCubemap:
     case SurfaceParams::SurfaceTarget::Texture3D:
         AccurateCopySurface(old_surface, new_surface);
         break;
-    case SurfaceParams::SurfaceTarget::TextureCubemap: {
-        if (old_params.rt.array_mode != 1) {
-            // TODO(bunnei): This is used by Breath of the Wild, I'm not sure how to implement this
-            // yet (array rendering used as a cubemap texture).
-            LOG_CRITICAL(HW_GPU, "Unhandled rendertarget array_mode {}", old_params.rt.array_mode);
-            UNREACHABLE();
-            return new_surface;
-        }
-
-        // This seems to be used for render-to-cubemap texture
-        ASSERT_MSG(old_params.target == SurfaceParams::SurfaceTarget::Texture2D, "Unexpected");
-        ASSERT_MSG(old_params.pixel_format == new_params.pixel_format, "Unexpected");
-        ASSERT_MSG(old_params.rt.base_layer == 0, "Unimplemented");
-
-        // TODO(bunnei): Verify the below - this stride seems to be in 32-bit words, not pixels.
-        // Tested with Splatoon 2, Super Mario Odyssey, and Breath of the Wild.
-        const std::size_t byte_stride{old_params.rt.layer_stride * sizeof(u32)};
-
-        for (std::size_t index = 0; index < new_params.depth; ++index) {
-            Surface face_surface{TryGetReservedSurface(old_params)};
-            ASSERT_MSG(face_surface, "Unexpected");
-
-            if (is_blit) {
-                BlitSurface(face_surface, new_surface, read_framebuffer.handle,
-                            draw_framebuffer.handle, face_surface->GetSurfaceParams().rt.index,
-                            new_params.rt.index, index);
-            } else {
-                CopySurface(face_surface, new_surface, copy_pbo.handle,
-                            face_surface->GetSurfaceParams().rt.index, new_params.rt.index, index);
-            }
-
-            old_params.addr += byte_stride;
-        }
-        break;
-    }
     default:
         LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
                      static_cast<u32>(new_params.target));
@@ -1266,7 +1263,7 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface,
     }
 
     return new_surface;
-}
+} // namespace OpenGL
 
 Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr addr) const {
     return TryGet(addr);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
index 0dd0d90a3..50a7ab47d 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
@@ -168,6 +168,23 @@ struct SurfaceParams {
         }
     }
 
+    static bool SurfaceTargetIsLayered(SurfaceTarget target) {
+        switch (target) {
+        case SurfaceTarget::Texture1D:
+        case SurfaceTarget::Texture2D:
+        case SurfaceTarget::Texture3D:
+            return false;
+        case SurfaceTarget::Texture1DArray:
+        case SurfaceTarget::Texture2DArray:
+        case SurfaceTarget::TextureCubemap:
+            return true;
+        default:
+            LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", static_cast<u32>(target));
+            UNREACHABLE();
+            return false;
+        }
+    }
+
     /**
      * Gets the compression factor for the specified PixelFormat. This applies to just the
      * "compressed width" and "compressed height", not the overall compression factor of a
@@ -742,6 +759,25 @@ struct SurfaceParams {
         return size_in_bytes_gl / 6;
     }
 
+    /// Returns the exact size of memory occupied by the texture in VRAM, including mipmaps.
+    std::size_t MemorySize() const {
+        std::size_t size = InnerMemorySize(is_layered);
+        if (is_layered)
+            return size * depth;
+        return size;
+    }
+
+    /// Returns the exact size of the memory occupied by a layer in a texture in VRAM, including
+    /// mipmaps.
+    std::size_t LayerMemorySize() const {
+        return InnerMemorySize(true);
+    }
+
+    /// Returns the size of a layer of this surface in OpenGL.
+ std::size_t LayerSizeGL() const { + return SizeInBytesRaw(true) / depth; + } + /// Creates SurfaceParams from a texture configuration static SurfaceParams CreateForTexture(const Tegra::Texture::FullTextureInfo& config, const GLShader::SamplerEntry& entry); @@ -782,6 +818,7 @@ struct SurfaceParams { u32 unaligned_height; SurfaceTarget target; u32 max_mip_level; + bool is_layered; // Parameters used for caching VAddr addr; @@ -797,6 +834,9 @@ struct SurfaceParams { u32 layer_stride; u32 base_layer; } rt; + +private: + std::size_t InnerMemorySize(bool layer_only = false) const; }; }; // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index f4340a017..81ffb24e4 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -6,6 +6,7 @@ #include <set> #include <string> #include <string_view> +#include <unordered_set> #include <boost/optional.hpp> #include <fmt/format.h> @@ -30,8 +31,6 @@ using Tegra::Shader::SubOp; constexpr u32 PROGRAM_END = MAX_PROGRAM_CODE_LENGTH; constexpr u32 PROGRAM_HEADER_SIZE = sizeof(Tegra::Shader::Header); -enum : u32 { POSITION_VARYING_LOCATION = 0, GENERIC_VARYING_START_LOCATION = 1 }; - constexpr u32 MAX_GEOMETRY_BUFFERS = 6; constexpr u32 MAX_ATTRIBUTES = 0x100; // Size in vec4s, this value is untested @@ -165,10 +164,11 @@ private: const ExitMethod jmp = Scan(target, end, labels); return exit_method = ParallelExit(no_jmp, jmp); } - case OpCode::Id::SSY: { - // The SSY instruction uses a similar encoding as the BRA instruction. + case OpCode::Id::SSY: + case OpCode::Id::PBK: { + // The SSY and PBK use a similar encoding as the BRA instruction. ASSERT_MSG(instr.bra.constant_buffer == 0, - "Constant buffer SSY is not supported"); + "Constant buffer branching is not supported"); const u32 target = offset + instr.bra.GetBranchTarget(); labels.insert(target); // Continue scanning for an exit method. @@ -277,7 +277,8 @@ public: GLSLRegisterManager(ShaderWriter& shader, ShaderWriter& declarations, const Maxwell3D::Regs::ShaderStage& stage, const std::string& suffix, const Tegra::Shader::Header& header) - : shader{shader}, declarations{declarations}, stage{stage}, suffix{suffix}, header{header} { + : shader{shader}, declarations{declarations}, stage{stage}, suffix{suffix}, header{header}, + fixed_pipeline_output_attributes_used{} { BuildRegisterList(); BuildInputList(); } @@ -376,11 +377,55 @@ public: } /** + * Writes code that does a register assignment to a half float value operation. + * @param reg The destination register to use. + * @param elem The element to use for the operation. + * @param value The code representing the value to assign. Type has to be half float. + * @param merge Half float kind of assignment. + * @param dest_num_components Number of components in the destination. + * @param value_num_components Number of components in the value. + * @param is_saturated Optional, when True, saturates the provided value. + * @param dest_elem Optional, the destination element to use for the operation. 
@@ -437,7 +482,12 @@ public:
                                std::to_string(static_cast<u32>(attribute)) + ']' +
                                GetSwizzle(elem) + " = " + src + ';');
             } else {
-                shader.AddLine(dest + GetSwizzle(elem) + " = " + src + ';');
+                if (attribute == Attribute::Index::PointSize) {
+                    fixed_pipeline_output_attributes_used.insert(attribute);
+                    shader.AddLine(dest + " = " + src + ';');
+                } else {
+                    shader.AddLine(dest + GetSwizzle(elem) + " = " + src + ';');
+                }
             }
         }
     }
@@ -481,6 +531,7 @@ public:
 
     /// Add declarations.
     void GenerateDeclarations(const std::string& suffix) {
+        GenerateVertex();
         GenerateRegisters(suffix);
         GenerateInternalFlags();
         GenerateInputAttrs();
@@ -548,13 +599,6 @@ private:
 
     /// Generates declarations for input attributes.
     void GenerateInputAttrs() {
-        if (stage != Maxwell3D::Regs::ShaderStage::Vertex) {
-            const std::string attr =
-                stage == Maxwell3D::Regs::ShaderStage::Geometry ? "gs_position[]" : "position";
-            declarations.AddLine("layout (location = " + std::to_string(POSITION_VARYING_LOCATION) +
-                                 ") in vec4 " + attr + ';');
-        }
-
         for (const auto element : declr_input_attribute) {
             // TODO(bunnei): Use proper number of elements for these
             u32 idx =
@@ -577,10 +621,6 @@ private:
 
     /// Generates declarations for output attributes.
     void GenerateOutputAttrs() {
-        if (stage != Maxwell3D::Regs::ShaderStage::Fragment) {
-            declarations.AddLine("layout (location = " + std::to_string(POSITION_VARYING_LOCATION) +
-                                 ") out vec4 position;");
-        }
         for (const auto& index : declr_output_attribute) {
             // TODO(bunnei): Use proper number of elements for these
             const u32 idx = static_cast<u32>(index) -
@@ -651,6 +691,20 @@ private:
         declarations.AddNewLine();
     }
 
+    void GenerateVertex() {
+        if (stage != Maxwell3D::Regs::ShaderStage::Vertex)
+            return;
+        declarations.AddLine("out gl_PerVertex {");
+        ++declarations.scope;
+        declarations.AddLine("vec4 gl_Position;");
+        for (auto& o : fixed_pipeline_output_attributes_used) {
+            if (o == Attribute::Index::PointSize)
+                declarations.AddLine("float gl_PointSize;");
+        }
+        --declarations.scope;
+        declarations.AddLine("};");
+    }
+
     /// Generates code representing a temporary (GPR) register.
     std::string GetRegister(const Register& reg, unsigned elem) {
         if (reg == Register::ZeroIndex) {
@@ -804,6 +858,8 @@ private:
     /// Generates code representing the declaration name of an output attribute register.
     std::string GetOutputAttribute(Attribute::Index attribute) {
         switch (attribute) {
+        case Attribute::Index::PointSize:
+            return "gl_PointSize";
         case Attribute::Index::Position:
             return "position";
         default:
@@ -838,6 +894,7 @@ private:
     const Maxwell3D::Regs::ShaderStage& stage;
     const std::string& suffix;
     const Tegra::Shader::Header& header;
+    std::unordered_set<Attribute::Index> fixed_pipeline_output_attributes_used;
 };
 
 class GLSLGenerator {
@@ -877,6 +934,19 @@ private:
         return fmt::format("uintBitsToFloat({})", instr.alu.GetImm20_32());
     }
 
+    /// Generates code representing a vec2 pair unpacked from a half float immediate
+    static std::string UnpackHalfImmediate(const Instruction& instr, bool negate) {
+        const std::string immediate = GetHalfFloat(std::to_string(instr.half_imm.PackImmediates()));
+        if (!negate) {
+            return immediate;
+        }
+        const std::string negate_first = instr.half_imm.first_negate != 0 ? "-" : "";
+        const std::string negate_second = instr.half_imm.second_negate != 0 ? "-" : "";
+        const std::string negate_vec = "vec2(" + negate_first + "1, " + negate_second + "1)";
+
+        return '(' + immediate + " * " + negate_vec + ')';
+    }
+
     /// Generates code representing a texture sampler.
     std::string GetSampler(const Sampler& sampler, Tegra::Shader::TextureType type, bool is_array,
                            bool is_shadow) {
@@ -908,7 +978,7 @@ private:
         // Can't assign to the constant predicate.
         ASSERT(pred != static_cast<u64>(Pred::UnusedIndex));
 
-        const std::string variable = 'p' + std::to_string(pred) + '_' + suffix;
+        std::string variable = 'p' + std::to_string(pred) + '_' + suffix;
         shader.AddLine(variable + " = " + value + ';');
         declr_predicates.insert(std::move(variable));
     }
@@ -1013,6 +1083,41 @@ private:
     }
 
     /*
+     * Transforms the input string GLSL operand into an unpacked half float pair.
+     * @note This function returns a float type pair instead of a half float pair. This is because
+     * real half floats are not standardized in GLSL but unpackHalf2x16 (which returns a vec2) is.
+     * @param operand Input operand. It has to be an unsigned integer.
+     * @param type How to unpack the unsigned integer to a half float pair.
+     * @param abs Get the absolute value of unpacked half floats.
+     * @param neg Get the negative value of unpacked half floats.
+     * @returns String corresponding to a half float pair.
+     */
+    static std::string GetHalfFloat(const std::string& operand,
+                                    Tegra::Shader::HalfType type = Tegra::Shader::HalfType::H0_H1,
+                                    bool abs = false, bool neg = false) {
+        // "vec2" calls emitted in this function are intended to alias components.
+        const std::string value = [&]() {
+            switch (type) {
+            case Tegra::Shader::HalfType::H0_H1:
+                return "unpackHalf2x16(" + operand + ')';
+            case Tegra::Shader::HalfType::F32:
+                return "vec2(uintBitsToFloat(" + operand + "))";
+            case Tegra::Shader::HalfType::H0_H0:
+            case Tegra::Shader::HalfType::H1_H1: {
+                const bool high = type == Tegra::Shader::HalfType::H1_H1;
+                const char unpack_index = "xy"[high ? 1 : 0];
+                return "vec2(unpackHalf2x16(" + operand + ")." + unpack_index + ')';
+            }
+            default:
+                UNREACHABLE();
+                return std::string("vec2(0)");
+            }
+        }();
+
+        return GetOperandAbsNeg(value, abs, neg);
+    }
+
+    /*
     * Returns whether the instruction at the specified offset is a 'sched' instruction.
     * Sched instructions always appear before a sequence of 3 instructions.
     */
@@ -1154,27 +1259,27 @@ private:
     }
 
     /*
-     * Emits code to push the input target address to the SSY address stack, incrementing the stack
+     * Emits code to push the input target address to the flow address stack, incrementing the stack
      * top.
     */
-    void EmitPushToSSYStack(u32 target) {
+    void EmitPushToFlowStack(u32 target) {
         shader.AddLine('{');
         ++shader.scope;
-        shader.AddLine("ssy_stack[ssy_stack_top] = " + std::to_string(target) + "u;");
-        shader.AddLine("ssy_stack_top++;");
+        shader.AddLine("flow_stack[flow_stack_top] = " + std::to_string(target) + "u;");
+        shader.AddLine("flow_stack_top++;");
         --shader.scope;
         shader.AddLine('}');
     }
 
     /*
-     * Emits code to pop an address from the SSY address stack, setting the jump address to the
+     * Emits code to pop an address from the flow address stack, setting the jump address to the
     * popped address and decrementing the stack top.
     */
-    void EmitPopFromSSYStack() {
+    void EmitPopFromFlowStack() {
         shader.AddLine('{');
         ++shader.scope;
-        shader.AddLine("ssy_stack_top--;");
-        shader.AddLine("jmp_to = ssy_stack[ssy_stack_top];");
+        shader.AddLine("flow_stack_top--;");
+        shader.AddLine("jmp_to = flow_stack[flow_stack_top];");
         shader.AddLine("break;");
         --shader.scope;
         shader.AddLine('}');
     }
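SSY and PBK push a reconvergence (or loop-break) address, and SYNC and BRK pop it; since both pairs now share one stack, they compile to the same push/pop helpers. A minimal CPU model of the state the generated GLSL maintains (`flow_stack`, `flow_stack_top` and `jmp_to` match the emitted names; the fixed stack depth is an assumption of this sketch):

    #include <cstddef>
    #include <cstdint>

    struct FlowState {
        std::uint32_t flow_stack[16]{}; // depth chosen for the sketch only
        std::size_t flow_stack_top = 0;
        std::uint32_t jmp_to = 0; // label of the next basic block to dispatch
    };

    void PushFlow(FlowState& s, std::uint32_t target) { // SSY / PBK
        s.flow_stack[s.flow_stack_top++] = target;
    }

    void PopFlow(FlowState& s) { // SYNC / BRK: resume at the last pushed address
        s.jmp_to = s.flow_stack[--s.flow_stack_top];
    }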
@@ -1186,9 +1291,29 @@ private:
 
         ASSERT_MSG(header.ps.omap.sample_mask == 0, "Samplemask write is unimplemented");
 
+        shader.AddLine("if (alpha_test[0] != 0) {");
+        ++shader.scope;
+        // We start on the register containing the alpha value in the first RT.
+        u32 current_reg = 3;
+        for (u32 render_target = 0; render_target < Maxwell3D::Regs::NumRenderTargets;
+             ++render_target) {
+            // TODO(Blinkhawk): verify the behavior of alpha testing on hardware when
+            // multiple render targets are used.
+            if (header.ps.IsColorComponentOutputEnabled(render_target, 0) ||
+                header.ps.IsColorComponentOutputEnabled(render_target, 1) ||
+                header.ps.IsColorComponentOutputEnabled(render_target, 2) ||
+                header.ps.IsColorComponentOutputEnabled(render_target, 3)) {
+                shader.AddLine(fmt::format("if (!AlphaFunc({})) discard;",
+                                           regs.GetRegisterAsFloat(current_reg)));
+                current_reg += 4;
+            }
+        }
+        --shader.scope;
+        shader.AddLine('}');
+
         // Write the color outputs using the data in the shader registers, disabled
         // rendertargets/components are skipped in the register assignment.
-        u32 current_reg = 0;
+        current_reg = 0;
         for (u32 render_target = 0; render_target < Maxwell3D::Regs::NumRenderTargets;
              ++render_target) {
             // TODO(Subv): Figure out how dual-source blending is configured in the Switch.
@@ -1212,6 +1337,63 @@ private:
         }
     }
 
+    /// Unpacks a video instruction operand (e.g. VMAD).
+    std::string GetVideoOperand(const std::string& op, bool is_chunk, bool is_signed,
+                                Tegra::Shader::VideoType type, u64 byte_height) {
+        const std::string value = [&]() {
+            if (!is_chunk) {
+                const auto offset = static_cast<u32>(byte_height * 8);
+                return "((" + op + " >> " + std::to_string(offset) + ") & 0xff)";
+            }
+            const std::string zero = "0";
+
+            switch (type) {
+            case Tegra::Shader::VideoType::Size16_Low:
+                return '(' + op + " & 0xffff)";
+            case Tegra::Shader::VideoType::Size16_High:
+                return '(' + op + " >> 16)";
+            case Tegra::Shader::VideoType::Size32:
+                // TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when
+                // this type is used (1 * 1 + 0 == 0x5b800000). Until a better
+                // explanation is found: assert.
+                UNIMPLEMENTED();
+                return zero;
+            case Tegra::Shader::VideoType::Invalid:
+                UNREACHABLE_MSG("Invalid instruction encoding");
+                return zero;
+            default:
+                UNREACHABLE();
+                return zero;
+            }
+        }();
+
+        if (is_signed) {
+            return "int(" + value + ')';
+        }
+        return value;
+    };
+
+    /// Gets the A operand for a video instruction.
+    std::string GetVideoOperandA(Instruction instr) {
+        return GetVideoOperand(regs.GetRegisterAsInteger(instr.gpr8, 0, false),
+                               instr.video.is_byte_chunk_a != 0, instr.video.signed_a,
+                               instr.video.type_a, instr.video.byte_height_a);
+    }
+
+    /// Gets the B operand for a video instruction.
+    std::string GetVideoOperandB(Instruction instr) {
+        if (instr.video.use_register_b) {
+            return GetVideoOperand(regs.GetRegisterAsInteger(instr.gpr20, 0, false),
+                                   instr.video.is_byte_chunk_b != 0, instr.video.signed_b,
+                                   instr.video.type_b, instr.video.byte_height_b);
+        } else {
+            return '(' +
+                   std::to_string(instr.video.signed_b ? static_cast<s16>(instr.alu.GetImm20_16())
+                                                       : instr.alu.GetImm20_16()) +
+                   ')';
+        }
+    }
+
     /**
      * Compiles a single instruction from Tegra to GLSL.
      * @param offset the offset of the Tegra shader instruction.
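VMAD/VSETP operands come from the "video" subword modes: either a single byte selected by `byte_height` (when the chunk flag is clear) or one 16-bit half of the register, optionally sign-extended afterwards. A numeric sketch of the selection:

    #include <cstdint>

    // Byte/halfword selection performed by GetVideoOperand (unsigned case).
    std::uint32_t SelectByte(std::uint32_t reg, unsigned byte_height) {
        return (reg >> (byte_height * 8)) & 0xff; // byte_height in [0, 3]
    }
    std::uint32_t SelectHalf(std::uint32_t reg, bool high) {
        return high ? (reg >> 16) : (reg & 0xffff); // Size16_High / Size16_Low
    }
    // e.g. reg = 0xAABBCCDD: SelectByte(reg, 2) == 0xBB, SelectHalf(reg, true) == 0xAABB.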
@@ -1381,9 +1563,10 @@ private:
             break;
         }
         case OpCode::Id::FMUL32_IMM: {
-            regs.SetRegisterToFloat(
-                instr.gpr0, 0,
-                regs.GetRegisterAsFloat(instr.gpr8) + " * " + GetImmediate32(instr), 1, 1);
+            regs.SetRegisterToFloat(instr.gpr0, 0,
+                                    regs.GetRegisterAsFloat(instr.gpr8) + " * " +
+                                        GetImmediate32(instr),
+                                    1, 1, instr.fmul32.saturate);
             break;
         }
         case OpCode::Id::FADD32I: {
@@ -1748,6 +1931,86 @@ private:
             break;
         }
 
+        case OpCode::Type::ArithmeticHalf: {
+            if (opcode->GetId() == OpCode::Id::HADD2_C || opcode->GetId() == OpCode::Id::HADD2_R) {
+                ASSERT_MSG(instr.alu_half.ftz == 0, "Unimplemented");
+            }
+            const bool negate_a =
+                opcode->GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0;
+            const bool negate_b =
+                opcode->GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0;
+
+            const std::string op_a =
+                GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.alu_half.type_a,
+                             instr.alu_half.abs_a != 0, negate_a);
+
+            std::string op_b;
+            switch (opcode->GetId()) {
+            case OpCode::Id::HADD2_C:
+            case OpCode::Id::HMUL2_C:
+                op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset,
+                                       GLSLRegister::Type::UnsignedInteger);
+                break;
+            case OpCode::Id::HADD2_R:
+            case OpCode::Id::HMUL2_R:
+                op_b = regs.GetRegisterAsInteger(instr.gpr20, 0, false);
+                break;
+            default:
+                UNREACHABLE();
+                op_b = "0";
+                break;
+            }
+            op_b = GetHalfFloat(op_b, instr.alu_half.type_b, instr.alu_half.abs_b != 0, negate_b);
+
+            const std::string result = [&]() {
+                switch (opcode->GetId()) {
+                case OpCode::Id::HADD2_C:
+                case OpCode::Id::HADD2_R:
+                    return '(' + op_a + " + " + op_b + ')';
+                case OpCode::Id::HMUL2_C:
+                case OpCode::Id::HMUL2_R:
+                    return '(' + op_a + " * " + op_b + ')';
+                default:
+                    LOG_CRITICAL(HW_GPU, "Unhandled half float instruction: {}", opcode->GetName());
+                    UNREACHABLE();
+                    return std::string("0");
+                }
+            }();
+
+            regs.SetRegisterToHalfFloat(instr.gpr0, 0, result, instr.alu_half.merge, 1, 1,
+                                        instr.alu_half.saturate != 0);
+            break;
+        }
+        case OpCode::Type::ArithmeticHalfImmediate: {
+            if (opcode->GetId() == OpCode::Id::HADD2_IMM) {
+                ASSERT_MSG(instr.alu_half_imm.ftz == 0, "Unimplemented");
+            } else {
+                ASSERT_MSG(instr.alu_half_imm.precision == Tegra::Shader::HalfPrecision::None,
+                           "Unimplemented");
+            }
+
+            const std::string op_a = GetHalfFloat(
+                regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.alu_half_imm.type_a,
+                instr.alu_half_imm.abs_a != 0, instr.alu_half_imm.negate_a != 0);
+
+            const std::string op_b = UnpackHalfImmediate(instr, true);
+
+            const std::string result = [&]() {
+                switch (opcode->GetId()) {
+                case OpCode::Id::HADD2_IMM:
+                    return op_a + " + " + op_b;
+                case OpCode::Id::HMUL2_IMM:
+                    return op_a + " * " + op_b;
+                default:
+                    UNREACHABLE();
+                    return std::string("0");
+                }
+            }();
+
+            regs.SetRegisterToHalfFloat(instr.gpr0, 0, result, instr.alu_half_imm.merge, 1, 1,
+                                        instr.alu_half_imm.saturate != 0);
+            break;
+        }
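HADD2/HMUL2 are two-lane fp16 operations: each operand register is unpacked to a float vec2 per its HalfType, the add or multiply happens lane-wise, and the result is repacked under the instruction's HalfMerge mode. Lane-wise model with plain floats (the unpack/pack steps themselves are shown in GetHalfFloat and SetRegisterToHalfFloat above):

    // Two fp16 lanes widened to float, as the decompiler's vec2 strings do.
    struct Half2 {
        float x, y;
    };

    Half2 Hadd2(Half2 a, Half2 b) { // HADD2: lane-wise addition
        return {a.x + b.x, a.y + b.y};
    }

    Half2 Hmul2(Half2 a, Half2 b) { // HMUL2: lane-wise multiplication
        return {a.x * b.x, a.y * b.y};
    }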
         case OpCode::Type::Ffma: {
             const std::string op_a = regs.GetRegisterAsFloat(instr.gpr8);
             std::string op_b = instr.ffma.negate_b ? "-" : "";
@@ -1792,6 +2055,59 @@ private:
                                       instr.alu.saturate_d);
             break;
         }
+        case OpCode::Type::Hfma2: {
+            if (opcode->GetId() == OpCode::Id::HFMA2_RR) {
+                ASSERT_MSG(instr.hfma2.rr.precision == Tegra::Shader::HalfPrecision::None,
+                           "Unimplemented");
+            } else {
+                ASSERT_MSG(instr.hfma2.precision == Tegra::Shader::HalfPrecision::None,
+                           "Unimplemented");
+            }
+            const bool saturate = opcode->GetId() == OpCode::Id::HFMA2_RR
+                                      ? instr.hfma2.rr.saturate != 0
+                                      : instr.hfma2.saturate != 0;
+
+            const std::string op_a =
+                GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hfma2.type_a);
+            std::string op_b, op_c;
+
+            switch (opcode->GetId()) {
+            case OpCode::Id::HFMA2_CR:
+                op_b = GetHalfFloat(regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset,
+                                                    GLSLRegister::Type::UnsignedInteger),
+                                    instr.hfma2.type_b, false, instr.hfma2.negate_b);
+                op_c = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false),
+                                    instr.hfma2.type_reg39, false, instr.hfma2.negate_c);
+                break;
+            case OpCode::Id::HFMA2_RC:
+                op_b = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false),
+                                    instr.hfma2.type_reg39, false, instr.hfma2.negate_b);
+                op_c = GetHalfFloat(regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset,
+                                                    GLSLRegister::Type::UnsignedInteger),
+                                    instr.hfma2.type_b, false, instr.hfma2.negate_c);
+                break;
+            case OpCode::Id::HFMA2_RR:
+                op_b = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false),
+                                    instr.hfma2.type_b, false, instr.hfma2.negate_b);
+                op_c = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false),
+                                    instr.hfma2.rr.type_c, false, instr.hfma2.rr.negate_c);
+                break;
+            case OpCode::Id::HFMA2_IMM_R:
+                op_b = UnpackHalfImmediate(instr, true);
+                op_c = GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr39, 0, false),
+                                    instr.hfma2.type_reg39, false, instr.hfma2.negate_c);
+                break;
+            default:
+                UNREACHABLE();
+                op_c = op_b = "vec2(0)";
+                break;
+            }
+
+            const std::string result = '(' + op_a + " * " + op_b + " + " + op_c + ')';
+
+            regs.SetRegisterToHalfFloat(instr.gpr0, 0, result, instr.hfma2.merge, 1, 1, saturate);
+            break;
+        }
         case OpCode::Type::Conversion: {
             switch (opcode->GetId()) {
             case OpCode::Id::I2I_R: {
@@ -2525,20 +2841,13 @@ private:
             break;
         }
         case OpCode::Type::FloatSetPredicate: {
-            std::string op_a = instr.fsetp.neg_a ? "-" : "";
-            op_a += regs.GetRegisterAsFloat(instr.gpr8);
-
-            if (instr.fsetp.abs_a) {
-                op_a = "abs(" + op_a + ')';
-            }
+            const std::string op_a =
+                GetOperandAbsNeg(regs.GetRegisterAsFloat(instr.gpr8), instr.fsetp.abs_a != 0,
+                                 instr.fsetp.neg_a != 0);
 
-            std::string op_b{};
+            std::string op_b;
 
             if (instr.is_b_imm) {
-                if (instr.fsetp.neg_b) {
-                    // Only the immediate version of fsetp has a neg_b bit.
-                    op_b += '-';
-                }
                 op_b += '(' + GetImmediate19(instr) + ')';
             } else {
                 if (instr.is_b_gpr) {
@@ -2611,6 +2920,51 @@ private:
             }
             break;
         }
+        case OpCode::Type::HalfSetPredicate: {
+            ASSERT_MSG(instr.hsetp2.ftz == 0, "Unimplemented");
+
+            const std::string op_a =
+                GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hsetp2.type_a,
+                             instr.hsetp2.abs_a, instr.hsetp2.negate_a);
+
+            const std::string op_b = [&]() {
+                switch (opcode->GetId()) {
+                case OpCode::Id::HSETP2_R:
+                    return GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false),
+                                        instr.hsetp2.type_b, instr.hsetp2.abs_a,
+                                        instr.hsetp2.negate_b);
+                default:
+                    UNREACHABLE();
+                    return std::string("vec2(0)");
+                }
+            }();
+
+            // We can't use the constant predicate as destination.
+            ASSERT(instr.hsetp2.pred3 != static_cast<u64>(Pred::UnusedIndex));
+
+            const std::string second_pred =
+                GetPredicateCondition(instr.hsetp2.pred39, instr.hsetp2.neg_pred != 0);
+
+            const std::string combiner = GetPredicateCombiner(instr.hsetp2.op);
+
+            const std::string component_combiner = instr.hsetp2.h_and ? "&&" : "||";
+            const std::string predicate =
+                '(' + GetPredicateComparison(instr.hsetp2.cond, op_a + ".x", op_b + ".x") + ' ' +
+                component_combiner + ' ' +
+                GetPredicateComparison(instr.hsetp2.cond, op_a + ".y", op_b + ".y") + ')';
+
+            // Set the primary predicate to the result of Predicate OP SecondPredicate
+            SetPredicate(instr.hsetp2.pred3,
+                         '(' + predicate + ") " + combiner + " (" + second_pred + ')');
+
+            if (instr.hsetp2.pred0 != static_cast<u64>(Pred::UnusedIndex)) {
+                // Set the secondary predicate to the result of !Predicate OP SecondPredicate,
+                // if enabled
+                SetPredicate(instr.hsetp2.pred0,
+                             "!(" + predicate + ") " + combiner + " (" + second_pred + ')');
+            }
+            break;
+        }
"&&" : "||"; + const std::string predicate = + '(' + GetPredicateComparison(instr.hsetp2.cond, op_a + ".x", op_b + ".x") + ' ' + + component_combiner + ' ' + + GetPredicateComparison(instr.hsetp2.cond, op_a + ".y", op_b + ".y") + ')'; + + // Set the primary predicate to the result of Predicate OP SecondPredicate + SetPredicate(instr.hsetp2.pred3, + '(' + predicate + ") " + combiner + " (" + second_pred + ')'); + + if (instr.hsetp2.pred0 != static_cast<u64>(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, + // if enabled + SetPredicate(instr.hsetp2.pred0, + "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); + } + break; + } case OpCode::Type::PredicateSetRegister: { const std::string op_a = GetPredicateCondition(instr.pset.pred12, instr.pset.neg_pred12 != 0); @@ -2689,33 +3043,24 @@ private: break; } case OpCode::Type::FloatSet: { - std::string op_a = instr.fset.neg_a ? "-" : ""; - op_a += regs.GetRegisterAsFloat(instr.gpr8); - - if (instr.fset.abs_a) { - op_a = "abs(" + op_a + ')'; - } + const std::string op_a = GetOperandAbsNeg(regs.GetRegisterAsFloat(instr.gpr8), + instr.fset.abs_a != 0, instr.fset.neg_a != 0); - std::string op_b = instr.fset.neg_b ? "-" : ""; + std::string op_b; if (instr.is_b_imm) { const std::string imm = GetImmediate19(instr); - if (instr.fset.neg_imm) - op_b += "(-" + imm + ')'; - else - op_b += imm; + op_b = imm; } else { if (instr.is_b_gpr) { - op_b += regs.GetRegisterAsFloat(instr.gpr20); + op_b = regs.GetRegisterAsFloat(instr.gpr20); } else { - op_b += regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, - GLSLRegister::Type::Float); + op_b = regs.GetUniform(instr.cbuf34.index, instr.cbuf34.offset, + GLSLRegister::Type::Float); } } - if (instr.fset.abs_b) { - op_b = "abs(" + op_b + ')'; - } + op_b = GetOperandAbsNeg(op_b, instr.fset.abs_b != 0, instr.fset.neg_b != 0); // The fset instruction sets a register to 1.0 or -1 (depending on the bf bit) if the // condition is true, and to 0 otherwise. @@ -2771,6 +3116,50 @@ private: } break; } + case OpCode::Type::HalfSet: { + ASSERT_MSG(instr.hset2.ftz == 0, "Unimplemented"); + + const std::string op_a = + GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hset2.type_a, + instr.hset2.abs_a != 0, instr.hset2.negate_a != 0); + + const std::string op_b = [&]() { + switch (opcode->GetId()) { + case OpCode::Id::HSET2_R: + return GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr20, 0, false), + instr.hset2.type_b, instr.hset2.abs_b != 0, + instr.hset2.negate_b != 0); + default: + UNREACHABLE(); + return std::string("vec2(0)"); + } + }(); + + const std::string second_pred = + GetPredicateCondition(instr.hset2.pred39, instr.hset2.neg_pred != 0); + + const std::string combiner = GetPredicateCombiner(instr.hset2.op); + + // HSET2 operates on each half float in the pack. + std::string result; + for (int i = 0; i < 2; ++i) { + const std::string float_value = i == 0 ? "0x00003c00" : "0x3c000000"; + const std::string integer_value = i == 0 ? "0x0000ffff" : "0xffff0000"; + const std::string value = instr.hset2.bf == 1 ? float_value : integer_value; + + const std::string comp = std::string(".") + "xy"[i]; + const std::string predicate = + "((" + GetPredicateComparison(instr.hset2.cond, op_a + comp, op_b + comp) + + ") " + combiner + " (" + second_pred + "))"; + + result += '(' + predicate + " ? 
" + value + " : 0)"; + if (i == 0) { + result += " | "; + } + } + regs.SetRegisterToInteger(instr.gpr0, false, 0, '(' + result + ')', 1, 1); + break; + } case OpCode::Type::Xmad: { ASSERT_MSG(!instr.xmad.sign_a, "Unimplemented"); ASSERT_MSG(!instr.xmad.sign_b, "Unimplemented"); @@ -2979,16 +3368,32 @@ private: // The SSY opcode tells the GPU where to re-converge divergent execution paths, it // sets the target of the jump that the SYNC instruction will make. The SSY opcode // has a similar structure to the BRA opcode. - ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer SSY is not supported"); + ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer flow is not supported"); + + const u32 target = offset + instr.bra.GetBranchTarget(); + EmitPushToFlowStack(target); + break; + } + case OpCode::Id::PBK: { + // PBK pushes to a stack the address where BRK will jump to. This shares stack with + // SSY but using SYNC on a PBK address will kill the shader execution. We don't + // emulate this because it's very unlikely a driver will emit such invalid shader. + ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer PBK is not supported"); const u32 target = offset + instr.bra.GetBranchTarget(); - EmitPushToSSYStack(target); + EmitPushToFlowStack(target); break; } case OpCode::Id::SYNC: { // The SYNC opcode jumps to the address previously set by the SSY opcode ASSERT(instr.flow.cond == Tegra::Shader::FlowCondition::Always); - EmitPopFromSSYStack(); + EmitPopFromFlowStack(); + break; + } + case OpCode::Id::BRK: { + // The BRK opcode jumps to the address previously set by the PBK opcode + ASSERT(instr.flow.cond == Tegra::Shader::FlowCondition::Always); + EmitPopFromFlowStack(); break; } case OpCode::Id::DEPBAR: { @@ -2998,87 +3403,51 @@ private: break; } case OpCode::Id::VMAD: { - const bool signed_a = instr.vmad.signed_a == 1; - const bool signed_b = instr.vmad.signed_b == 1; - const bool result_signed = signed_a || signed_b; - boost::optional<std::string> forced_result; - - auto Unpack = [&](const std::string& op, bool is_chunk, bool is_signed, - Tegra::Shader::VmadType type, u64 byte_height) { - const std::string value = [&]() { - if (!is_chunk) { - const auto offset = static_cast<u32>(byte_height * 8); - return "((" + op + " >> " + std::to_string(offset) + ") & 0xff)"; - } - const std::string zero = "0"; - - switch (type) { - case Tegra::Shader::VmadType::Size16_Low: - return '(' + op + " & 0xffff)"; - case Tegra::Shader::VmadType::Size16_High: - return '(' + op + " >> 16)"; - case Tegra::Shader::VmadType::Size32: - // TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when - // this type is used (1 * 1 + 0 == 0x5b800000). Until a better - // explanation is found: assert. - UNREACHABLE_MSG("Unimplemented"); - return zero; - case Tegra::Shader::VmadType::Invalid: - // Note(Rodrigo): This flag is invalid according to nvdisasm. From my - // testing (even though it's invalid) this makes the whole instruction - // assign zero to target register. 
- forced_result = boost::make_optional(zero); - return zero; - default: - UNREACHABLE(); - return zero; - } - }(); - - if (is_signed) { - return "int(" + value + ')'; - } - return value; - }; - - const std::string op_a = Unpack(regs.GetRegisterAsInteger(instr.gpr8, 0, false), - instr.vmad.is_byte_chunk_a != 0, signed_a, - instr.vmad.type_a, instr.vmad.byte_height_a); - - std::string op_b; - if (instr.vmad.use_register_b) { - op_b = Unpack(regs.GetRegisterAsInteger(instr.gpr20, 0, false), - instr.vmad.is_byte_chunk_b != 0, signed_b, instr.vmad.type_b, - instr.vmad.byte_height_b); - } else { - op_b = '(' + - std::to_string(signed_b ? static_cast<s16>(instr.alu.GetImm20_16()) - : instr.alu.GetImm20_16()) + - ')'; - } - + const bool result_signed = instr.video.signed_a == 1 || instr.video.signed_b == 1; + const std::string op_a = GetVideoOperandA(instr); + const std::string op_b = GetVideoOperandB(instr); const std::string op_c = regs.GetRegisterAsInteger(instr.gpr39, 0, result_signed); - std::string result; - if (forced_result) { - result = *forced_result; - } else { - result = '(' + op_a + " * " + op_b + " + " + op_c + ')'; + std::string result = '(' + op_a + " * " + op_b + " + " + op_c + ')'; - switch (instr.vmad.shr) { - case Tegra::Shader::VmadShr::Shr7: - result = '(' + result + " >> 7)"; - break; - case Tegra::Shader::VmadShr::Shr15: - result = '(' + result + " >> 15)"; - break; - } + switch (instr.vmad.shr) { + case Tegra::Shader::VmadShr::Shr7: + result = '(' + result + " >> 7)"; + break; + case Tegra::Shader::VmadShr::Shr15: + result = '(' + result + " >> 15)"; + break; } + regs.SetRegisterToInteger(instr.gpr0, result_signed, 1, result, 1, 1, instr.vmad.saturate == 1, 0, Register::Size::Word, instr.vmad.cc); break; } + case OpCode::Id::VSETP: { + const std::string op_a = GetVideoOperandA(instr); + const std::string op_b = GetVideoOperandB(instr); + + // We can't use the constant predicate as destination. + ASSERT(instr.vsetp.pred3 != static_cast<u64>(Pred::UnusedIndex)); + + const std::string second_pred = GetPredicateCondition(instr.vsetp.pred39, false); + + const std::string combiner = GetPredicateCombiner(instr.vsetp.op); + + const std::string predicate = GetPredicateComparison(instr.vsetp.cond, op_a, op_b); + // Set the primary predicate to the result of Predicate OP SecondPredicate + SetPredicate(instr.vsetp.pred3, + '(' + predicate + ") " + combiner + " (" + second_pred + ')'); + + if (instr.vsetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) { + // Set the secondary predicate to the result of !Predicate OP SecondPredicate, + // if enabled + SetPredicate(instr.vsetp.pred0, + "!(" + predicate + ") " + combiner + " (" + second_pred + ')'); + } + break; + } default: { LOG_CRITICAL(HW_GPU, "Unhandled instruction: {}", opcode->GetName()); UNREACHABLE(); @@ -3142,11 +3511,11 @@ private: labels.insert(subroutine.begin); shader.AddLine("uint jmp_to = " + std::to_string(subroutine.begin) + "u;"); - // TODO(Subv): Figure out the actual depth of the SSY stack, for now it seems - // unlikely that shaders will use 20 nested SSYs. - constexpr u32 SSY_STACK_SIZE = 20; - shader.AddLine("uint ssy_stack[" + std::to_string(SSY_STACK_SIZE) + "];"); - shader.AddLine("uint ssy_stack_top = 0u;"); + // TODO(Subv): Figure out the actual depth of the flow stack, for now it seems + // unlikely that shaders will use 20 nested SSYs and PBKs. 
+ constexpr u32 FLOW_STACK_SIZE = 20; + shader.AddLine("uint flow_stack[" + std::to_string(FLOW_STACK_SIZE) + "];"); + shader.AddLine("uint flow_stack_top = 0u;"); shader.AddLine("while (true) {"); ++shader.scope; @@ -3213,7 +3582,7 @@ private: // Declarations std::set<std::string> declr_predicates; -}; // namespace Decompiler +}; // namespace OpenGL::GLShader::Decompiler std::string GetCommonDeclarations() { return fmt::format("#define MAX_CONSTBUFFER_ELEMENTS {}\n", diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp index 1e5eb32df..dfb562706 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.cpp +++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp @@ -19,14 +19,14 @@ ProgramResult GenerateVertexShader(const ShaderSetup& setup) { out += Decompiler::GetCommonDeclarations(); out += R"( -out gl_PerVertex { - vec4 gl_Position; -}; + +layout (location = 0) out vec4 position; layout(std140) uniform vs_config { vec4 viewport_flip; uvec4 instance_id; uvec4 flip_stage; + uvec4 alpha_test; }; )"; @@ -96,10 +96,14 @@ out gl_PerVertex { vec4 gl_Position; }; +layout (location = 0) in vec4 gs_position[]; +layout (location = 0) out vec4 position; + layout (std140) uniform gs_config { vec4 viewport_flip; uvec4 instance_id; uvec4 flip_stage; + uvec4 alpha_test; }; void main() { @@ -131,12 +135,39 @@ layout(location = 5) out vec4 FragColor5; layout(location = 6) out vec4 FragColor6; layout(location = 7) out vec4 FragColor7; +layout (location = 0) in vec4 position; + layout (std140) uniform fs_config { vec4 viewport_flip; uvec4 instance_id; uvec4 flip_stage; + uvec4 alpha_test; }; +bool AlphaFunc(in float value) { + float ref = uintBitsToFloat(alpha_test[2]); + switch (alpha_test[1]) { + case 1: + return false; + case 2: + return value < ref; + case 3: + return value == ref; + case 4: + return value <= ref; + case 5: + return value > ref; + case 6: + return value != ref; + case 7: + return value >= ref; + case 8: + return true; + default: + return false; + } +} + void main() { exec_fragment(); } @@ -145,4 +176,4 @@ void main() { out += program.first; return {out, program.second}; } -} // namespace OpenGL::GLShader
\ No newline at end of file +} // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h index 79596087a..520b9d4e3 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.h +++ b/src/video_core/renderer_opengl/gl_shader_gen.h @@ -16,6 +16,8 @@ namespace OpenGL::GLShader { constexpr std::size_t MAX_PROGRAM_CODE_LENGTH{0x1000}; using ProgramCode = std::vector<u64>; +enum : u32 { POSITION_VARYING_LOCATION = 0, GENERIC_VARYING_START_LOCATION = 1 }; + class ConstBufferEntry { using Maxwell = Tegra::Engines::Maxwell3D::Regs; diff --git a/src/video_core/renderer_opengl/gl_shader_manager.cpp b/src/video_core/renderer_opengl/gl_shader_manager.cpp index 010857ec6..8b8869ecb 100644 --- a/src/video_core/renderer_opengl/gl_shader_manager.cpp +++ b/src/video_core/renderer_opengl/gl_shader_manager.cpp @@ -16,6 +16,17 @@ void MaxwellUniformData::SetFromRegs(const Maxwell3D::State::ShaderStageInfo& sh viewport_flip[0] = regs.viewport_transform[0].scale_x < 0.0 ? -1.0f : 1.0f; viewport_flip[1] = regs.viewport_transform[0].scale_y < 0.0 ? -1.0f : 1.0f; + u32 func = static_cast<u32>(regs.alpha_test_func); + // Normalize the gl variants of opCompare to be the same as the normal variants + u32 op_gl_variant_base = static_cast<u32>(Tegra::Engines::Maxwell3D::Regs::ComparisonOp::Never); + if (func >= op_gl_variant_base) { + func = func - op_gl_variant_base + 1U; + } + + alpha_test.enabled = regs.alpha_test_enabled; + alpha_test.func = func; + alpha_test.ref = regs.alpha_test_ref; + // We only assign the instance to the first component of the vector, the rest is just padding. instance_id[0] = state.current_instance; diff --git a/src/video_core/renderer_opengl/gl_shader_manager.h b/src/video_core/renderer_opengl/gl_shader_manager.h index b3a191cf2..36fe1f04c 100644 --- a/src/video_core/renderer_opengl/gl_shader_manager.h +++ b/src/video_core/renderer_opengl/gl_shader_manager.h @@ -22,8 +22,14 @@ struct MaxwellUniformData { alignas(16) GLvec4 viewport_flip; alignas(16) GLuvec4 instance_id; alignas(16) GLuvec4 flip_stage; + struct alignas(16) { + GLuint enabled; + GLuint func; + GLfloat ref; + GLuint padding; + } alpha_test; }; -static_assert(sizeof(MaxwellUniformData) == 48, "MaxwellUniformData structure size is incorrect"); +static_assert(sizeof(MaxwellUniformData) == 64, "MaxwellUniformData structure size is incorrect"); static_assert(sizeof(MaxwellUniformData) < 16384, "MaxwellUniformData structure must be less than 16kb as per the OpenGL spec"); diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp index 1fe26a2a9..ba6c6919a 100644 --- a/src/video_core/renderer_opengl/gl_state.cpp +++ b/src/video_core/renderer_opengl/gl_state.cpp @@ -21,6 +21,8 @@ OpenGLState::OpenGLState() { depth.test_enabled = false; depth.test_func = GL_LESS; depth.write_mask = GL_TRUE; + depth.depth_range_near = 0.0f; + depth.depth_range_far = 1.0f; color_mask.red_enabled = GL_TRUE; color_mask.green_enabled = GL_TRUE; @@ -119,6 +121,12 @@ void OpenGLState::Apply() const { glDepthMask(depth.write_mask); } + // Depth range + if (depth.depth_range_near != cur_state.depth.depth_range_near || + depth.depth_range_far != cur_state.depth.depth_range_far) { + glDepthRange(depth.depth_range_near, depth.depth_range_far); + } + // Color mask if (color_mask.red_enabled != cur_state.color_mask.red_enabled || color_mask.green_enabled != cur_state.color_mask.green_enabled || diff --git 
a/src/video_core/renderer_opengl/gl_state.h b/src/video_core/renderer_opengl/gl_state.h index dc21a2ee3..daf7eb533 100644 --- a/src/video_core/renderer_opengl/gl_state.h +++ b/src/video_core/renderer_opengl/gl_state.h @@ -42,9 +42,11 @@ public: } cull; struct { - bool test_enabled; // GL_DEPTH_TEST - GLenum test_func; // GL_DEPTH_FUNC - GLboolean write_mask; // GL_DEPTH_WRITEMASK + bool test_enabled; // GL_DEPTH_TEST + GLenum test_func; // GL_DEPTH_FUNC + GLboolean write_mask; // GL_DEPTH_WRITEMASK + GLfloat depth_range_near; // GL_DEPTH_RANGE + GLfloat depth_range_far; // GL_DEPTH_RANGE } depth; struct { diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index 3c3bcaae4..0f6dcab2b 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h @@ -82,8 +82,20 @@ inline GLenum VertexType(Maxwell::VertexAttribute attrib) { return {}; } - case Maxwell::VertexAttribute::Type::Float: - return GL_FLOAT; + case Maxwell::VertexAttribute::Type::Float: { + switch (attrib.size) { + case Maxwell::VertexAttribute::Size::Size_16: + case Maxwell::VertexAttribute::Size::Size_16_16: + case Maxwell::VertexAttribute::Size::Size_16_16_16: + case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return GL_HALF_FLOAT; + case Maxwell::VertexAttribute::Size::Size_32: + case Maxwell::VertexAttribute::Size::Size_32_32: + case Maxwell::VertexAttribute::Size::Size_32_32_32: + case Maxwell::VertexAttribute::Size::Size_32_32_32_32: + return GL_FLOAT; + } + } } LOG_CRITICAL(Render_OpenGL, "Unimplemented vertex type={}", attrib.TypeString()); diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index 18ab723f7..550ca856c 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp @@ -142,7 +142,6 @@ void SwizzledData(u8* swizzled_data, u8* unswizzled_data, const bool unswizzle, const u32 blocks_on_x = div_ceil(width, block_x_elements); const u32 blocks_on_y = div_ceil(height, block_y_elements); const u32 blocks_on_z = div_ceil(depth, block_z_elements); - const u32 blocks = blocks_on_x * blocks_on_y * blocks_on_z; const u32 gob_size = gob_x_bytes * gob_elements_y * gob_elements_z; const u32 xy_block_size = gob_size * block_height; const u32 block_size = xy_block_size * block_depth; @@ -237,6 +236,46 @@ std::vector<u8> UnswizzleTexture(VAddr address, u32 tile_size, u32 bytes_per_pix return unswizzled_data; } +void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, + u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data, + u32 block_height) { + const u32 image_width_in_gobs{(swizzled_width * bytes_per_pixel + 63) / 64}; + for (u32 line = 0; line < subrect_height; ++line) { + const u32 gob_address_y = + (line / (8 * block_height)) * 512 * block_height * image_width_in_gobs + + (line % (8 * block_height) / 8) * 512; + const auto& table = legacy_swizzle_table[line % 8]; + for (u32 x = 0; x < subrect_width; ++x) { + const u32 gob_address = gob_address_y + (x * bytes_per_pixel / 64) * 512 * block_height; + const u32 swizzled_offset = gob_address + table[(x * bytes_per_pixel) % 64]; + const VAddr source_line = unswizzled_data + line * source_pitch + x * bytes_per_pixel; + const VAddr dest_addr = swizzled_data + swizzled_offset; + + Memory::CopyBlock(dest_addr, source_line, bytes_per_pixel); + } + } +} + +void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width, + 
u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
+ u32 block_height, u32 offset_x, u32 offset_y) {
+ for (u32 line = 0; line < subrect_height; ++line) {
+ const u32 y2 = line + offset_y;
+ const u32 gob_address_y =
+ (y2 / (8 * block_height)) * 512 * block_height + (y2 % (8 * block_height) / 8) * 512;
+ const auto& table = legacy_swizzle_table[y2 % 8];
+ for (u32 x = 0; x < subrect_width; ++x) {
+ const u32 x2 = (x + offset_x) * bytes_per_pixel;
+ const u32 gob_address = gob_address_y + (x2 / 64) * 512 * block_height;
+ const u32 swizzled_offset = gob_address + table[x2 % 64];
+ const VAddr dest_line = unswizzled_data + line * dest_pitch + x * bytes_per_pixel;
+ const VAddr source_addr = swizzled_data + swizzled_offset;
+
+ Memory::CopyBlock(dest_line, source_addr, bytes_per_pixel);
+ }
+ }
+}
+
std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat format, u32 width,
u32 height) {
std::vector<u8> rgba_data;
@@ -280,13 +319,13 @@ std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat
std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height, u32 block_depth) {
if (tiled) {
- const u32 gobs_in_x = 64 / bytes_per_pixel;
+ const u32 gobs_in_x = 64;
const u32 gobs_in_y = 8;
const u32 gobs_in_z = 1;
- const u32 aligned_width = Common::AlignUp(width, gobs_in_x);
+ const u32 aligned_width = Common::AlignUp(width * bytes_per_pixel, gobs_in_x);
const u32 aligned_height = Common::AlignUp(height, gobs_in_y * block_height);
const u32 aligned_depth = Common::AlignUp(depth, gobs_in_z * block_depth);
- return aligned_width * aligned_height * aligned_depth * bytes_per_pixel;
+ return aligned_width * aligned_height * aligned_depth;
} else {
return width * height * depth * bytes_per_pixel;
}
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index aaf316947..4726f54a5 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -35,4 +35,13 @@ std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat
std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height, u32 block_depth);
+/// Copies an untiled subrectangle into a tiled surface.
+void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
+ u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
+ u32 block_height);
+/// Copies a tiled subrectangle into a linear surface.
+void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width,
+ u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
+ u32 block_height, u32 offset_x, u32 offset_y);
+
} // namespace Tegra::Texture
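The subrectangle copies above all reduce to one address computation: a GOB is 64 bytes wide and 8 rows tall (512 bytes), and block-linear surfaces stack block_height GOBs vertically per block, with whole blocks then advancing across the image. The standalone sketch below restates SwizzleSubrect's addressing; GobOffset and SwizzledOffset are hypothetical helper names, and the in-GOB formula is an assumed reading of what legacy_swizzle_table caches, not code from this patch. Note that UnswizzleSubrect as written omits the image-width-in-GOBs factor in its block-row term, so the sketch matches SwizzleSubrect only.

#include <cstdint>

// Byte offset of (x, y) inside one 512-byte GOB; x is in bytes, not texels.
// Assumption: this bit interleaving is what legacy_swizzle_table caches per
// (y % 8, x % 64) entry.
constexpr uint32_t GobOffset(uint32_t x, uint32_t y) {
    return ((x % 64) / 32) * 256 + ((y % 8) / 2) * 64 + ((x % 32) / 16) * 32 +
           (y % 2) * 16 + (x % 16);
}

// Swizzled byte offset of byte column x, row y, for an image width_in_gobs GOBs
// wide with the given block height, following SwizzleSubrect's arithmetic.
constexpr uint32_t SwizzledOffset(uint32_t x, uint32_t y, uint32_t width_in_gobs,
                                  uint32_t block_height) {
    const uint32_t block_row = y / (8 * block_height);          // which row of blocks
    const uint32_t gob_in_block = (y % (8 * block_height)) / 8; // which GOB inside the block
    const uint32_t block_column = x / 64;                       // which block along the row
    return block_row * 512 * block_height * width_in_gobs +
           block_column * 512 * block_height + gob_in_block * 512 + GobOffset(x, y);
}

With the table lookup inlined this way, each inner-loop iteration of the copies is just a Memory::CopyBlock of bytes_per_pixel bytes between a linear pitch address and base + SwizzledOffset(...).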

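For the SSY/PBK/SYNC/BRK handling in the shader decompiler diff, the bodies of EmitPushToFlowStack and EmitPopFromFlowStack fall outside the shown hunks. A plausible sketch of what they generate, inferred only from the flow_stack/flow_stack_top declarations and the jmp_to dispatch loop visible above; ShaderWriter here is a hypothetical stand-in for the decompiler's line writer, not this patch's type.

#include <cstdint>
#include <string>

// Hypothetical stand-in for the decompiler's GLSL writer.
struct ShaderWriter {
    std::string text;
    void AddLine(const std::string& line) {
        text += line + '\n';
    }
};

// SSY and PBK both push a shader address onto the single shared stack.
void EmitPushToFlowStack(ShaderWriter& shader, uint32_t target) {
    // Emits, e.g.: flow_stack[flow_stack_top++] = 136u;
    shader.AddLine("flow_stack[flow_stack_top++] = " + std::to_string(target) + "u;");
}

// SYNC and BRK pop it. The emitted break leaves the generated switch so the outer
// "while (true)" loop re-dispatches on jmp_to, resuming at the pushed address.
void EmitPopFromFlowStack(ShaderWriter& shader) {
    shader.AddLine("{ jmp_to = flow_stack[--flow_stack_top]; break; }");
}

Because the two stacks are merged, in this model a SYNC could pop a PBK address without complaint; that is exactly the invalid-shader case the PBK comment chooses not to guard against.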