From eaff1030de07f3739794207403ea833ee91c0034 Mon Sep 17 00:00:00 2001
From: ameerj <52414509+ameerj@users.noreply.github.com>
Date: Wed, 19 May 2021 21:58:32 -0400
Subject: glsl: Initial backend

---
 src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 src/shader_recompiler/backend/glsl/emit_glsl_image.cpp

(limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp')

diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
new file mode 100644
index 000000000..e69de29bb
-- cgit v1.2.3

From 3d086e6130a2c5f0546ccef3b234c65ef2f0c99b Mon Sep 17 00:00:00 2001
From: ameerj <52414509+ameerj@users.noreply.github.com>
Date: Wed, 26 May 2021 00:16:20 -0400
Subject: glsl: Implement some attribute getters and setters

---
 .../backend/glsl/emit_glsl_image.cpp | 205 +++++++++++++++++++++
 1 file changed, 205 insertions(+)

(limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp')

diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index e69de29bb..109938e0e 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -0,0 +1,205 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <string_view>
+
+#include "shader_recompiler/backend/glsl/emit_context.h"
+#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
+#include "shader_recompiler/frontend/ir/value.h"
+#include "shader_recompiler/profile.h"
+
+namespace Shader::Backend::GLSL {
+
+void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                                [[maybe_unused]] const IR::Value& index,
+                                [[maybe_unused]] std::string_view coords,
+                                [[maybe_unused]] std::string_view bias_lc,
+                                [[maybe_unused]] const IR::Value& offset) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                                [[maybe_unused]] const IR::Value& index,
+                                [[maybe_unused]] std::string_view coords,
+                                [[maybe_unused]] std::string_view lod_lc,
+                                [[maybe_unused]] const IR::Value& offset) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx,
+                                    [[maybe_unused]] IR::Inst& inst,
+                                    [[maybe_unused]] const IR::Value& index,
+                                    [[maybe_unused]] std::string_view coords,
+                                    [[maybe_unused]] std::string_view dref,
+                                    [[maybe_unused]] std::string_view bias_lc,
+                                    [[maybe_unused]] const IR::Value& offset) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx,
+                                    [[maybe_unused]] IR::Inst& inst,
+                                    [[maybe_unused]] const IR::Value& index,
+                                    [[maybe_unused]] std::string_view coords,
+                                    [[maybe_unused]] std::string_view dref,
+                                    [[maybe_unused]] std::string_view lod_lc,
+                                    [[maybe_unused]] const IR::Value& offset) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
+                     [[maybe_unused]] const IR::Value& index,
+                     [[maybe_unused]] std::string_view coords,
+                     [[maybe_unused]] const IR::Value& offset,
+                     [[maybe_unused]] const IR::Value& offset2) {
+    throw NotImplementedException("GLSL Instruction");
+}
+
+void EmitImageGatherDref([[maybe_unused]]
EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, + [[maybe_unused]] std::string_view coords, + [[maybe_unused]] const IR::Value& offset, + [[maybe_unused]] const IR::Value& offset2, + [[maybe_unused]] std::string_view dref) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, + [[maybe_unused]] std::string_view coords, + [[maybe_unused]] std::string_view offset, [[maybe_unused]] std::string_view lod, + [[maybe_unused]] std::string_view ms) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitImageQueryDimensions([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, + [[maybe_unused]] std::string_view lod) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitImageQueryLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, + [[maybe_unused]] std::string_view coords) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, + [[maybe_unused]] std::string_view coords, + [[maybe_unused]] std::string_view derivates, + [[maybe_unused]] std::string_view offset, + [[maybe_unused]] std::string_view lod_clamp) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitImageRead([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, + [[maybe_unused]] std::string_view coords) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitImageWrite([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, + [[maybe_unused]] std::string_view coords, + [[maybe_unused]] std::string_view color) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageSampleImplicitLod(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageSampleExplicitLod(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageSampleDrefImplicitLod(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageSampleDrefExplicitLod(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageGather(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageGatherDref(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageFetch(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageQueryDimensions(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageQueryLod(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageGradient(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageRead(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBindlessImageWrite(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageSampleImplicitLod(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageSampleExplicitLod(EmitContext&) { + throw 
NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageSampleDrefImplicitLod(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageSampleDrefExplicitLod(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageGather(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageGatherDref(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageFetch(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageQueryDimensions(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageQueryLod(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageGradient(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageRead(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +void EmitBoundImageWrite(EmitContext&) { + throw NotImplementedException("GLSL Instruction"); +} + +} // namespace Shader::Backend::GLSL -- cgit v1.2.3 From d171083d53e106c8c5131522fdc81d51360c562d Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Wed, 26 May 2021 21:18:17 -0400 Subject: glsl: textures wip --- .../backend/glsl/emit_glsl_image.cpp | 24 +++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 109938e0e..cc5afc048 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -6,17 +6,39 @@ #include "shader_recompiler/backend/glsl/emit_context.h" #include "shader_recompiler/backend/glsl/emit_glsl_instructions.h" +#include "shader_recompiler/frontend/ir/modifiers.h" #include "shader_recompiler/frontend/ir/value.h" #include "shader_recompiler/profile.h" namespace Shader::Backend::GLSL { +namespace { +std::string Texture(EmitContext& ctx, IR::TextureInstInfo info, + [[maybe_unused]] const IR::Value& index) { + if (info.type == TextureType::Buffer) { + throw NotImplementedException("TextureType::Buffer"); + } else { + return fmt::format("tex{}", ctx.texture_bindings.at(info.descriptor_index)); + } +} +} // namespace void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, [[maybe_unused]] const IR::Value& index, [[maybe_unused]] std::string_view coords, [[maybe_unused]] std::string_view bias_lc, [[maybe_unused]] const IR::Value& offset) { - throw NotImplementedException("GLSL Instruction"); + const auto info{inst.Flags()}; + if (info.has_bias) { + throw NotImplementedException("Bias texture samples"); + } + if (info.has_lod_clamp) { + throw NotImplementedException("Lod clamp samples"); + } + if (!offset.IsEmpty()) { + throw NotImplementedException("Offset"); + } + const auto texture{Texture(ctx, info, index)}; + ctx.AddF32x4("{}=texture({},{});", inst, texture, coords); } void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, -- cgit v1.2.3 From a752ec88d06c6bcfb13605447a164c6b6915ed6e Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Thu, 27 May 2021 20:31:03 -0400 Subject: glsl: Implement derivatives and YDirection plus some other misc additions/changed 
--- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 3 +++ 1 file changed, 3 insertions(+) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index cc5afc048..1a348b117 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -37,6 +37,9 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse if (!offset.IsEmpty()) { throw NotImplementedException("Offset"); } + if (info.type != TextureType::Color2D) { + throw NotImplementedException("Texture type: {}", info.type.Value()); + } const auto texture{Texture(ctx, info, index)}; ctx.AddF32x4("{}=texture({},{});", inst, texture, coords); } -- cgit v1.2.3 From 6674637853009115833e132efce19c8e210f0471 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Thu, 27 May 2021 20:37:56 -0400 Subject: glsl: remove unused headers --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 2 -- 1 file changed, 2 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 1a348b117..6b7f1eaad 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -5,10 +5,8 @@ #include #include "shader_recompiler/backend/glsl/emit_context.h" -#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h" #include "shader_recompiler/frontend/ir/modifiers.h" #include "shader_recompiler/frontend/ir/value.h" -#include "shader_recompiler/profile.h" namespace Shader::Backend::GLSL { namespace { -- cgit v1.2.3 From 2a713337165df4d5c4228458999a680e9ab65369 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Thu, 27 May 2021 22:28:33 -0400 Subject: glsl: Fix bindings, add some CC ops --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 6b7f1eaad..c070fba0e 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -32,14 +32,13 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse if (info.has_lod_clamp) { throw NotImplementedException("Lod clamp samples"); } + const auto texture{Texture(ctx, info, index)}; if (!offset.IsEmpty()) { - throw NotImplementedException("Offset"); - } - if (info.type != TextureType::Color2D) { - throw NotImplementedException("Texture type: {}", info.type.Value()); + ctx.AddF32x4("{}=textureOffset({},{},ivec2({}));", inst, texture, coords, + ctx.reg_alloc.Consume(offset)); + } else { + ctx.AddF32x4("{}=texture({},{});", inst, texture, coords); } - const auto texture{Texture(ctx, info, index)}; - ctx.AddF32x4("{}=texture({},{});", inst, texture, coords); } void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, -- cgit v1.2.3 From 453cd25da57e4088826cb6df48b5b6856affe109 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Fri, 28 May 2021 13:55:07 -0400 Subject: 
glsl: SSBO access fixes and wip SampleExplicitLod implementation. --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index c070fba0e..1a34fe9b3 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -46,7 +46,20 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse [[maybe_unused]] std::string_view coords, [[maybe_unused]] std::string_view lod_lc, [[maybe_unused]] const IR::Value& offset) { - throw NotImplementedException("GLSL Instruction"); + const auto info{inst.Flags()}; + if (info.has_bias) { + throw NotImplementedException("Bias texture samples"); + } + if (info.has_lod_clamp) { + throw NotImplementedException("Lod clamp samples"); + } + const auto texture{Texture(ctx, info, index)}; + if (!offset.IsEmpty()) { + ctx.AddF32x4("{}=textureLodOffset({},{},{},ivec2({}));", inst, texture, coords, lod_lc, + ctx.reg_alloc.Consume(offset)); + } else { + ctx.AddF32x4("{}=textureLod({},{},{});", inst, texture, coords, lod_lc); + } } void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, -- cgit v1.2.3 From 55e0211a5e520482246273f2cc64388c4b4eff1c Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 29 May 2021 01:06:29 -0400 Subject: glsl: Implement TEX ImageSample functions --- .../backend/glsl/emit_glsl_image.cpp | 71 +++++++++++++++++++--- 1 file changed, 61 insertions(+), 10 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 1a34fe9b3..71eb3ac2b 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -10,7 +10,7 @@ namespace Shader::Backend::GLSL { namespace { -std::string Texture(EmitContext& ctx, IR::TextureInstInfo info, +std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, [[maybe_unused]] const IR::Value& index) { if (info.type == TextureType::Buffer) { throw NotImplementedException("TextureType::Buffer"); @@ -18,6 +18,32 @@ std::string Texture(EmitContext& ctx, IR::TextureInstInfo info, return fmt::format("tex{}", ctx.texture_bindings.at(info.descriptor_index)); } } + +std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info) { + switch (info.type) { + case TextureType::Color1D: + return fmt::format("int({})", value); + case TextureType::ColorArray1D: + case TextureType::Color2D: + return fmt::format("ivec2({})", value); + case TextureType::ColorArray2D: + case TextureType::Color3D: + case TextureType::ColorCube: + return fmt::format("ivec3({})", value); + case TextureType::ColorArrayCube: + return fmt::format("ivec4({})", value); + default: + throw NotImplementedException("Offset type {}", info.type.Value()); + } +} + +IR::Inst* PrepareSparse(IR::Inst& inst) { + const auto sparse_inst{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)}; + if (sparse_inst) { + sparse_inst->Invalidate(); + } + return sparse_inst; +} } // namespace void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, @@ -26,18 +52,30 @@ void 
EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse [[maybe_unused]] std::string_view bias_lc, [[maybe_unused]] const IR::Value& offset) { const auto info{inst.Flags()}; - if (info.has_bias) { - throw NotImplementedException("Bias texture samples"); - } if (info.has_lod_clamp) { throw NotImplementedException("Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; + const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; + const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; + const auto sparse_inst{PrepareSparse(inst)}; + if (!sparse_inst) { + if (!offset.IsEmpty()) { + ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords, + CastToIntVec(ctx.reg_alloc.Consume(offset), info), bias); + } else { + ctx.Add("{}=texture({},{}{});", texel, texture, coords, bias); + } + return; + } + // TODO: Query sparseTexels extension support if (!offset.IsEmpty()) { - ctx.AddF32x4("{}=textureOffset({},{},ivec2({}));", inst, texture, coords, - ctx.reg_alloc.Consume(offset)); + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureOffsetARB({},{},{},{}{}));", + *sparse_inst, texture, coords, CastToIntVec(ctx.reg_alloc.Consume(offset), info), + texel, bias); } else { - ctx.AddF32x4("{}=texture({},{});", inst, texture, coords); + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureARB({},{},{}{}));", *sparse_inst, + texture, coords, texel, bias); } } @@ -54,11 +92,24 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse throw NotImplementedException("Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; + const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; + const auto sparse_inst{PrepareSparse(inst)}; + if (!sparse_inst) { + if (!offset.IsEmpty()) { + ctx.Add("{}=textureLodOffset({},{},{},{});", texel, texture, coords, lod_lc, + CastToIntVec(ctx.reg_alloc.Consume(offset), info)); + } else { + ctx.Add("{}=textureLod({},{},{});", texel, texture, coords, lod_lc); + } + return; + } if (!offset.IsEmpty()) { - ctx.AddF32x4("{}=textureLodOffset({},{},{},ivec2({}));", inst, texture, coords, lod_lc, - ctx.reg_alloc.Consume(offset)); + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));", + *sparse_inst, texture, CastToIntVec(coords, info), lod_lc, + CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel); } else { - ctx.AddF32x4("{}=textureLod({},{},{});", inst, texture, coords, lod_lc); + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureLodARB({},{},{},{}));", *sparse_inst, + texture, coords, lod_lc, texel); } } -- cgit v1.2.3 From 7619b7d427437cb58df0f9fc57a7d6b3f5c45f9c Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 29 May 2021 01:53:32 -0400 Subject: glsl: Implement TEX depth functions --- .../backend/glsl/emit_glsl_image.cpp | 26 ++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 71eb3ac2b..4381ed351 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -120,7 +120,17 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view dref, [[maybe_unused]] std::string_view bias_lc, [[maybe_unused]] const IR::Value& offset) { - throw NotImplementedException("GLSL Instruction"); + const 
auto info{inst.Flags()}; + if (info.has_bias) { + throw NotImplementedException("Bias texture samples"); + } + if (info.has_lod_clamp) { + throw NotImplementedException("Lod clamp samples"); + } + const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; + const auto texture{Texture(ctx, info, index)}; + const auto vec_cast{info.type == TextureType::ColorArrayCube ? "vec4" : "vec3"}; + ctx.AddF32("{}=texture({},{}({},{}){});", inst, texture, vec_cast, dref, coords, bias); } void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, @@ -130,7 +140,19 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view dref, [[maybe_unused]] std::string_view lod_lc, [[maybe_unused]] const IR::Value& offset) { - throw NotImplementedException("GLSL Instruction"); + const auto info{inst.Flags()}; + if (info.has_bias) { + throw NotImplementedException("Bias texture samples"); + } + if (info.has_lod_clamp) { + throw NotImplementedException("Lod clamp samples"); + } + const auto texture{Texture(ctx, info, index)}; + if (info.type == TextureType::ColorArrayCube) { + ctx.AddF32("{}=textureLod({},{},{},{});", inst, texture, coords, dref, lod_lc); + } else { + ctx.AddF32("{}=textureLod({},vec3({},{}),{});", inst, texture, coords, dref, lod_lc); + } } void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, -- cgit v1.2.3 From c9a25855bc208c0bd878f430c8d9fa6e6df44e46 Mon Sep 17 00:00:00 2001 From: lat9nq <22451773+lat9nq@users.noreply.github.com> Date: Sat, 29 May 2021 02:09:29 -0400 Subject: shader_recompiler: GCC fixes --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 1 + 1 file changed, 1 insertion(+) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 4381ed351..6962f2b91 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -5,6 +5,7 @@ #include #include "shader_recompiler/backend/glsl/emit_context.h" +#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h" #include "shader_recompiler/frontend/ir/modifiers.h" #include "shader_recompiler/frontend/ir/value.h" -- cgit v1.2.3 From 59a692e9edf385d56f84f38006cf15fff4372d6b Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 29 May 2021 14:10:24 -0400 Subject: glsl: Cleanup texture functions --- .../backend/glsl/emit_glsl_image.cpp | 24 ++++++++++------------ 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 6962f2b91..68701ee52 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -26,8 +26,8 @@ std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info return fmt::format("int({})", value); case TextureType::ColorArray1D: case TextureType::Color2D: - return fmt::format("ivec2({})", value); case TextureType::ColorArray2D: + return fmt::format("ivec2({})", value); case TextureType::Color3D: case TextureType::ColorCube: return fmt::format("ivec3({})", value); @@ -65,7 +65,11 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse 
ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords, CastToIntVec(ctx.reg_alloc.Consume(offset), info), bias); } else { - ctx.Add("{}=texture({},{}{});", texel, texture, coords, bias); + if (ctx.stage == Stage::Fragment) { + ctx.Add("{}=texture({},{}{});", texel, texture, coords, bias); + } else { + ctx.Add("{}=textureLod({},{},0.0);", texel, texture, coords); + } } return; } @@ -104,6 +108,7 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse } return; } + // TODO: Query sparseTexels extension support if (!offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));", *sparse_inst, texture, CastToIntVec(coords, info), lod_lc, @@ -121,17 +126,7 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view dref, [[maybe_unused]] std::string_view bias_lc, [[maybe_unused]] const IR::Value& offset) { - const auto info{inst.Flags()}; - if (info.has_bias) { - throw NotImplementedException("Bias texture samples"); - } - if (info.has_lod_clamp) { - throw NotImplementedException("Lod clamp samples"); - } - const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; - const auto texture{Texture(ctx, info, index)}; - const auto vec_cast{info.type == TextureType::ColorArrayCube ? "vec4" : "vec3"}; - ctx.AddF32("{}=texture({},{}({},{}){});", inst, texture, vec_cast, dref, coords, bias); + throw NotImplementedException("GLSL Instruction"); } void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, @@ -148,6 +143,9 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, if (info.has_lod_clamp) { throw NotImplementedException("Lod clamp samples"); } + if (!offset.IsEmpty()) { + throw NotImplementedException("textureLodOffset"); + } const auto texture{Texture(ctx, info, index)}; if (info.type == TextureType::ColorArrayCube) { ctx.AddF32("{}=textureLod({},{},{},{});", inst, texture, coords, dref, lod_lc); -- cgit v1.2.3 From e4ba75570570007d4c85d6d28a4f890ce58b02e8 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 29 May 2021 14:21:25 -0400 Subject: glsl: Implement TEXS --- .../backend/glsl/emit_glsl_image.cpp | 30 +++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 68701ee52..d721b018b 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -38,6 +38,17 @@ std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info } } +std::string ShadowSamplerVecCast(TextureType type) { + switch (type) { + case TextureType::ColorArray2D: + case TextureType::ColorCube: + case TextureType::ColorArrayCube: + return "vec4"; + default: + return "vec3"; + } +} + IR::Inst* PrepareSparse(IR::Inst& inst) { const auto sparse_inst{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)}; if (sparse_inst) { @@ -126,7 +137,24 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view dref, [[maybe_unused]] std::string_view bias_lc, [[maybe_unused]] const IR::Value& offset) { - throw NotImplementedException("GLSL Instruction"); + const auto info{inst.Flags()}; + if (info.has_bias) { + throw NotImplementedException("Bias texture samples"); + } + if 
(info.has_lod_clamp) { + throw NotImplementedException("Lod clamp samples"); + } + if (!offset.IsEmpty()) { + throw NotImplementedException("textureLodOffset"); + } + const auto texture{Texture(ctx, info, index)}; + const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; + const auto cast{ShadowSamplerVecCast(info.type)}; + if (ctx.stage == Stage::Fragment) { + ctx.AddF32("{}=texture({},{}({},{}){});", inst, texture, cast, coords, dref, bias); + } else { + ctx.AddF32("{}=textureLod({},{}({},{}),0.0);", inst, texture, cast, coords, dref); + } } void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, -- cgit v1.2.3 From 697eacd095f6568e43285499bba433a4eafe65d3 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 29 May 2021 15:03:28 -0400 Subject: glsl: Implement TLD instruction --- .../backend/glsl/emit_glsl_image.cpp | 56 +++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index d721b018b..78e2d5bac 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -38,6 +38,24 @@ std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info } } +std::string TexelFetchCastToInt(std::string_view value, const IR::TextureInstInfo& info) { + switch (info.type) { + case TextureType::Color1D: + return fmt::format("int({})", value); + case TextureType::ColorArray1D: + case TextureType::Color2D: + return fmt::format("ivec2({})", value); + case TextureType::ColorArray2D: + case TextureType::Color3D: + case TextureType::ColorCube: + return fmt::format("ivec3({})", value); + case TextureType::ColorArrayCube: + return fmt::format("ivec4({})", value); + default: + throw NotImplementedException("Offset type {}", info.type.Value()); + } +} + std::string ShadowSamplerVecCast(TextureType type) { switch (type) { case TextureType::ColorArray2D: @@ -138,6 +156,10 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view bias_lc, [[maybe_unused]] const IR::Value& offset) { const auto info{inst.Flags()}; + const auto sparse_inst{PrepareSparse(inst)}; + if (sparse_inst) { + throw NotImplementedException("Sparse texture samples"); + } if (info.has_bias) { throw NotImplementedException("Bias texture samples"); } @@ -165,6 +187,10 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view lod_lc, [[maybe_unused]] const IR::Value& offset) { const auto info{inst.Flags()}; + const auto sparse_inst{PrepareSparse(inst)}; + if (sparse_inst) { + throw NotImplementedException("Sparse texture samples"); + } if (info.has_bias) { throw NotImplementedException("Bias texture samples"); } @@ -204,7 +230,35 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst [[maybe_unused]] std::string_view coords, [[maybe_unused]] std::string_view offset, [[maybe_unused]] std::string_view lod, [[maybe_unused]] std::string_view ms) { - throw NotImplementedException("GLSL Instruction"); + const auto info{inst.Flags()}; + if (info.has_bias) { + throw NotImplementedException("Bias texture samples"); + } + if (info.has_lod_clamp) { + throw NotImplementedException("Lod clamp samples"); + } + const auto texture{Texture(ctx, info, index)}; + const auto 
sparse_inst{PrepareSparse(inst)}; + const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; + if (!sparse_inst) { + if (!offset.empty()) { + ctx.Add("{}=texelFetchOffset({},{},int({}),{});", texel, texture, + TexelFetchCastToInt(coords, info), lod, TexelFetchCastToInt(offset, info)); + } else { + ctx.Add("{}=texelFetch({},{},int({}));", texel, texture, + TexelFetchCastToInt(coords, info), lod); + } + return; + } + // TODO: Query sparseTexels extension support + if (!offset.empty()) { + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));", + *sparse_inst, texture, CastToIntVec(coords, info), lod, + CastToIntVec(offset, info), texel); + } else { + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchARB({},{},{},{}));", *sparse_inst, + texture, CastToIntVec(coords, info), lod, texel); + } } void EmitImageQueryDimensions([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, -- cgit v1.2.3 From 5fd92780b2d463fd4668472c41ef32ae4c15e9e6 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 29 May 2021 16:58:33 -0400 Subject: glsl: TLD4 implementation --- .../backend/glsl/emit_glsl_image.cpp | 91 +++++++++++++++++++++- 1 file changed, 89 insertions(+), 2 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 78e2d5bac..e12d7b850 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -67,6 +67,23 @@ std::string ShadowSamplerVecCast(TextureType type) { } } +std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) { + const std::array values{offset.InstRecursive(), offset2.InstRecursive()}; + if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) { + // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING"); + return ""; + } + const IR::Opcode opcode{values[0]->GetOpcode()}; + if (opcode != values[1]->GetOpcode() || opcode != IR::Opcode::CompositeConstructU32x4) { + throw LogicError("Invalid PTP arguments"); + } + auto read{[&](unsigned int a, unsigned int b) { return values[a]->Arg(b).U32(); }}; + + return fmt::format("ivec2[](ivec2({},{}),ivec2({},{}),ivec2({},{}),ivec2({},{}))", read(0, 0), + read(0, 1), read(0, 2), read(0, 3), read(1, 0), read(1, 1), read(1, 2), + read(1, 3)); +} + IR::Inst* PrepareSparse(IR::Inst& inst) { const auto sparse_inst{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)}; if (sparse_inst) { @@ -213,7 +230,45 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins [[maybe_unused]] std::string_view coords, [[maybe_unused]] const IR::Value& offset, [[maybe_unused]] const IR::Value& offset2) { - throw NotImplementedException("GLSL Instruction"); + const auto info{inst.Flags()}; + const auto texture{Texture(ctx, info, index)}; + const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; + const auto sparse_inst{PrepareSparse(inst)}; + if (!offset2.IsEmpty()) { + ctx.Add("/*OFFSET 2 IS {}*/", ctx.reg_alloc.Consume(offset2)); + } + if (!sparse_inst) { + if (offset.IsEmpty()) { + ctx.Add("{}=textureGather({},{},int({}));", texel, texture, coords, + info.gather_component); + return; + } + if (offset2.IsEmpty()) { + ctx.Add("{}=textureGatherOffset({},{},{},int({}));", texel, texture, coords, + CastToIntVec(ctx.reg_alloc.Consume(offset), info), info.gather_component); + 
return; + } + // PTP + const auto offsets{PtpOffsets(offset, offset2)}; + ctx.Add("{}=textureGatherOffsets({},{},{},int({}));", texel, texture, coords, offsets, + info.gather_component); + return; + } + // TODO: Query sparseTexels extension support + if (offset.IsEmpty()) { + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},int({})));", + *sparse_inst, texture, coords, texel, info.gather_component); + } + if (offset2.IsEmpty()) { + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));", + *sparse_inst, texture, CastToIntVec(coords, info), + CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel, info.gather_component); + } + // PTP + const auto offsets{PtpOffsets(offset, offset2)}; + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));", + *sparse_inst, texture, CastToIntVec(coords, info), offsets, texel, + info.gather_component); } void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, @@ -222,7 +277,39 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR: [[maybe_unused]] const IR::Value& offset, [[maybe_unused]] const IR::Value& offset2, [[maybe_unused]] std::string_view dref) { - throw NotImplementedException("GLSL Instruction"); + const auto info{inst.Flags()}; + const auto texture{Texture(ctx, info, index)}; + const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; + const auto sparse_inst{PrepareSparse(inst)}; + if (!sparse_inst) { + if (offset.IsEmpty()) { + ctx.Add("{}=textureGather({},{},{});", texel, texture, coords, dref); + return; + } + if (offset2.IsEmpty()) { + ctx.Add("{}=textureGatherOffset({},{},{},{});", texel, texture, coords, dref, + CastToIntVec(ctx.reg_alloc.Consume(offset), info)); + return; + } + // PTP + const auto offsets{PtpOffsets(offset, offset2)}; + ctx.Add("{}=textureGatherOffsets({},{},{},{});", texel, texture, coords, dref, offsets); + return; + } + // TODO: Query sparseTexels extension support + if (offset.IsEmpty()) { + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},{}));", *sparse_inst, + texture, coords, dref, texel); + } + if (offset2.IsEmpty()) { + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},,{},{}));", + *sparse_inst, texture, CastToIntVec(coords, info), dref, + CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel); + } + // PTP + const auto offsets{PtpOffsets(offset, offset2)}; + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},,{},{}));", + *sparse_inst, texture, CastToIntVec(coords, info), dref, offsets, texel); } void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, -- cgit v1.2.3 From 3047eb66889a9782fadfbe479c33e6a8bfc5bf53 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 29 May 2021 18:08:19 -0400 Subject: glsl: Implement TXQ and other misc changes --- .../backend/glsl/emit_glsl_image.cpp | 24 +++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index e12d7b850..9213375b4 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -351,7 +351,29 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst void 
EmitImageQueryDimensions([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, [[maybe_unused]] const IR::Value& index, [[maybe_unused]] std::string_view lod) { - throw NotImplementedException("GLSL Instruction"); + const auto info{inst.Flags()}; + const auto texture{Texture(ctx, info, index)}; + switch (info.type) { + case TextureType::Color1D: + return ctx.AddU32x4( + "{}=uvec4(uint(textureSize({},int({}))),0u,0u,uint(textureQueryLevels({})));", inst, + texture, lod, texture); + case TextureType::ColorArray1D: + case TextureType::Color2D: + case TextureType::ColorCube: + return ctx.AddU32x4( + "{}=uvec4(uvec2(textureSize({},int({}))),0u,uint(textureQueryLevels({})));", inst, + texture, lod, texture); + case TextureType::ColorArray2D: + case TextureType::Color3D: + case TextureType::ColorArrayCube: + return ctx.AddU32x4( + "{}=uvec4(uvec3(textureSize({},int({}))),uint(textureQueryLevels({})));", inst, texture, + lod, texture); + case TextureType::Buffer: + throw NotImplementedException("Texture buffers"); + } + throw LogicError("Unspecified image type {}", info.type.Value()); } void EmitImageQueryLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, -- cgit v1.2.3 From 1542f31e7979a7bae465d299774268533a130f9b Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 29 May 2021 20:00:06 -0400 Subject: glsl: minor cleanup --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 3 --- 1 file changed, 3 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 9213375b4..d1f7c5d91 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -234,9 +234,6 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins const auto texture{Texture(ctx, info, index)}; const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; - if (!offset2.IsEmpty()) { - ctx.Add("/*OFFSET 2 IS {}*/", ctx.reg_alloc.Consume(offset2)); - } if (!sparse_inst) { if (offset.IsEmpty()) { ctx.Add("{}=textureGather({},{},int({}));", texel, texture, coords, -- cgit v1.2.3 From 7df0815117c6bdc70775d78b4625f44835ede54a Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 29 May 2021 21:12:52 -0400 Subject: glsl: Implement more instructions used by SMO --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index d1f7c5d91..e63e3f2bd 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -376,7 +376,9 @@ void EmitImageQueryDimensions([[maybe_unused]] EmitContext& ctx, [[maybe_unused] void EmitImageQueryLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, [[maybe_unused]] const IR::Value& index, [[maybe_unused]] std::string_view coords) { - throw NotImplementedException("GLSL Instruction"); + const auto info{inst.Flags()}; + const auto texture{Texture(ctx, info, index)}; + return ctx.AddF32x4("{}=vec4(textureQueryLod({},{}),0.0,0.0);", inst, texture, coords); } void 
EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, -- cgit v1.2.3 From 1269a0cf8b3844c1a9bb06c843a7698b0a9643d5 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sun, 30 May 2021 17:27:00 -0400 Subject: glsl: Rework variable allocator to allow for variable reuse --- .../backend/glsl/emit_glsl_image.cpp | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index e63e3f2bd..eb427d8b5 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -104,12 +104,12 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse } const auto texture{Texture(ctx, info, index)}; const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; - const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; + const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { if (!offset.IsEmpty()) { ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords, - CastToIntVec(ctx.reg_alloc.Consume(offset), info), bias); + CastToIntVec(ctx.var_alloc.Consume(offset), info), bias); } else { if (ctx.stage == Stage::Fragment) { ctx.Add("{}=texture({},{}{});", texel, texture, coords, bias); @@ -122,7 +122,7 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse // TODO: Query sparseTexels extension support if (!offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureOffsetARB({},{},{},{}{}));", - *sparse_inst, texture, coords, CastToIntVec(ctx.reg_alloc.Consume(offset), info), + *sparse_inst, texture, coords, CastToIntVec(ctx.var_alloc.Consume(offset), info), texel, bias); } else { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureARB({},{},{}{}));", *sparse_inst, @@ -143,12 +143,12 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse throw NotImplementedException("Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; - const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; + const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { if (!offset.IsEmpty()) { ctx.Add("{}=textureLodOffset({},{},{},{});", texel, texture, coords, lod_lc, - CastToIntVec(ctx.reg_alloc.Consume(offset), info)); + CastToIntVec(ctx.var_alloc.Consume(offset), info)); } else { ctx.Add("{}=textureLod({},{},{});", texel, texture, coords, lod_lc); } @@ -158,7 +158,7 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse if (!offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));", *sparse_inst, texture, CastToIntVec(coords, info), lod_lc, - CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel); + CastToIntVec(ctx.var_alloc.Consume(offset), info), texel); } else { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureLodARB({},{},{},{}));", *sparse_inst, texture, coords, lod_lc, texel); @@ -232,7 +232,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins [[maybe_unused]] const IR::Value& offset2) { const auto info{inst.Flags()}; const auto texture{Texture(ctx, info, index)}; - const auto texel{ctx.reg_alloc.Define(inst, 
Type::F32x4)}; + const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { if (offset.IsEmpty()) { @@ -242,7 +242,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins } if (offset2.IsEmpty()) { ctx.Add("{}=textureGatherOffset({},{},{},int({}));", texel, texture, coords, - CastToIntVec(ctx.reg_alloc.Consume(offset), info), info.gather_component); + CastToIntVec(ctx.var_alloc.Consume(offset), info), info.gather_component); return; } // PTP @@ -259,7 +259,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins if (offset2.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));", *sparse_inst, texture, CastToIntVec(coords, info), - CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel, info.gather_component); + CastToIntVec(ctx.var_alloc.Consume(offset), info), texel, info.gather_component); } // PTP const auto offsets{PtpOffsets(offset, offset2)}; @@ -276,7 +276,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR: [[maybe_unused]] std::string_view dref) { const auto info{inst.Flags()}; const auto texture{Texture(ctx, info, index)}; - const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; + const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { if (offset.IsEmpty()) { @@ -285,7 +285,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR: } if (offset2.IsEmpty()) { ctx.Add("{}=textureGatherOffset({},{},{},{});", texel, texture, coords, dref, - CastToIntVec(ctx.reg_alloc.Consume(offset), info)); + CastToIntVec(ctx.var_alloc.Consume(offset), info)); return; } // PTP @@ -301,7 +301,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR: if (offset2.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},,{},{}));", *sparse_inst, texture, CastToIntVec(coords, info), dref, - CastToIntVec(ctx.reg_alloc.Consume(offset), info), texel); + CastToIntVec(ctx.var_alloc.Consume(offset), info), texel); } // PTP const auto offsets{PtpOffsets(offset, offset2)}; @@ -323,7 +323,7 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst } const auto texture{Texture(ctx, info, index)}; const auto sparse_inst{PrepareSparse(inst)}; - const auto texel{ctx.reg_alloc.Define(inst, Type::F32x4)}; + const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; if (!sparse_inst) { if (!offset.empty()) { ctx.Add("{}=texelFetchOffset({},{},int({}),{});", texel, texture, -- cgit v1.2.3 From 3a024b302622068f4842715a7f0b31652898a606 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Mon, 31 May 2021 01:12:52 -0400 Subject: glsl: Implement gl_ViewportIndex SSBU now working --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index eb427d8b5..f339f4ade 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -71,7 +71,7 @@ std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) { const std::array values{offset.InstRecursive(), 
offset2.InstRecursive()}; if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) { // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING"); - return ""; + return "ivec2[](ivec2(0), ivec2(1), ivec2(2), ivec2(3))"; } const IR::Opcode opcode{values[0]->GetOpcode()}; if (opcode != values[1]->GetOpcode() || opcode != IR::Opcode::CompositeConstructU32x4) { @@ -340,8 +340,8 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst *sparse_inst, texture, CastToIntVec(coords, info), lod, CastToIntVec(offset, info), texel); } else { - ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchARB({},{},{},{}));", *sparse_inst, - texture, CastToIntVec(coords, info), lod, texel); + ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchARB({},{},int({}),{}));", + *sparse_inst, texture, CastToIntVec(coords, info), lod, texel); } } -- cgit v1.2.3 From df53046d68b26b23ced683396ebc204d96176c8e Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Mon, 31 May 2021 12:53:40 -0400 Subject: glsl: Use NotImplemented macro with function name output --- .../backend/glsl/emit_glsl_image.cpp | 54 +++++++++++----------- 1 file changed, 27 insertions(+), 27 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index f339f4ade..3de19cdfe 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -387,116 +387,116 @@ void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I [[maybe_unused]] std::string_view derivates, [[maybe_unused]] std::string_view offset, [[maybe_unused]] std::string_view lod_clamp) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitImageRead([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, [[maybe_unused]] const IR::Value& index, [[maybe_unused]] std::string_view coords) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitImageWrite([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, [[maybe_unused]] const IR::Value& index, [[maybe_unused]] std::string_view coords, [[maybe_unused]] std::string_view color) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageSampleImplicitLod(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageSampleExplicitLod(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageSampleDrefImplicitLod(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageSampleDrefExplicitLod(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageGather(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageGatherDref(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageFetch(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageQueryDimensions(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageQueryLod(EmitContext&) { - throw 
NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageGradient(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageRead(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBindlessImageWrite(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageSampleImplicitLod(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageSampleExplicitLod(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageSampleDrefImplicitLod(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageSampleDrefExplicitLod(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageGather(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageGatherDref(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageFetch(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageQueryDimensions(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageQueryLod(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageGradient(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageRead(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } void EmitBoundImageWrite(EmitContext&) { - throw NotImplementedException("GLSL Instruction"); + NotImplemented(); } } // namespace Shader::Backend::GLSL -- cgit v1.2.3 From c7d085b505ab6a766bf37b34030fc9fcb5b662b7 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Mon, 31 May 2021 16:03:20 -0400 Subject: glsl: Implement ImageGradient and other texture function variants --- .../backend/glsl/emit_glsl_image.cpp | 101 +++++++++++++++------ 1 file changed, 71 insertions(+), 30 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 3de19cdfe..c62451e23 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -100,7 +100,7 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse [[maybe_unused]] const IR::Value& offset) { const auto info{inst.Flags()}; if (info.has_lod_clamp) { - throw NotImplementedException("Lod clamp samples"); + throw NotImplementedException("EmitImageSampleImplicitLod Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; const auto bias{info.has_bias ? 
fmt::format(",{}", bias_lc) : ""}; @@ -108,8 +108,12 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { if (!offset.IsEmpty()) { - ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords, - CastToIntVec(ctx.var_alloc.Consume(offset), info), bias); + const auto offset_str{CastToIntVec(ctx.var_alloc.Consume(offset), info)}; + if (ctx.stage == Stage::Fragment) { + ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords, offset_str, bias); + } else { + ctx.Add("{}=textureLodOffset({},{},0.0,{});", texel, texture, coords, offset_str); + } } else { if (ctx.stage == Stage::Fragment) { ctx.Add("{}=texture({},{}{});", texel, texture, coords, bias); @@ -137,10 +141,10 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse [[maybe_unused]] const IR::Value& offset) { const auto info{inst.Flags()}; if (info.has_bias) { - throw NotImplementedException("Bias texture samples"); + throw NotImplementedException("EmitImageSampleExplicitLod Bias texture samples"); } if (info.has_lod_clamp) { - throw NotImplementedException("Lod clamp samples"); + throw NotImplementedException("EmitImageSampleExplicitLod Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; @@ -175,24 +179,32 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, const auto info{inst.Flags()}; const auto sparse_inst{PrepareSparse(inst)}; if (sparse_inst) { - throw NotImplementedException("Sparse texture samples"); + throw NotImplementedException("EmitImageSampleDrefImplicitLod Sparse texture samples"); } if (info.has_bias) { - throw NotImplementedException("Bias texture samples"); + throw NotImplementedException("EmitImageSampleDrefImplicitLod Bias texture samples"); } if (info.has_lod_clamp) { - throw NotImplementedException("Lod clamp samples"); - } - if (!offset.IsEmpty()) { - throw NotImplementedException("textureLodOffset"); + throw NotImplementedException("EmitImageSampleDrefImplicitLod Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; const auto bias{info.has_bias ? 
fmt::format(",{}", bias_lc) : ""}; const auto cast{ShadowSamplerVecCast(info.type)}; - if (ctx.stage == Stage::Fragment) { - ctx.AddF32("{}=texture({},{}({},{}){});", inst, texture, cast, coords, dref, bias); + if (!offset.IsEmpty()) { + const auto offset_str{CastToIntVec(ctx.var_alloc.Consume(offset), info)}; + if (ctx.stage == Stage::Fragment) { + ctx.AddF32("{}=textureOffset({},{}({},{}),{}{});", inst, texture, cast, coords, dref, + offset_str, bias); + } else { + ctx.AddF32("{}=textureLodOffset({},{}({},{}),0.0,{});", inst, texture, cast, coords, + dref, offset_str); + } } else { - ctx.AddF32("{}=textureLod({},{}({},{}),0.0);", inst, texture, cast, coords, dref); + if (ctx.stage == Stage::Fragment) { + ctx.AddF32("{}=texture({},{}({},{}){});", inst, texture, cast, coords, dref, bias); + } else { + ctx.AddF32("{}=textureLod({},{}({},{}),0.0);", inst, texture, cast, coords, dref); + } } } @@ -206,22 +218,30 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, const auto info{inst.Flags()}; const auto sparse_inst{PrepareSparse(inst)}; if (sparse_inst) { - throw NotImplementedException("Sparse texture samples"); + throw NotImplementedException("EmitImageSampleDrefExplicitLod Sparse texture samples"); } if (info.has_bias) { - throw NotImplementedException("Bias texture samples"); + throw NotImplementedException("EmitImageSampleDrefExplicitLod Bias texture samples"); } if (info.has_lod_clamp) { - throw NotImplementedException("Lod clamp samples"); - } - if (!offset.IsEmpty()) { - throw NotImplementedException("textureLodOffset"); + throw NotImplementedException("EmitImageSampleDrefExplicitLod Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; - if (info.type == TextureType::ColorArrayCube) { - ctx.AddF32("{}=textureLod({},{},{},{});", inst, texture, coords, dref, lod_lc); + if (!offset.IsEmpty()) { + const auto offset_str{CastToIntVec(ctx.var_alloc.Consume(offset), info)}; + if (info.type == TextureType::ColorArrayCube) { + ctx.AddF32("{}=textureLodOffset({},{},{},{},{});", inst, texture, coords, dref, lod_lc, + offset_str); + } else { + ctx.AddF32("{}=textureLodOffset({},vec3({},{}),{},{});", inst, texture, coords, dref, + lod_lc, offset_str); + } } else { - ctx.AddF32("{}=textureLod({},vec3({},{}),{});", inst, texture, coords, dref, lod_lc); + if (info.type == TextureType::ColorArrayCube) { + ctx.AddF32("{}=textureLod({},{},{},{});", inst, texture, coords, dref, lod_lc); + } else { + ctx.AddF32("{}=textureLod({},vec3({},{}),{});", inst, texture, coords, dref, lod_lc); + } } } @@ -316,10 +336,10 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst [[maybe_unused]] std::string_view ms) { const auto info{inst.Flags()}; if (info.has_bias) { - throw NotImplementedException("Bias texture samples"); + throw NotImplementedException("EmitImageFetch Bias texture samples"); } if (info.has_lod_clamp) { - throw NotImplementedException("Lod clamp samples"); + throw NotImplementedException("EmitImageFetch Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; const auto sparse_inst{PrepareSparse(inst)}; @@ -368,7 +388,7 @@ void EmitImageQueryDimensions([[maybe_unused]] EmitContext& ctx, [[maybe_unused] "{}=uvec4(uvec3(textureSize({},int({}))),uint(textureQueryLevels({})));", inst, texture, lod, texture); case TextureType::Buffer: - throw NotImplementedException("Texture buffers"); + throw NotImplementedException("EmitImageQueryDimensions Texture buffers"); } throw LogicError("Unspecified image type {}", 
info.type.Value()); } @@ -384,10 +404,31 @@ void EmitImageQueryLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, [[maybe_unused]] const IR::Value& index, [[maybe_unused]] std::string_view coords, - [[maybe_unused]] std::string_view derivates, - [[maybe_unused]] std::string_view offset, - [[maybe_unused]] std::string_view lod_clamp) { - NotImplemented(); + [[maybe_unused]] const IR::Value& derivatives, + [[maybe_unused]] const IR::Value& offset, + [[maybe_unused]] const IR::Value& lod_clamp) { + const auto info{inst.Flags()}; + if (info.has_lod_clamp) { + throw NotImplementedException("EmitImageGradient Lod clamp samples"); + } + const auto sparse_inst{PrepareSparse(inst)}; + if (sparse_inst) { + throw NotImplementedException("EmitImageGradient Sparse"); + } + if (!offset.IsEmpty()) { + throw NotImplementedException("EmitImageGradient offset"); + } + const auto texture{Texture(ctx, info, index)}; + const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; + const bool multi_component{info.num_derivates > 1 || info.has_lod_clamp}; + const auto derivatives_vec{ctx.var_alloc.Consume(derivatives)}; + if (multi_component) { + ctx.Add("{}=textureGrad({},{},vec2({}.xz),vec2({}.yz));", texel, texture, coords, + derivatives_vec, derivatives_vec); + } else { + ctx.Add("{}=textureGrad({},{},float({}.x),float({}.y));", texel, texture, coords, + derivatives_vec, derivatives_vec); + } } void EmitImageRead([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, -- cgit v1.2.3 From af9696059cc24e07fba2920814725e56c3c61df0 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Wed, 2 Jun 2021 20:37:24 -0400 Subject: glsl: Implement Images --- .../backend/glsl/emit_glsl_image.cpp | 33 ++++++++++++++++++---- 1 file changed, 28 insertions(+), 5 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index c62451e23..8c54f0fb3 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -14,15 +14,25 @@ namespace { std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, [[maybe_unused]] const IR::Value& index) { if (info.type == TextureType::Buffer) { - throw NotImplementedException("TextureType::Buffer"); + return fmt::format("tex{}", ctx.texture_buffer_bindings.at(info.descriptor_index)); } else { return fmt::format("tex{}", ctx.texture_bindings.at(info.descriptor_index)); } } +std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info, + [[maybe_unused]] const IR::Value& index) { + if (info.type == TextureType::Buffer) { + return fmt::format("img{}", ctx.image_buffer_bindings.at(info.descriptor_index)); + } else { + return fmt::format("img{}", ctx.image_bindings.at(info.descriptor_index)); + } +} + std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info) { switch (info.type) { case TextureType::Color1D: + case TextureType::Buffer: return fmt::format("int({})", value); case TextureType::ColorArray1D: case TextureType::Color2D: @@ -41,6 +51,7 @@ std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info std::string TexelFetchCastToInt(std::string_view value, const IR::TextureInstInfo& info) { switch (info.type) { case TextureType::Color1D: + case TextureType::Buffer: 
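// Buffer textures are addressed with a single signed texel index, so they reuse the scalar
// int() cast of the 1D path; the fetch path then emits e.g. texelFetch(tex0, int(coords)).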
return fmt::format("int({})", value); case TextureType::ColorArray1D: case TextureType::Color2D: @@ -349,8 +360,12 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst ctx.Add("{}=texelFetchOffset({},{},int({}),{});", texel, texture, TexelFetchCastToInt(coords, info), lod, TexelFetchCastToInt(offset, info)); } else { - ctx.Add("{}=texelFetch({},{},int({}));", texel, texture, - TexelFetchCastToInt(coords, info), lod); + if (info.type == TextureType::Buffer) { + ctx.Add("{}=texelFetch({},int({}));", texel, texture, coords); + } else { + ctx.Add("{}=texelFetch({},{},int({}));", texel, texture, + TexelFetchCastToInt(coords, info), lod); + } } return; } @@ -434,14 +449,22 @@ void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I void EmitImageRead([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, [[maybe_unused]] const IR::Value& index, [[maybe_unused]] std::string_view coords) { - NotImplemented(); + const auto info{inst.Flags()}; + const auto sparse_inst{PrepareSparse(inst)}; + if (sparse_inst) { + throw NotImplementedException("EmitImageRead Sparse"); + } + const auto image{Image(ctx, info, index)}; + ctx.AddU32x4("{}=uvec4(imageLoad({},{}));", inst, image, TexelFetchCastToInt(coords, info)); } void EmitImageWrite([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, [[maybe_unused]] const IR::Value& index, [[maybe_unused]] std::string_view coords, [[maybe_unused]] std::string_view color) { - NotImplemented(); + const auto info{inst.Flags()}; + const auto image{Image(ctx, info, index)}; + ctx.Add("imageStore({},{},{});", image, TexelFetchCastToInt(coords, info), color); } void EmitBindlessImageSampleImplicitLod(EmitContext&) { -- cgit v1.2.3 From 8d8ce24f20649be639dbb3cc0f3edc90c6a6481e Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Thu, 3 Jun 2021 19:15:36 -0400 Subject: glsl: Implement Load/WriteGlobal along with some other misc changes and fixes --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 8c54f0fb3..37ddd57d3 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -212,7 +212,11 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, } } else { if (ctx.stage == Stage::Fragment) { - ctx.AddF32("{}=texture({},{}({},{}){});", inst, texture, cast, coords, dref, bias); + if (info.type == TextureType::ColorArrayCube) { + ctx.AddF32("{}=texture({},vec4({}),{});", inst, texture, coords, dref); + } else { + ctx.AddF32("{}=texture({},{}({},{}){});", inst, texture, cast, coords, dref, bias); + } } else { ctx.AddF32("{}=textureLod({},{}({},{}),0.0);", inst, texture, cast, coords, dref); } @@ -238,6 +242,7 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, throw NotImplementedException("EmitImageSampleDrefExplicitLod Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; + const auto cast{ShadowSamplerVecCast(info.type)}; if (!offset.IsEmpty()) { const auto offset_str{CastToIntVec(ctx.var_alloc.Consume(offset), info)}; if (info.type == TextureType::ColorArrayCube) { @@ -251,7 +256,8 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, if (info.type 
== TextureType::ColorArrayCube) { ctx.AddF32("{}=textureLod({},{},{},{});", inst, texture, coords, dref, lod_lc); } else { - ctx.AddF32("{}=textureLod({},vec3({},{}),{});", inst, texture, coords, dref, lod_lc); + ctx.AddF32("{}=textureLod({},{}({},{}),{});", inst, texture, cast, coords, dref, + lod_lc); } } } -- cgit v1.2.3 From 34fdb6471d6050b438fd53a0406aedbf6b690600 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Thu, 3 Jun 2021 20:57:52 -0400 Subject: glsl: Cleanup and address feedback --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 37ddd57d3..ce3a82656 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -102,7 +102,7 @@ IR::Inst* PrepareSparse(IR::Inst& inst) { } return sparse_inst; } -} // namespace +} // Anonymous namespace void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, [[maybe_unused]] const IR::Value& index, -- cgit v1.2.3 From d12f2b8ccf74671224c6f8f90873d74f35625762 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Thu, 3 Jun 2021 23:18:38 -0400 Subject: emit_glsl_image: Use immediate offsets when possible --- .../backend/glsl/emit_glsl_image.cpp | 45 ++++++++++++++++------ 1 file changed, 33 insertions(+), 12 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index ce3a82656..a62e2b181 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -78,6 +78,28 @@ std::string ShadowSamplerVecCast(TextureType type) { } } +std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) { + if (offset.IsImmediate()) { + return fmt::format("int({})", offset.U32()); + } + IR::Inst* const inst{offset.InstRecursive()}; + if (inst->AreAllArgsImmediates()) { + switch (inst->GetOpcode()) { + case IR::Opcode::CompositeConstructU32x2: + return fmt::format("ivec2({},{})", inst->Arg(0).U32(), inst->Arg(1).U32()); + case IR::Opcode::CompositeConstructU32x3: + return fmt::format("ivec3({},{},{})", inst->Arg(0).U32(), inst->Arg(1).U32(), + inst->Arg(2).U32()); + case IR::Opcode::CompositeConstructU32x4: + return fmt::format("ivec4({},{},{},{})", inst->Arg(0).U32(), inst->Arg(1).U32(), + inst->Arg(2).U32(), inst->Arg(3).U32()); + default: + break; + } + } + return ctx.var_alloc.Consume(offset); +} + std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) { const std::array values{offset.InstRecursive(), offset2.InstRecursive()}; if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) { @@ -119,7 +141,7 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { if (!offset.IsEmpty()) { - const auto offset_str{CastToIntVec(ctx.var_alloc.Consume(offset), info)}; + const auto offset_str{GetOffsetVec(ctx, offset)}; if (ctx.stage == Stage::Fragment) { ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords, offset_str, bias); } else { @@ -137,8 +159,7 @@ void 
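// GLSL requires the offset of textureOffset()/texelFetchOffset() to be a constant expression,
// so GetOffsetVec folds composites of immediates into literals such as ivec2(1,-1) and only
// falls back to the run-time value (cast to a signed vector) when that is not possible.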
EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse // TODO: Query sparseTexels extension support if (!offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureOffsetARB({},{},{},{}{}));", - *sparse_inst, texture, coords, CastToIntVec(ctx.var_alloc.Consume(offset), info), - texel, bias); + *sparse_inst, texture, coords, GetOffsetVec(ctx, offset), texel, bias); } else { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureARB({},{},{}{}));", *sparse_inst, texture, coords, texel, bias); @@ -163,7 +184,7 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse if (!sparse_inst) { if (!offset.IsEmpty()) { ctx.Add("{}=textureLodOffset({},{},{},{});", texel, texture, coords, lod_lc, - CastToIntVec(ctx.var_alloc.Consume(offset), info)); + GetOffsetVec(ctx, offset)); } else { ctx.Add("{}=textureLod({},{},{});", texel, texture, coords, lod_lc); } @@ -173,7 +194,7 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse if (!offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));", *sparse_inst, texture, CastToIntVec(coords, info), lod_lc, - CastToIntVec(ctx.var_alloc.Consume(offset), info), texel); + GetOffsetVec(ctx, offset), texel); } else { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureLodARB({},{},{},{}));", *sparse_inst, texture, coords, lod_lc, texel); @@ -202,7 +223,7 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; const auto cast{ShadowSamplerVecCast(info.type)}; if (!offset.IsEmpty()) { - const auto offset_str{CastToIntVec(ctx.var_alloc.Consume(offset), info)}; + const auto offset_str{GetOffsetVec(ctx, offset)}; if (ctx.stage == Stage::Fragment) { ctx.AddF32("{}=textureOffset({},{}({},{}),{}{});", inst, texture, cast, coords, dref, offset_str, bias); @@ -244,7 +265,7 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, const auto texture{Texture(ctx, info, index)}; const auto cast{ShadowSamplerVecCast(info.type)}; if (!offset.IsEmpty()) { - const auto offset_str{CastToIntVec(ctx.var_alloc.Consume(offset), info)}; + const auto offset_str{GetOffsetVec(ctx, offset)}; if (info.type == TextureType::ColorArrayCube) { ctx.AddF32("{}=textureLodOffset({},{},{},{},{});", inst, texture, coords, dref, lod_lc, offset_str); @@ -279,7 +300,7 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins } if (offset2.IsEmpty()) { ctx.Add("{}=textureGatherOffset({},{},{},int({}));", texel, texture, coords, - CastToIntVec(ctx.var_alloc.Consume(offset), info), info.gather_component); + GetOffsetVec(ctx, offset), info.gather_component); return; } // PTP @@ -295,8 +316,8 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins } if (offset2.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));", - *sparse_inst, texture, CastToIntVec(coords, info), - CastToIntVec(ctx.var_alloc.Consume(offset), info), texel, info.gather_component); + *sparse_inst, texture, CastToIntVec(coords, info), GetOffsetVec(ctx, offset), + texel, info.gather_component); } // PTP const auto offsets{PtpOffsets(offset, offset2)}; @@ -322,7 +343,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR: } if (offset2.IsEmpty()) { ctx.Add("{}=textureGatherOffset({},{},{},{});", texel, texture, coords, dref, - CastToIntVec(ctx.var_alloc.Consume(offset), info)); + 
GetOffsetVec(ctx, offset)); return; } // PTP @@ -338,7 +359,7 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR: if (offset2.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},,{},{}));", *sparse_inst, texture, CastToIntVec(coords, info), dref, - CastToIntVec(ctx.var_alloc.Consume(offset), info), texel); + GetOffsetVec(ctx, offset), texel); } // PTP const auto offsets{PtpOffsets(offset, offset2)}; -- cgit v1.2.3 From 747b8556a4611791c1b0afbb500c77de57adfc54 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Fri, 4 Jun 2021 00:46:46 -0400 Subject: glsl: Use textureGrad fallback when EXT_texture_shadow_lod is unsupported --- .../backend/glsl/emit_glsl_image.cpp | 44 ++++++++++++++++++---- 1 file changed, 37 insertions(+), 7 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index a62e2b181..6cf0300ab 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -8,6 +8,7 @@ #include "shader_recompiler/backend/glsl/emit_glsl_instructions.h" #include "shader_recompiler/frontend/ir/modifiers.h" #include "shader_recompiler/frontend/ir/value.h" +#include "shader_recompiler/profile.h" namespace Shader::Backend::GLSL { namespace { @@ -67,14 +68,14 @@ std::string TexelFetchCastToInt(std::string_view value, const IR::TextureInstInf } } -std::string ShadowSamplerVecCast(TextureType type) { +bool NeedsShadowLodExt(TextureType type) { switch (type) { case TextureType::ColorArray2D: case TextureType::ColorCube: case TextureType::ColorArrayCube: - return "vec4"; + return true; default: - return "vec3"; + return false; } } @@ -221,7 +222,22 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, } const auto texture{Texture(ctx, info, index)}; const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; - const auto cast{ShadowSamplerVecCast(info.type)}; + const bool needs_shadow_ext{NeedsShadowLodExt(info.type)}; + const auto cast{needs_shadow_ext ? "vec4" : "vec3"}; + const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && + ctx.stage != Stage::Fragment && needs_shadow_ext}; + if (use_grad) { + // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback"); + if (info.type == TextureType::ColorArrayCube) { + // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing"); + ctx.AddF32("{}=0.0f;", inst); + return; + } + const auto d_cast{info.type == TextureType::ColorArray2D ? "vec2" : "vec3"}; + ctx.AddF32("{}=textureGrad({},{}({},{}),{}(0),{}(0));", inst, texture, cast, coords, dref, + d_cast, d_cast); + return; + } if (!offset.IsEmpty()) { const auto offset_str{GetOffsetVec(ctx, offset)}; if (ctx.stage == Stage::Fragment) { @@ -263,15 +279,29 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, throw NotImplementedException("EmitImageSampleDrefExplicitLod Lod clamp samples"); } const auto texture{Texture(ctx, info, index)}; - const auto cast{ShadowSamplerVecCast(info.type)}; + const bool needs_shadow_ext{NeedsShadowLodExt(info.type)}; + const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext}; + const auto cast{needs_shadow_ext ? "vec4" : "vec3"}; + if (use_grad) { + // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. 
Using textureGrad fallback"); + if (info.type == TextureType::ColorArrayCube) { + // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing"); + ctx.AddF32("{}=0.0f;", inst); + return; + } + const auto d_cast{info.type == TextureType::ColorArray2D ? "vec2" : "vec3"}; + ctx.AddF32("{}=textureGrad({},{}({},{}),{}(0),{}(0));", inst, texture, cast, coords, dref, + d_cast, d_cast); + return; + } if (!offset.IsEmpty()) { const auto offset_str{GetOffsetVec(ctx, offset)}; if (info.type == TextureType::ColorArrayCube) { ctx.AddF32("{}=textureLodOffset({},{},{},{},{});", inst, texture, coords, dref, lod_lc, offset_str); } else { - ctx.AddF32("{}=textureLodOffset({},vec3({},{}),{},{});", inst, texture, coords, dref, - lod_lc, offset_str); + ctx.AddF32("{}=textureLodOffset({},{}({},{}),{},{});", inst, texture, cast, coords, + dref, lod_lc, offset_str); } } else { if (info.type == TextureType::ColorArrayCube) { -- cgit v1.2.3 From d41aef03c74e15fb8927bbae741c099694d14e79 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Fri, 4 Jun 2021 13:24:34 -0400 Subject: glsl: Fix image gather logic --- src/shader_recompiler/backend/glsl/emit_glsl_image.cpp | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 6cf0300ab..f022c5f30 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -343,11 +343,13 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins if (offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},int({})));", *sparse_inst, texture, coords, texel, info.gather_component); + return; } if (offset2.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));", *sparse_inst, texture, CastToIntVec(coords, info), GetOffsetVec(ctx, offset), texel, info.gather_component); + return; } // PTP const auto offsets{PtpOffsets(offset, offset2)}; @@ -385,11 +387,13 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR: if (offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},{}));", *sparse_inst, texture, coords, dref, texel); + return; } if (offset2.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},,{},{}));", *sparse_inst, texture, CastToIntVec(coords, info), dref, GetOffsetVec(ctx, offset), texel); + return; } // PTP const auto offsets{PtpOffsets(offset, offset2)}; -- cgit v1.2.3 From 421847cf1e33d5b95c9aa272bf3cf69afda3d964 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 5 Jun 2021 02:41:29 -0400 Subject: glsl: Implement image atomics and set layer along with some more cleanup/oversight fixes --- .../backend/glsl/emit_glsl_image.cpp | 188 ++++++++++++++++++++- 1 file changed, 185 insertions(+), 3 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index f022c5f30..e3a69e3a5 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -45,7 +45,7 @@ std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& 
info case TextureType::ColorArrayCube: return fmt::format("ivec4({})", value); default: - throw NotImplementedException("Offset type {}", info.type.Value()); + throw NotImplementedException("Integer cast for TextureType {}", info.type.Value()); } } @@ -64,7 +64,7 @@ std::string TexelFetchCastToInt(std::string_view value, const IR::TextureInstInf case TextureType::ColorArrayCube: return fmt::format("ivec4({})", value); default: - throw NotImplementedException("Offset type {}", info.type.Value()); + throw NotImplementedException("TexelFetchCast type {}", info.type.Value()); } } @@ -98,7 +98,19 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) { break; } } - return ctx.var_alloc.Consume(offset); + const auto offset_str{ctx.var_alloc.Consume(offset)}; + switch (offset.Type()) { + case IR::Type::U32: + return fmt::format("int({})", offset_str); + case IR::Type::U32x2: + return fmt::format("ivec2({})", offset_str); + case IR::Type::U32x3: + return fmt::format("ivec3({})", offset_str); + case IR::Type::U32x4: + return fmt::format("ivec4({})", offset_str); + default: + throw NotImplementedException("Offset type {}", offset.Type()); + } } std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) { @@ -528,6 +540,88 @@ void EmitImageWrite([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst ctx.Add("imageStore({},{},{});", image, TexelFetchCastToInt(coords, info), color); } +void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { + const auto info{inst.Flags()}; + const auto image{Image(ctx, info, index)}; + ctx.AddU32("{}=imageAtomicAdd({},{},{});", inst, image, TexelFetchCastToInt(coords, info), + value); +} + +void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { + const auto info{inst.Flags()}; + const auto image{Image(ctx, info, index)}; + ctx.AddU32("{}=imageAtomicMin({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info), + value); +} + +void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { + const auto info{inst.Flags()}; + const auto image{Image(ctx, info, index)}; + ctx.AddU32("{}=imageAtomicMin({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info), + value); +} + +void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { + const auto info{inst.Flags()}; + const auto image{Image(ctx, info, index)}; + ctx.AddU32("{}=imageAtomicMax({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info), + value); +} + +void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { + const auto info{inst.Flags()}; + const auto image{Image(ctx, info, index)}; + ctx.AddU32("{}=imageAtomicMax({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info), + value); +} + +void EmitImageAtomicInc32(EmitContext&, IR::Inst&, const IR::Value&, std::string_view, + std::string_view) { + NotImplemented(); +} + +void EmitImageAtomicDec32(EmitContext&, IR::Inst&, const IR::Value&, std::string_view, + std::string_view) { + NotImplemented(); +} + +void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { + const auto info{inst.Flags()}; + const auto image{Image(ctx, 
info, index)}; + ctx.AddU32("{}=imageAtomicAnd({},{},{});", inst, image, TexelFetchCastToInt(coords, info), + value); +} + +void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { + const auto info{inst.Flags()}; + const auto image{Image(ctx, info, index)}; + ctx.AddU32("{}=imageAtomicOr({},{},{});", inst, image, TexelFetchCastToInt(coords, info), + value); +} + +void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { + const auto info{inst.Flags()}; + const auto image{Image(ctx, info, index)}; + ctx.AddU32("{}=imageAtomicXor({},{},{});", inst, image, TexelFetchCastToInt(coords, info), + value); +} + +void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { + const auto info{inst.Flags()}; + const auto image{Image(ctx, info, index)}; + ctx.AddU32("{}=imageAtomicExchange({},{},{});", inst, image, TexelFetchCastToInt(coords, info), + value); +} + void EmitBindlessImageSampleImplicitLod(EmitContext&) { NotImplemented(); } @@ -624,4 +718,92 @@ void EmitBoundImageWrite(EmitContext&) { NotImplemented(); } +void EmitBindlessImageAtomicIAdd32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicSMin32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicUMin32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicSMax32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicUMax32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicInc32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicDec32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicAnd32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicOr32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicXor32(EmitContext&) { + NotImplemented(); +} + +void EmitBindlessImageAtomicExchange32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicIAdd32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicSMin32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicUMin32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicSMax32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicUMax32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicInc32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicDec32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicAnd32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicOr32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicXor32(EmitContext&) { + NotImplemented(); +} + +void EmitBoundImageAtomicExchange32(EmitContext&) { + NotImplemented(); +} + } // namespace Shader::Backend::GLSL -- cgit v1.2.3 From 85399e119d6d61375fd9304d69bdfb3a85522d2a Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Thu, 10 Jun 2021 00:29:19 -0400 Subject: glsl: Reorganize backend code, remove unneeded [[maybe_unused]] --- .../backend/glsl/emit_glsl_image.cpp | 170 +++++++++------------ 1 file changed, 74 insertions(+), 96 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index e3a69e3a5..00fe288e2 
100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -12,8 +12,7 @@ namespace Shader::Backend::GLSL { namespace { -std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, - [[maybe_unused]] const IR::Value& index) { +std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info) { if (info.type == TextureType::Buffer) { return fmt::format("tex{}", ctx.texture_buffer_bindings.at(info.descriptor_index)); } else { @@ -21,8 +20,7 @@ std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, } } -std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info, - [[maybe_unused]] const IR::Value& index) { +std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info) { if (info.type == TextureType::Buffer) { return fmt::format("img{}", ctx.image_buffer_bindings.at(info.descriptor_index)); } else { @@ -139,16 +137,14 @@ IR::Inst* PrepareSparse(IR::Inst& inst) { } } // Anonymous namespace -void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords, - [[maybe_unused]] std::string_view bias_lc, - [[maybe_unused]] const IR::Value& offset) { +void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, std::string_view coords, + std::string_view bias_lc, const IR::Value& offset) { const auto info{inst.Flags()}; if (info.has_lod_clamp) { throw NotImplementedException("EmitImageSampleImplicitLod Lod clamp samples"); } - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; @@ -179,11 +175,9 @@ void EmitImageSampleImplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse } } -void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords, - [[maybe_unused]] std::string_view lod_lc, - [[maybe_unused]] const IR::Value& offset) { +void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, std::string_view coords, + std::string_view lod_lc, const IR::Value& offset) { const auto info{inst.Flags()}; if (info.has_bias) { throw NotImplementedException("EmitImageSampleExplicitLod Bias texture samples"); @@ -191,7 +185,7 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse if (info.has_lod_clamp) { throw NotImplementedException("EmitImageSampleExplicitLod Lod clamp samples"); } - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { @@ -214,13 +208,10 @@ void EmitImageSampleExplicitLod([[maybe_unused]] EmitContext& ctx, [[maybe_unuse } } -void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, - [[maybe_unused]] IR::Inst& inst, +void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords, - [[maybe_unused]] std::string_view dref, - [[maybe_unused]] std::string_view bias_lc, - [[maybe_unused]] const IR::Value& offset) { + std::string_view 
coords, std::string_view dref, + std::string_view bias_lc, const IR::Value& offset) { const auto info{inst.Flags()}; const auto sparse_inst{PrepareSparse(inst)}; if (sparse_inst) { @@ -232,7 +223,7 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, if (info.has_lod_clamp) { throw NotImplementedException("EmitImageSampleDrefImplicitLod Lod clamp samples"); } - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; const bool needs_shadow_ext{NeedsShadowLodExt(info.type)}; const auto cast{needs_shadow_ext ? "vec4" : "vec3"}; @@ -272,13 +263,10 @@ void EmitImageSampleDrefImplicitLod([[maybe_unused]] EmitContext& ctx, } } -void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, - [[maybe_unused]] IR::Inst& inst, +void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords, - [[maybe_unused]] std::string_view dref, - [[maybe_unused]] std::string_view lod_lc, - [[maybe_unused]] const IR::Value& offset) { + std::string_view coords, std::string_view dref, + std::string_view lod_lc, const IR::Value& offset) { const auto info{inst.Flags()}; const auto sparse_inst{PrepareSparse(inst)}; if (sparse_inst) { @@ -290,7 +278,7 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, if (info.has_lod_clamp) { throw NotImplementedException("EmitImageSampleDrefExplicitLod Lod clamp samples"); } - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; const bool needs_shadow_ext{NeedsShadowLodExt(info.type)}; const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext}; const auto cast{needs_shadow_ext ? 
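// Shadow lookups pack the depth reference into the coordinate vector, so the array/cube
// shadow types need a vec4 where the simple types fit in a vec3; their LOD variants are only
// available with GL_EXT_texture_shadow_lod, which is why these helpers fall back to
// textureGrad when that extension is unsupported.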
"vec4" : "vec3"}; @@ -325,13 +313,10 @@ void EmitImageSampleDrefExplicitLod([[maybe_unused]] EmitContext& ctx, } } -void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords, - [[maybe_unused]] const IR::Value& offset, - [[maybe_unused]] const IR::Value& offset2) { +void EmitImageGather(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, + std::string_view coords, const IR::Value& offset, const IR::Value& offset2) { const auto info{inst.Flags()}; - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { @@ -370,14 +355,11 @@ void EmitImageGather([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Ins info.gather_component); } -void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords, - [[maybe_unused]] const IR::Value& offset, - [[maybe_unused]] const IR::Value& offset2, - [[maybe_unused]] std::string_view dref) { +void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, + std::string_view coords, const IR::Value& offset, const IR::Value& offset2, + std::string_view dref) { const auto info{inst.Flags()}; - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { @@ -413,10 +395,8 @@ void EmitImageGatherDref([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR: *sparse_inst, texture, CastToIntVec(coords, info), dref, offsets, texel); } -void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords, - [[maybe_unused]] std::string_view offset, [[maybe_unused]] std::string_view lod, +void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, + std::string_view coords, std::string_view offset, std::string_view lod, [[maybe_unused]] std::string_view ms) { const auto info{inst.Flags()}; if (info.has_bias) { @@ -425,7 +405,7 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst if (info.has_lod_clamp) { throw NotImplementedException("EmitImageFetch Lod clamp samples"); } - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; const auto sparse_inst{PrepareSparse(inst)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; if (!sparse_inst) { @@ -453,11 +433,10 @@ void EmitImageFetch([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst } } -void EmitImageQueryDimensions([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view lod) { +void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, std::string_view lod) { const auto info{inst.Flags()}; - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; switch (info.type) { case TextureType::Color1D: return ctx.AddU32x4( @@ -481,20 +460,16 @@ void EmitImageQueryDimensions([[maybe_unused]] EmitContext& ctx, [[maybe_unused] 
throw LogicError("Unspecified image type {}", info.type.Value()); } -void EmitImageQueryLod([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords) { +void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, + std::string_view coords) { const auto info{inst.Flags()}; - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; return ctx.AddF32x4("{}=vec4(textureQueryLod({},{}),0.0,0.0);", inst, texture, coords); } -void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords, - [[maybe_unused]] const IR::Value& derivatives, - [[maybe_unused]] const IR::Value& offset, - [[maybe_unused]] const IR::Value& lod_clamp) { +void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, + std::string_view coords, const IR::Value& derivatives, + const IR::Value& offset, [[maybe_unused]] const IR::Value& lod_clamp) { const auto info{inst.Flags()}; if (info.has_lod_clamp) { throw NotImplementedException("EmitImageGradient Lod clamp samples"); @@ -506,7 +481,7 @@ void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I if (!offset.IsEmpty()) { throw NotImplementedException("EmitImageGradient offset"); } - const auto texture{Texture(ctx, info, index)}; + const auto texture{Texture(ctx, info)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const bool multi_component{info.num_derivates > 1 || info.has_lod_clamp}; const auto derivatives_vec{ctx.var_alloc.Consume(derivatives)}; @@ -519,63 +494,65 @@ void EmitImageGradient([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::I } } -void EmitImageRead([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords) { +void EmitImageRead(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, + std::string_view coords) { const auto info{inst.Flags()}; const auto sparse_inst{PrepareSparse(inst)}; if (sparse_inst) { throw NotImplementedException("EmitImageRead Sparse"); } - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32x4("{}=uvec4(imageLoad({},{}));", inst, image, TexelFetchCastToInt(coords, info)); } -void EmitImageWrite([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, - [[maybe_unused]] std::string_view coords, - [[maybe_unused]] std::string_view color) { +void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, + std::string_view coords, std::string_view color) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.Add("imageStore({},{},{});", image, TexelFetchCastToInt(coords, info), color); } -void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, - std::string_view coords, std::string_view value) { +void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, std::string_view coords, + std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32("{}=imageAtomicAdd({},{},{});", inst, image, TexelFetchCastToInt(coords, 
info), value); } -void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, - std::string_view coords, std::string_view value) { +void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, std::string_view coords, + std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32("{}=imageAtomicMin({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, - std::string_view coords, std::string_view value) { +void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, std::string_view coords, + std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32("{}=imageAtomicMin({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, - std::string_view coords, std::string_view value) { +void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, std::string_view coords, + std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32("{}=imageAtomicMax({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, - std::string_view coords, std::string_view value) { +void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, std::string_view coords, + std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32("{}=imageAtomicMax({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info), value); } @@ -590,34 +567,35 @@ void EmitImageAtomicDec32(EmitContext&, IR::Inst&, const IR::Value&, std::string NotImplemented(); } -void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, +void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32("{}=imageAtomicAnd({},{},{});", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, +void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32("{}=imageAtomicOr({},{},{});", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, +void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32("{}=imageAtomicXor({},{},{});", inst, image, TexelFetchCastToInt(coords, 
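// imageAtomicMin/imageAtomicMax pick their signed or unsigned overload from the operand type,
// so the value is wrapped in int() or uint() accordingly; the image is assumed to be declared
// with a matching r32i / r32ui layout, as GLSL image atomics are only defined for 32-bit
// integer formats.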
info), value); } -void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, - std::string_view coords, std::string_view value) { +void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, + [[maybe_unused]] const IR::Value& index, std::string_view coords, + std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info, index)}; + const auto image{Image(ctx, info)}; ctx.AddU32("{}=imageAtomicExchange({},{},{});", inst, image, TexelFetchCastToInt(coords, info), value); } -- cgit v1.2.3 From e81c73a8748ccfcde56acfee5630116c3950e479 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Fri, 11 Jun 2021 02:50:30 -0400 Subject: glsl: Address more feedback. Implement indexed texture reads --- .../backend/glsl/emit_glsl_image.cpp | 136 ++++++++++----------- 1 file changed, 63 insertions(+), 73 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 00fe288e2..6a98f7ac2 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -12,20 +12,18 @@ namespace Shader::Backend::GLSL { namespace { -std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info) { - if (info.type == TextureType::Buffer) { - return fmt::format("tex{}", ctx.texture_buffer_bindings.at(info.descriptor_index)); - } else { - return fmt::format("tex{}", ctx.texture_bindings.at(info.descriptor_index)); - } +std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, const IR::Value& index) { + const auto def{info.type == TextureType::Buffer ? ctx.texture_buffers.at(info.descriptor_index) + : ctx.textures.at(info.descriptor_index)}; + const auto index_offset{def.count > 1 ? fmt::format("[{}]", ctx.var_alloc.Consume(index)) : ""}; + return fmt::format("tex{}{}", def.binding, index_offset); } -std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info) { - if (info.type == TextureType::Buffer) { - return fmt::format("img{}", ctx.image_buffer_bindings.at(info.descriptor_index)); - } else { - return fmt::format("img{}", ctx.image_bindings.at(info.descriptor_index)); - } +std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info, const IR::Value& index) { + const auto def{info.type == TextureType::Buffer ? ctx.image_buffers.at(info.descriptor_index) + : ctx.images.at(info.descriptor_index)}; + const auto index_offset{def.count > 1 ? fmt::format("[{}]", ctx.var_alloc.Consume(index)) : ""}; + return fmt::format("img{}{}", def.binding, index_offset); } std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info) { @@ -137,14 +135,14 @@ IR::Inst* PrepareSparse(IR::Inst& inst) { } } // Anonymous namespace -void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, std::string_view coords, - std::string_view bias_lc, const IR::Value& offset) { +void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view bias_lc, + const IR::Value& offset) { const auto info{inst.Flags()}; if (info.has_lod_clamp) { throw NotImplementedException("EmitImageSampleImplicitLod Lod clamp samples"); } - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; const auto bias{info.has_bias ? 
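// With indexed descriptors one binding can cover an array of samplers, so Texture()/Image()
// append a dynamic subscript whenever def.count > 1; the resulting identifier then reads like
// "tex0[idx]" rather than plain "tex0" (binding and index names here are illustrative).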
fmt::format(",{}", bias_lc) : ""}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; @@ -175,9 +173,9 @@ void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, } } -void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, std::string_view coords, - std::string_view lod_lc, const IR::Value& offset) { +void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view lod_lc, + const IR::Value& offset) { const auto info{inst.Flags()}; if (info.has_bias) { throw NotImplementedException("EmitImageSampleExplicitLod Bias texture samples"); @@ -185,7 +183,7 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, if (info.has_lod_clamp) { throw NotImplementedException("EmitImageSampleExplicitLod Lod clamp samples"); } - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { @@ -208,8 +206,7 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, } } -void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, +void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, std::string_view dref, std::string_view bias_lc, const IR::Value& offset) { const auto info{inst.Flags()}; @@ -223,7 +220,7 @@ void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, if (info.has_lod_clamp) { throw NotImplementedException("EmitImageSampleDrefImplicitLod Lod clamp samples"); } - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""}; const bool needs_shadow_ext{NeedsShadowLodExt(info.type)}; const auto cast{needs_shadow_ext ? "vec4" : "vec3"}; @@ -263,8 +260,7 @@ void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, } } -void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, +void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, std::string_view dref, std::string_view lod_lc, const IR::Value& offset) { const auto info{inst.Flags()}; @@ -278,7 +274,7 @@ void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, if (info.has_lod_clamp) { throw NotImplementedException("EmitImageSampleDrefExplicitLod Lod clamp samples"); } - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; const bool needs_shadow_ext{NeedsShadowLodExt(info.type)}; const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext}; const auto cast{needs_shadow_ext ? 
"vec4" : "vec3"}; @@ -313,10 +309,10 @@ void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, } } -void EmitImageGather(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, const IR::Value& offset, const IR::Value& offset2) { const auto info{inst.Flags()}; - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { @@ -355,11 +351,11 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR info.gather_component); } -void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, const IR::Value& offset, const IR::Value& offset2, std::string_view dref) { const auto info{inst.Flags()}; - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; if (!sparse_inst) { @@ -395,7 +391,7 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] cons *sparse_inst, texture, CastToIntVec(coords, info), dref, offsets, texel); } -void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, std::string_view offset, std::string_view lod, [[maybe_unused]] std::string_view ms) { const auto info{inst.Flags()}; @@ -405,7 +401,7 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR: if (info.has_lod_clamp) { throw NotImplementedException("EmitImageFetch Lod clamp samples"); } - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; const auto sparse_inst{PrepareSparse(inst)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; if (!sparse_inst) { @@ -433,10 +429,10 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR: } } -void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, std::string_view lod) { +void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view lod) { const auto info{inst.Flags()}; - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; switch (info.type) { case TextureType::Color1D: return ctx.AddU32x4( @@ -460,14 +456,14 @@ void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, throw LogicError("Unspecified image type {}", info.type.Value()); } -void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords) { const auto info{inst.Flags()}; - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; return ctx.AddF32x4("{}=vec4(textureQueryLod({},{}),0.0,0.0);", inst, texture, coords); } -void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, const IR::Value& 
derivatives, const IR::Value& offset, [[maybe_unused]] const IR::Value& lod_clamp) { const auto info{inst.Flags()}; @@ -481,7 +477,7 @@ void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const if (!offset.IsEmpty()) { throw NotImplementedException("EmitImageGradient offset"); } - const auto texture{Texture(ctx, info)}; + const auto texture{Texture(ctx, info, index)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const bool multi_component{info.num_derivates > 1 || info.has_lod_clamp}; const auto derivatives_vec{ctx.var_alloc.Consume(derivatives)}; @@ -494,65 +490,60 @@ void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const } } -void EmitImageRead(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords) { const auto info{inst.Flags()}; const auto sparse_inst{PrepareSparse(inst)}; if (sparse_inst) { throw NotImplementedException("EmitImageRead Sparse"); } - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32x4("{}=uvec4(imageLoad({},{}));", inst, image, TexelFetchCastToInt(coords, info)); } -void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, std::string_view color) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.Add("imageStore({},{},{});", image, TexelFetchCastToInt(coords, info), color); } -void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, std::string_view coords, - std::string_view value) { +void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32("{}=imageAtomicAdd({},{},{});", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, std::string_view coords, - std::string_view value) { +void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32("{}=imageAtomicMin({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, std::string_view coords, - std::string_view value) { +void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32("{}=imageAtomicMin({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, std::string_view coords, - std::string_view value) { +void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto 
image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32("{}=imageAtomicMax({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, std::string_view coords, - std::string_view value) { +void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32("{}=imageAtomicMax({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info), value); } @@ -567,35 +558,34 @@ void EmitImageAtomicDec32(EmitContext&, IR::Inst&, const IR::Value&, std::string NotImplemented(); } -void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32("{}=imageAtomicAnd({},{},{});", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32("{}=imageAtomicOr({},{},{});", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index, +void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32("{}=imageAtomicXor({},{},{});", inst, image, TexelFetchCastToInt(coords, info), value); } -void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, - [[maybe_unused]] const IR::Value& index, std::string_view coords, - std::string_view value) { +void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, + std::string_view coords, std::string_view value) { const auto info{inst.Flags()}; - const auto image{Image(ctx, info)}; + const auto image{Image(ctx, info, index)}; ctx.AddU32("{}=imageAtomicExchange({},{},{});", inst, image, TexelFetchCastToInt(coords, info), value); } -- cgit v1.2.3 From 39c29664f9aff5069c3a06435b1430db9903ff86 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Sat, 12 Jun 2021 01:36:33 -0400 Subject: glsl: Minor cleanup --- .../backend/glsl/emit_glsl_image.cpp | 32 ++++++++++------------ 1 file changed, 14 insertions(+), 18 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index 6a98f7ac2..51181d1c1 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -45,7 +45,7 @@ std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info } } -std::string TexelFetchCastToInt(std::string_view value, const 
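The patch above threads the texture descriptor index through to the Texture()/Image() helpers instead of discarding it. As a rough illustration of the shape of GLSL this backend produces, here is a minimal sketch built around the textureQueryLod format string visible in the hunk; the #version line, binding, and variable names are assumptions for the sketch, not taken from the patch.

#version 450
// Hypothetical fragment shader resembling what EmitImageQueryLod generates.
// "tex0" and "coords" are placeholder names; the real backend allocates its own identifiers.
layout(binding = 0) uniform sampler2D tex0;
layout(location = 0) in vec2 coords;
layout(location = 0) out vec4 color;

void main() {
    // vec4(textureQueryLod(...), 0.0, 0.0) mirrors the format string emitted above.
    vec4 lod_query = vec4(textureQueryLod(tex0, coords), 0.0, 0.0);
    color = lod_query;
}
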
From 39c29664f9aff5069c3a06435b1430db9903ff86 Mon Sep 17 00:00:00 2001
From: ameerj <52414509+ameerj@users.noreply.github.com>
Date: Sat, 12 Jun 2021 01:36:33 -0400
Subject: glsl: Minor cleanup

---
 .../backend/glsl/emit_glsl_image.cpp               | 32 ++++++++++------------
 1 file changed, 14 insertions(+), 18 deletions(-)

(limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp')

diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index 6a98f7ac2..51181d1c1 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -45,7 +45,7 @@ std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info
     }
 }
 
-std::string TexelFetchCastToInt(std::string_view value, const IR::TextureInstInfo& info) {
+std::string CoordsCastToInt(std::string_view value, const IR::TextureInstInfo& info) {
     switch (info.type) {
     case TextureType::Color1D:
     case TextureType::Buffer:
@@ -407,13 +407,13 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
     if (!sparse_inst) {
         if (!offset.empty()) {
             ctx.Add("{}=texelFetchOffset({},{},int({}),{});", texel, texture,
-                    TexelFetchCastToInt(coords, info), lod, TexelFetchCastToInt(offset, info));
+                    CoordsCastToInt(coords, info), lod, CoordsCastToInt(offset, info));
         } else {
             if (info.type == TextureType::Buffer) {
                 ctx.Add("{}=texelFetch({},int({}));", texel, texture, coords);
             } else {
                 ctx.Add("{}=texelFetch({},{},int({}));", texel, texture,
-                        TexelFetchCastToInt(coords, info), lod);
+                        CoordsCastToInt(coords, info), lod);
             }
         }
         return;
@@ -498,29 +498,28 @@ void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
         throw NotImplementedException("EmitImageRead Sparse");
     }
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32x4("{}=uvec4(imageLoad({},{}));", inst, image, TexelFetchCastToInt(coords, info));
+    ctx.AddU32x4("{}=uvec4(imageLoad({},{}));", inst, image, CoordsCastToInt(coords, info));
 }
 
 void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                     std::string_view coords, std::string_view color) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.Add("imageStore({},{},{});", image, TexelFetchCastToInt(coords, info), color);
+    ctx.Add("imageStore({},{},{});", image, CoordsCastToInt(coords, info), color);
 }
 
 void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                            std::string_view coords, std::string_view value) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32("{}=imageAtomicAdd({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
-               value);
+    ctx.AddU32("{}=imageAtomicAdd({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
 }
 
 void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                            std::string_view coords, std::string_view value) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32("{}=imageAtomicMin({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info),
+    ctx.AddU32("{}=imageAtomicMin({},{},int({}));", inst, image, CoordsCastToInt(coords, info),
                value);
 }
 
@@ -528,7 +527,7 @@ void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& in
                            std::string_view coords, std::string_view value) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32("{}=imageAtomicMin({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info),
+    ctx.AddU32("{}=imageAtomicMin({},{},uint({}));", inst, image, CoordsCastToInt(coords, info),
                value);
 }
 
@@ -536,7 +535,7 @@ void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& in
                            std::string_view coords, std::string_view value) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32("{}=imageAtomicMax({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info),
+    ctx.AddU32("{}=imageAtomicMax({},{},int({}));", inst, image, CoordsCastToInt(coords, info),
                value);
 }
 
@@ -544,7 +543,7 @@ void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& in
                            std::string_view coords, std::string_view value) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32("{}=imageAtomicMax({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info),
+    ctx.AddU32("{}=imageAtomicMax({},{},uint({}));", inst, image, CoordsCastToInt(coords, info),
                value);
 }
 
@@ -562,31 +561,28 @@ void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& ind
                           std::string_view coords, std::string_view value) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32("{}=imageAtomicAnd({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
-               value);
+    ctx.AddU32("{}=imageAtomicAnd({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
 }
 
 void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                          std::string_view coords, std::string_view value) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32("{}=imageAtomicOr({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
-               value);
+    ctx.AddU32("{}=imageAtomicOr({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
 }
 
 void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                           std::string_view coords, std::string_view value) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32("{}=imageAtomicXor({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
-               value);
+    ctx.AddU32("{}=imageAtomicXor({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
 }
 
 void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                                std::string_view coords, std::string_view value) {
     const auto info{inst.Flags()};
     const auto image{Image(ctx, info, index)};
-    ctx.AddU32("{}=imageAtomicExchange({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
+    ctx.AddU32("{}=imageAtomicExchange({},{},{});", inst, image, CoordsCastToInt(coords, info),
                value);
 }
--
cgit v1.2.3

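The rename above reflects that the helper is used for any coordinate that must become an integer vector (texelFetch, imageLoad, imageStore, and the image atomics), not only texel fetches. A minimal sketch of the kind of GLSL an imageStore path produces, assuming a 2D image with an rgba8 format; the binding, format, and variable names are illustrative only and not taken from the patch.

#version 450
layout(local_size_x = 1) in;
// Placeholder binding and format; the backend derives these from the image descriptor.
layout(binding = 0, rgba8) uniform writeonly image2D img0;

void main() {
    // Coordinates arrive as floats in the IR, hence the ivec2 cast that CoordsCastToInt emits.
    vec2 coords = vec2(gl_GlobalInvocationID.xy);
    imageStore(img0, ivec2(coords), vec4(1.0, 0.0, 0.0, 1.0));
}
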
STUBBING"); + ctx.AddU1("{}=true;", *sparse_inst); + } + if (!sparse_inst || !supports_sparse) { if (!offset.IsEmpty()) { const auto offset_str{GetOffsetVec(ctx, offset)}; if (ctx.stage == Stage::Fragment) { @@ -163,7 +172,6 @@ void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu } return; } - // TODO: Query sparseTexels extension support if (!offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureOffsetARB({},{},{},{}{}));", *sparse_inst, texture, coords, GetOffsetVec(ctx, offset), texel, bias); @@ -186,7 +194,12 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu const auto texture{Texture(ctx, info, index)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; - if (!sparse_inst) { + const bool supports_sparse{ctx.profile.support_gl_sparse_textures}; + if (sparse_inst && !supports_sparse) { + // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING"); + ctx.AddU1("{}=true;", *sparse_inst); + } + if (!sparse_inst || !supports_sparse) { if (!offset.IsEmpty()) { ctx.Add("{}=textureLodOffset({},{},{},{});", texel, texture, coords, lod_lc, GetOffsetVec(ctx, offset)); @@ -195,7 +208,6 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu } return; } - // TODO: Query sparseTexels extension support if (!offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));", *sparse_inst, texture, CastToIntVec(coords, info), lod_lc, @@ -315,7 +327,12 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, const auto texture{Texture(ctx, info, index)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; - if (!sparse_inst) { + const bool supports_sparse{ctx.profile.support_gl_sparse_textures}; + if (sparse_inst && !supports_sparse) { + // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING"); + ctx.AddU1("{}=true;", *sparse_inst); + } + if (!sparse_inst || !supports_sparse) { if (offset.IsEmpty()) { ctx.Add("{}=textureGather({},{},int({}));", texel, texture, coords, info.gather_component); @@ -332,7 +349,6 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, info.gather_component); return; } - // TODO: Query sparseTexels extension support if (offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},int({})));", *sparse_inst, texture, coords, texel, info.gather_component); @@ -358,7 +374,12 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& inde const auto texture{Texture(ctx, info, index)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; const auto sparse_inst{PrepareSparse(inst)}; - if (!sparse_inst) { + const bool supports_sparse{ctx.profile.support_gl_sparse_textures}; + if (sparse_inst && !supports_sparse) { + // LOG_WARNING(..., "Device does not support sparse texture queries. 
STUBBING"); + ctx.AddU1("{}=true;", *sparse_inst); + } + if (!sparse_inst || !supports_sparse) { if (offset.IsEmpty()) { ctx.Add("{}=textureGather({},{},{});", texel, texture, coords, dref); return; @@ -373,7 +394,6 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& inde ctx.Add("{}=textureGatherOffsets({},{},{},{});", texel, texture, coords, dref, offsets); return; } - // TODO: Query sparseTexels extension support if (offset.IsEmpty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},{}));", *sparse_inst, texture, coords, dref, texel); @@ -404,7 +424,12 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, const auto texture{Texture(ctx, info, index)}; const auto sparse_inst{PrepareSparse(inst)}; const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)}; - if (!sparse_inst) { + const bool supports_sparse{ctx.profile.support_gl_sparse_textures}; + if (sparse_inst && !supports_sparse) { + // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING"); + ctx.AddU1("{}=true;", *sparse_inst); + } + if (!sparse_inst || !supports_sparse) { if (!offset.empty()) { ctx.Add("{}=texelFetchOffset({},{},int({}),{});", texel, texture, CoordsCastToInt(coords, info), lod, CoordsCastToInt(offset, info)); @@ -418,7 +443,6 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, } return; } - // TODO: Query sparseTexels extension support if (!offset.empty()) { ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));", *sparse_inst, texture, CastToIntVec(coords, info), lod, -- cgit v1.2.3 From ae4e452759573d145738688d9284077934e61ae4 Mon Sep 17 00:00:00 2001 From: ameerj <52414509+ameerj@users.noreply.github.com> Date: Mon, 14 Jun 2021 11:32:28 -0400 Subject: glsl: Add Shader_GLSL logging --- .../backend/glsl/emit_glsl_image.cpp | 24 ++++++++++++---------- 1 file changed, 13 insertions(+), 11 deletions(-) (limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp') diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp index c6b3df9c9..447eb8e0a 100644 --- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp +++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp @@ -96,7 +96,7 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) { } const bool has_var_aoffi{ctx.profile.support_gl_variable_aoffi}; if (!has_var_aoffi) { - // LOG_WARNING("Device does not support variable texture offsets, STUBBING"); + LOG_WARNING(Shader_GLSL, "Device does not support variable texture offsets, STUBBING"); } const auto offset_str{has_var_aoffi ? 
From ae4e452759573d145738688d9284077934e61ae4 Mon Sep 17 00:00:00 2001
From: ameerj <52414509+ameerj@users.noreply.github.com>
Date: Mon, 14 Jun 2021 11:32:28 -0400
Subject: glsl: Add Shader_GLSL logging

---
 .../backend/glsl/emit_glsl_image.cpp               | 24 ++++++++++++----------
 1 file changed, 13 insertions(+), 11 deletions(-)

(limited to 'src/shader_recompiler/backend/glsl/emit_glsl_image.cpp')

diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
index c6b3df9c9..447eb8e0a 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_image.cpp
@@ -96,7 +96,7 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
     }
     const bool has_var_aoffi{ctx.profile.support_gl_variable_aoffi};
     if (!has_var_aoffi) {
-        // LOG_WARNING("Device does not support variable texture offsets, STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support variable texture offsets, STUBBING");
     }
     const auto offset_str{has_var_aoffi ? ctx.var_alloc.Consume(offset) : "0"};
     switch (offset.Type()) {
@@ -116,7 +116,7 @@ std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
 std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) {
     const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
     if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
-        // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
+        LOG_WARNING(Shader_GLSL, "Not all arguments in PTP are immediate, STUBBING");
         return "ivec2[](ivec2(0), ivec2(1), ivec2(2), ivec2(3))";
     }
     const IR::Opcode opcode{values[0]->GetOpcode()};
@@ -152,7 +152,7 @@ void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -196,7 +196,7 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Valu
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -239,9 +239,10 @@ void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::
     const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod &&
                         ctx.stage != Stage::Fragment && needs_shadow_ext};
     if (use_grad) {
-        // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
+        LOG_WARNING(Shader_GLSL,
+                    "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
         if (info.type == TextureType::ColorArrayCube) {
-            // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing");
+            LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
             ctx.AddF32("{}=0.0f;", inst);
             return;
         }
@@ -291,9 +292,10 @@ void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::
     const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext};
     const auto cast{needs_shadow_ext ? "vec4" : "vec3"};
     if (use_grad) {
-        // LOG_WARNING(..., "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
+        LOG_WARNING(Shader_GLSL,
+                    "Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
         if (info.type == TextureType::ColorArrayCube) {
-            // LOG_WARNING(..., "textureGrad does not support ColorArrayCube. Stubbing");
+            LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
             ctx.AddF32("{}=0.0f;", inst);
             return;
         }
@@ -329,7 +331,7 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -376,7 +378,7 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& inde
     const auto sparse_inst{PrepareSparse(inst)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
@@ -426,7 +428,7 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
     if (sparse_inst && !supports_sparse) {
-        // LOG_WARNING(..., "Device does not support sparse texture queries. STUBBING");
+        LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
         ctx.AddU1("{}=true;", *sparse_inst);
     }
     if (!sparse_inst || !supports_sparse) {
--
cgit v1.2.3