Diffstat (limited to 'src/video_core/renderer_opengl')
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp           3
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.h             2
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp           310
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h              60
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.cpp     432
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.h        14
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp           1
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h             3
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp   1487
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_gen.cpp            14
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_gen.h               2
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_manager.cpp        10
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_manager.h          29
-rw-r--r--  src/video_core/renderer_opengl/gl_state.cpp                247
-rw-r--r--  src/video_core/renderer_opengl/gl_state.h                   61
-rw-r--r--  src/video_core/renderer_opengl/maxwell_to_gl.h              11
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp          10
17 files changed, 1363 insertions, 1333 deletions
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 075192c3f..46a6c0308 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -76,7 +76,7 @@ std::tuple<u8*, GLintptr> OGLBufferCache::ReserveMemory(std::size_t size, std::s
return std::make_tuple(uploaded_ptr, uploaded_offset);
}
-void OGLBufferCache::Map(std::size_t max_size) {
+bool OGLBufferCache::Map(std::size_t max_size) {
bool invalidate;
std::tie(buffer_ptr, buffer_offset_base, invalidate) =
stream_buffer.Map(static_cast<GLsizeiptr>(max_size), 4);
@@ -85,6 +85,7 @@ void OGLBufferCache::Map(std::size_t max_size) {
if (invalidate) {
InvalidateAll();
}
+ return invalidate;
}
void OGLBufferCache::Unmap() {
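
Map() now reports whether the stream buffer had to be invalidated while mapping, so callers can drop any state that still references offsets from the previous allocation. A minimal caller sketch, with a hypothetical helper name, not part of the patch:

    // buffer_cache is an OGLBufferCache; required_size covers this draw's uploads.
    const bool invalidated = buffer_cache.Map(required_size);
    if (invalidated) {
        // Every (pointer, offset) pair handed out before this Map() is stale.
        MarkCachedBindingsDirty(); // hypothetical helper on the caller's side
    }
    // ... ReserveMemory()/upload vertex, index and constant buffer data ...
    buffer_cache.Unmap();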
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index 91fca3f6c..c11acfb79 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -50,7 +50,7 @@ public:
/// Reserves memory to be used by host's CPU. Returns mapped address and offset.
std::tuple<u8*, GLintptr> ReserveMemory(std::size_t size, std::size_t alignment = 4);
- void Map(std::size_t max_size);
+ bool Map(std::size_t max_size);
void Unmap();
GLuint GetHandle() const;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 54cc47a9b..9e93bd609 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -88,27 +88,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, ScreenInfo
state.texture_units[i].sampler = texture_samplers[i].sampler.handle;
}
- GLint ext_num;
- glGetIntegerv(GL_NUM_EXTENSIONS, &ext_num);
- for (GLint i = 0; i < ext_num; i++) {
- const std::string_view extension{
- reinterpret_cast<const char*>(glGetStringi(GL_EXTENSIONS, i))};
-
- if (extension == "GL_ARB_direct_state_access") {
- has_ARB_direct_state_access = true;
- } else if (extension == "GL_ARB_multi_bind") {
- has_ARB_multi_bind = true;
- } else if (extension == "GL_ARB_separate_shader_objects") {
- has_ARB_separate_shader_objects = true;
- } else if (extension == "GL_ARB_vertex_attrib_binding") {
- has_ARB_vertex_attrib_binding = true;
- }
- }
-
- ASSERT_MSG(has_ARB_separate_shader_objects, "has_ARB_separate_shader_objects is unsupported");
OpenGLState::ApplyDefaultState();
- // Clipping plane 0 is always enabled for PICA fixed clip plane z <= 0
- state.clip_distance[0] = true;
// Create render framebuffer
framebuffer.Create();
@@ -120,10 +100,24 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, ScreenInfo
glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &uniform_buffer_alignment);
LOG_CRITICAL(Render_OpenGL, "Sync fixed function OpenGL state here!");
+ CheckExtensions();
}
RasterizerOpenGL::~RasterizerOpenGL() {}
+void RasterizerOpenGL::CheckExtensions() {
+ if (!GLAD_GL_ARB_texture_filter_anisotropic && !GLAD_GL_EXT_texture_filter_anisotropic) {
+ LOG_WARNING(
+ Render_OpenGL,
+ "Anisotropic filter is not supported! This can cause graphical issues in some games.");
+ }
+ if (!GLAD_GL_ARB_buffer_storage) {
+ LOG_WARNING(
+ Render_OpenGL,
+ "Buffer storage control is not supported! This can cause performance degradation.");
+ }
+}
+
void RasterizerOpenGL::SetupVertexFormat() {
auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
const auto& regs = gpu.regs;
@@ -183,15 +177,25 @@ void RasterizerOpenGL::SetupVertexFormat() {
}
state.draw.vertex_array = VAO.handle;
state.ApplyVertexBufferState();
+
+ // Rebinding the VAO invalidates the vertex buffer bindings.
+ gpu.dirty_flags.vertex_array = 0xFFFFFFFF;
}
void RasterizerOpenGL::SetupVertexBuffer() {
- MICROPROFILE_SCOPE(OpenGL_VB);
- const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
+ auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
const auto& regs = gpu.regs;
+ if (!gpu.dirty_flags.vertex_array)
+ return;
+
+ MICROPROFILE_SCOPE(OpenGL_VB);
+
// Upload all guest vertex arrays sequentially to our buffer
for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
+ if (~gpu.dirty_flags.vertex_array & (1u << index))
+ continue;
+
const auto& vertex_array = regs.vertex_array[index];
if (!vertex_array.IsEnabled())
continue;
@@ -218,6 +222,8 @@ void RasterizerOpenGL::SetupVertexBuffer() {
// Implicit set by glBindVertexBuffer. Stupid glstate handling...
state.draw.vertex_buffer = buffer_cache.GetHandle();
+
+ gpu.dirty_flags.vertex_array = 0;
}
DrawParameters RasterizerOpenGL::SetupDraw() {
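
The vertex-array path above is driven by a per-array dirty bitmask: bit i of dirty_flags.vertex_array marks guest array i for re-upload, 0xFFFFFFFF marks all of them, and 0 allows the early-out. A rough standalone sketch of the same pattern, with illustrative names only:

    struct DirtyFlags {
        u32 vertex_array = 0xFFFFFFFF; // start fully dirty
    };

    void MarkVertexArrayDirty(DirtyFlags& flags, u32 index) {
        flags.vertex_array |= 1u << index; // a register write touched array `index`
    }

    void UploadDirtyVertexArrays(DirtyFlags& flags) {
        if (!flags.vertex_array)
            return; // nothing changed since the last draw
        for (u32 index = 0; index < 32; ++index) {
            if (~flags.vertex_array & (1u << index))
                continue; // this array is still clean
            // ... upload guest vertex array `index` to the stream buffer ...
        }
        flags.vertex_array = 0; // all uploads done, clear the mask
    }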
@@ -276,6 +282,7 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
// shaders. The constbuffer bindpoint starts after the shader stage configuration bind points.
u32 current_constbuffer_bindpoint = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
u32 current_texture_bindpoint = 0;
+ std::array<bool, Maxwell::NumClipDistances> clip_distances{};
for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
const auto& shader_config = gpu.regs.shader_config[index];
@@ -336,12 +343,22 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
current_texture_bindpoint = SetupTextures(static_cast<Maxwell::ShaderStage>(stage), shader,
primitive_mode, current_texture_bindpoint);
+ // Workaround for Intel drivers.
+ // When a clip distance is enabled but not set in the shader it crops parts of the screen
+ // (sometimes it's half the screen, sometimes three quarters). To avoid this, enable the
+ // clip distances only when it's written by a shader stage.
+ for (std::size_t i = 0; i < Maxwell::NumClipDistances; ++i) {
+ clip_distances[i] |= shader->GetShaderEntries().clip_distances[i];
+ }
+
// When VertexA is enabled, we have dual vertex shaders
if (program == Maxwell::ShaderProgram::VertexA) {
// VertexB was combined with VertexA, so we skip the VertexB iteration
index++;
}
}
+
+ SyncClipEnabled(clip_distances);
}
std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
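
The Intel workaround boils down to enabling GL clip distance i only when the guest register requests it and at least one shader stage actually writes gl_ClipDistance[i]. Condensed, with illustrative names:

    std::array<bool, 8> written_by_shaders{};
    for (const auto& shader : active_stages) // illustrative container of shader stages
        for (std::size_t i = 0; i < written_by_shaders.size(); ++i)
            written_by_shaders[i] |= shader->GetShaderEntries().clip_distances[i];

    for (std::size_t i = 0; i < written_by_shaders.size(); ++i)
        state.clip_distance[i] = register_enabled[i] && written_by_shaders[i];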
@@ -424,7 +441,7 @@ void RasterizerOpenGL::ConfigureFramebuffers(OpenGLState& current_state, bool us
// TODO(bunnei): Figure out how the below register works. According to envytools, this should be
// used to enable multiple render targets. However, it is left unset on all games that I have
// tested.
- ASSERT_MSG(regs.rt_separate_frag_data == 0, "Unimplemented");
+ UNIMPLEMENTED_IF(regs.rt_separate_frag_data != 0);
// Bind the framebuffer surfaces
current_state.draw.draw_framebuffer = framebuffer.handle;
@@ -544,6 +561,30 @@ void RasterizerOpenGL::Clear() {
ASSERT_MSG(regs.zeta_enable != 0, "Tried to clear stencil but buffer is not enabled!");
use_stencil = true;
clear_state.stencil.test_enabled = true;
+ if (regs.clear_flags.stencil) {
+ // Stencil affects the clear so fill it with the used masks
+ clear_state.stencil.front.test_func = GL_ALWAYS;
+ clear_state.stencil.front.test_mask = regs.stencil_front_func_mask;
+ clear_state.stencil.front.action_stencil_fail = GL_KEEP;
+ clear_state.stencil.front.action_depth_fail = GL_KEEP;
+ clear_state.stencil.front.action_depth_pass = GL_KEEP;
+ clear_state.stencil.front.write_mask = regs.stencil_front_mask;
+ if (regs.stencil_two_side_enable) {
+ clear_state.stencil.back.test_func = GL_ALWAYS;
+ clear_state.stencil.back.test_mask = regs.stencil_back_func_mask;
+ clear_state.stencil.back.action_stencil_fail = GL_KEEP;
+ clear_state.stencil.back.action_depth_fail = GL_KEEP;
+ clear_state.stencil.back.action_depth_pass = GL_KEEP;
+ clear_state.stencil.back.write_mask = regs.stencil_back_mask;
+ } else {
+ clear_state.stencil.back.test_func = GL_ALWAYS;
+ clear_state.stencil.back.test_mask = 0xFFFFFFFF;
+ clear_state.stencil.back.write_mask = 0xFFFFFFFF;
+ clear_state.stencil.back.action_stencil_fail = GL_KEEP;
+ clear_state.stencil.back.action_depth_fail = GL_KEEP;
+ clear_state.stencil.back.action_depth_pass = GL_KEEP;
+ }
+ }
}
if (!use_color && !use_depth && !use_stencil) {
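
The stencil block above exists because OpenGL stencil clears are themselves masked by the current stencil write mask, so the emulated clear has to mirror whatever masks the guest set. A tiny standalone illustration of that GL behaviour (assumes a current context):

    glStencilMask(0x0F);            // only the low nibble of each stencil value is writable
    glClearStencil(0xFF);
    glClear(GL_STENCIL_BUFFER_BIT); // each texel becomes (old & 0xF0) | 0x0F, not 0xFF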
@@ -555,6 +596,14 @@ void RasterizerOpenGL::Clear() {
ConfigureFramebuffers(clear_state, use_color, use_depth || use_stencil, false,
regs.clear_buffers.RT.Value());
+ if (regs.clear_flags.scissor) {
+ SyncScissorTest(clear_state);
+ }
+
+ if (regs.clear_flags.viewport) {
+ clear_state.EmulateViewportWithScissor();
+ }
+
clear_state.Apply();
if (use_color) {
@@ -575,25 +624,27 @@ void RasterizerOpenGL::DrawArrays() {
return;
MICROPROFILE_SCOPE(OpenGL_Drawing);
- const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
+ auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
const auto& regs = gpu.regs;
ScopeAcquireGLContext acquire_context{emu_window};
ConfigureFramebuffers(state);
SyncColorMask();
+ SyncFragmentColorClampState();
+ SyncMultiSampleState();
SyncDepthTestState();
SyncStencilTestState();
SyncBlendState();
SyncLogicOpState();
SyncCullMode();
SyncPrimitiveRestart();
- SyncScissorTest();
+ SyncScissorTest(state);
// Alpha Testing is synced on shaders.
SyncTransformFeedback();
SyncPointState();
CheckAlphaTests();
-
+ SyncPolygonOffset();
// TODO(bunnei): Sync framebuffer_scale uniform here
// TODO(bunnei): Sync scissorbox uniform(s) here
@@ -626,7 +677,11 @@ void RasterizerOpenGL::DrawArrays() {
// Add space for at least 18 constant buffers
buffer_size += Maxwell::MaxConstBuffers * (MaxConstbufferSize + uniform_buffer_alignment);
- buffer_cache.Map(buffer_size);
+ bool invalidate = buffer_cache.Map(buffer_size);
+ if (invalidate) {
+ // As all cached buffers are invalidated, we need to recheck their state.
+ gpu.dirty_flags.vertex_array = 0xFFFFFFFF;
+ }
SetupVertexFormat();
SetupVertexBuffer();
@@ -642,7 +697,7 @@ void RasterizerOpenGL::DrawArrays() {
params.DispatchDraw();
// Disable scissor test
- state.scissor.enabled = false;
+ state.viewports[0].scissor.enabled = false;
accelerate_draw = AccelDraw::Disabled;
@@ -733,9 +788,8 @@ void RasterizerOpenGL::SamplerInfo::Create() {
glSamplerParameteri(sampler.handle, GL_TEXTURE_COMPARE_FUNC, GL_NEVER);
}
-void RasterizerOpenGL::SamplerInfo::SyncWithConfig(const Tegra::Texture::FullTextureInfo& info) {
+void RasterizerOpenGL::SamplerInfo::SyncWithConfig(const Tegra::Texture::TSCEntry& config) {
const GLuint s = sampler.handle;
- const Tegra::Texture::TSCEntry& config = info.tsc;
if (mag_filter != config.mag_filter) {
mag_filter = config.mag_filter;
glSamplerParameteri(
@@ -777,30 +831,50 @@ void RasterizerOpenGL::SamplerInfo::SyncWithConfig(const Tegra::Texture::FullTex
MaxwellToGL::DepthCompareFunc(depth_compare_func));
}
- if (wrap_u == Tegra::Texture::WrapMode::Border || wrap_v == Tegra::Texture::WrapMode::Border ||
- wrap_p == Tegra::Texture::WrapMode::Border) {
- const GLvec4 new_border_color = {{config.border_color_r, config.border_color_g,
- config.border_color_b, config.border_color_a}};
- if (border_color != new_border_color) {
- border_color = new_border_color;
- glSamplerParameterfv(s, GL_TEXTURE_BORDER_COLOR, border_color.data());
- }
+ GLvec4 new_border_color;
+ if (config.srgb_conversion) {
+ new_border_color[0] = config.srgb_border_color_r / 255.0f;
+ new_border_color[1] = config.srgb_border_color_g / 255.0f;
+ new_border_color[2] = config.srgb_border_color_b / 255.0f;
+ } else {
+ new_border_color[0] = config.border_color_r;
+ new_border_color[1] = config.border_color_g;
+ new_border_color[2] = config.border_color_b;
}
- if (info.tic.use_header_opt_control == 0) {
+ new_border_color[3] = config.border_color_a;
+
+ if (border_color != new_border_color) {
+ border_color = new_border_color;
+ glSamplerParameterfv(s, GL_TEXTURE_BORDER_COLOR, border_color.data());
+ }
+
+ const float anisotropic_max = static_cast<float>(1 << config.max_anisotropy.Value());
+ if (anisotropic_max != max_anisotropic) {
+ max_anisotropic = anisotropic_max;
if (GLAD_GL_ARB_texture_filter_anisotropic) {
- glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY,
- static_cast<float>(1 << info.tic.max_anisotropy.Value()));
+ glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY, max_anisotropic);
} else if (GLAD_GL_EXT_texture_filter_anisotropic) {
- glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY_EXT,
- static_cast<float>(1 << info.tic.max_anisotropy.Value()));
+ glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_anisotropic);
}
- glSamplerParameterf(s, GL_TEXTURE_MIN_LOD,
- static_cast<float>(info.tic.res_min_mip_level.Value()));
- glSamplerParameterf(s, GL_TEXTURE_MAX_LOD,
- static_cast<float>(info.tic.res_max_mip_level.Value() == 0
- ? 16
- : info.tic.res_max_mip_level.Value()));
- glSamplerParameterf(s, GL_TEXTURE_LOD_BIAS, info.tic.mip_lod_bias.Value() / 256.f);
+ }
+ const float lod_min = static_cast<float>(config.min_lod_clamp.Value()) / 256.0f;
+ if (lod_min != min_lod) {
+ min_lod = lod_min;
+ glSamplerParameterf(s, GL_TEXTURE_MIN_LOD, min_lod);
+ }
+
+ const float lod_max = static_cast<float>(config.max_lod_clamp.Value()) / 256.0f;
+ if (lod_max != max_lod) {
+ max_lod = lod_max;
+ glSamplerParameterf(s, GL_TEXTURE_MAX_LOD, max_lod);
+ }
+ const u32 bias = config.mip_lod_bias.Value();
+ // Sign extend the 13-bit value.
+ constexpr u32 mask = 1U << (13 - 1);
+ const float bias_lod = static_cast<s32>((bias ^ mask) - mask) / 256.f;
+ if (lod_bias != bias_lod) {
+ lod_bias = bias_lod;
+ glSamplerParameterf(s, GL_TEXTURE_LOD_BIAS, lod_bias);
}
}
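
The mip_lod_bias field is a 13-bit two's-complement fixed-point value in 1/256 steps, and (bias ^ mask) - mask is the usual branch-free sign extension. A short worked example in plain C++, outside the patch:

    constexpr u32 mask = 1U << (13 - 1); // 0x1000, the sign bit of the 13-bit field
    const s32 a = static_cast<s32>((0x0100u ^ mask) - mask); //   256 ->  +1.0 LOD after / 256.f
    const s32 b = static_cast<s32>((0x1F00u ^ mask) - mask); //  -256 ->  -1.0 LOD
    const s32 c = static_cast<s32>((0x1000u ^ mask) - mask); // -4096 -> -16.0 LOD (most negative)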
@@ -899,7 +973,7 @@ u32 RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, Shader& shader,
continue;
}
- texture_samplers[current_bindpoint].SyncWithConfig(texture);
+ texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc);
Surface surface = res_cache.GetTextureSurface(texture, entry);
if (surface != nullptr) {
state.texture_units[current_bindpoint].texture = surface->Texture().handle;
@@ -923,24 +997,42 @@ u32 RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, Shader& shader,
void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) {
const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
- for (size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
- const MathUtil::Rectangle<s32> viewport_rect{regs.viewport_transform[i].GetRect()};
+ const bool geometry_shaders_enabled =
+ regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry));
+ const std::size_t viewport_count =
+ geometry_shaders_enabled ? Tegra::Engines::Maxwell3D::Regs::NumViewports : 1;
+ for (std::size_t i = 0; i < viewport_count; i++) {
auto& viewport = current_state.viewports[i];
+ const auto& src = regs.viewports[i];
+ const MathUtil::Rectangle<s32> viewport_rect{regs.viewport_transform[i].GetRect()};
viewport.x = viewport_rect.left;
viewport.y = viewport_rect.bottom;
- viewport.width = static_cast<GLfloat>(viewport_rect.GetWidth());
- viewport.height = static_cast<GLfloat>(viewport_rect.GetHeight());
- viewport.depth_range_far = regs.viewport[i].depth_range_far;
- viewport.depth_range_near = regs.viewport[i].depth_range_near;
+ viewport.width = viewport_rect.GetWidth();
+ viewport.height = viewport_rect.GetHeight();
+ viewport.depth_range_far = regs.viewports[i].depth_range_far;
+ viewport.depth_range_near = regs.viewports[i].depth_range_near;
}
+ state.depth_clamp.far_plane = regs.view_volume_clip_control.depth_clamp_far != 0;
+ state.depth_clamp.near_plane = regs.view_volume_clip_control.depth_clamp_near != 0;
}
-void RasterizerOpenGL::SyncClipEnabled() {
- UNREACHABLE();
+void RasterizerOpenGL::SyncClipEnabled(
+ const std::array<bool, Maxwell::Regs::NumClipDistances>& clip_mask) {
+
+ const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const std::array<bool, Maxwell::Regs::NumClipDistances> reg_state{
+ regs.clip_distance_enabled.c0 != 0, regs.clip_distance_enabled.c1 != 0,
+ regs.clip_distance_enabled.c2 != 0, regs.clip_distance_enabled.c3 != 0,
+ regs.clip_distance_enabled.c4 != 0, regs.clip_distance_enabled.c5 != 0,
+ regs.clip_distance_enabled.c6 != 0, regs.clip_distance_enabled.c7 != 0};
+
+ for (std::size_t i = 0; i < Maxwell::Regs::NumClipDistances; ++i) {
+ state.clip_distance[i] = reg_state[i] && clip_mask[i];
+ }
}
void RasterizerOpenGL::SyncClipCoef() {
- UNREACHABLE();
+ UNIMPLEMENTED();
}
void RasterizerOpenGL::SyncCullMode() {
@@ -1022,7 +1114,9 @@ void RasterizerOpenGL::SyncStencilTestState() {
void RasterizerOpenGL::SyncColorMask() {
const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
- for (size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
+ const std::size_t count =
+ regs.independent_blend_enable ? Tegra::Engines::Maxwell3D::Regs::NumRenderTargets : 1;
+ for (std::size_t i = 0; i < count; i++) {
const auto& source = regs.color_mask[regs.color_mask_common ? 0 : i];
auto& dest = state.color_mask[i];
dest.red_enabled = (source.R == 0) ? GL_FALSE : GL_TRUE;
@@ -1032,6 +1126,17 @@ void RasterizerOpenGL::SyncColorMask() {
}
}
+void RasterizerOpenGL::SyncMultiSampleState() {
+ const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ state.multisample_control.alpha_to_coverage = regs.multisample_control.alpha_to_coverage != 0;
+ state.multisample_control.alpha_to_one = regs.multisample_control.alpha_to_one != 0;
+}
+
+void RasterizerOpenGL::SyncFragmentColorClampState() {
+ const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ state.fragment_color_clamp.enabled = regs.frag_color_clamp != 0;
+}
+
void RasterizerOpenGL::SyncBlendState() {
const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
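
How OpenGLState applies these two new fields is outside this diff; a plausible mapping onto standard GL calls looks like the sketch below (illustrative only; the clamp call comes from ARB_color_buffer_float and is not accepted on strict core profiles):

    if (multisample_control.alpha_to_coverage)
        glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE);
    else
        glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);

    if (multisample_control.alpha_to_one)
        glEnable(GL_SAMPLE_ALPHA_TO_ONE);
    else
        glDisable(GL_SAMPLE_ALPHA_TO_ONE);

    // Fragment color clamping to [0, 1] before blending
    glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB, fragment_color_clamp.enabled ? GL_TRUE : GL_FALSE);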
@@ -1043,43 +1148,40 @@ void RasterizerOpenGL::SyncBlendState() {
state.independant_blend.enabled = regs.independent_blend_enable;
if (!state.independant_blend.enabled) {
auto& blend = state.blend[0];
- blend.enabled = regs.blend.enable[0] != 0;
- blend.separate_alpha = regs.blend.separate_alpha;
- blend.rgb_equation = MaxwellToGL::BlendEquation(regs.blend.equation_rgb);
- blend.src_rgb_func = MaxwellToGL::BlendFunc(regs.blend.factor_source_rgb);
- blend.dst_rgb_func = MaxwellToGL::BlendFunc(regs.blend.factor_dest_rgb);
- if (blend.separate_alpha) {
- blend.a_equation = MaxwellToGL::BlendEquation(regs.blend.equation_a);
- blend.src_a_func = MaxwellToGL::BlendFunc(regs.blend.factor_source_a);
- blend.dst_a_func = MaxwellToGL::BlendFunc(regs.blend.factor_dest_a);
+ const auto& src = regs.blend;
+ blend.enabled = src.enable[0] != 0;
+ if (blend.enabled) {
+ blend.rgb_equation = MaxwellToGL::BlendEquation(src.equation_rgb);
+ blend.src_rgb_func = MaxwellToGL::BlendFunc(src.factor_source_rgb);
+ blend.dst_rgb_func = MaxwellToGL::BlendFunc(src.factor_dest_rgb);
+ blend.a_equation = MaxwellToGL::BlendEquation(src.equation_a);
+ blend.src_a_func = MaxwellToGL::BlendFunc(src.factor_source_a);
+ blend.dst_a_func = MaxwellToGL::BlendFunc(src.factor_dest_a);
}
- for (size_t i = 1; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
+ for (std::size_t i = 1; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
state.blend[i].enabled = false;
}
return;
}
- for (size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
+ for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
auto& blend = state.blend[i];
+ const auto& src = regs.independent_blend[i];
blend.enabled = regs.blend.enable[i] != 0;
if (!blend.enabled)
continue;
- blend.separate_alpha = regs.independent_blend[i].separate_alpha;
- blend.rgb_equation = MaxwellToGL::BlendEquation(regs.independent_blend[i].equation_rgb);
- blend.src_rgb_func = MaxwellToGL::BlendFunc(regs.independent_blend[i].factor_source_rgb);
- blend.dst_rgb_func = MaxwellToGL::BlendFunc(regs.independent_blend[i].factor_dest_rgb);
- if (blend.separate_alpha) {
- blend.a_equation = MaxwellToGL::BlendEquation(regs.independent_blend[i].equation_a);
- blend.src_a_func = MaxwellToGL::BlendFunc(regs.independent_blend[i].factor_source_a);
- blend.dst_a_func = MaxwellToGL::BlendFunc(regs.independent_blend[i].factor_dest_a);
- }
+ blend.rgb_equation = MaxwellToGL::BlendEquation(src.equation_rgb);
+ blend.src_rgb_func = MaxwellToGL::BlendFunc(src.factor_source_rgb);
+ blend.dst_rgb_func = MaxwellToGL::BlendFunc(src.factor_dest_rgb);
+ blend.a_equation = MaxwellToGL::BlendEquation(src.equation_a);
+ blend.src_a_func = MaxwellToGL::BlendFunc(src.factor_source_a);
+ blend.dst_a_func = MaxwellToGL::BlendFunc(src.factor_dest_a);
}
}
void RasterizerOpenGL::SyncLogicOpState() {
const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
- // TODO(Subv): Support more than just render target 0.
state.logic_op.enabled = regs.logic_op.enable != 0;
if (!state.logic_op.enabled)
@@ -1091,20 +1193,26 @@ void RasterizerOpenGL::SyncLogicOpState() {
state.logic_op.operation = MaxwellToGL::LogicOp(regs.logic_op.operation);
}
-void RasterizerOpenGL::SyncScissorTest() {
- // TODO: what is the correct behavior here, a single scissor for all targets
- // or scissor disabled for the rest of the targets?
+void RasterizerOpenGL::SyncScissorTest(OpenGLState& current_state) {
const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
- state.scissor.enabled = (regs.scissor_test.enable != 0);
- if (regs.scissor_test.enable == 0) {
- return;
+ const bool geometry_shaders_enabled =
+ regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry));
+ const std::size_t viewport_count =
+ geometry_shaders_enabled ? Tegra::Engines::Maxwell3D::Regs::NumViewports : 1;
+ for (std::size_t i = 0; i < viewport_count; i++) {
+ const auto& src = regs.scissor_test[i];
+ auto& dst = current_state.viewports[i].scissor;
+ dst.enabled = (src.enable != 0);
+ if (!dst.enabled) {
+ return;
+ }
+ const u32 width = src.max_x - src.min_x;
+ const u32 height = src.max_y - src.min_y;
+ dst.x = src.min_x;
+ dst.y = src.min_y;
+ dst.width = width;
+ dst.height = height;
}
- const u32 width = regs.scissor_test.max_x - regs.scissor_test.min_x;
- const u32 height = regs.scissor_test.max_y - regs.scissor_test.min_y;
- state.scissor.x = regs.scissor_test.min_x;
- state.scissor.y = regs.scissor_test.min_y;
- state.scissor.width = width;
- state.scissor.height = height;
}
void RasterizerOpenGL::SyncTransformFeedback() {
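
With the scissor now tracked per viewport, applying it presumably ends in ARB_viewport_array calls rather than a single global glScissor. A sketch of applying one state snapshot, with illustrative structure names:

    for (GLuint i = 0; i < static_cast<GLuint>(viewports.size()); ++i) {
        const auto& scissor = viewports[i].scissor;
        if (scissor.enabled)
            glEnablei(GL_SCISSOR_TEST, i); // indexed scissor enable, GL 4.1 / ARB_viewport_array
        else
            glDisablei(GL_SCISSOR_TEST, i);
        glScissorIndexed(i, scissor.x, scissor.y, scissor.width, scissor.height);
    }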
@@ -1118,11 +1226,17 @@ void RasterizerOpenGL::SyncTransformFeedback() {
void RasterizerOpenGL::SyncPointState() {
const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ state.point.size = regs.point_size;
+}
- // TODO(Rodrigo): Most games do not set a point size. I think this is a case of a
- // register carrying a default value. For now, if the point size is zero, assume it's
- // OpenGL's default (1).
- state.point.size = regs.point_size == 0 ? 1 : regs.point_size;
+void RasterizerOpenGL::SyncPolygonOffset() {
+ const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ state.polygon_offset.fill_enable = regs.polygon_offset_fill_enable != 0;
+ state.polygon_offset.line_enable = regs.polygon_offset_line_enable != 0;
+ state.polygon_offset.point_enable = regs.polygon_offset_point_enable != 0;
+ state.polygon_offset.units = regs.polygon_offset_units;
+ state.polygon_offset.factor = regs.polygon_offset_factor;
+ state.polygon_offset.clamp = regs.polygon_offset_clamp;
}
void RasterizerOpenGL::CheckAlphaTests() {
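
The polygon-offset state maps almost directly onto the classic GL calls; only the clamp value needs GL 4.6 or ARB/EXT_polygon_offset_clamp. A hedged sketch of an apply step (assumes the glad loader exposes the extension flag):

    const auto toggle = [](GLenum cap, bool enabled) { enabled ? glEnable(cap) : glDisable(cap); };
    toggle(GL_POLYGON_OFFSET_FILL, polygon_offset.fill_enable);
    toggle(GL_POLYGON_OFFSET_LINE, polygon_offset.line_enable);
    toggle(GL_POLYGON_OFFSET_POINT, polygon_offset.point_enable);

    if (GLAD_GL_ARB_polygon_offset_clamp) {
        glPolygonOffsetClamp(polygon_offset.factor, polygon_offset.units, polygon_offset.clamp);
    } else {
        glPolygonOffset(polygon_offset.factor, polygon_offset.units); // clamp silently ignored
    }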
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 8ef0f6c12..988fa3e27 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -60,20 +60,6 @@ public:
bool AccelerateDrawBatch(bool is_indexed) override;
void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) override;
- /// OpenGL shader generated for a given Maxwell register state
- struct MaxwellShader {
- /// OpenGL shader resource
- OGLProgram shader;
- };
-
- struct VertexShader {
- OGLShader shader;
- };
-
- struct FragmentShader {
- OGLShader shader;
- };
-
/// Maximum supported size that a constbuffer can have in bytes.
static constexpr std::size_t MaxConstbufferSize = 0x10000;
static_assert(MaxConstbufferSize % sizeof(GLvec4) == 0,
@@ -88,18 +74,23 @@ private:
/// SamplerInfo struct.
void Create();
/// Syncs the sampler object with the config, updating any necessary state.
- void SyncWithConfig(const Tegra::Texture::FullTextureInfo& info);
+ void SyncWithConfig(const Tegra::Texture::TSCEntry& info);
private:
- Tegra::Texture::TextureFilter mag_filter;
- Tegra::Texture::TextureFilter min_filter;
- Tegra::Texture::TextureMipmapFilter mip_filter;
- Tegra::Texture::WrapMode wrap_u;
- Tegra::Texture::WrapMode wrap_v;
- Tegra::Texture::WrapMode wrap_p;
- bool uses_depth_compare;
- Tegra::Texture::DepthCompareFunc depth_compare_func;
- GLvec4 border_color;
+ Tegra::Texture::TextureFilter mag_filter = Tegra::Texture::TextureFilter::Nearest;
+ Tegra::Texture::TextureFilter min_filter = Tegra::Texture::TextureFilter::Nearest;
+ Tegra::Texture::TextureMipmapFilter mip_filter = Tegra::Texture::TextureMipmapFilter::None;
+ Tegra::Texture::WrapMode wrap_u = Tegra::Texture::WrapMode::ClampToEdge;
+ Tegra::Texture::WrapMode wrap_v = Tegra::Texture::WrapMode::ClampToEdge;
+ Tegra::Texture::WrapMode wrap_p = Tegra::Texture::WrapMode::ClampToEdge;
+ bool uses_depth_compare = false;
+ Tegra::Texture::DepthCompareFunc depth_compare_func =
+ Tegra::Texture::DepthCompareFunc::Always;
+ GLvec4 border_color = {};
+ float min_lod = 0.0f;
+ float max_lod = 16.0f;
+ float lod_bias = 0.0f;
+ float max_anisotropic = 1.0f;
};
/**
@@ -137,7 +128,8 @@ private:
void SyncViewport(OpenGLState& current_state);
/// Syncs the clip enabled status to match the guest state
- void SyncClipEnabled();
+ void SyncClipEnabled(
+ const std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances>& clip_mask);
/// Syncs the clip coefficients to match the guest state
void SyncClipCoef();
@@ -160,8 +152,14 @@ private:
/// Syncs the LogicOp state to match the guest state
void SyncLogicOpState();
+ /// Syncs the fragment color clamp state
+ void SyncFragmentColorClampState();
+
+ /// Syncs the alpha-to-coverage and alpha-to-one multisample state
+ void SyncMultiSampleState();
+
/// Syncs the scissor test state to match the guest state
- void SyncScissorTest();
+ void SyncScissorTest(OpenGLState& current_state);
/// Syncs the transform feedback state to match the guest state
void SyncTransformFeedback();
@@ -172,13 +170,15 @@ private:
/// Syncs Color Mask
void SyncColorMask();
+ /// Syncs the polygon offsets
+ void SyncPolygonOffset();
+
/// Check asserts for alpha testing.
void CheckAlphaTests();
- bool has_ARB_direct_state_access = false;
- bool has_ARB_multi_bind = false;
- bool has_ARB_separate_shader_objects = false;
- bool has_ARB_vertex_attrib_binding = false;
+ /// Checks for extensions that are not strictly required
+ /// but are needed for correct emulation
+ void CheckExtensions();
OpenGLState state;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
index 894f4f294..5f4cdd119 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
@@ -15,6 +15,7 @@
#include "core/memory.h"
#include "core/settings.h"
#include "video_core/engines/maxwell_3d.h"
+#include "video_core/morton.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
#include "video_core/renderer_opengl/gl_state.h"
@@ -22,10 +23,11 @@
#include "video_core/surface.h"
#include "video_core/textures/astc.h"
#include "video_core/textures/decoders.h"
-#include "video_core/utils.h"
namespace OpenGL {
+using VideoCore::MortonSwizzle;
+using VideoCore::MortonSwizzleMode;
using VideoCore::Surface::ComponentTypeFromDepthFormat;
using VideoCore::Surface::ComponentTypeFromRenderTarget;
using VideoCore::Surface::ComponentTypeFromTexture;
@@ -95,6 +97,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only,
params.block_width = params.is_tiled ? config.tic.BlockWidth() : 0,
params.block_height = params.is_tiled ? config.tic.BlockHeight() : 0,
params.block_depth = params.is_tiled ? config.tic.BlockDepth() : 0,
+ params.tile_width_spacing = params.is_tiled ? (1 << config.tic.tile_width_spacing.Value()) : 1;
params.srgb_conversion = config.tic.IsSrgbConversionEnabled();
params.pixel_format = PixelFormatFromTextureFormat(config.tic.format, config.tic.r_type.Value(),
params.srgb_conversion);
@@ -160,6 +163,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only,
params.block_width = 1 << config.memory_layout.block_width;
params.block_height = 1 << config.memory_layout.block_height;
params.block_depth = 1 << config.memory_layout.block_depth;
+ params.tile_width_spacing = 1;
params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
@@ -195,6 +199,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only,
params.block_width = 1 << std::min(block_width, 5U);
params.block_height = 1 << std::min(block_height, 5U);
params.block_depth = 1 << std::min(block_depth, 5U);
+ params.tile_width_spacing = 1;
params.pixel_format = PixelFormatFromDepthFormat(format);
params.component_type = ComponentTypeFromDepthFormat(format);
params.type = GetFormatType(params.pixel_format);
@@ -221,6 +226,7 @@ std::size_t SurfaceParams::InnerMemorySize(bool force_gl, bool layer_only,
params.block_width = params.is_tiled ? std::min(config.BlockWidth(), 32U) : 0,
params.block_height = params.is_tiled ? std::min(config.BlockHeight(), 32U) : 0,
params.block_depth = params.is_tiled ? std::min(config.BlockDepth(), 32U) : 0,
+ params.tile_width_spacing = 1;
params.pixel_format = PixelFormatFromRenderTargetFormat(config.format);
params.srgb_conversion = config.format == Tegra::RenderTargetFormat::BGRA8_SRGB ||
config.format == Tegra::RenderTargetFormat::RGBA8_SRGB;
@@ -265,11 +271,11 @@ static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex
{GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
true}, // DXN2UNORM
{GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, ComponentType::SNorm, true}, // DXN2SNORM
- {GL_COMPRESSED_RGBA_BPTC_UNORM_ARB, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
true}, // BC7U
- {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB, GL_RGB, GL_UNSIGNED_INT_8_8_8_8,
- ComponentType::Float, true}, // BC6H_UF16
- {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
+ {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
+ true}, // BC6H_UF16
+ {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, ComponentType::Float,
true}, // BC6H_SF16
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4
{GL_RG8, GL_RG, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // G8R8U
@@ -306,14 +312,16 @@ static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex
true}, // DXT23_SRGB
{GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
true}, // DXT45_SRGB
- {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8,
- ComponentType::UNorm, true}, // BC7U_SRGB
+ {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, ComponentType::UNorm,
+ true}, // BC7U_SRGB
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_4X4_SRGB
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X8_SRGB
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X5_SRGB
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X4_SRGB
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5
{GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_5X5_SRGB
+ {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8
+ {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_10X8_SRGB
// Depth formats
{GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, ComponentType::Float, false}, // Z32F
@@ -344,7 +352,7 @@ static GLenum SurfaceTargetToGL(SurfaceTarget target) {
case SurfaceTarget::TextureCubemap:
return GL_TEXTURE_CUBE_MAP;
case SurfaceTarget::TextureCubeArray:
- return GL_TEXTURE_CUBE_MAP_ARRAY_ARB;
+ return GL_TEXTURE_CUBE_MAP_ARRAY;
}
LOG_CRITICAL(Render_OpenGL, "Unimplemented texture target={}", static_cast<u32>(target));
UNREACHABLE();
@@ -368,173 +376,7 @@ MathUtil::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const {
return {0, actual_height, MipWidth(mip_level), 0};
}
-template <bool morton_to_gl, PixelFormat format>
-void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth, u8* gl_buffer,
- std::size_t gl_buffer_size, VAddr addr) {
- constexpr u32 bytes_per_pixel = GetBytesPerPixel(format);
-
- // With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual
- // pixel values.
- const u32 tile_size_x{GetDefaultBlockWidth(format)};
- const u32 tile_size_y{GetDefaultBlockHeight(format)};
-
- if (morton_to_gl) {
- const std::vector<u8> data =
- Tegra::Texture::UnswizzleTexture(addr, tile_size_x, tile_size_y, bytes_per_pixel,
- stride, height, depth, block_height, block_depth);
- const std::size_t size_to_copy{std::min(gl_buffer_size, data.size())};
- memcpy(gl_buffer, data.data(), size_to_copy);
- } else {
- Tegra::Texture::CopySwizzledData((stride + tile_size_x - 1) / tile_size_x,
- (height + tile_size_y - 1) / tile_size_y, depth,
- bytes_per_pixel, bytes_per_pixel, Memory::GetPointer(addr),
- gl_buffer, false, block_height, block_depth);
- }
-}
-
-using GLConversionArray = std::array<void (*)(u32, u32, u32, u32, u32, u8*, std::size_t, VAddr),
- VideoCore::Surface::MaxPixelFormat>;
-
-static constexpr GLConversionArray morton_to_gl_fns = {
- // clang-format off
- MortonCopy<true, PixelFormat::ABGR8U>,
- MortonCopy<true, PixelFormat::ABGR8S>,
- MortonCopy<true, PixelFormat::ABGR8UI>,
- MortonCopy<true, PixelFormat::B5G6R5U>,
- MortonCopy<true, PixelFormat::A2B10G10R10U>,
- MortonCopy<true, PixelFormat::A1B5G5R5U>,
- MortonCopy<true, PixelFormat::R8U>,
- MortonCopy<true, PixelFormat::R8UI>,
- MortonCopy<true, PixelFormat::RGBA16F>,
- MortonCopy<true, PixelFormat::RGBA16U>,
- MortonCopy<true, PixelFormat::RGBA16UI>,
- MortonCopy<true, PixelFormat::R11FG11FB10F>,
- MortonCopy<true, PixelFormat::RGBA32UI>,
- MortonCopy<true, PixelFormat::DXT1>,
- MortonCopy<true, PixelFormat::DXT23>,
- MortonCopy<true, PixelFormat::DXT45>,
- MortonCopy<true, PixelFormat::DXN1>,
- MortonCopy<true, PixelFormat::DXN2UNORM>,
- MortonCopy<true, PixelFormat::DXN2SNORM>,
- MortonCopy<true, PixelFormat::BC7U>,
- MortonCopy<true, PixelFormat::BC6H_UF16>,
- MortonCopy<true, PixelFormat::BC6H_SF16>,
- MortonCopy<true, PixelFormat::ASTC_2D_4X4>,
- MortonCopy<true, PixelFormat::G8R8U>,
- MortonCopy<true, PixelFormat::G8R8S>,
- MortonCopy<true, PixelFormat::BGRA8>,
- MortonCopy<true, PixelFormat::RGBA32F>,
- MortonCopy<true, PixelFormat::RG32F>,
- MortonCopy<true, PixelFormat::R32F>,
- MortonCopy<true, PixelFormat::R16F>,
- MortonCopy<true, PixelFormat::R16U>,
- MortonCopy<true, PixelFormat::R16S>,
- MortonCopy<true, PixelFormat::R16UI>,
- MortonCopy<true, PixelFormat::R16I>,
- MortonCopy<true, PixelFormat::RG16>,
- MortonCopy<true, PixelFormat::RG16F>,
- MortonCopy<true, PixelFormat::RG16UI>,
- MortonCopy<true, PixelFormat::RG16I>,
- MortonCopy<true, PixelFormat::RG16S>,
- MortonCopy<true, PixelFormat::RGB32F>,
- MortonCopy<true, PixelFormat::RGBA8_SRGB>,
- MortonCopy<true, PixelFormat::RG8U>,
- MortonCopy<true, PixelFormat::RG8S>,
- MortonCopy<true, PixelFormat::RG32UI>,
- MortonCopy<true, PixelFormat::R32UI>,
- MortonCopy<true, PixelFormat::ASTC_2D_8X8>,
- MortonCopy<true, PixelFormat::ASTC_2D_8X5>,
- MortonCopy<true, PixelFormat::ASTC_2D_5X4>,
- MortonCopy<true, PixelFormat::BGRA8_SRGB>,
- MortonCopy<true, PixelFormat::DXT1_SRGB>,
- MortonCopy<true, PixelFormat::DXT23_SRGB>,
- MortonCopy<true, PixelFormat::DXT45_SRGB>,
- MortonCopy<true, PixelFormat::BC7U_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_5X4_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_5X5>,
- MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>,
- MortonCopy<true, PixelFormat::Z32F>,
- MortonCopy<true, PixelFormat::Z16>,
- MortonCopy<true, PixelFormat::Z24S8>,
- MortonCopy<true, PixelFormat::S8Z24>,
- MortonCopy<true, PixelFormat::Z32FS8>,
- // clang-format on
-};
-
-static constexpr GLConversionArray gl_to_morton_fns = {
- // clang-format off
- MortonCopy<false, PixelFormat::ABGR8U>,
- MortonCopy<false, PixelFormat::ABGR8S>,
- MortonCopy<false, PixelFormat::ABGR8UI>,
- MortonCopy<false, PixelFormat::B5G6R5U>,
- MortonCopy<false, PixelFormat::A2B10G10R10U>,
- MortonCopy<false, PixelFormat::A1B5G5R5U>,
- MortonCopy<false, PixelFormat::R8U>,
- MortonCopy<false, PixelFormat::R8UI>,
- MortonCopy<false, PixelFormat::RGBA16F>,
- MortonCopy<false, PixelFormat::RGBA16U>,
- MortonCopy<false, PixelFormat::RGBA16UI>,
- MortonCopy<false, PixelFormat::R11FG11FB10F>,
- MortonCopy<false, PixelFormat::RGBA32UI>,
- MortonCopy<false, PixelFormat::DXT1>,
- MortonCopy<false, PixelFormat::DXT23>,
- MortonCopy<false, PixelFormat::DXT45>,
- MortonCopy<false, PixelFormat::DXN1>,
- MortonCopy<false, PixelFormat::DXN2UNORM>,
- MortonCopy<false, PixelFormat::DXN2SNORM>,
- MortonCopy<false, PixelFormat::BC7U>,
- MortonCopy<false, PixelFormat::BC6H_UF16>,
- MortonCopy<false, PixelFormat::BC6H_SF16>,
- // TODO(Subv): Swizzling ASTC formats are not supported
- nullptr,
- MortonCopy<false, PixelFormat::G8R8U>,
- MortonCopy<false, PixelFormat::G8R8S>,
- MortonCopy<false, PixelFormat::BGRA8>,
- MortonCopy<false, PixelFormat::RGBA32F>,
- MortonCopy<false, PixelFormat::RG32F>,
- MortonCopy<false, PixelFormat::R32F>,
- MortonCopy<false, PixelFormat::R16F>,
- MortonCopy<false, PixelFormat::R16U>,
- MortonCopy<false, PixelFormat::R16S>,
- MortonCopy<false, PixelFormat::R16UI>,
- MortonCopy<false, PixelFormat::R16I>,
- MortonCopy<false, PixelFormat::RG16>,
- MortonCopy<false, PixelFormat::RG16F>,
- MortonCopy<false, PixelFormat::RG16UI>,
- MortonCopy<false, PixelFormat::RG16I>,
- MortonCopy<false, PixelFormat::RG16S>,
- MortonCopy<false, PixelFormat::RGB32F>,
- MortonCopy<false, PixelFormat::RGBA8_SRGB>,
- MortonCopy<false, PixelFormat::RG8U>,
- MortonCopy<false, PixelFormat::RG8S>,
- MortonCopy<false, PixelFormat::RG32UI>,
- MortonCopy<false, PixelFormat::R32UI>,
- nullptr,
- nullptr,
- nullptr,
- MortonCopy<false, PixelFormat::BGRA8_SRGB>,
- MortonCopy<false, PixelFormat::DXT1_SRGB>,
- MortonCopy<false, PixelFormat::DXT23_SRGB>,
- MortonCopy<false, PixelFormat::DXT45_SRGB>,
- MortonCopy<false, PixelFormat::BC7U_SRGB>,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- MortonCopy<false, PixelFormat::Z32F>,
- MortonCopy<false, PixelFormat::Z16>,
- MortonCopy<false, PixelFormat::Z24S8>,
- MortonCopy<false, PixelFormat::S8Z24>,
- MortonCopy<false, PixelFormat::Z32FS8>,
- // clang-format on
-};
-
-void SwizzleFunc(const GLConversionArray& functions, const SurfaceParams& params,
+void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params,
std::vector<u8>& gl_buffer, u32 mip_level) {
u32 depth = params.MipDepth(mip_level);
if (params.target == SurfaceTarget::Texture2D) {
@@ -544,157 +386,25 @@ void SwizzleFunc(const GLConversionArray& functions, const SurfaceParams& params
if (params.is_layered) {
u64 offset = params.GetMipmapLevelOffset(mip_level);
u64 offset_gl = 0;
- u64 layer_size = params.LayerMemorySize();
- u64 gl_size = params.LayerSizeGL(mip_level);
+ const u64 layer_size = params.LayerMemorySize();
+ const u64 gl_size = params.LayerSizeGL(mip_level);
for (u32 i = 0; i < params.depth; i++) {
- functions[static_cast<std::size_t>(params.pixel_format)](
- params.MipWidth(mip_level), params.MipBlockHeight(mip_level),
- params.MipHeight(mip_level), params.MipBlockDepth(mip_level), 1,
- gl_buffer.data() + offset_gl, gl_size, params.addr + offset);
+ MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
+ params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
+ params.MipBlockDepth(mip_level), params.tile_width_spacing, 1,
+ gl_buffer.data() + offset_gl, gl_size, params.addr + offset);
offset += layer_size;
offset_gl += gl_size;
}
} else {
- u64 offset = params.GetMipmapLevelOffset(mip_level);
- functions[static_cast<std::size_t>(params.pixel_format)](
- params.MipWidth(mip_level), params.MipBlockHeight(mip_level),
- params.MipHeight(mip_level), params.MipBlockDepth(mip_level), depth, gl_buffer.data(),
- gl_buffer.size(), params.addr + offset);
+ const u64 offset = params.GetMipmapLevelOffset(mip_level);
+ MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
+ params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
+ params.MipBlockDepth(mip_level), depth, params.tile_width_spacing,
+ gl_buffer.data(), gl_buffer.size(), params.addr + offset);
}
}
-MICROPROFILE_DEFINE(OpenGL_BlitSurface, "OpenGL", "BlitSurface", MP_RGB(128, 192, 64));
-static bool BlitSurface(const Surface& src_surface, const Surface& dst_surface,
- GLuint read_fb_handle, GLuint draw_fb_handle, GLenum src_attachment = 0,
- GLenum dst_attachment = 0, std::size_t cubemap_face = 0) {
- MICROPROFILE_SCOPE(OpenGL_BlitSurface);
-
- const auto& src_params{src_surface->GetSurfaceParams()};
- const auto& dst_params{dst_surface->GetSurfaceParams()};
-
- OpenGLState prev_state{OpenGLState::GetCurState()};
- SCOPE_EXIT({ prev_state.Apply(); });
-
- OpenGLState state;
- state.draw.read_framebuffer = read_fb_handle;
- state.draw.draw_framebuffer = draw_fb_handle;
- // Set sRGB enabled if the destination surfaces need it
- state.framebuffer_srgb.enabled = dst_params.srgb_conversion;
- state.ApplyFramebufferState();
-
- u32 buffers{};
-
- if (src_params.type == SurfaceType::ColorTexture) {
- switch (src_params.target) {
- case SurfaceTarget::Texture2D:
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, src_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- case SurfaceTarget::TextureCubemap:
- glFramebufferTexture2D(
- GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face),
- src_surface->Texture().handle, 0);
- glFramebufferTexture2D(
- GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face), 0, 0);
- break;
- case SurfaceTarget::Texture2DArray:
- glFramebufferTextureLayer(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- src_surface->Texture().handle, 0, 0);
- glFramebufferTextureLayer(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, 0, 0, 0);
- break;
- case SurfaceTarget::Texture3D:
- glFramebufferTexture3D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- SurfaceTargetToGL(src_params.target),
- src_surface->Texture().handle, 0, 0);
- glFramebufferTexture3D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- SurfaceTargetToGL(src_params.target), 0, 0, 0);
- break;
- default:
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, src_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- }
-
- switch (dst_params.target) {
- case SurfaceTarget::Texture2D:
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- case SurfaceTarget::TextureCubemap:
- glFramebufferTexture2D(
- GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face),
- dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(
- GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + cubemap_face), 0, 0);
- break;
- case SurfaceTarget::Texture2DArray:
- glFramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- dst_surface->Texture().handle, 0, 0);
- glFramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, 0, 0, 0);
- break;
-
- case SurfaceTarget::Texture3D:
- glFramebufferTexture3D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- SurfaceTargetToGL(dst_params.target),
- dst_surface->Texture().handle, 0, 0);
- glFramebufferTexture3D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
- SurfaceTargetToGL(dst_params.target), 0, 0, 0);
- break;
- default:
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- 0, 0);
- break;
- }
-
- buffers = GL_COLOR_BUFFER_BIT;
- } else if (src_params.type == SurfaceType::Depth) {
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
- src_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
-
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
- dst_surface->Texture().handle, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
-
- buffers = GL_DEPTH_BUFFER_BIT;
- } else if (src_params.type == SurfaceType::DepthStencil) {
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + src_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- src_surface->Texture().handle, 0);
-
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + dst_attachment,
- GL_TEXTURE_2D, 0, 0);
- glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
- dst_surface->Texture().handle, 0);
-
- buffers = GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
- }
-
- const auto& rect{src_params.GetRect()};
- glBlitFramebuffer(rect.left, rect.bottom, rect.right, rect.top, rect.left, rect.bottom,
- rect.right, rect.top, buffers,
- buffers == GL_COLOR_BUFFER_BIT ? GL_LINEAR : GL_NEAREST);
-
- return true;
-}
-
static void FastCopySurface(const Surface& src_surface, const Surface& dst_surface) {
const auto& src_params{src_surface->GetSurfaceParams()};
const auto& dst_params{dst_surface->GetSurfaceParams()};
@@ -709,21 +419,21 @@ static void FastCopySurface(const Surface& src_surface, const Surface& dst_surfa
MICROPROFILE_DEFINE(OpenGL_CopySurface, "OpenGL", "CopySurface", MP_RGB(128, 192, 64));
static void CopySurface(const Surface& src_surface, const Surface& dst_surface,
- GLuint copy_pbo_handle, GLenum src_attachment = 0,
- GLenum dst_attachment = 0, std::size_t cubemap_face = 0) {
+ const GLuint copy_pbo_handle, const GLenum src_attachment = 0,
+ const GLenum dst_attachment = 0, const std::size_t cubemap_face = 0) {
MICROPROFILE_SCOPE(OpenGL_CopySurface);
ASSERT_MSG(dst_attachment == 0, "Unimplemented");
const auto& src_params{src_surface->GetSurfaceParams()};
const auto& dst_params{dst_surface->GetSurfaceParams()};
- auto source_format = GetFormatTuple(src_params.pixel_format, src_params.component_type);
- auto dest_format = GetFormatTuple(dst_params.pixel_format, dst_params.component_type);
+ const auto source_format = GetFormatTuple(src_params.pixel_format, src_params.component_type);
+ const auto dest_format = GetFormatTuple(dst_params.pixel_format, dst_params.component_type);
- std::size_t buffer_size = std::max(src_params.size_in_bytes, dst_params.size_in_bytes);
+ const std::size_t buffer_size = std::max(src_params.size_in_bytes, dst_params.size_in_bytes);
glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle);
- glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW_ARB);
+ glBufferData(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_STREAM_DRAW);
if (source_format.compressed) {
glGetCompressedTextureImage(src_surface->Texture().handle, src_attachment,
static_cast<GLsizei>(src_params.size_in_bytes), nullptr);
@@ -744,13 +454,10 @@ static void CopySurface(const Surface& src_surface, const Surface& dst_surface,
LOG_DEBUG(HW_GPU, "Trying to upload extra texture data from the CPU during "
"reinterpretation but the texture is tiled.");
}
- std::size_t remaining_size = dst_params.size_in_bytes - src_params.size_in_bytes;
- std::vector<u8> data(remaining_size);
- std::memcpy(data.data(), Memory::GetPointer(dst_params.addr + src_params.size_in_bytes),
- data.size());
+ const std::size_t remaining_size = dst_params.size_in_bytes - src_params.size_in_bytes;
glBufferSubData(GL_PIXEL_PACK_BUFFER, src_params.size_in_bytes, remaining_size,
- data.data());
+ Memory::GetPointer(dst_params.addr + src_params.size_in_bytes));
}
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
@@ -932,7 +639,9 @@ static void ConvertFormatAsNeeded_LoadGLBuffer(std::vector<u8>& data, PixelForma
case PixelFormat::ASTC_2D_8X8_SRGB:
case PixelFormat::ASTC_2D_8X5_SRGB:
case PixelFormat::ASTC_2D_5X4_SRGB:
- case PixelFormat::ASTC_2D_5X5_SRGB: {
+ case PixelFormat::ASTC_2D_5X5_SRGB:
+ case PixelFormat::ASTC_2D_10X8:
+ case PixelFormat::ASTC_2D_10X8_SRGB: {
// Convert ASTC pixel formats to RGBA8, as most desktop GPUs do not support ASTC.
u32 block_width{};
u32 block_height{};
@@ -967,7 +676,11 @@ static void ConvertFormatAsNeeded_FlushGLBuffer(std::vector<u8>& data, PixelForm
case PixelFormat::ASTC_2D_4X4:
case PixelFormat::ASTC_2D_8X8:
case PixelFormat::ASTC_2D_4X4_SRGB:
- case PixelFormat::ASTC_2D_8X8_SRGB: {
+ case PixelFormat::ASTC_2D_8X8_SRGB:
+ case PixelFormat::ASTC_2D_5X5:
+ case PixelFormat::ASTC_2D_5X5_SRGB:
+ case PixelFormat::ASTC_2D_10X8:
+ case PixelFormat::ASTC_2D_10X8_SRGB: {
LOG_CRITICAL(HW_GPU, "Conversion of format {} after texture flushing is not implemented",
static_cast<u32>(pixel_format));
UNREACHABLE();
@@ -990,15 +703,16 @@ void CachedSurface::LoadGLBuffer() {
ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}",
params.block_width, static_cast<u32>(params.target));
for (u32 i = 0; i < params.max_mip_level; i++)
- SwizzleFunc(morton_to_gl_fns, params, gl_buffer[i], i);
+ SwizzleFunc(MortonSwizzleMode::MortonToLinear, params, gl_buffer[i], i);
} else {
const auto texture_src_data{Memory::GetPointer(params.addr)};
const auto texture_src_data_end{texture_src_data + params.size_in_bytes_gl};
gl_buffer[0].assign(texture_src_data, texture_src_data_end);
}
- for (u32 i = 0; i < params.max_mip_level; i++)
+ for (u32 i = 0; i < params.max_mip_level; i++) {
ConvertFormatAsNeeded_LoadGLBuffer(gl_buffer[i], params.pixel_format, params.MipWidth(i),
params.MipHeight(i), params.MipDepth(i));
+ }
}
MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64));
@@ -1029,7 +743,7 @@ void CachedSurface::FlushGLBuffer() {
ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}",
params.block_width, static_cast<u32>(params.target));
- SwizzleFunc(gl_to_morton_fns, params, gl_buffer[0], 0);
+ SwizzleFunc(MortonSwizzleMode::LinearToMorton, params, gl_buffer[0], 0);
} else {
std::memcpy(Memory::GetPointer(GetAddr()), gl_buffer[0].data(), GetSizeInBytes());
}
@@ -1269,6 +983,31 @@ Surface RasterizerCacheOpenGL::GetUncachedSurface(const SurfaceParams& params) {
return surface;
}
+void RasterizerCacheOpenGL::FastLayeredCopySurface(const Surface& src_surface,
+ const Surface& dst_surface) {
+ const auto& init_params{src_surface->GetSurfaceParams()};
+ const auto& dst_params{dst_surface->GetSurfaceParams()};
+ VAddr address = init_params.addr;
+ const std::size_t layer_size = dst_params.LayerMemorySize();
+ for (u32 layer = 0; layer < dst_params.depth; layer++) {
+ for (u32 mipmap = 0; mipmap < dst_params.max_mip_level; mipmap++) {
+ const VAddr sub_address = address + dst_params.GetMipmapLevelOffset(mipmap);
+ const Surface& copy = TryGet(sub_address);
+ if (!copy)
+ continue;
+ const auto& src_params{copy->GetSurfaceParams()};
+ const u32 width{std::min(src_params.width, dst_params.MipWidth(mipmap))};
+ const u32 height{std::min(src_params.height, dst_params.MipHeight(mipmap))};
+
+ glCopyImageSubData(copy->Texture().handle, SurfaceTargetToGL(src_params.target), 0, 0,
+ 0, 0, dst_surface->Texture().handle,
+ SurfaceTargetToGL(dst_params.target), mipmap, 0, 0, layer, width,
+ height, 1);
+ }
+ address += layer_size;
+ }
+}
+
void RasterizerCacheOpenGL::FermiCopySurface(
const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
const Tegra::Engines::Fermi2D::Regs::Surface& dst_config) {
@@ -1293,7 +1032,10 @@ void RasterizerCacheOpenGL::AccurateCopySurface(const Surface& src_surface,
const Surface& dst_surface) {
const auto& src_params{src_surface->GetSurfaceParams()};
const auto& dst_params{dst_surface->GetSurfaceParams()};
- FlushRegion(src_params.addr, dst_params.MemorySize());
+
+ // Flush enough memory for both the source and destination surface
+ FlushRegion(src_params.addr, std::max(src_params.MemorySize(), dst_params.MemorySize()));
+
LoadSurface(dst_surface);
}
@@ -1319,26 +1061,18 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface,
return new_surface;
}
- // If the format is the same, just do a framebuffer blit. This is significantly faster than
- // using PBOs. The is also likely less accurate, as textures will be converted rather than
- // reinterpreted. When use_accurate_gpu_emulation setting is enabled, perform a more accurate
- // surface copy, where pixels are reinterpreted as a new format (without conversion). This
- // code path uses OpenGL PBOs and is quite slow.
- const bool is_blit{old_params.pixel_format == new_params.pixel_format};
-
switch (new_params.target) {
case SurfaceTarget::Texture2D:
- if (is_blit) {
- BlitSurface(old_surface, new_surface, read_framebuffer.handle, draw_framebuffer.handle);
- } else {
- CopySurface(old_surface, new_surface, copy_pbo.handle);
- }
+ CopySurface(old_surface, new_surface, copy_pbo.handle);
break;
- case SurfaceTarget::TextureCubemap:
case SurfaceTarget::Texture3D:
- case SurfaceTarget::TextureCubeArray:
AccurateCopySurface(old_surface, new_surface);
break;
+ case SurfaceTarget::TextureCubemap:
+ case SurfaceTarget::Texture2DArray:
+ case SurfaceTarget::TextureCubeArray:
+ FastLayeredCopySurface(old_surface, new_surface);
+ break;
default:
LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
static_cast<u32>(new_params.target));
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
index 494f6b903..c710aa245 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
@@ -196,9 +196,15 @@ struct SurfaceParams {
/// Checks if surfaces are compatible for caching
bool IsCompatibleSurface(const SurfaceParams& other) const {
- return std::tie(pixel_format, type, width, height, target, depth) ==
- std::tie(other.pixel_format, other.type, other.width, other.height, other.target,
- other.depth);
+ if (std::tie(pixel_format, type, width, height, target, depth, is_tiled) ==
+ std::tie(other.pixel_format, other.type, other.width, other.height, other.target,
+ other.depth, other.is_tiled)) {
+ if (!is_tiled)
+ return true;
+ return std::tie(block_height, block_depth, tile_width_spacing) ==
+ std::tie(other.block_height, other.block_depth, other.tile_width_spacing);
+ }
+ return false;
}
/// Initializes parameters for caching, should be called after everything has been initialized
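
The compatibility check above leans on std::tie to compare many fields at once, and only compares the tiling parameters when the surface is actually tiled. A reduced sketch of the same pattern, with hypothetical field names, for readers unfamiliar with the idiom:

    #include <cstdint>
    #include <tuple>

    struct ParamsSketch {
        std::uint32_t width, height, depth;
        bool is_tiled;
        std::uint32_t block_height, block_depth, tile_width_spacing;

        bool IsCompatible(const ParamsSketch& other) const {
            // std::tie builds a tuple of references; operator== compares element-wise.
            if (std::tie(width, height, depth, is_tiled) !=
                std::tie(other.width, other.height, other.depth, other.is_tiled)) {
                return false;
            }
            // Tiling parameters only matter when both surfaces are tiled.
            return !is_tiled ||
                   std::tie(block_height, block_depth, tile_width_spacing) ==
                       std::tie(other.block_height, other.block_depth, other.tile_width_spacing);
        }
    };
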
@@ -208,6 +214,7 @@ struct SurfaceParams {
u32 block_width;
u32 block_height;
u32 block_depth;
+ u32 tile_width_spacing;
PixelFormat pixel_format;
ComponentType component_type;
SurfaceType type;
@@ -350,6 +357,7 @@ private:
/// Performs a slow but accurate surface copy, flushing to RAM and reinterpreting the data
void AccurateCopySurface(const Surface& src_surface, const Surface& dst_surface);
+ void FastLayeredCopySurface(const Surface& src_surface, const Surface& dst_surface);
/// The surface reserve is a "backup" cache, this is where we put unique surfaces that have
/// previously been used. This is to prevent surfaces from being constantly created and
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index a85a7c0c5..038b25c75 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -84,6 +84,7 @@ CachedShader::CachedShader(VAddr addr, Maxwell::ShaderProgram program_type)
}
entries = program_result.second;
+ shader_length = entries.shader_length;
if (program_type != Maxwell::ShaderProgram::Geometry) {
OGLShader shader;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index ffbf21831..08f470de3 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -30,7 +30,7 @@ public:
}
std::size_t GetSizeInBytes() const override {
- return GLShader::MAX_PROGRAM_CODE_LENGTH * sizeof(u64);
+ return shader_length;
}
// We do not have to flush this cache as things in it are never modified by us.
@@ -82,6 +82,7 @@ private:
u32 max_vertices, const std::string& debug_name);
VAddr addr;
+ std::size_t shader_length;
Maxwell::ShaderProgram program_type;
GLShader::ShaderSetup setup;
GLShader::ShaderEntries entries;
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 5fde22ad4..8d68156bf 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -34,6 +34,17 @@ constexpr u32 PROGRAM_HEADER_SIZE = sizeof(Tegra::Shader::Header);
constexpr u32 MAX_GEOMETRY_BUFFERS = 6;
constexpr u32 MAX_ATTRIBUTES = 0x100; // Size in vec4s, this value is untested
+static const char* INTERNAL_FLAG_NAMES[] = {"zero_flag", "sign_flag", "carry_flag",
+ "overflow_flag"};
+
+enum class InternalFlag : u64 {
+ ZeroFlag = 0,
+ SignFlag = 1,
+ CarryFlag = 2,
+ OverflowFlag = 3,
+ Amount
+};
+
class DecompileFail : public std::runtime_error {
public:
using std::runtime_error::runtime_error;
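
The enum above replaces the old flag set and pairs each entry with a stable GLSL identifier in INTERNAL_FLAG_NAMES. A tiny sketch of how an index into that table becomes a per-stage variable name; the function name here is hypothetical, and the suffix handling mirrors GetInternalFlag further down in this diff.

    #include <array>
    #include <cassert>
    #include <string>

    enum class InternalFlag : unsigned { ZeroFlag, SignFlag, CarryFlag, OverflowFlag, Amount };

    constexpr std::array<const char*, 4> kFlagNames{"zero_flag", "sign_flag", "carry_flag",
                                                    "overflow_flag"};

    // Builds e.g. "zero_flag_<suffix>"; the exact suffix value is supplied per shader stage.
    std::string FlagVariableName(InternalFlag flag, const std::string& suffix) {
        const auto index = static_cast<unsigned>(flag);
        assert(index < static_cast<unsigned>(InternalFlag::Amount));
        return std::string(kFlagNames[index]) + '_' + suffix;
    }
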
@@ -49,8 +60,7 @@ static std::string GetTopologyName(Tegra::Shader::OutputTopology topology) {
case Tegra::Shader::OutputTopology::TriangleStrip:
return "triangle_strip";
default:
- LOG_CRITICAL(Render_OpenGL, "Unknown output topology {}", static_cast<u32>(topology));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unknown output topology: {}", static_cast<u32>(topology));
return "points";
}
}
@@ -85,7 +95,8 @@ struct Subroutine {
class ControlFlowAnalyzer {
public:
ControlFlowAnalyzer(const ProgramCode& program_code, u32 main_offset, const std::string& suffix)
- : program_code(program_code) {
+ : program_code(program_code), shader_coverage_begin(main_offset),
+ shader_coverage_end(main_offset + 1) {
// Recursively finds all subroutines.
const Subroutine& program_main = AddSubroutine(main_offset, PROGRAM_END, suffix);
@@ -97,10 +108,16 @@ public:
return std::move(subroutines);
}
+ std::size_t GetShaderLength() const {
+ return shader_coverage_end * sizeof(u64);
+ }
+
private:
const ProgramCode& program_code;
std::set<Subroutine> subroutines;
std::map<std::pair<u32, u32>, ExitMethod> exit_method_map;
+ u32 shader_coverage_begin;
+ u32 shader_coverage_end;
/// Adds and analyzes a new subroutine if it is not added yet.
const Subroutine& AddSubroutine(u32 begin, u32 end, const std::string& suffix) {
@@ -142,6 +159,9 @@ private:
return exit_method;
for (u32 offset = begin; offset != end && offset != PROGRAM_END; ++offset) {
+ shader_coverage_begin = std::min(shader_coverage_begin, offset);
+ shader_coverage_end = std::max(shader_coverage_end, offset + 1);
+
const Instruction instr = {program_code[offset]};
if (const auto opcode = OpCode::Decode(instr)) {
switch (opcode->get().GetId()) {
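
The coverage counters added above track the lowest and highest instruction offsets visited during analysis; the upper bound later feeds CachedShader::shader_length via GetShaderLength(), which reports end * sizeof(u64) just like the hunk. A standalone sketch of that bookkeeping, with the instruction scan reduced to a plain visitor:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Tracks which 64-bit instruction words the analyzer has actually touched and
    // converts the covered span into a byte size.
    class CoverageSketch {
    public:
        explicit CoverageSketch(std::uint32_t main_offset)
            : begin{main_offset}, end{main_offset + 1} {}

        void Visit(std::uint32_t offset) {
            begin = std::min(begin, offset);
            end = std::max(end, offset + 1);
        }

        std::size_t ShaderLengthInBytes() const {
            // Matches GetShaderLength() above: the length is measured from offset 0.
            return end * sizeof(std::uint64_t);
        }

    private:
        std::uint32_t begin;
        std::uint32_t end;
    };
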
@@ -167,8 +187,8 @@ private:
case OpCode::Id::SSY:
case OpCode::Id::PBK: {
// The SSY and PBK use a similar encoding as the BRA instruction.
- ASSERT_MSG(instr.bra.constant_buffer == 0,
- "Constant buffer branching is not supported");
+ UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
+ "Constant buffer branching is not supported");
const u32 target = offset + instr.bra.GetBranchTarget();
labels.insert(target);
// Continue scanning for an exit method.
@@ -181,14 +201,53 @@ private:
}
};
+template <typename T>
+class ShaderScopedScope {
+public:
+ explicit ShaderScopedScope(T& writer, std::string_view begin_expr, std::string end_expr)
+ : writer(writer), end_expr(std::move(end_expr)) {
+
+ if (begin_expr.empty()) {
+ writer.AddLine('{');
+ } else {
+ writer.AddExpression(begin_expr);
+ writer.AddLine(" {");
+ }
+ ++writer.scope;
+ }
+
+ ShaderScopedScope(const ShaderScopedScope&) = delete;
+
+ ~ShaderScopedScope() {
+ --writer.scope;
+ if (end_expr.empty()) {
+ writer.AddLine('}');
+ } else {
+ writer.AddExpression("} ");
+ writer.AddExpression(end_expr);
+ writer.AddLine(';');
+ }
+ }
+
+ ShaderScopedScope& operator=(const ShaderScopedScope&) = delete;
+
+private:
+ T& writer;
+ std::string end_expr;
+};
+
class ShaderWriter {
public:
- void AddLine(std::string_view text) {
+ void AddExpression(std::string_view text) {
DEBUG_ASSERT(scope >= 0);
if (!text.empty()) {
AppendIndentation();
}
shader_source += text;
+ }
+
+ void AddLine(std::string_view text) {
+ AddExpression(text);
AddNewLine();
}
@@ -208,6 +267,11 @@ public:
return std::move(shader_source);
}
+ ShaderScopedScope<ShaderWriter> Scope(std::string_view begin_expr = {},
+ std::string end_expr = {}) {
+ return ShaderScopedScope(*this, begin_expr, end_expr);
+ }
+
int scope = 0;
private:
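
The Scope() helper introduced above is an RAII wrapper: constructing it emits the opening brace (optionally preceded by an expression such as an if-condition) and raises the indentation level, and its destructor closes the brace when the C++ scope ends. A hedged usage sketch against a stripped-down writer with the same shape; the optional end_expr handling from the real class is omitted.

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <string_view>

    // Minimal stand-in for ShaderWriter, just enough to demonstrate the RAII scope.
    struct MiniWriter {
        int scope = 0;
        std::string source;

        void AddExpression(std::string_view text) {
            if (!text.empty()) {
                source.append(static_cast<std::size_t>(scope) * 4, ' ');
            }
            source += text;
        }
        void AddLine(std::string_view text) {
            AddExpression(text);
            source += '\n';
        }
    };

    struct ScopedScope {
        explicit ScopedScope(MiniWriter& writer, std::string_view begin_expr = {})
            : writer{writer} {
            writer.AddLine(begin_expr.empty() ? std::string{"{"}
                                              : std::string{begin_expr} + " {");
            ++writer.scope;
        }
        ~ScopedScope() {
            --writer.scope;
            writer.AddLine("}");
        }
        MiniWriter& writer;
    };

    int main() {
        MiniWriter shader;
        {
            const ScopedScope scope{shader, "if (alpha_test[0] != 0)"};
            shader.AddLine("discard;");
        } // brace closed and indentation restored here
        std::cout << shader.source;
    }
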
@@ -258,14 +322,6 @@ private:
const std::string& suffix;
};
-enum class InternalFlag : u64 {
- ZeroFlag = 0,
- CarryFlag = 1,
- OverflowFlag = 2,
- NaNFlag = 3,
- Amount
-};
-
/**
* Used to manage shader registers that are emulated with GLSL. This class keeps track of the state
* of all registers (e.g. whether they are currently being used as Floats or Integers), and
@@ -299,8 +355,7 @@ public:
// Default - do nothing
return value;
default:
- LOG_CRITICAL(HW_GPU, "Unimplemented conversion size {}", static_cast<u32>(size));
- UNREACHABLE();
+ UNREACHABLE_MSG("Unimplemented conversion size: {}", static_cast<u32>(size));
}
}
@@ -363,7 +418,7 @@ public:
u64 value_num_components, bool is_saturated = false,
u64 dest_elem = 0, Register::Size size = Register::Size::Word,
bool sets_cc = false) {
- ASSERT_MSG(!is_saturated, "Unimplemented");
+ UNIMPLEMENTED_IF(is_saturated);
const std::string func{is_signed ? "intBitsToFloat" : "uintBitsToFloat"};
@@ -373,7 +428,7 @@ public:
if (sets_cc) {
const std::string zero_condition = "( " + ConvertIntegerSize(value, size) + " == 0 )";
SetInternalFlag(InternalFlag::ZeroFlag, zero_condition);
- LOG_WARNING(HW_GPU, "Control Codes Imcomplete.");
+ LOG_WARNING(HW_GPU, "Condition codes implementation is incomplete.");
}
}
@@ -392,7 +447,7 @@ public:
Tegra::Shader::HalfMerge merge, u64 dest_num_components,
u64 value_num_components, bool is_saturated = false,
u64 dest_elem = 0) {
- ASSERT_MSG(!is_saturated, "Unimplemented");
+ UNIMPLEMENTED_IF(is_saturated);
const std::string result = [&]() {
switch (merge) {
@@ -456,24 +511,25 @@ public:
shader.AddLine("lmem[" + index + "] = " + func + '(' + value + ");");
}
- std::string GetControlCode(const Tegra::Shader::ControlCode cc) const {
+ std::string GetConditionCode(const Tegra::Shader::ConditionCode cc) const {
switch (cc) {
- case Tegra::Shader::ControlCode::NEU:
+ case Tegra::Shader::ConditionCode::NEU:
return "!(" + GetInternalFlag(InternalFlag::ZeroFlag) + ')';
default:
- LOG_CRITICAL(HW_GPU, "Unimplemented Control Code {}", static_cast<u32>(cc));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented condition code: {}", static_cast<u32>(cc));
return "false";
}
}
- std::string GetInternalFlag(const InternalFlag ii) const {
- const u32 code = static_cast<u32>(ii);
- return "internalFlag_" + std::to_string(code) + suffix;
+ std::string GetInternalFlag(const InternalFlag flag) const {
+ const auto index = static_cast<u32>(flag);
+ ASSERT(index < static_cast<u32>(InternalFlag::Amount));
+
+ return std::string(INTERNAL_FLAG_NAMES[index]) + '_' + suffix;
}
- void SetInternalFlag(const InternalFlag ii, const std::string& value) const {
- shader.AddLine(GetInternalFlag(ii) + " = " + value + ';');
+ void SetInternalFlag(const InternalFlag flag, const std::string& value) const {
+ shader.AddLine(GetInternalFlag(flag) + " = " + value + ';');
}
/**
@@ -488,27 +544,43 @@ public:
const Register& buf_reg) {
const std::string dest = GetOutputAttribute(attribute);
const std::string src = GetRegisterAsFloat(val_reg);
+ if (dest.empty())
+ return;
- if (!dest.empty()) {
- // Can happen with unknown/unimplemented output attributes, in which case we ignore the
- // instruction for now.
- if (stage == Maxwell3D::Regs::ShaderStage::Geometry) {
- // TODO(Rodrigo): nouveau sets some attributes after setting emitting a geometry
- // shader. These instructions use a dirty register as buffer index, to avoid some
- // drivers from complaining about out of boundary writes, guard them.
- const std::string buf_index{"((" + GetRegisterAsInteger(buf_reg) + ") % " +
- std::to_string(MAX_GEOMETRY_BUFFERS) + ')'};
- shader.AddLine("amem[" + buf_index + "][" +
- std::to_string(static_cast<u32>(attribute)) + ']' +
- GetSwizzle(elem) + " = " + src + ';');
- } else {
- if (attribute == Attribute::Index::PointSize) {
- fixed_pipeline_output_attributes_used.insert(attribute);
- shader.AddLine(dest + " = " + src + ';');
- } else {
- shader.AddLine(dest + GetSwizzle(elem) + " = " + src + ';');
- }
- }
+ // Can happen with unknown/unimplemented output attributes, in which case we ignore the
+ // instruction for now.
+ if (stage == Maxwell3D::Regs::ShaderStage::Geometry) {
+ // TODO(Rodrigo): nouveau sets some attributes after emitting a geometry shader.
+ // These instructions use a dirty register as the buffer index; guard them so that
+ // some drivers do not complain about out-of-bounds writes.

+ const std::string buf_index{"((" + GetRegisterAsInteger(buf_reg) + ") % " +
+ std::to_string(MAX_GEOMETRY_BUFFERS) + ')'};
+ shader.AddLine("amem[" + buf_index + "][" +
+ std::to_string(static_cast<u32>(attribute)) + ']' + GetSwizzle(elem) +
+ " = " + src + ';');
+ return;
+ }
+
+ switch (attribute) {
+ case Attribute::Index::ClipDistances0123:
+ case Attribute::Index::ClipDistances4567: {
+ const u64 index = (attribute == Attribute::Index::ClipDistances4567 ? 4 : 0) + elem;
+ UNIMPLEMENTED_IF_MSG(
+ ((header.vtg.clip_distances >> index) & 1) == 0,
+ "Shader is setting gl_ClipDistance{} without enabling it in the header", index);
+
+ clip_distances[index] = true;
+ fixed_pipeline_output_attributes_used.insert(attribute);
+ shader.AddLine(dest + '[' + std::to_string(index) + "] = " + src + ';');
+ break;
+ }
+ case Attribute::Index::PointSize:
+ fixed_pipeline_output_attributes_used.insert(attribute);
+ shader.AddLine(dest + " = " + src + ';');
+ break;
+ default:
+ shader.AddLine(dest + GetSwizzle(elem) + " = " + src + ';');
+ break;
}
}
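
Writing to ClipDistances0123/4567 above folds the attribute and the swizzle element into a single gl_ClipDistance index: the second attribute contributes a base of 4 and the element selects the slot within the group, and the header's clip_distances mask must have that slot enabled. A small sketch of that mapping, using hypothetical names:

    #include <cstdint>

    enum class ClipAttr { Distances0123, Distances4567 };

    // Maps (attribute, element) to the gl_ClipDistance[] index being written,
    // mirroring the switch in SetOutputAttributeToRegister above.
    std::uint64_t ClipDistanceIndex(ClipAttr attr, std::uint64_t elem) {
        const std::uint64_t base = (attr == ClipAttr::Distances4567) ? 4 : 0;
        return base + elem; // e.g. (Distances4567, 2) -> gl_ClipDistance[6]
    }

    // The header's clip_distances bitmask must enable the slot being written.
    bool ClipDistanceEnabled(std::uint32_t clip_distances_mask, std::uint64_t index) {
        return ((clip_distances_mask >> index) & 1) != 0;
    }
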
@@ -575,6 +647,11 @@ public:
return used_samplers;
}
+ /// Returns an array of the used clip distances.
+ const std::array<bool, Maxwell::NumClipDistances>& GetClipDistances() const {
+ return clip_distances;
+ }
+
/// Returns the GLSL sampler used for the input shader sampler, and creates a new one if
/// necessary.
std::string AccessSampler(const Sampler& sampler, Tegra::Shader::TextureType type,
@@ -624,8 +701,8 @@ private:
/// Generates declarations for internal flags.
void GenerateInternalFlags() {
- for (u32 ii = 0; ii < static_cast<u64>(InternalFlag::Amount); ii++) {
- const InternalFlag code = static_cast<InternalFlag>(ii);
+ for (u32 flag = 0; flag < static_cast<u32>(InternalFlag::Amount); flag++) {
+ const InternalFlag code = static_cast<InternalFlag>(flag);
declarations.AddLine("bool " + GetInternalFlag(code) + " = false;");
}
declarations.AddNewLine();
@@ -728,12 +805,19 @@ private:
void GenerateVertex() {
if (stage != Maxwell3D::Regs::ShaderStage::Vertex)
return;
+ bool clip_distances_declared = false;
+
declarations.AddLine("out gl_PerVertex {");
++declarations.scope;
declarations.AddLine("vec4 gl_Position;");
for (auto& o : fixed_pipeline_output_attributes_used) {
if (o == Attribute::Index::PointSize)
declarations.AddLine("float gl_PointSize;");
+ if (!clip_distances_declared && (o == Attribute::Index::ClipDistances0123 ||
+ o == Attribute::Index::ClipDistances4567)) {
+ declarations.AddLine("float gl_ClipDistance[];");
+ clip_distances_declared = true;
+ }
}
--declarations.scope;
declarations.AddLine("};");
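
With clip distances in play, the vertex stage's gl_PerVertex block may now also redeclare gl_ClipDistance[], guarded by clip_distances_declared so it is emitted only once even when both clip attributes were written. As an illustration only, the generated block looks roughly like the raw string below; the exact contents depend on which fixed-pipeline attributes the shader touched.

    // Illustrative output when PointSize and at least one ClipDistances attribute were used.
    constexpr const char* kPerVertexExample = R"(out gl_PerVertex {
        vec4 gl_Position;
        float gl_PointSize;
        float gl_ClipDistance[];
    };
    )";
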
@@ -761,8 +845,7 @@ private:
u64 dest_num_components, u64 value_num_components, u64 dest_elem,
bool precise) {
if (reg == Register::ZeroIndex) {
- LOG_CRITICAL(HW_GPU, "Cannot set Register::ZeroIndex");
- UNREACHABLE();
+ // Setting RZ is a nop in hardware.
return;
}
@@ -777,14 +860,12 @@ private:
}
if (precise && stage != Maxwell3D::Regs::ShaderStage::Fragment) {
- shader.AddLine('{');
- ++shader.scope;
+ const auto scope = shader.Scope();
+
// This avoids optimizations of constant propagation and keeps the code as the original
// Sadly using the precise keyword causes "linking" errors on fragment shaders.
shader.AddLine("precise float tmp = " + src + ';');
shader.AddLine(dest + " = tmp;");
- --shader.scope;
- shader.AddLine('}');
} else {
shader.AddLine(dest + " = " + src + ';');
}
@@ -834,7 +915,8 @@ private:
// vertex shader, and what's the value of the fourth element when inside a Tess Eval
// shader.
ASSERT(stage == Maxwell3D::Regs::ShaderStage::Vertex);
- return "vec4(0, 0, uintBitsToFloat(instance_id.x), uintBitsToFloat(gl_VertexID))";
+ // Config pack's first value is instance_id.
+ return "vec4(0, 0, uintBitsToFloat(config_pack[0]), uintBitsToFloat(gl_VertexID))";
case Attribute::Index::FrontFacing:
// TODO(Subv): Find out what the values are for the other elements.
ASSERT(stage == Maxwell3D::Regs::ShaderStage::Fragment);
@@ -847,16 +929,13 @@ private:
if (declr_input_attribute.count(attribute) == 0) {
declr_input_attribute[attribute] = input_mode;
} else {
- if (declr_input_attribute[attribute] != input_mode) {
- LOG_CRITICAL(HW_GPU, "Same Input multiple input modes");
- UNREACHABLE();
- }
+ UNIMPLEMENTED_IF_MSG(declr_input_attribute[attribute] != input_mode,
+ "Multiple input modes for the same attribute");
}
return GeometryPass("input_attribute_" + std::to_string(index));
}
- LOG_CRITICAL(HW_GPU, "Unhandled input attribute: {}", static_cast<u32>(attribute));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute));
}
return "vec4(0, 0, 0, 0)";
@@ -882,24 +961,20 @@ private:
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled Ipa InterpMode: {}", static_cast<u32>(interp_mode));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled IPA interp mode: {}", static_cast<u32>(interp_mode));
}
}
switch (sample_mode) {
- case Tegra::Shader::IpaSampleMode::Centroid: {
- // Note not implemented, it can be implemented with the "centroid " keyword in glsl;
- LOG_CRITICAL(HW_GPU, "Ipa Sampler Mode: centroid, not implemented");
- UNREACHABLE();
+ case Tegra::Shader::IpaSampleMode::Centroid:
+ // It can be implemented with the "centroid " keyword in glsl
+ UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode centroid");
break;
- }
- case Tegra::Shader::IpaSampleMode::Default: {
+ case Tegra::Shader::IpaSampleMode::Default:
// Default, n/a
break;
- }
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled Ipa SampleMode: {}", static_cast<u32>(sample_mode));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode: {}", static_cast<u32>(sample_mode));
+ break;
}
}
return out;
@@ -912,6 +987,10 @@ private:
return "gl_PointSize";
case Attribute::Index::Position:
return "position";
+ case Attribute::Index::ClipDistances0123:
+ case Attribute::Index::ClipDistances4567: {
+ return "gl_ClipDistance";
+ }
default:
const u32 index{static_cast<u32>(attribute) -
static_cast<u32>(Attribute::Index::Attribute_0)};
@@ -920,8 +999,7 @@ private:
return "output_attribute_" + std::to_string(index);
}
- LOG_CRITICAL(HW_GPU, "Unhandled output attribute: {}", index);
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled output attribute={}", index);
return {};
}
}
@@ -945,15 +1023,17 @@ private:
const std::string& suffix;
const Tegra::Shader::Header& header;
std::unordered_set<Attribute::Index> fixed_pipeline_output_attributes_used;
+ std::array<bool, Maxwell::NumClipDistances> clip_distances{};
u64 local_memory_size;
};
class GLSLGenerator {
public:
GLSLGenerator(const std::set<Subroutine>& subroutines, const ProgramCode& program_code,
- u32 main_offset, Maxwell3D::Regs::ShaderStage stage, const std::string& suffix)
+ u32 main_offset, Maxwell3D::Regs::ShaderStage stage, const std::string& suffix,
+ std::size_t shader_length)
: subroutines(subroutines), program_code(program_code), main_offset(main_offset),
- stage(stage), suffix(suffix) {
+ stage(stage), suffix(suffix), shader_length(shader_length) {
std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header));
local_memory_size = header.GetLocalMemorySize();
regs.SetLocalMemory(local_memory_size);
@@ -966,7 +1046,8 @@ public:
/// Returns entries in the shader that are useful for external functions
ShaderEntries GetEntries() const {
- return {regs.GetConstBuffersDeclarations(), regs.GetSamplers()};
+ return {regs.GetConstBuffersDeclarations(), regs.GetSamplers(), regs.GetClipDistances(),
+ shader_length};
}
private:
@@ -1071,19 +1152,26 @@ private:
const std::string& op_a, const std::string& op_b) const {
using Tegra::Shader::PredCondition;
static const std::unordered_map<PredCondition, const char*> PredicateComparisonStrings = {
- {PredCondition::LessThan, "<"}, {PredCondition::Equal, "=="},
- {PredCondition::LessEqual, "<="}, {PredCondition::GreaterThan, ">"},
- {PredCondition::NotEqual, "!="}, {PredCondition::GreaterEqual, ">="},
- {PredCondition::LessThanWithNan, "<"}, {PredCondition::NotEqualWithNan, "!="},
- {PredCondition::GreaterThanWithNan, ">"}, {PredCondition::GreaterEqualWithNan, ">="}};
+ {PredCondition::LessThan, "<"},
+ {PredCondition::Equal, "=="},
+ {PredCondition::LessEqual, "<="},
+ {PredCondition::GreaterThan, ">"},
+ {PredCondition::NotEqual, "!="},
+ {PredCondition::GreaterEqual, ">="},
+ {PredCondition::LessThanWithNan, "<"},
+ {PredCondition::NotEqualWithNan, "!="},
+ {PredCondition::LessEqualWithNan, "<="},
+ {PredCondition::GreaterThanWithNan, ">"},
+ {PredCondition::GreaterEqualWithNan, ">="}};
const auto& comparison{PredicateComparisonStrings.find(condition)};
- ASSERT_MSG(comparison != PredicateComparisonStrings.end(),
- "Unknown predicate comparison operation");
+ UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonStrings.end(),
+ "Unknown predicate comparison operation");
std::string predicate{'(' + op_a + ") " + comparison->second + " (" + op_b + ')'};
if (condition == PredCondition::LessThanWithNan ||
condition == PredCondition::NotEqualWithNan ||
+ condition == PredCondition::LessEqualWithNan ||
condition == PredCondition::GreaterThanWithNan ||
condition == PredCondition::GreaterEqualWithNan) {
predicate += " || isnan(" + op_a + ") || isnan(" + op_b + ')';
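
The predicate table above now also covers LessEqualWithNan, and every *WithNan condition ORs an isnan() check onto the plain comparison so the predicate also passes when either operand is NaN. A sketch of the string being produced, with hypothetical operand expressions:

    #include <string>

    // Builds e.g. "(op_a) <= (op_b) || isnan(op_a) || isnan(op_b)" for LessEqualWithNan.
    std::string NanAwareCompare(const std::string& op_a, const std::string& op_b,
                                const std::string& comparison, bool with_nan) {
        std::string predicate = '(' + op_a + ") " + comparison + " (" + op_b + ')';
        if (with_nan) {
            predicate += " || isnan(" + op_a + ") || isnan(" + op_b + ')';
        }
        return predicate;
    }
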
@@ -1107,7 +1195,7 @@ private:
};
auto op = PredicateOperationStrings.find(operation);
- ASSERT_MSG(op != PredicateOperationStrings.end(), "Unknown predicate operation");
+ UNIMPLEMENTED_IF_MSG(op == PredicateOperationStrings.end(), "Unknown predicate operation");
return op->second;
}
@@ -1205,8 +1293,7 @@ private:
break;
}
default:
- LOG_CRITICAL(HW_GPU, "Unimplemented logic operation: {}", static_cast<u32>(logic_op));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented logic operation={}", static_cast<u32>(logic_op));
}
if (dest != Tegra::Shader::Register::ZeroIndex) {
@@ -1224,9 +1311,8 @@ private:
SetPredicate(static_cast<u64>(predicate), '(' + result + ") != 0");
break;
default:
- LOG_CRITICAL(HW_GPU, "Unimplemented predicate result mode: {}",
- static_cast<u32>(predicate_mode));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented predicate result mode: {}",
+ static_cast<u32>(predicate_mode));
}
}
@@ -1257,14 +1343,7 @@ private:
regs.SetRegisterToInteger(dest, true, 0, result, 1, 1);
}
- void WriteTexsInstruction(const Instruction& instr, const std::string& coord,
- const std::string& texture) {
- // Add an extra scope and declare the texture coords inside to prevent
- // overwriting them in case they are used as outputs of the texs instruction.
- shader.AddLine('{');
- ++shader.scope;
- shader.AddLine(coord);
-
+ void WriteTexsInstruction(const Instruction& instr, const std::string& texture) {
// TEXS has two destination registers and a swizzle. The first two elements in the swizzle
// go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1
@@ -1287,26 +1366,19 @@ private:
++written_components;
}
-
- --shader.scope;
- shader.AddLine('}');
}
static u32 TextureCoordinates(Tegra::Shader::TextureType texture_type) {
switch (texture_type) {
- case Tegra::Shader::TextureType::Texture1D: {
+ case Tegra::Shader::TextureType::Texture1D:
return 1;
- }
- case Tegra::Shader::TextureType::Texture2D: {
+ case Tegra::Shader::TextureType::Texture2D:
return 2;
- }
case Tegra::Shader::TextureType::Texture3D:
- case Tegra::Shader::TextureType::TextureCube: {
+ case Tegra::Shader::TextureType::TextureCube:
return 3;
- }
default:
- LOG_CRITICAL(HW_GPU, "Unhandled texture type {}", static_cast<u32>(texture_type));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type));
return 0;
}
}
@@ -1316,12 +1388,10 @@ private:
* top.
*/
void EmitPushToFlowStack(u32 target) {
- shader.AddLine('{');
- ++shader.scope;
+ const auto scope = shader.Scope();
+
shader.AddLine("flow_stack[flow_stack_top] = " + std::to_string(target) + "u;");
shader.AddLine("flow_stack_top++;");
- --shader.scope;
- shader.AddLine('}');
}
/*
@@ -1329,20 +1399,18 @@ private:
* popped address and decrementing the stack top.
*/
void EmitPopFromFlowStack() {
- shader.AddLine('{');
- ++shader.scope;
+ const auto scope = shader.Scope();
+
shader.AddLine("flow_stack_top--;");
shader.AddLine("jmp_to = flow_stack[flow_stack_top];");
shader.AddLine("break;");
- --shader.scope;
- shader.AddLine('}');
}
/// Writes the output values from a fragment shader to the corresponding GLSL output variables.
void EmitFragmentOutputsWrite() {
ASSERT(stage == Maxwell3D::Regs::ShaderStage::Fragment);
- ASSERT_MSG(header.ps.omap.sample_mask == 0, "Samplemask write is unimplemented");
+ UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0, "Samplemask write is unimplemented");
shader.AddLine("if (alpha_test[0] != 0) {");
++shader.scope;
@@ -1408,7 +1476,7 @@ private:
case Tegra::Shader::VideoType::Size32:
// TODO(Rodrigo): From my hardware tests it becomes a bit "mad" when
// this type is used (1 * 1 + 0 == 0x5b800000). Until a better
- // explanation is found: assert.
+ // explanation is found: abort.
UNIMPLEMENTED();
return zero;
case Tegra::Shader::VideoType::Invalid:
@@ -1447,6 +1515,161 @@ private:
}
}
+ std::pair<size_t, std::string> ValidateAndGetCoordinateElement(
+ const Tegra::Shader::TextureType texture_type, const bool depth_compare,
+ const bool is_array, const bool lod_bias_enabled, size_t max_coords, size_t max_inputs) {
+ const size_t coord_count = TextureCoordinates(texture_type);
+
+ size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0);
+ const size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0);
+ if (total_coord_count > max_coords || total_reg_count > max_inputs) {
+ UNIMPLEMENTED_MSG("Unsupported Texture operation");
+ total_coord_count = std::min(total_coord_count, max_coords);
+ }
+ // For 1D depth-compare textures OpenGL uses a vec3 coordinate; the 2nd component is ignored later.
+ total_coord_count +=
+ (depth_compare && !is_array && texture_type == Tegra::Shader::TextureType::Texture1D)
+ ? 1
+ : 0;
+
+ constexpr std::array<const char*, 5> coord_container{
+ {"", "float coord = (", "vec2 coord = vec2(", "vec3 coord = vec3(",
+ "vec4 coord = vec4("}};
+
+ return std::pair<size_t, std::string>(coord_count, coord_container[total_coord_count]);
+ }
+
+ std::string GetTextureCode(const Tegra::Shader::Instruction& instr,
+ const Tegra::Shader::TextureType texture_type,
+ const Tegra::Shader::TextureProcessMode process_mode,
+ const bool depth_compare, const bool is_array,
+ const size_t bias_offset) {
+
+ if ((texture_type == Tegra::Shader::TextureType::Texture3D &&
+ (is_array || depth_compare)) ||
+ (texture_type == Tegra::Shader::TextureType::TextureCube && is_array &&
+ depth_compare)) {
+ UNIMPLEMENTED_MSG("This method is not supported.");
+ }
+
+ const std::string sampler =
+ GetSampler(instr.sampler, texture_type, is_array, depth_compare);
+
+ const bool lod_needed = process_mode == Tegra::Shader::TextureProcessMode::LZ ||
+ process_mode == Tegra::Shader::TextureProcessMode::LL ||
+ process_mode == Tegra::Shader::TextureProcessMode::LLA;
+
+ const bool gl_lod_supported = !(
+ (texture_type == Tegra::Shader::TextureType::Texture2D && is_array && depth_compare) ||
+ (texture_type == Tegra::Shader::TextureType::TextureCube && !is_array &&
+ depth_compare));
+
+ const std::string read_method = lod_needed && gl_lod_supported ? "textureLod(" : "texture(";
+ std::string texture = read_method + sampler + ", coord";
+
+ if (process_mode != Tegra::Shader::TextureProcessMode::None) {
+ if (process_mode == Tegra::Shader::TextureProcessMode::LZ) {
+ if (gl_lod_supported) {
+ texture += ", 0";
+ } else {
+ // Lod 0 is emulated by a big negative bias
+ // in scenarios that are not supported by glsl
+ texture += ", -1000";
+ }
+ } else {
+ // If present, the lod or bias is always stored in the register indexed by the gpr20
+ // field, with an offset depending on the usage of the other registers
+ texture += ',' + regs.GetRegisterAsFloat(instr.gpr20.Value() + bias_offset);
+ }
+ }
+ texture += ")";
+ return texture;
+ }
+
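
GetTextureCode above prefers textureLod() when the instruction requests an explicit LOD, but some sampler/array/shadow combinations have no *Lod overload in GLSL; there it falls back to texture() and emulates LOD 0 with a large negative bias. A condensed sketch of that decision with simplified inputs (the LB/LL register operand handling from the hunk is omitted here):

    #include <string>

    enum class ProcessMode { None, LZ, LB, LL, LLA };

    // Chooses between textureLod(...) and texture(...) and appends the LOD argument.
    // 'sampler' is the GLSL sampler expression; "coord" is assumed to be declared already.
    std::string BuildTextureCall(const std::string& sampler, ProcessMode mode,
                                 bool gl_lod_supported) {
        const bool lod_needed =
            mode == ProcessMode::LZ || mode == ProcessMode::LL || mode == ProcessMode::LLA;
        std::string call = (lod_needed && gl_lod_supported) ? "textureLod(" : "texture(";
        call += sampler + ", coord";
        if (mode == ProcessMode::LZ) {
            // LOD 0: pass it directly when supported, otherwise emulate it with a big negative bias.
            call += gl_lod_supported ? ", 0" : ", -1000";
        }
        call += ')';
        return call;
    }
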
+ std::pair<std::string, std::string> GetTEXCode(
+ const Instruction& instr, const Tegra::Shader::TextureType texture_type,
+ const Tegra::Shader::TextureProcessMode process_mode, const bool depth_compare,
+ const bool is_array) {
+ const bool lod_bias_enabled = (process_mode != Tegra::Shader::TextureProcessMode::None &&
+ process_mode != Tegra::Shader::TextureProcessMode::LZ);
+
+ const auto [coord_count, coord_dcl] = ValidateAndGetCoordinateElement(
+ texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5);
+ // If enabled arrays index is always stored in the gpr8 field
+ const u64 array_register = instr.gpr8.Value();
+ // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used
+ const u64 coord_register = array_register + (is_array ? 1 : 0);
+
+ std::string coord = coord_dcl;
+ for (size_t i = 0; i < coord_count;) {
+ coord += regs.GetRegisterAsFloat(coord_register + i);
+ ++i;
+ if (i != coord_count) {
+ coord += ',';
+ }
+ }
+ // For 1D depth-compare textures OpenGL ignores the 2nd component.
+ if (depth_compare && !is_array && texture_type == Tegra::Shader::TextureType::Texture1D) {
+ coord += ",0.0";
+ }
+ if (depth_compare) {
+ // Depth is always stored in the register signaled by gpr20
+ // or in the next register if lod or bias are used
+ const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
+ coord += ',' + regs.GetRegisterAsFloat(depth_register);
+ }
+ if (is_array) {
+ coord += ',' + regs.GetRegisterAsInteger(array_register);
+ }
+ coord += ");";
+ return std::make_pair(
+ coord, GetTextureCode(instr, texture_type, process_mode, depth_compare, is_array, 0));
+ }
+
+ std::pair<std::string, std::string> GetTEXSCode(
+ const Instruction& instr, const Tegra::Shader::TextureType texture_type,
+ const Tegra::Shader::TextureProcessMode process_mode, const bool depth_compare,
+ const bool is_array) {
+ const bool lod_bias_enabled = (process_mode != Tegra::Shader::TextureProcessMode::None &&
+ process_mode != Tegra::Shader::TextureProcessMode::LZ);
+
+ const auto [coord_count, coord_dcl] = ValidateAndGetCoordinateElement(
+ texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4);
+ // If enabled arrays index is always stored in the gpr8 field
+ const u64 array_register = instr.gpr8.Value();
+ // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used
+ const u64 coord_register = array_register + (is_array ? 1 : 0);
+ const u64 last_coord_register =
+ (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2))
+ ? static_cast<u64>(instr.gpr20.Value())
+ : coord_register + 1;
+
+ std::string coord = coord_dcl;
+ for (size_t i = 0; i < coord_count; ++i) {
+ const bool last = (i == (coord_count - 1)) && (coord_count > 1);
+ coord += regs.GetRegisterAsFloat(last ? last_coord_register : coord_register + i);
+ if (!last) {
+ coord += ',';
+ }
+ }
+
+ if (depth_compare) {
+ // Depth is always stored in the register signaled by gpr20
+ // or in the next register if lod or bias are used
+ const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
+ coord += ',' + regs.GetRegisterAsFloat(depth_register);
+ }
+ if (is_array) {
+ coord += ',' + regs.GetRegisterAsInteger(array_register);
+ }
+ coord += ");";
+
+ return std::make_pair(coord,
+ GetTextureCode(instr, texture_type, process_mode, depth_compare,
+ is_array, (coord_count > 2 ? 1 : 0)));
+ }
+
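
GetTEXCode and GetTEXSCode encode a small register-layout contract: gpr8 holds the array index when is_array is set, the coordinates start at gpr8 (or gpr8 + 1 for arrays), and gpr20 carries the LOD/bias with the depth-compare reference one register later. A worked example of GetTEXCode's layout for a 2D array texture with depth compare and an explicit LOD, under those assumptions and with hypothetical register numbers:

    #include <cstdint>
    #include <iostream>

    int main() {
        // Hypothetical TEX operands: gpr8 = 4, gpr20 = 12, Texture2D, array, depth compare, LL.
        const std::uint64_t gpr8 = 4;
        const std::uint64_t gpr20 = 12;
        const bool is_array = true;
        const bool lod_bias_enabled = true; // LL: explicit LOD lives in gpr20

        const std::uint64_t array_register = gpr8;                                // layer index
        const std::uint64_t coord_register = array_register + (is_array ? 1 : 0); // x, then y
        const std::uint64_t lod_register = gpr20;                                 // explicit LOD
        const std::uint64_t depth_register = gpr20 + (lod_bias_enabled ? 1 : 0);  // reference

        std::cout << "layer=r" << array_register << " coords=r" << coord_register << "..r"
                  << coord_register + 1 << " lod=r" << lod_register << " depth=r"
                  << depth_register << '\n';
    }
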
/**
* Compiles a single instruction from Tegra to GLSL.
* @param offset the offset of the Tegra shader instruction.
@@ -1464,8 +1687,7 @@ private:
// Decoding failure
if (!opcode) {
- LOG_CRITICAL(HW_GPU, "Unhandled instruction: {0:x}", instr.value);
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled instruction: {0:x}", instr.value);
return offset + 1;
}
@@ -1473,8 +1695,8 @@ private:
fmt::format("// {}: {} (0x{:016x})", offset, opcode->get().GetName(), instr.value));
using Tegra::Shader::Pred;
- ASSERT_MSG(instr.pred.full_pred != Pred::NeverExecute,
- "NeverExecute predicate not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.pred.full_pred == Pred::NeverExecute,
+ "NeverExecute predicate not implemented");
// Some instructions (like SSY) don't have a predicate field, they are always
// unconditionally executed.
@@ -1517,37 +1739,36 @@ private:
case OpCode::Id::FMUL_R:
case OpCode::Id::FMUL_IMM: {
// FMUL does not have 'abs' bits and only the second operand has a 'neg' bit.
- ASSERT_MSG(instr.fmul.tab5cb8_2 == 0, "FMUL tab5cb8_2({}) is not implemented",
- instr.fmul.tab5cb8_2.Value());
- ASSERT_MSG(instr.fmul.tab5c68_1 == 0, "FMUL tab5cb8_1({}) is not implemented",
- instr.fmul.tab5c68_1.Value());
- ASSERT_MSG(instr.fmul.tab5c68_0 == 1, "FMUL tab5cb8_0({}) is not implemented",
- instr.fmul.tab5c68_0
- .Value()); // SMO typical sends 1 here which seems to be the default
- ASSERT_MSG(instr.fmul.cc == 0, "FMUL cc is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.fmul.tab5cb8_2 != 0,
+ "FMUL tab5cb8_2({}) is not implemented",
+ instr.fmul.tab5cb8_2.Value());
+ UNIMPLEMENTED_IF_MSG(instr.fmul.tab5c68_1 != 0,
+ "FMUL tab5cb8_1({}) is not implemented",
+ instr.fmul.tab5c68_1.Value());
+ UNIMPLEMENTED_IF_MSG(
+ instr.fmul.tab5c68_0 != 1, "FMUL tab5cb8_0({}) is not implemented",
+ instr.fmul.tab5c68_0
+ .Value()); // SMO typically sends 1 here, which seems to be the default
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in FMUL is not implemented");
op_b = GetOperandAbsNeg(op_b, false, instr.fmul.negate_b);
regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " * " + op_b, 1, 1,
instr.alu.saturate_d, 0, true);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "FMUL Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::FADD_C:
case OpCode::Id::FADD_R:
case OpCode::Id::FADD_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in FADD is not implemented");
+
op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a);
op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b);
regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " + " + op_b, 1, 1,
instr.alu.saturate_d, 0, true);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "FADD Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::MUFU: {
@@ -1582,15 +1803,17 @@ private:
instr.alu.saturate_d, 0, true);
break;
default:
- LOG_CRITICAL(HW_GPU, "Unhandled MUFU sub op: {0:x}",
- static_cast<unsigned>(instr.sub_op.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled MUFU sub op={0:x}",
+ static_cast<unsigned>(instr.sub_op.Value()));
}
break;
}
case OpCode::Id::FMNMX_C:
case OpCode::Id::FMNMX_R:
case OpCode::Id::FMNMX_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in FMNMX is not implemented");
+
op_a = GetOperandAbsNeg(op_a, instr.alu.abs_a, instr.alu.negate_a);
op_b = GetOperandAbsNeg(op_b, instr.alu.abs_b, instr.alu.negate_b);
@@ -1601,10 +1824,6 @@ private:
'(' + condition + ") ? min(" + parameters + ") : max(" +
parameters + ')',
1, 1, false, 0, true);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "FMNMX Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::RRO_C:
@@ -1617,9 +1836,7 @@ private:
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled arithmetic instruction: {}",
- opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled arithmetic instruction: {}", opcode->get().GetName());
}
}
break;
@@ -1631,17 +1848,19 @@ private:
break;
}
case OpCode::Id::FMUL32_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.op_32.generates_cc,
+ "Condition codes generation in FMUL32 is not implemented");
+
regs.SetRegisterToFloat(instr.gpr0, 0,
regs.GetRegisterAsFloat(instr.gpr8) + " * " +
GetImmediate32(instr),
1, 1, instr.fmul32.saturate, 0, true);
- if (instr.op_32.generates_cc) {
- LOG_CRITICAL(HW_GPU, "FMUL32 Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::FADD32I: {
+ UNIMPLEMENTED_IF_MSG(instr.op_32.generates_cc,
+ "Condition codes generation in FADD32I is not implemented");
+
std::string op_a = regs.GetRegisterAsFloat(instr.gpr8);
std::string op_b = GetImmediate32(instr);
@@ -1662,23 +1881,22 @@ private:
}
regs.SetRegisterToFloat(instr.gpr0, 0, op_a + " + " + op_b, 1, 1, false, 0, true);
- if (instr.op_32.generates_cc) {
- LOG_CRITICAL(HW_GPU, "FADD32 Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
}
break;
}
case OpCode::Type::Bfe: {
- ASSERT_MSG(!instr.bfe.negate_b, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.bfe.negate_b);
std::string op_a = instr.bfe.negate_a ? "-" : "";
op_a += regs.GetRegisterAsInteger(instr.gpr8);
switch (opcode->get().GetId()) {
case OpCode::Id::BFE_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in BFE is not implemented");
+
std::string inner_shift =
'(' + op_a + " << " + std::to_string(instr.bfe.GetLeftShiftValue()) + ')';
std::string outer_shift =
@@ -1686,20 +1904,35 @@ private:
std::to_string(instr.bfe.GetLeftShiftValue() + instr.bfe.shift_position) + ')';
regs.SetRegisterToInteger(instr.gpr0, true, 0, outer_shift, 1, 1);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "BFE Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled BFE instruction: {}", opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled BFE instruction: {}", opcode->get().GetName());
}
}
break;
}
+ case OpCode::Type::Bfi: {
+ UNIMPLEMENTED_IF(instr.generates_cc);
+
+ const auto [base, packed_shift] = [&]() -> std::tuple<std::string, std::string> {
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::BFI_IMM_R:
+ return {regs.GetRegisterAsInteger(instr.gpr39, 0, false),
+ std::to_string(instr.alu.GetSignedImm20_20())};
+ default:
+ UNREACHABLE();
+ }
+ }();
+ const std::string offset = '(' + packed_shift + " & 0xff)";
+ const std::string bits = "((" + packed_shift + " >> 8) & 0xff)";
+ const std::string insert = regs.GetRegisterAsInteger(instr.gpr8, 0, false);
+ regs.SetRegisterToInteger(
+ instr.gpr0, false, 0,
+ "bitfieldInsert(" + base + ", " + insert + ", " + offset + ", " + bits + ')', 1, 1);
+ break;
+ }
case OpCode::Type::Shift: {
std::string op_a = regs.GetRegisterAsInteger(instr.gpr8, 0, true);
std::string op_b;
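
The BFI_IMM_R case above packs the insertion offset and field width into one immediate: bits 0-7 hold the offset and bits 8-15 the width, which then feed GLSL's bitfieldInsert(). A host-side sketch of the same unpacking and insert, so the constants are easy to check:

    #include <cstdint>
    #include <iostream>

    // Mirrors "bitfieldInsert(base, insert, offset, bits)" with the packed-shift decoding
    // used by the BFI case above: offset in bits 0-7, field width in bits 8-15.
    std::uint32_t BitfieldInsert(std::uint32_t base, std::uint32_t insert,
                                 std::uint32_t packed_shift) {
        const std::uint32_t offset = packed_shift & 0xff;
        const std::uint32_t bits = (packed_shift >> 8) & 0xff;
        const std::uint32_t mask = (bits >= 32) ? 0xffffffffu : ((1u << bits) - 1u) << offset;
        return (base & ~mask) | ((insert << offset) & mask);
    }

    int main() {
        // Insert the low 8 bits of 0xAB at bit offset 4: packed_shift = (8 << 8) | 4.
        std::cout << std::hex << BitfieldInsert(0xffff0000u, 0xabu, (8u << 8) | 4u) << '\n';
        // Prints ffff0ab0
    }
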
@@ -1719,6 +1952,9 @@ private:
case OpCode::Id::SHR_C:
case OpCode::Id::SHR_R:
case OpCode::Id::SHR_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in SHR is not implemented");
+
if (!instr.shift.is_signed) {
// Logical shift right
op_a = "uint(" + op_a + ')';
@@ -1727,24 +1963,17 @@ private:
// Cast to int is superfluous for arithmetic shift, it's only for a logical shift
regs.SetRegisterToInteger(instr.gpr0, true, 0, "int(" + op_a + " >> " + op_b + ')',
1, 1);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "SHR Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::SHL_C:
case OpCode::Id::SHL_R:
case OpCode::Id::SHL_IMM:
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in SHL is not implemented");
regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " << " + op_b, 1, 1);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "SHL Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled shift instruction: {}", opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled shift instruction: {}", opcode->get().GetName());
}
}
break;
@@ -1755,17 +1984,19 @@ private:
switch (opcode->get().GetId()) {
case OpCode::Id::IADD32I:
+ UNIMPLEMENTED_IF_MSG(instr.op_32.generates_cc,
+ "Condition codes generation in IADD32I is not implemented");
+
if (instr.iadd32i.negate_a)
op_a = "-(" + op_a + ')';
regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " + " + op_b, 1, 1,
instr.iadd32i.saturate != 0);
- if (instr.op_32.generates_cc) {
- LOG_CRITICAL(HW_GPU, "IADD32 Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
case OpCode::Id::LOP32I: {
+ UNIMPLEMENTED_IF_MSG(instr.op_32.generates_cc,
+ "Condition codes generation in LOP32I is not implemented");
+
if (instr.alu.lop32i.invert_a)
op_a = "~(" + op_a + ')';
@@ -1775,16 +2006,11 @@ private:
WriteLogicOperation(instr.gpr0, instr.alu.lop32i.operation, op_a, op_b,
Tegra::Shader::PredicateResultMode::None,
Tegra::Shader::Pred::UnusedIndex);
- if (instr.op_32.generates_cc) {
- LOG_CRITICAL(HW_GPU, "LOP32I Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled ArithmeticIntegerImmediate instruction: {}",
- opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled ArithmeticIntegerImmediate instruction: {}",
+ opcode->get().GetName());
}
}
break;
@@ -1807,6 +2033,9 @@ private:
case OpCode::Id::IADD_C:
case OpCode::Id::IADD_R:
case OpCode::Id::IADD_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in IADD is not implemented");
+
if (instr.alu_integer.negate_a)
op_a = "-(" + op_a + ')';
@@ -1815,15 +2044,14 @@ private:
regs.SetRegisterToInteger(instr.gpr0, true, 0, op_a + " + " + op_b, 1, 1,
instr.alu.saturate_d);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "IADD Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::IADD3_C:
case OpCode::Id::IADD3_R:
case OpCode::Id::IADD3_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in IADD3 is not implemented");
+
std::string op_c = regs.GetRegisterAsInteger(instr.gpr39);
auto apply_height = [](auto height, auto& oprand) {
@@ -1837,9 +2065,8 @@ private:
oprand = "((" + oprand + ") >> 16)";
break;
default:
- LOG_CRITICAL(HW_GPU, "Unhandled IADD3 height: {}",
- static_cast<u32>(height.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled IADD3 height: {}",
+ static_cast<u32>(height.Value()));
}
};
@@ -1880,16 +2107,14 @@ private:
}
regs.SetRegisterToInteger(instr.gpr0, true, 0, result, 1, 1);
-
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "IADD3 Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::ISCADD_C:
case OpCode::Id::ISCADD_R:
case OpCode::Id::ISCADD_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in ISCADD is not implemented");
+
if (instr.alu_integer.negate_a)
op_a = "-(" + op_a + ')';
@@ -1900,10 +2125,6 @@ private:
regs.SetRegisterToInteger(instr.gpr0, true, 0,
"((" + op_a + " << " + shift + ") + " + op_b + ')', 1, 1);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "ISCADD Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::POPC_C:
@@ -1927,6 +2148,9 @@ private:
case OpCode::Id::LOP_C:
case OpCode::Id::LOP_R:
case OpCode::Id::LOP_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in LOP is not implemented");
+
if (instr.alu.lop.invert_a)
op_a = "~(" + op_a + ')';
@@ -1935,15 +2159,14 @@ private:
WriteLogicOperation(instr.gpr0, instr.alu.lop.operation, op_a, op_b,
instr.alu.lop.pred_result_mode, instr.alu.lop.pred48);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "LOP Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::LOP3_C:
case OpCode::Id::LOP3_R:
case OpCode::Id::LOP3_IMM: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in LOP3 is not implemented");
+
const std::string op_c = regs.GetRegisterAsInteger(instr.gpr39);
std::string lut;
@@ -1954,17 +2177,15 @@ private:
}
WriteLop3Instruction(instr.gpr0, op_a, op_b, op_c, lut);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "LOP3 Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::IMNMX_C:
case OpCode::Id::IMNMX_R:
case OpCode::Id::IMNMX_IMM: {
- ASSERT_MSG(instr.imnmx.exchange == Tegra::Shader::IMinMaxExchange::None,
- "Unimplemented");
+ UNIMPLEMENTED_IF(instr.imnmx.exchange != Tegra::Shader::IMinMaxExchange::None);
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in IMNMX is not implemented");
+
const std::string condition =
GetPredicateCondition(instr.imnmx.pred, instr.imnmx.negate_pred != 0);
const std::string parameters = op_a + ',' + op_b;
@@ -1972,10 +2193,6 @@ private:
'(' + condition + ") ? min(" + parameters + ") : max(" +
parameters + ')',
1, 1);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "IMNMX Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::LEA_R2:
@@ -2030,24 +2247,19 @@ private:
op_b = regs.GetRegisterAsInteger(instr.gpr8);
op_a = std::to_string(instr.lea.imm.entry_a);
op_c = std::to_string(instr.lea.imm.entry_b);
- LOG_CRITICAL(HW_GPU, "Unhandled LEA subinstruction: {}",
- opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled LEA subinstruction: {}", opcode->get().GetName());
}
}
- if (instr.lea.pred48 != static_cast<u64>(Pred::UnusedIndex)) {
- LOG_ERROR(HW_GPU, "Unhandled LEA Predicate");
- UNREACHABLE();
- }
+ UNIMPLEMENTED_IF_MSG(instr.lea.pred48 != static_cast<u64>(Pred::UnusedIndex),
+ "Unhandled LEA Predicate");
const std::string value = '(' + op_a + " + (" + op_b + "*(1 << " + op_c + ")))";
regs.SetRegisterToInteger(instr.gpr0, true, 0, value, 1, 1);
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled ArithmeticInteger instruction: {}",
- opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled ArithmeticInteger instruction: {}",
+ opcode->get().GetName());
}
}
@@ -2056,7 +2268,7 @@ private:
case OpCode::Type::ArithmeticHalf: {
if (opcode->get().GetId() == OpCode::Id::HADD2_C ||
opcode->get().GetId() == OpCode::Id::HADD2_R) {
- ASSERT_MSG(instr.alu_half.ftz == 0, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.alu_half.ftz != 0);
}
const bool negate_a =
opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0;
@@ -2094,9 +2306,8 @@ private:
case OpCode::Id::HMUL2_R:
return '(' + op_a + " * " + op_b + ')';
default:
- LOG_CRITICAL(HW_GPU, "Unhandled half float instruction: {}",
- opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled half float instruction: {}",
+ opcode->get().GetName());
return std::string("0");
}
}();
@@ -2107,10 +2318,10 @@ private:
}
case OpCode::Type::ArithmeticHalfImmediate: {
if (opcode->get().GetId() == OpCode::Id::HADD2_IMM) {
- ASSERT_MSG(instr.alu_half_imm.ftz == 0, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.alu_half_imm.ftz != 0);
} else {
- ASSERT_MSG(instr.alu_half_imm.precision == Tegra::Shader::HalfPrecision::None,
- "Unimplemented");
+ UNIMPLEMENTED_IF(instr.alu_half_imm.precision !=
+ Tegra::Shader::HalfPrecision::None);
}
const std::string op_a = GetHalfFloat(
@@ -2140,11 +2351,14 @@ private:
std::string op_b = instr.ffma.negate_b ? "-" : "";
std::string op_c = instr.ffma.negate_c ? "-" : "";
- ASSERT_MSG(instr.ffma.cc == 0, "FFMA cc not implemented");
- ASSERT_MSG(instr.ffma.tab5980_0 == 1, "FFMA tab5980_0({}) not implemented",
- instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO
- ASSERT_MSG(instr.ffma.tab5980_1 == 0, "FFMA tab5980_1({}) not implemented",
- instr.ffma.tab5980_1.Value());
+ UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented");
+ UNIMPLEMENTED_IF_MSG(
+ instr.ffma.tab5980_0 != 1, "FFMA tab5980_0({}) not implemented",
+ instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO
+ UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_1 != 0, "FFMA tab5980_1({}) not implemented",
+ instr.ffma.tab5980_1.Value());
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in FFMA is not implemented");
switch (opcode->get().GetId()) {
case OpCode::Id::FFMA_CR: {
@@ -2170,27 +2384,19 @@ private:
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled FFMA instruction: {}", opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled FFMA instruction: {}", opcode->get().GetName());
}
}
regs.SetRegisterToFloat(instr.gpr0, 0, "fma(" + op_a + ", " + op_b + ", " + op_c + ')',
1, 1, instr.alu.saturate_d, 0, true);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "FFMA Generates an unhandled Control Code");
- UNREACHABLE();
- }
-
break;
}
case OpCode::Type::Hfma2: {
if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) {
- ASSERT_MSG(instr.hfma2.rr.precision == Tegra::Shader::HalfPrecision::None,
- "Unimplemented");
+ UNIMPLEMENTED_IF(instr.hfma2.rr.precision != Tegra::Shader::HalfPrecision::None);
} else {
- ASSERT_MSG(instr.hfma2.precision == Tegra::Shader::HalfPrecision::None,
- "Unimplemented");
+ UNIMPLEMENTED_IF(instr.hfma2.precision != Tegra::Shader::HalfPrecision::None);
}
const bool saturate = opcode->get().GetId() == OpCode::Id::HFMA2_RR
? instr.hfma2.rr.saturate != 0
@@ -2240,7 +2446,7 @@ private:
case OpCode::Type::Conversion: {
switch (opcode->get().GetId()) {
case OpCode::Id::I2I_R: {
- ASSERT_MSG(!instr.conversion.selector, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.conversion.selector);
std::string op_a = regs.GetRegisterAsInteger(
instr.gpr20, 0, instr.conversion.is_input_signed, instr.conversion.src_size);
@@ -2260,10 +2466,11 @@ private:
}
case OpCode::Id::I2F_R:
case OpCode::Id::I2F_C: {
- ASSERT_MSG(instr.conversion.dest_size == Register::Size::Word, "Unimplemented");
- ASSERT_MSG(!instr.conversion.selector, "Unimplemented");
-
- std::string op_a{};
+ UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word);
+ UNIMPLEMENTED_IF(instr.conversion.selector);
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in I2F is not implemented");
+ std::string op_a;
if (instr.is_b_gpr) {
op_a =
@@ -2286,16 +2493,13 @@ private:
}
regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1);
-
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "I2F Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::F2F_R: {
- ASSERT_MSG(instr.conversion.dest_size == Register::Size::Word, "Unimplemented");
- ASSERT_MSG(instr.conversion.src_size == Register::Size::Word, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.conversion.dest_size != Register::Size::Word);
+ UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word);
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in F2F is not implemented");
std::string op_a = regs.GetRegisterAsFloat(instr.gpr20);
if (instr.conversion.abs_a) {
@@ -2322,23 +2526,19 @@ private:
op_a = "trunc(" + op_a + ')';
break;
default:
- LOG_CRITICAL(HW_GPU, "Unimplemented f2f rounding mode {}",
- static_cast<u32>(instr.conversion.f2f.rounding.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}",
+ static_cast<u32>(instr.conversion.f2f.rounding.Value()));
break;
}
regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1, instr.alu.saturate_d);
-
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "F2F Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
case OpCode::Id::F2I_R:
case OpCode::Id::F2I_C: {
- ASSERT_MSG(instr.conversion.src_size == Register::Size::Word, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word);
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in F2I is not implemented");
std::string op_a{};
if (instr.is_b_gpr) {
@@ -2369,9 +2569,8 @@ private:
op_a = "trunc(" + op_a + ')';
break;
default:
- LOG_CRITICAL(HW_GPU, "Unimplemented f2i rounding mode {}",
- static_cast<u32>(instr.conversion.f2i.rounding.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented F2I rounding mode {}",
+ static_cast<u32>(instr.conversion.f2i.rounding.Value()));
break;
}
@@ -2383,16 +2582,10 @@ private:
regs.SetRegisterToInteger(instr.gpr0, instr.conversion.is_output_signed, 0, op_a, 1,
1, false, 0, instr.conversion.dest_size);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "F2I Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled conversion instruction: {}",
- opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled conversion instruction: {}", opcode->get().GetName());
}
}
break;
@@ -2401,10 +2594,10 @@ private:
switch (opcode->get().GetId()) {
case OpCode::Id::LD_A: {
// Note: Shouldn't this be interp mode flat? As in no interpolation made.
- ASSERT_MSG(instr.gpr8.Value() == Register::ZeroIndex,
- "Indirect attribute loads are not supported");
- ASSERT_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) == 0,
- "Unaligned attribute loads are not supported");
+ UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex,
+ "Indirect attribute loads are not supported");
+ UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0,
+ "Unaligned attribute loads are not supported");
Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective,
Tegra::Shader::IpaSampleMode::Default};
@@ -2431,12 +2624,9 @@ private:
break;
}
case OpCode::Id::LD_C: {
- ASSERT_MSG(instr.ld_c.unknown == 0, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.ld_c.unknown != 0);
- // Add an extra scope and declare the index register inside to prevent
- // overwriting it in case it is used as an output of the LD instruction.
- shader.AddLine("{");
- ++shader.scope;
+ const auto scope = shader.Scope();
shader.AddLine("uint index = (" + regs.GetRegisterAsInteger(instr.gpr8, 0, false) +
" / 4) & (MAX_CONSTBUFFER_ELEMENTS - 1);");
@@ -2459,20 +2649,16 @@ private:
break;
}
default:
- LOG_CRITICAL(HW_GPU, "Unhandled type: {}",
- static_cast<unsigned>(instr.ld_c.type.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled type: {}",
+ static_cast<unsigned>(instr.ld_c.type.Value()));
}
-
- --shader.scope;
- shader.AddLine("}");
break;
}
case OpCode::Id::LD_L: {
- // Add an extra scope and declare the index register inside to prevent
- // overwriting it in case it is used as an output of the LD instruction.
- shader.AddLine('{');
- ++shader.scope;
+ UNIMPLEMENTED_IF_MSG(instr.ld_l.unknown == 1, "LD_L Unhandled mode: {}",
+ static_cast<unsigned>(instr.ld_l.unknown.Value()));
+
+ const auto scope = shader.Scope();
std::string op = '(' + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + " + " +
std::to_string(instr.smem_imm.Value()) + ')';
@@ -2481,31 +2667,21 @@ private:
const std::string op_a = regs.GetLocalMemoryAsFloat("index");
- if (instr.ld_l.unknown != 1) {
- LOG_CRITICAL(HW_GPU, "LD_L Unhandled mode: {}",
- static_cast<unsigned>(instr.ld_l.unknown.Value()));
- UNREACHABLE();
- }
-
switch (instr.ldst_sl.type.Value()) {
case Tegra::Shader::StoreType::Bytes32:
regs.SetRegisterToFloat(instr.gpr0, 0, op_a, 1, 1);
break;
default:
- LOG_CRITICAL(HW_GPU, "LD_L Unhandled type: {}",
- static_cast<unsigned>(instr.ldst_sl.type.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("LD_L Unhandled type: {}",
+ static_cast<unsigned>(instr.ldst_sl.type.Value()));
}
-
- --shader.scope;
- shader.AddLine('}');
break;
}
case OpCode::Id::ST_A: {
- ASSERT_MSG(instr.gpr8.Value() == Register::ZeroIndex,
- "Indirect attribute loads are not supported");
- ASSERT_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) == 0,
- "Unaligned attribute loads are not supported");
+ UNIMPLEMENTED_IF_MSG(instr.gpr8.Value() != Register::ZeroIndex,
+ "Indirect attribute loads are not supported");
+ UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0,
+ "Unaligned attribute loads are not supported");
u64 next_element = instr.attribute.fmt20.element;
u64 next_index = static_cast<u64>(instr.attribute.fmt20.index.Value());
@@ -2530,360 +2706,157 @@ private:
break;
}
case OpCode::Id::ST_L: {
- // Add an extra scope and declare the index register inside to prevent
- // overwriting it in case it is used as an output of the LD instruction.
- shader.AddLine('{');
- ++shader.scope;
+ UNIMPLEMENTED_IF_MSG(instr.st_l.unknown == 0, "ST_L Unhandled mode: {}",
+ static_cast<unsigned>(instr.st_l.unknown.Value()));
+
+ const auto scope = shader.Scope();
std::string op = '(' + regs.GetRegisterAsInteger(instr.gpr8, 0, false) + " + " +
std::to_string(instr.smem_imm.Value()) + ')';
shader.AddLine("uint index = (" + op + " / 4);");
- if (instr.st_l.unknown != 0) {
- LOG_CRITICAL(HW_GPU, "ST_L Unhandled mode: {}",
- static_cast<unsigned>(instr.st_l.unknown.Value()));
- UNREACHABLE();
- }
-
switch (instr.ldst_sl.type.Value()) {
case Tegra::Shader::StoreType::Bytes32:
regs.SetLocalMemoryAsFloat("index", regs.GetRegisterAsFloat(instr.gpr0));
break;
default:
- LOG_CRITICAL(HW_GPU, "ST_L Unhandled type: {}",
- static_cast<unsigned>(instr.ldst_sl.type.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("ST_L Unhandled type: {}",
+ static_cast<unsigned>(instr.ldst_sl.type.Value()));
}
-
- --shader.scope;
- shader.AddLine('}');
break;
}
case OpCode::Id::TEX: {
Tegra::Shader::TextureType texture_type{instr.tex.texture_type};
- std::string coord;
const bool is_array = instr.tex.array != 0;
-
- ASSERT_MSG(!instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
- "NODEP is not implemented");
- ASSERT_MSG(!instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI),
- "AOFFI is not implemented");
-
const bool depth_compare =
instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC);
- u32 num_coordinates = TextureCoordinates(texture_type);
- if (depth_compare)
- num_coordinates += 1;
-
- switch (num_coordinates) {
- case 1: {
- if (is_array) {
- const std::string index = regs.GetRegisterAsInteger(instr.gpr8);
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- coord = "vec2 coords = vec2(" + x + ", " + index + ");";
- } else {
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
- coord = "float coords = " + x + ';';
- }
- break;
- }
- case 2: {
- if (is_array) {
- const std::string index = regs.GetRegisterAsInteger(instr.gpr8);
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2);
- coord = "vec3 coords = vec3(" + x + ", " + y + ", " + index + ");";
- } else {
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- coord = "vec2 coords = vec2(" + x + ", " + y + ");";
- }
- break;
- }
- case 3: {
- if (depth_compare) {
- if (is_array) {
- const std::string index = regs.GetRegisterAsInteger(instr.gpr8);
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr20);
- const std::string z = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1);
- coord = "vec4 coords = vec4(" + x + ", " + y + ", " + z + ", " + index +
- ");";
- } else {
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- const std::string z = regs.GetRegisterAsFloat(instr.gpr20);
- coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");";
- }
- } else {
- if (is_array) {
- const std::string index = regs.GetRegisterAsInteger(instr.gpr8);
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2);
- const std::string z = regs.GetRegisterAsFloat(instr.gpr8.Value() + 3);
- coord = "vec4 coords = vec4(" + x + ", " + y + ", " + z + ", " + index +
- ");";
- } else {
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- const std::string z = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2);
- coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");";
- }
- }
- break;
- }
- default:
- LOG_CRITICAL(HW_GPU, "Unhandled coordinates number {}",
- static_cast<u32>(num_coordinates));
- UNREACHABLE();
+ const auto process_mode = instr.tex.GetTextureProcessMode();
+ UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
+ "NODEP is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI),
+ "AOFFI is not implemented");
- // Fallback to interpreting as a 2D texture for now
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- coord = "vec2 coords = vec2(" + x + ", " + y + ");";
- texture_type = Tegra::Shader::TextureType::Texture2D;
- }
- // TODO: make sure coordinates are always indexed to gpr8 and gpr20 is always bias
- // or lod.
- std::string op_c;
-
- const std::string sampler =
- GetSampler(instr.sampler, texture_type, is_array, depth_compare);
- // Add an extra scope and declare the texture coords inside to prevent
- // overwriting them in case they are used as outputs of the texs instruction.
+ const auto [coord, texture] =
+ GetTEXCode(instr, texture_type, process_mode, depth_compare, is_array);
- shader.AddLine("{");
- ++shader.scope;
+ const auto scope = shader.Scope();
shader.AddLine(coord);
- std::string texture;
- switch (instr.tex.GetTextureProcessMode()) {
- case Tegra::Shader::TextureProcessMode::None: {
- texture = "texture(" + sampler + ", coords)";
- break;
- }
- case Tegra::Shader::TextureProcessMode::LZ: {
- texture = "textureLod(" + sampler + ", coords, 0.0)";
- break;
- }
- case Tegra::Shader::TextureProcessMode::LB:
- case Tegra::Shader::TextureProcessMode::LBA: {
- if (depth_compare) {
- if (is_array)
- op_c = regs.GetRegisterAsFloat(instr.gpr20.Value() + 2);
- else
- op_c = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1);
- } else {
- op_c = regs.GetRegisterAsFloat(instr.gpr20);
- }
- // TODO: Figure if A suffix changes the equation at all.
- texture = "texture(" + sampler + ", coords, " + op_c + ')';
- break;
- }
- case Tegra::Shader::TextureProcessMode::LL:
- case Tegra::Shader::TextureProcessMode::LLA: {
- if (num_coordinates <= 2) {
- op_c = regs.GetRegisterAsFloat(instr.gpr20);
- } else {
- op_c = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1);
- }
- // TODO: Figure if A suffix changes the equation at all.
- texture = "textureLod(" + sampler + ", coords, " + op_c + ')';
- break;
- }
- default: {
- texture = "texture(" + sampler + ", coords)";
- LOG_CRITICAL(HW_GPU, "Unhandled texture process mode {}",
- static_cast<u32>(instr.tex.GetTextureProcessMode()));
- UNREACHABLE();
- }
- }
- if (!depth_compare) {
+ if (depth_compare) {
+ regs.SetRegisterToFloat(instr.gpr0, 0, texture, 1, 1, false);
+ } else {
+ shader.AddLine("vec4 texture_tmp = " + texture + ';');
std::size_t dest_elem{};
for (std::size_t elem = 0; elem < 4; ++elem) {
if (!instr.tex.IsComponentEnabled(elem)) {
// Skip disabled components
continue;
}
- regs.SetRegisterToFloat(instr.gpr0, elem, texture, 1, 4, false, dest_elem);
+ regs.SetRegisterToFloat(instr.gpr0, elem, "texture_tmp", 1, 4, false,
+ dest_elem);
++dest_elem;
}
- } else {
- regs.SetRegisterToFloat(instr.gpr0, 0, texture, 1, 1, false);
}
- --shader.scope;
- shader.AddLine("}");
break;
}
case OpCode::Id::TEXS: {
- std::string coord;
Tegra::Shader::TextureType texture_type{instr.texs.GetTextureType()};
- bool is_array{instr.texs.IsArrayTexture()};
-
- ASSERT_MSG(!instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
- "NODEP is not implemented");
-
+ const bool is_array{instr.texs.IsArrayTexture()};
const bool depth_compare =
instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC);
- u32 num_coordinates = TextureCoordinates(texture_type);
- if (depth_compare)
- num_coordinates += 1;
+ const auto process_mode = instr.texs.GetTextureProcessMode();
+ UNIMPLEMENTED_IF_MSG(instr.texs.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
+ "NODEP is not implemented");
- switch (num_coordinates) {
- case 2: {
- if (is_array) {
- const std::string index = regs.GetRegisterAsInteger(instr.gpr8);
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr20);
- coord = "vec3 coords = vec3(" + x + ", " + y + ", " + index + ");";
- } else {
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr20);
- coord = "vec2 coords = vec2(" + x + ", " + y + ");";
- }
- break;
- }
- case 3: {
- if (is_array) {
- const std::string index = regs.GetRegisterAsInteger(instr.gpr8);
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2);
- const std::string z = regs.GetRegisterAsFloat(instr.gpr20);
- coord =
- "vec4 coords = vec4(" + x + ", " + y + ", " + z + ", " + index + ");";
- } else {
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- const std::string z = regs.GetRegisterAsFloat(instr.gpr20);
- coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");";
- }
- break;
- }
- default:
- LOG_CRITICAL(HW_GPU, "Unhandled coordinates number {}",
- static_cast<u32>(num_coordinates));
- UNREACHABLE();
+ const auto scope = shader.Scope();
+
+ const auto [coord, texture] =
+ GetTEXSCode(instr, texture_type, process_mode, depth_compare, is_array);
+
+ shader.AddLine(coord);
- // Fallback to interpreting as a 2D texture for now
- const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
- const std::string y = regs.GetRegisterAsFloat(instr.gpr20);
- coord = "vec2 coords = vec2(" + x + ", " + y + ");";
- texture_type = Tegra::Shader::TextureType::Texture2D;
- is_array = false;
- }
- const std::string sampler =
- GetSampler(instr.sampler, texture_type, is_array, depth_compare);
- std::string texture;
- switch (instr.texs.GetTextureProcessMode()) {
- case Tegra::Shader::TextureProcessMode::None: {
- texture = "texture(" + sampler + ", coords)";
- break;
- }
- case Tegra::Shader::TextureProcessMode::LZ: {
- if (depth_compare && is_array) {
- texture = "texture(" + sampler + ", coords)";
- } else {
- texture = "textureLod(" + sampler + ", coords, 0.0)";
- }
- break;
- }
- case Tegra::Shader::TextureProcessMode::LL: {
- const std::string op_c = regs.GetRegisterAsFloat(instr.gpr20.Value() + 1);
- texture = "textureLod(" + sampler + ", coords, " + op_c + ')';
- break;
- }
- default: {
- texture = "texture(" + sampler + ", coords)";
- LOG_CRITICAL(HW_GPU, "Unhandled texture process mode {}",
- static_cast<u32>(instr.texs.GetTextureProcessMode()));
- UNREACHABLE();
- }
- }
if (!depth_compare) {
- WriteTexsInstruction(instr, coord, texture);
+ shader.AddLine("vec4 texture_tmp = " + texture + ';');
+
} else {
- WriteTexsInstruction(instr, coord, "vec4(" + texture + ')');
+ shader.AddLine("vec4 texture_tmp = vec4(" + texture + ");");
}
+
+ WriteTexsInstruction(instr, "texture_tmp");
break;
}
case OpCode::Id::TLDS: {
- std::string coord;
const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()};
const bool is_array{instr.tlds.IsArrayTexture()};
ASSERT(texture_type == Tegra::Shader::TextureType::Texture2D);
ASSERT(is_array == false);
- ASSERT_MSG(!instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
- "NODEP is not implemented");
- ASSERT_MSG(!instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI),
- "AOFFI is not implemented");
- ASSERT_MSG(!instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::MZ),
- "MZ is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
+ "NODEP is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI),
+ "AOFFI is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(Tegra::Shader::TextureMiscMode::MZ),
+ "MZ is not implemented");
- u32 op_c_offset = 0;
+ u32 extra_op_offset = 0;
+
+ ShaderScopedScope scope = shader.Scope();
switch (texture_type) {
case Tegra::Shader::TextureType::Texture1D: {
const std::string x = regs.GetRegisterAsInteger(instr.gpr8);
- coord = "int coords = " + x + ';';
+ shader.AddLine("float coords = " + x + ';');
break;
}
case Tegra::Shader::TextureType::Texture2D: {
- if (is_array) {
- LOG_CRITICAL(HW_GPU, "Unhandled 2d array texture");
- UNREACHABLE();
- } else {
- const std::string x = regs.GetRegisterAsInteger(instr.gpr8);
- const std::string y = regs.GetRegisterAsInteger(instr.gpr20);
- coord = "ivec2 coords = ivec2(" + x + ", " + y + ");";
- op_c_offset = 1;
- }
+ UNIMPLEMENTED_IF_MSG(is_array, "Unhandled 2d array texture");
+
+ const std::string x = regs.GetRegisterAsInteger(instr.gpr8);
+ const std::string y = regs.GetRegisterAsInteger(instr.gpr20);
+ // shader.AddLine("ivec2 coords = ivec2(" + x + ", " + y + ");");
+ shader.AddLine("ivec2 coords = ivec2(" + x + ", " + y + ");");
+ extra_op_offset = 1;
break;
}
default:
- LOG_CRITICAL(HW_GPU, "Unhandled texture type {}",
- static_cast<u32>(texture_type));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type));
}
const std::string sampler =
GetSampler(instr.sampler, texture_type, is_array, false);
- std::string texture = "texelFetch(" + sampler + ", coords, 0)";
- switch (instr.tlds.GetTextureProcessMode()) {
- case Tegra::Shader::TextureProcessMode::LZ: {
- texture = "texelFetch(" + sampler + ", coords, 0)";
- break;
- }
- case Tegra::Shader::TextureProcessMode::LL: {
- const std::string op_c =
- regs.GetRegisterAsInteger(instr.gpr20.Value() + op_c_offset);
- texture = "texelFetch(" + sampler + ", coords, " + op_c + ')';
- break;
- }
- default: {
- texture = "texelFetch(" + sampler + ", coords, 0)";
- LOG_CRITICAL(HW_GPU, "Unhandled texture process mode {}",
- static_cast<u32>(instr.tlds.GetTextureProcessMode()));
- UNREACHABLE();
- }
- }
- WriteTexsInstruction(instr, coord, texture);
+
+ const std::string texture = [&]() {
+ switch (instr.tlds.GetTextureProcessMode()) {
+ case Tegra::Shader::TextureProcessMode::LZ:
+ return "texelFetch(" + sampler + ", coords, 0)";
+ case Tegra::Shader::TextureProcessMode::LL:
+ shader.AddLine(
+ "float lod = " +
+ regs.GetRegisterAsInteger(instr.gpr20.Value() + extra_op_offset) + ';');
+ return "texelFetch(" + sampler + ", coords, lod)";
+ default:
+ UNIMPLEMENTED_MSG("Unhandled texture process mode {}",
+ static_cast<u32>(instr.tlds.GetTextureProcessMode()));
+ return "texelFetch(" + sampler + ", coords, 0)";
+ }
+ }();
+
+ WriteTexsInstruction(instr, texture);
break;
}
case OpCode::Id::TLD4: {
ASSERT(instr.tld4.texture_type == Tegra::Shader::TextureType::Texture2D);
ASSERT(instr.tld4.array == 0);
- std::string coord;
-
- ASSERT_MSG(!instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
- "NODEP is not implemented");
- ASSERT_MSG(!instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI),
- "AOFFI is not implemented");
- ASSERT_MSG(!instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
- "NDV is not implemented");
- ASSERT_MSG(!instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::PTP),
- "PTP is not implemented");
+
+ UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
+ "NODEP is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI),
+ "AOFFI is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
+ "NDV is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::PTP),
+ "PTP is not implemented");
const bool depth_compare =
instr.tld4.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC);
auto texture_type = instr.tld4.texture_type.Value();
@@ -2891,40 +2864,40 @@ private:
if (depth_compare)
num_coordinates += 1;
+ const auto scope = shader.Scope();
+
switch (num_coordinates) {
case 2: {
const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- coord = "vec2 coords = vec2(" + x + ", " + y + ");";
+ shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");");
break;
}
case 3: {
const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
const std::string z = regs.GetRegisterAsFloat(instr.gpr8.Value() + 2);
- coord = "vec3 coords = vec3(" + x + ", " + y + ", " + z + ");";
+ shader.AddLine("vec3 coords = vec3(" + x + ", " + y + ", " + z + ");");
break;
}
default:
- LOG_CRITICAL(HW_GPU, "Unhandled coordinates number {}",
- static_cast<u32>(num_coordinates));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled coordinates number {}",
+ static_cast<u32>(num_coordinates));
const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- coord = "vec2 coords = vec2(" + x + ", " + y + ");";
+ shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");");
texture_type = Tegra::Shader::TextureType::Texture2D;
}
const std::string sampler =
GetSampler(instr.sampler, texture_type, false, depth_compare);
- // Add an extra scope and declare the texture coords inside to prevent
- // overwriting them in case they are used as outputs of the texs instruction.
- shader.AddLine("{");
- ++shader.scope;
- shader.AddLine(coord);
+
const std::string texture = "textureGather(" + sampler + ", coords, " +
std::to_string(instr.tld4.component) + ')';
- if (!depth_compare) {
+
+ if (depth_compare) {
+ regs.SetRegisterToFloat(instr.gpr0, 0, texture, 1, 1, false);
+ } else {
std::size_t dest_elem{};
for (std::size_t elem = 0; elem < 4; ++elem) {
if (!instr.tex.IsComponentEnabled(elem)) {
@@ -2934,18 +2907,18 @@ private:
regs.SetRegisterToFloat(instr.gpr0, elem, texture, 1, 4, false, dest_elem);
++dest_elem;
}
- } else {
- regs.SetRegisterToFloat(instr.gpr0, 0, texture, 1, 1, false);
}
- --shader.scope;
- shader.AddLine("}");
break;
}
case OpCode::Id::TLD4S: {
- ASSERT_MSG(!instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
- "NODEP is not implemented");
- ASSERT_MSG(!instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI),
- "AOFFI is not implemented");
+ UNIMPLEMENTED_IF_MSG(
+ instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
+ "NODEP is not implemented");
+ UNIMPLEMENTED_IF_MSG(
+ instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::AOFFI),
+ "AOFFI is not implemented");
+
+ const auto scope = shader.Scope();
const bool depth_compare =
instr.tld4s.UsesMiscMode(Tegra::Shader::TextureMiscMode::DC);
@@ -2954,52 +2927,58 @@ private:
// TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction.
const std::string sampler = GetSampler(
instr.sampler, Tegra::Shader::TextureType::Texture2D, false, depth_compare);
- std::string coord;
- if (!depth_compare) {
- coord = "vec2 coords = vec2(" + op_a + ", " + op_b + ");";
- } else {
+ if (depth_compare) {
// Note: TLD4S coordinate encoding works just like TEXS's
- const std::string op_c = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- coord = "vec3 coords = vec3(" + op_a + ", " + op_c + ", " + op_b + ");";
+ const std::string op_y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
+ shader.AddLine("vec3 coords = vec3(" + op_a + ", " + op_y + ", " + op_b + ");");
+ } else {
+ shader.AddLine("vec2 coords = vec2(" + op_a + ", " + op_b + ");");
}
- const std::string texture = "textureGather(" + sampler + ", coords, " +
- std::to_string(instr.tld4s.component) + ')';
- if (!depth_compare) {
- WriteTexsInstruction(instr, coord, texture);
- } else {
- WriteTexsInstruction(instr, coord, "vec4(" + texture + ')');
+ std::string texture = "textureGather(" + sampler + ", coords, " +
+ std::to_string(instr.tld4s.component) + ')';
+ if (depth_compare) {
+ texture = "vec4(" + texture + ')';
}
+ WriteTexsInstruction(instr, texture);
break;
}
case OpCode::Id::TXQ: {
- ASSERT_MSG(!instr.txq.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
- "NODEP is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.txq.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
+ "NODEP is not implemented");
+
+ const auto scope = shader.Scope();
- // TODO: the new commits on the texture refactor, change the way samplers work.
+ // TODO: The new commits on the texture refactor change the way samplers work.
// Sadly, not all texture instructions specify the type of texture their sampler
// uses. This must be fixed at a later instance.
const std::string sampler =
GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false);
switch (instr.txq.query_type) {
case Tegra::Shader::TextureQueryType::Dimension: {
- const std::string texture = "textureQueryLevels(" + sampler + ')';
- regs.SetRegisterToInteger(instr.gpr0, true, 0, texture, 1, 1);
+ const std::string texture = "textureSize(" + sampler + ", " +
+ regs.GetRegisterAsInteger(instr.gpr8) + ')';
+ const std::string mip_level = "textureQueryLevels(" + sampler + ')';
+ shader.AddLine("ivec2 sizes = " + texture + ';');
+
+ regs.SetRegisterToInteger(instr.gpr0.Value() + 0, true, 0, "sizes.x", 1, 1);
+ regs.SetRegisterToInteger(instr.gpr0.Value() + 1, true, 0, "sizes.y", 1, 1);
+ regs.SetRegisterToInteger(instr.gpr0.Value() + 2, true, 0, "0", 1, 1);
+ regs.SetRegisterToInteger(instr.gpr0.Value() + 3, true, 0, mip_level, 1, 1);
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled texture query type: {}",
- static_cast<u32>(instr.txq.query_type.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled texture query type: {}",
+ static_cast<u32>(instr.txq.query_type.Value()));
}
}
break;
}
case OpCode::Id::TMML: {
- ASSERT_MSG(!instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
- "NODEP is not implemented");
- ASSERT_MSG(!instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
- "NDV is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NODEP),
+ "NODEP is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
+ "NDV is not implemented");
const std::string x = regs.GetRegisterAsFloat(instr.gpr8);
const bool is_array = instr.tmml.array != 0;
@@ -3007,47 +2986,38 @@ private:
const std::string sampler =
GetSampler(instr.sampler, texture_type, is_array, false);
- // TODO: add coordinates for different samplers once other texture types are
+ const auto scope = shader.Scope();
+
+ // TODO: Add coordinates for different samplers once other texture types are
// implemented.
- std::string coord;
switch (texture_type) {
case Tegra::Shader::TextureType::Texture1D: {
- coord = "float coords = " + x + ';';
+ shader.AddLine("float coords = " + x + ';');
break;
}
case Tegra::Shader::TextureType::Texture2D: {
const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- coord = "vec2 coords = vec2(" + x + ", " + y + ");";
+ shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");");
break;
}
default:
- LOG_CRITICAL(HW_GPU, "Unhandled texture type {}",
- static_cast<u32>(texture_type));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type));
// Fallback to interpreting as a 2D texture for now
const std::string y = regs.GetRegisterAsFloat(instr.gpr8.Value() + 1);
- coord = "vec2 coords = vec2(" + x + ", " + y + ");";
+ shader.AddLine("vec2 coords = vec2(" + x + ", " + y + ");");
texture_type = Tegra::Shader::TextureType::Texture2D;
}
- // Add an extra scope and declare the texture coords inside to prevent
- // overwriting them in case they are used as outputs of the texs instruction.
- shader.AddLine('{');
- ++shader.scope;
- shader.AddLine(coord);
+
const std::string texture = "textureQueryLod(" + sampler + ", coords)";
- const std::string tmp = "vec2 tmp = " + texture + "*vec2(256.0, 256.0);";
- shader.AddLine(tmp);
+ shader.AddLine("vec2 tmp = " + texture + " * vec2(256.0, 256.0);");
regs.SetRegisterToInteger(instr.gpr0, true, 0, "int(tmp.y)", 1, 1);
regs.SetRegisterToInteger(instr.gpr0.Value() + 1, false, 0, "uint(tmp.x)", 1, 1);
- --shader.scope;
- shader.AddLine('}');
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled memory instruction: {}", opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName());
}
}
break;
@@ -3133,7 +3103,7 @@ private:
break;
}
case OpCode::Type::HalfSetPredicate: {
- ASSERT_MSG(instr.hsetp2.ftz == 0, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0);
const std::string op_a =
GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hsetp2.type_a,
@@ -3178,6 +3148,9 @@ private:
break;
}
case OpCode::Type::PredicateSetRegister: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in PSET is not implemented");
+
const std::string op_a =
GetPredicateCondition(instr.pset.pred12, instr.pset.neg_pred12 != 0);
const std::string op_b =
@@ -3198,12 +3171,6 @@ private:
const std::string value = '(' + result + ") ? 1.0 : 0.0";
regs.SetRegisterToFloat(instr.gpr0, 0, value, 1, 1);
}
-
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "PSET Generates an unhandled Control Code");
- UNREACHABLE();
- }
-
break;
}
case OpCode::Type::PredicateSetPredicate: {
@@ -3241,25 +3208,51 @@ private:
const std::string pred =
GetPredicateCondition(instr.csetp.pred39, instr.csetp.neg_pred39 != 0);
const std::string combiner = GetPredicateCombiner(instr.csetp.op);
- const std::string control_code = regs.GetControlCode(instr.csetp.cc);
+ const std::string condition_code = regs.GetConditionCode(instr.csetp.cc);
if (instr.csetp.pred3 != static_cast<u64>(Pred::UnusedIndex)) {
SetPredicate(instr.csetp.pred3,
- '(' + control_code + ") " + combiner + " (" + pred + ')');
+ '(' + condition_code + ") " + combiner + " (" + pred + ')');
}
if (instr.csetp.pred0 != static_cast<u64>(Pred::UnusedIndex)) {
SetPredicate(instr.csetp.pred0,
- "!(" + control_code + ") " + combiner + " (" + pred + ')');
+ "!(" + condition_code + ") " + combiner + " (" + pred + ')');
}
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled predicate instruction: {}",
- opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled predicate instruction: {}", opcode->get().GetName());
}
}
break;
}
+ case OpCode::Type::RegisterSetPredicate: {
+ UNIMPLEMENTED_IF(instr.r2p.mode != Tegra::Shader::R2pMode::Pr);
+
+ const std::string apply_mask = [&]() {
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::R2P_IMM:
+ return std::to_string(instr.r2p.immediate_mask);
+ default:
+ UNREACHABLE();
+ return std::string{};
+ }
+ }();
+ const std::string mask = '(' + regs.GetRegisterAsInteger(instr.gpr8, 0, false) +
+ " >> " + std::to_string(instr.r2p.byte) + ')';
+
+ constexpr u64 programmable_preds = 7;
+ for (u64 pred = 0; pred < programmable_preds; ++pred) {
+ const auto shift = std::to_string(1 << pred);
+
+ shader.AddLine("if ((" + apply_mask + " & " + shift + ") != 0) {");
+ ++shader.scope;
+
+ SetPredicate(pred, '(' + mask + " & " + shift + ") != 0");
+
+ --shader.scope;
+ shader.AddLine('}');
+ }
+ break;
+ }
case OpCode::Type::FloatSet: {
const std::string op_a = GetOperandAbsNeg(regs.GetRegisterAsFloat(instr.gpr8),
instr.fset.abs_a != 0, instr.fset.neg_a != 0);
@@ -3297,6 +3290,10 @@ private:
regs.SetRegisterToInteger(instr.gpr0, false, 0, predicate + " ? 0xFFFFFFFF : 0", 1,
1);
}
+ if (instr.generates_cc.Value() != 0) {
+ regs.SetInternalFlag(InternalFlag::ZeroFlag, predicate);
+ LOG_WARNING(HW_GPU, "FSET Condition Code is incomplete");
+ }
break;
}
case OpCode::Type::IntegerSet: {
@@ -3335,7 +3332,7 @@ private:
break;
}
case OpCode::Type::HalfSet: {
- ASSERT_MSG(instr.hset2.ftz == 0, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.hset2.ftz != 0);
const std::string op_a =
GetHalfFloat(regs.GetRegisterAsInteger(instr.gpr8, 0, false), instr.hset2.type_a,
@@ -3379,15 +3376,17 @@ private:
break;
}
case OpCode::Type::Xmad: {
- ASSERT_MSG(!instr.xmad.sign_a, "Unimplemented");
- ASSERT_MSG(!instr.xmad.sign_b, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.xmad.sign_a);
+ UNIMPLEMENTED_IF(instr.xmad.sign_b);
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in XMAD is not implemented");
std::string op_a{regs.GetRegisterAsInteger(instr.gpr8, 0, instr.xmad.sign_a)};
std::string op_b;
std::string op_c;
// TODO(bunnei): Needs to be fixed once op_a or op_b is signed
- ASSERT_MSG(instr.xmad.sign_a == instr.xmad.sign_b, "Unimplemented");
+ UNIMPLEMENTED_IF(instr.xmad.sign_a != instr.xmad.sign_b);
const bool is_signed{instr.xmad.sign_a == 1};
bool is_merge{};
@@ -3420,8 +3419,7 @@ private:
break;
}
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled XMAD instruction: {}", opcode->get().GetName());
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled XMAD instruction: {}", opcode->get().GetName());
}
}
@@ -3457,9 +3455,8 @@ private:
op_c = "((" + op_c + ") + (" + src2 + "<< 16))";
break;
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled XMAD mode: {}",
- static_cast<u32>(instr.xmad.mode.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled XMAD mode: {}",
+ static_cast<u32>(instr.xmad.mode.Value()));
}
}
@@ -3469,25 +3466,19 @@ private:
}
regs.SetRegisterToInteger(instr.gpr0, is_signed, 0, sum, 1, 1);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "XMAD Generates an unhandled Control Code");
- UNREACHABLE();
- }
break;
}
default: {
switch (opcode->get().GetId()) {
case OpCode::Id::EXIT: {
+ const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
+ UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T,
+ "EXIT condition code used: {}", static_cast<u32>(cc));
+
if (stage == Maxwell3D::Regs::ShaderStage::Fragment) {
EmitFragmentOutputsWrite();
}
- const Tegra::Shader::ControlCode cc = instr.flow_control_code;
- if (cc != Tegra::Shader::ControlCode::T) {
- LOG_CRITICAL(HW_GPU, "EXIT Control Code used: {}", static_cast<u32>(cc));
- UNREACHABLE();
- }
-
switch (instr.flow.cond) {
case Tegra::Shader::FlowCondition::Always:
shader.AddLine("return true;");
@@ -3502,26 +3493,24 @@ private:
case Tegra::Shader::FlowCondition::Fcsm_Tr:
 // TODO(bunnei): What is this used for? If we assume this condition is not
 // satisfied, dual vertex shaders in Farming Simulator make more sense
- LOG_CRITICAL(HW_GPU, "Skipping unknown FlowCondition::Fcsm_Tr");
+ UNIMPLEMENTED_MSG("Skipping unknown FlowCondition::Fcsm_Tr");
break;
default:
- LOG_CRITICAL(HW_GPU, "Unhandled flow condition: {}",
- static_cast<u32>(instr.flow.cond.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled flow condition: {}",
+ static_cast<u32>(instr.flow.cond.Value()));
}
break;
}
case OpCode::Id::KIL: {
- ASSERT(instr.flow.cond == Tegra::Shader::FlowCondition::Always);
+ UNIMPLEMENTED_IF(instr.flow.cond != Tegra::Shader::FlowCondition::Always);
+
+ const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
+ UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T,
+ "KIL condition code used: {}", static_cast<u32>(cc));
// Enclose "discard" in a conditional, so that GLSL compilation does not complain
// about unexecuted instructions that may follow this.
- const Tegra::Shader::ControlCode cc = instr.flow_control_code;
- if (cc != Tegra::Shader::ControlCode::T) {
- LOG_CRITICAL(HW_GPU, "KIL Control Code used: {}", static_cast<u32>(cc));
- UNREACHABLE();
- }
shader.AddLine("if (true) {");
++shader.scope;
shader.AddLine("discard;");
@@ -3531,7 +3520,8 @@ private:
break;
}
case OpCode::Id::OUT_R: {
- ASSERT(instr.gpr20.Value() == Register::ZeroIndex);
+ UNIMPLEMENTED_IF_MSG(instr.gpr20.Value() != Register::ZeroIndex,
+ "Stream buffer is not supported");
ASSERT_MSG(stage == Maxwell3D::Regs::ShaderStage::Geometry,
"OUT is expected to be used in a geometry shader.");
@@ -3557,19 +3547,23 @@ private:
regs.SetRegisterToInteger(instr.gpr0, false, 0, "0u", 1, 1);
break;
}
+ case Tegra::Shader::SystemVariable::Ydirection: {
+ // Config pack's third value is Y_NEGATE's state.
+ regs.SetRegisterToFloat(instr.gpr0, 0, "uintBitsToFloat(config_pack[2])", 1, 1);
+ break;
+ }
default: {
- LOG_CRITICAL(HW_GPU, "Unhandled system move: {}",
- static_cast<u32>(instr.sys20.Value()));
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unhandled system move: {}",
+ static_cast<u32>(instr.sys20.Value()));
}
}
break;
}
case OpCode::Id::ISBERD: {
- ASSERT(instr.isberd.o == 0);
- ASSERT(instr.isberd.skew == 0);
- ASSERT(instr.isberd.shift == Tegra::Shader::IsberdShift::None);
- ASSERT(instr.isberd.mode == Tegra::Shader::IsberdMode::None);
+ UNIMPLEMENTED_IF(instr.isberd.o != 0);
+ UNIMPLEMENTED_IF(instr.isberd.skew != 0);
+ UNIMPLEMENTED_IF(instr.isberd.shift != Tegra::Shader::IsberdShift::None);
+ UNIMPLEMENTED_IF(instr.isberd.mode != Tegra::Shader::IsberdMode::None);
ASSERT_MSG(stage == Maxwell3D::Regs::ShaderStage::Geometry,
"ISBERD is expected to be used in a geometry shader.");
LOG_WARNING(HW_GPU, "ISBERD instruction is incomplete");
@@ -3577,15 +3571,21 @@ private:
break;
}
case OpCode::Id::BRA: {
- ASSERT_MSG(instr.bra.constant_buffer == 0,
- "BRA with constant buffers are not implemented");
- const Tegra::Shader::ControlCode cc = instr.flow_control_code;
- if (cc != Tegra::Shader::ControlCode::T) {
- LOG_CRITICAL(HW_GPU, "BRA Control Code used: {}", static_cast<u32>(cc));
- UNREACHABLE();
- }
+ UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
+ "BRA with constant buffers are not implemented");
+
+ const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
const u32 target = offset + instr.bra.GetBranchTarget();
- shader.AddLine("{ jmp_to = " + std::to_string(target) + "u; break; }");
+ if (cc != Tegra::Shader::ConditionCode::T) {
+ const std::string condition_code = regs.GetConditionCode(cc);
+ shader.AddLine("if (" + condition_code + "){");
+ shader.scope++;
+ shader.AddLine("{ jmp_to = " + std::to_string(target) + "u; break; }");
+ shader.scope--;
+ shader.AddLine('}');
+ } else {
+ shader.AddLine("{ jmp_to = " + std::to_string(target) + "u; break; }");
+ }
break;
}
case OpCode::Id::IPA: {
@@ -3606,7 +3606,8 @@ private:
// The SSY opcode tells the GPU where to re-converge divergent execution paths, it
// sets the target of the jump that the SYNC instruction will make. The SSY opcode
// has a similar structure to the BRA opcode.
- ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer flow is not supported");
+ UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
+ "Constant buffer flow is not supported");
const u32 target = offset + instr.bra.GetBranchTarget();
EmitPushToFlowStack(target);
@@ -3616,29 +3617,28 @@ private:
// PBK pushes to a stack the address where BRK will jump to. This shares stack with
// SSY but using SYNC on a PBK address will kill the shader execution. We don't
 // emulate this because it's very unlikely a driver will emit such an invalid shader.
- ASSERT_MSG(instr.bra.constant_buffer == 0, "Constant buffer PBK is not supported");
+ UNIMPLEMENTED_IF_MSG(instr.bra.constant_buffer != 0,
+ "Constant buffer PBK is not supported");
const u32 target = offset + instr.bra.GetBranchTarget();
EmitPushToFlowStack(target);
break;
}
case OpCode::Id::SYNC: {
+ const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
+ UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T,
+ "SYNC condition code used: {}", static_cast<u32>(cc));
+
// The SYNC opcode jumps to the address previously set by the SSY opcode
- const Tegra::Shader::ControlCode cc = instr.flow_control_code;
- if (cc != Tegra::Shader::ControlCode::T) {
- LOG_CRITICAL(HW_GPU, "SYNC Control Code used: {}", static_cast<u32>(cc));
- UNREACHABLE();
- }
EmitPopFromFlowStack();
break;
}
case OpCode::Id::BRK: {
// The BRK opcode jumps to the address previously set by the PBK opcode
- const Tegra::Shader::ControlCode cc = instr.flow_control_code;
- if (cc != Tegra::Shader::ControlCode::T) {
- LOG_CRITICAL(HW_GPU, "BRK Control Code used: {}", static_cast<u32>(cc));
- UNREACHABLE();
- }
+ const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
+ UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T,
+ "BRK condition code used: {}", static_cast<u32>(cc));
+
EmitPopFromFlowStack();
break;
}
@@ -3649,6 +3649,9 @@ private:
break;
}
case OpCode::Id::VMAD: {
+ UNIMPLEMENTED_IF_MSG(instr.generates_cc,
+ "Condition codes generation in VMAD is not implemented");
+
const bool result_signed = instr.video.signed_a == 1 || instr.video.signed_b == 1;
const std::string op_a = GetVideoOperandA(instr);
const std::string op_b = GetVideoOperandB(instr);
@@ -3668,11 +3671,6 @@ private:
regs.SetRegisterToInteger(instr.gpr0, result_signed, 1, result, 1, 1,
instr.vmad.saturate == 1, 0, Register::Size::Word,
instr.vmad.cc);
- if (instr.generates_cc) {
- LOG_CRITICAL(HW_GPU, "VMAD Generates an unhandled Control Code");
- UNREACHABLE();
- }
-
break;
}
case OpCode::Id::VSETP: {
@@ -3699,10 +3697,7 @@ private:
}
break;
}
- default: {
- LOG_CRITICAL(HW_GPU, "Unhandled instruction: {}", opcode->get().GetName());
- UNREACHABLE();
- }
+ default: { UNIMPLEMENTED_MSG("Unhandled instruction: {}", opcode->get().GetName()); }
}
break;
@@ -3827,6 +3822,7 @@ private:
Maxwell3D::Regs::ShaderStage stage;
const std::string& suffix;
u64 local_memory_size;
+ std::size_t shader_length;
ShaderWriter shader;
ShaderWriter declarations;
@@ -3845,9 +3841,10 @@ std::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code, u
Maxwell3D::Regs::ShaderStage stage,
const std::string& suffix) {
try {
- const auto subroutines =
- ControlFlowAnalyzer(program_code, main_offset, suffix).GetSubroutines();
- GLSLGenerator generator(subroutines, program_code, main_offset, stage, suffix);
+ ControlFlowAnalyzer analyzer(program_code, main_offset, suffix);
+ const auto subroutines = analyzer.GetSubroutines();
+ GLSLGenerator generator(subroutines, program_code, main_offset, stage, suffix,
+ analyzer.GetShaderLength());
return ProgramResult{generator.GetShaderCode(), generator.GetEntries()};
} catch (const DecompileFail& exception) {
LOG_ERROR(HW_GPU, "Shader decompilation failed: {}", exception.what());
@@ -3855,4 +3852,4 @@ std::optional<ProgramResult> DecompileProgram(const ProgramCode& program_code, u
return {};
}
-} // namespace OpenGL::GLShader::Decompiler
+} // namespace OpenGL::GLShader::Decompiler
\ No newline at end of file
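Note on the scope helper used above: the hunks in this file replace the manual "++shader.scope; ... --shader.scope;" bookkeeping with an object returned by shader.Scope() (spelled out once as ShaderScopedScope in the TLDS hunk). Its definition is not part of this excerpt; the following is a minimal RAII sketch of what such a helper is assumed to look like, built only from the ShaderWriter members visible above:

    // Illustrative only -- the real helper lives elsewhere in gl_shader_decompiler.cpp
    // and may differ in detail.
    class ShaderScopedScope {
    public:
        explicit ShaderScopedScope(ShaderWriter& writer) : writer{writer} {
            writer.AddLine('{'); // open a brace-delimited block in the generated GLSL
            ++writer.scope;      // indent everything written while the scope is alive
        }
        ~ShaderScopedScope() {
            --writer.scope;      // restore the indentation level
            writer.AddLine('}'); // close the block when the scope object is destroyed
        }
        ShaderScopedScope(const ShaderScopedScope&) = delete;
        ShaderScopedScope& operator=(const ShaderScopedScope&) = delete;

    private:
        ShaderWriter& writer;
    };
    // ShaderWriter::Scope() would then simply be: return ShaderScopedScope{*this};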
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp
index eea090e52..23ed91e27 100644
--- a/src/video_core/renderer_opengl/gl_shader_gen.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp
@@ -24,8 +24,7 @@ layout (location = 0) out vec4 position;
layout(std140) uniform vs_config {
vec4 viewport_flip;
- uvec4 instance_id;
- uvec4 flip_stage;
+ uvec4 config_pack; // instance_id, flip_stage, y_direction, padding
uvec4 alpha_test;
};
)";
@@ -63,7 +62,8 @@ void main() {
out += R"(
// Check if the flip stage is VertexB
- if (flip_stage[0] == 1) {
+ // Config pack's second value is flip_stage
+ if (config_pack[1] == 1) {
// Viewport can be flipped, which is unsupported by glViewport
position.xy *= viewport_flip.xy;
}
@@ -71,7 +71,7 @@ void main() {
// TODO(bunnei): This is likely a hack, position.w should be interpolated as 1.0
// For now, this is here to bring order in lieu of proper emulation
- if (flip_stage[0] == 1) {
+ if (config_pack[1] == 1) {
position.w = 1.0;
}
}
@@ -101,8 +101,7 @@ layout (location = 0) out vec4 position;
layout (std140) uniform gs_config {
vec4 viewport_flip;
- uvec4 instance_id;
- uvec4 flip_stage;
+ uvec4 config_pack; // instance_id, flip_stage, y_direction, padding
uvec4 alpha_test;
};
@@ -139,8 +138,7 @@ layout (location = 0) in vec4 position;
layout (std140) uniform fs_config {
vec4 viewport_flip;
- uvec4 instance_id;
- uvec4 flip_stage;
+ uvec4 config_pack; // instance_id, flip_stage, y_direction, padding
uvec4 alpha_test;
};
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h
index 520b9d4e3..4fa6d7612 100644
--- a/src/video_core/renderer_opengl/gl_shader_gen.h
+++ b/src/video_core/renderer_opengl/gl_shader_gen.h
@@ -163,6 +163,8 @@ private:
struct ShaderEntries {
std::vector<ConstBufferEntry> const_buffer_entries;
std::vector<SamplerEntry> texture_samplers;
+ std::array<bool, Tegra::Engines::Maxwell3D::Regs::NumClipDistances> clip_distances;
+ std::size_t shader_length;
};
using ProgramResult = std::pair<std::string, ShaderEntries>;
diff --git a/src/video_core/renderer_opengl/gl_shader_manager.cpp b/src/video_core/renderer_opengl/gl_shader_manager.cpp
index 8b8869ecb..6a30c28d2 100644
--- a/src/video_core/renderer_opengl/gl_shader_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_manager.cpp
@@ -27,16 +27,18 @@ void MaxwellUniformData::SetFromRegs(const Maxwell3D::State::ShaderStageInfo& sh
alpha_test.func = func;
alpha_test.ref = regs.alpha_test_ref;
- // We only assign the instance to the first component of the vector, the rest is just padding.
- instance_id[0] = state.current_instance;
+ instance_id = state.current_instance;
// Assign in which stage the position has to be flipped
// (the last stage before the fragment shader).
if (gpu.regs.shader_config[static_cast<u32>(Maxwell3D::Regs::ShaderProgram::Geometry)].enable) {
- flip_stage[0] = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::Geometry);
+ flip_stage = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::Geometry);
} else {
- flip_stage[0] = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::VertexB);
+ flip_stage = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::VertexB);
}
+
+ // Y_NEGATE controls what value S2R returns for the Y_DIRECTION system value.
+ y_direction = regs.screen_y_control.y_negate == 0 ? 1.f : -1.f;
}
} // namespace OpenGL::GLShader
diff --git a/src/video_core/renderer_opengl/gl_shader_manager.h b/src/video_core/renderer_opengl/gl_shader_manager.h
index 2a069cdd8..4970aafed 100644
--- a/src/video_core/renderer_opengl/gl_shader_manager.h
+++ b/src/video_core/renderer_opengl/gl_shader_manager.h
@@ -21,8 +21,11 @@ using Tegra::Engines::Maxwell3D;
struct MaxwellUniformData {
void SetFromRegs(const Maxwell3D::State::ShaderStageInfo& shader_stage);
alignas(16) GLvec4 viewport_flip;
- alignas(16) GLuvec4 instance_id;
- alignas(16) GLuvec4 flip_stage;
+ struct alignas(16) {
+ GLuint instance_id;
+ GLuint flip_stage;
+ GLfloat y_direction;
+ };
struct alignas(16) {
GLuint enabled;
GLuint func;
@@ -30,7 +33,7 @@ struct MaxwellUniformData {
GLuint padding;
} alpha_test;
};
-static_assert(sizeof(MaxwellUniformData) == 64, "MaxwellUniformData structure size is incorrect");
+static_assert(sizeof(MaxwellUniformData) == 48, "MaxwellUniformData structure size is incorrect");
static_assert(sizeof(MaxwellUniformData) < 16384,
"MaxwellUniformData structure must be less than 16kb as per the OpenGL spec");
@@ -57,6 +60,17 @@ public:
}
void ApplyTo(OpenGLState& state) {
+ UpdatePipeline();
+ state.draw.shader_program = 0;
+ state.draw.program_pipeline = pipeline.handle;
+ state.geometry_shaders.enabled = (gs != 0);
+ }
+
+private:
+ void UpdatePipeline() {
+ // Avoid updating the pipeline when the values have not changed
+ if (old_vs == vs && old_fs == fs && old_gs == gs)
+ return;
// Workaround for AMD bug
glUseProgramStages(pipeline.handle,
GL_VERTEX_SHADER_BIT | GL_GEOMETRY_SHADER_BIT | GL_FRAGMENT_SHADER_BIT,
@@ -65,13 +79,16 @@ public:
glUseProgramStages(pipeline.handle, GL_VERTEX_SHADER_BIT, vs);
glUseProgramStages(pipeline.handle, GL_GEOMETRY_SHADER_BIT, gs);
glUseProgramStages(pipeline.handle, GL_FRAGMENT_SHADER_BIT, fs);
- state.draw.shader_program = 0;
- state.draw.program_pipeline = pipeline.handle;
+
+ // Update the old values
+ old_vs = vs;
+ old_fs = fs;
+ old_gs = gs;
}
-private:
OGLPipeline pipeline;
GLuint vs{}, fs{}, gs{};
+ GLuint old_vs{}, old_fs{}, old_gs{};
};
} // namespace OpenGL::GLShader
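The UpdatePipeline change above caches the last-bound stage handles (old_vs/old_fs/old_gs) so the three glUseProgramStages calls are issued only when a handle actually changed. A sketch of the intended per-draw call pattern follows; program_manager and state are assumed rasterizer members, and how vs/fs/gs get assigned is outside this excerpt:

    // Illustrative only:
    program_manager.ApplyTo(state); // re-attaches shader stages only when a handle changed,
                                    // and flags state.geometry_shaders.enabled
    state.Apply();                  // gl_state then diffs against cur_state and
                                    // touches the GL context only where needed

The geometry_shaders.enabled flag set here is what ApplyViewport in gl_state.cpp (next file) uses to choose between the indexed viewport/scissor path and the plain glViewport/glScissor path.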
diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp
index 2635f2b0c..dc0a5ed5e 100644
--- a/src/video_core/renderer_opengl/gl_state.cpp
+++ b/src/video_core/renderer_opengl/gl_state.cpp
@@ -14,7 +14,10 @@ OpenGLState OpenGLState::cur_state;
bool OpenGLState::s_rgb_used;
OpenGLState::OpenGLState() {
// These all match default OpenGL values
+ geometry_shaders.enabled = false;
framebuffer_srgb.enabled = false;
+ multisample_control.alpha_to_coverage = false;
+ multisample_control.alpha_to_one = false;
cull.enabled = false;
cull.mode = GL_BACK;
cull.front_face = GL_CCW;
@@ -50,12 +53,12 @@ OpenGLState::OpenGLState() {
item.height = 0;
item.depth_range_near = 0.0f;
item.depth_range_far = 1.0f;
+ item.scissor.enabled = false;
+ item.scissor.x = 0;
+ item.scissor.y = 0;
+ item.scissor.width = 0;
+ item.scissor.height = 0;
}
- scissor.enabled = false;
- scissor.x = 0;
- scissor.y = 0;
- scissor.width = 0;
- scissor.height = 0;
for (auto& item : blend) {
item.enabled = true;
item.rgb_equation = GL_FUNC_ADD;
@@ -88,6 +91,15 @@ OpenGLState::OpenGLState() {
clip_distance = {};
point.size = 1;
+ fragment_color_clamp.enabled = false;
+ depth_clamp.far_plane = false;
+ depth_clamp.near_plane = false;
+ polygon_offset.fill_enable = false;
+ polygon_offset.line_enable = false;
+ polygon_offset.point_enable = false;
+ polygon_offset.factor = 0.0f;
+ polygon_offset.units = 0.0f;
+ polygon_offset.clamp = 0.0f;
}
void OpenGLState::ApplyDefaultState() {
@@ -136,7 +148,7 @@ void OpenGLState::ApplyCulling() const {
}
void OpenGLState::ApplyColorMask() const {
- if (GLAD_GL_ARB_viewport_array) {
+ if (independant_blend.enabled) {
for (size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
const auto& updated = color_mask[i];
const auto& current = cur_state.color_mask[i];
@@ -229,37 +241,61 @@ void OpenGLState::ApplyStencilTest() const {
config_stencil(GL_BACK, stencil.back, cur_state.stencil.back);
}
}
-
-void OpenGLState::ApplyScissor() const {
- const bool scissor_changed = scissor.enabled != cur_state.scissor.enabled;
- if (scissor_changed) {
- if (scissor.enabled) {
- glEnable(GL_SCISSOR_TEST);
- } else {
- glDisable(GL_SCISSOR_TEST);
- }
- }
- if (scissor.enabled &&
- (scissor_changed || scissor.x != cur_state.scissor.x || scissor.y != cur_state.scissor.y ||
- scissor.width != cur_state.scissor.width || scissor.height != cur_state.scissor.height)) {
- glScissor(scissor.x, scissor.y, scissor.width, scissor.height);
+// The viewport state does not affect glClearBuffer*, so emulate the viewport using the scissor test
+void OpenGLState::EmulateViewportWithScissor() {
+ auto& current = viewports[0];
+ if (current.scissor.enabled) {
+ const GLint left = std::max(current.x, current.scissor.x);
+ const GLint right =
+ std::min(current.x + current.width, current.scissor.x + current.scissor.width);
+ const GLint bottom = std::max(current.y, current.scissor.y);
+ const GLint top =
+ std::min(current.y + current.height, current.scissor.y + current.scissor.height);
+ current.scissor.x = std::max(left, 0);
+ current.scissor.y = std::max(bottom, 0);
+ current.scissor.width = std::max(right - left, 0);
+ current.scissor.height = std::max(top - bottom, 0);
+ } else {
+ current.scissor.enabled = true;
+ current.scissor.x = current.x;
+ current.scissor.y = current.y;
+ current.scissor.width = current.width;
+ current.scissor.height = current.height;
}
}
void OpenGLState::ApplyViewport() const {
- if (GLAD_GL_ARB_viewport_array) {
- for (GLuint i = 0;
- i < static_cast<GLuint>(Tegra::Engines::Maxwell3D::Regs::NumRenderTargets); i++) {
+ if (geometry_shaders.enabled) {
+ for (GLuint i = 0; i < static_cast<GLuint>(Tegra::Engines::Maxwell3D::Regs::NumViewports);
+ i++) {
const auto& current = cur_state.viewports[i];
const auto& updated = viewports[i];
if (updated.x != current.x || updated.y != current.y ||
updated.width != current.width || updated.height != current.height) {
- glViewportIndexedf(i, updated.x, updated.y, updated.width, updated.height);
+ glViewportIndexedf(
+ i, static_cast<GLfloat>(updated.x), static_cast<GLfloat>(updated.y),
+ static_cast<GLfloat>(updated.width), static_cast<GLfloat>(updated.height));
}
if (updated.depth_range_near != current.depth_range_near ||
updated.depth_range_far != current.depth_range_far) {
glDepthRangeIndexed(i, updated.depth_range_near, updated.depth_range_far);
}
+ const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled;
+ if (scissor_changed) {
+ if (updated.scissor.enabled) {
+ glEnablei(GL_SCISSOR_TEST, i);
+ } else {
+ glDisablei(GL_SCISSOR_TEST, i);
+ }
+ }
+ if (updated.scissor.enabled &&
+ (scissor_changed || updated.scissor.x != current.scissor.x ||
+ updated.scissor.y != current.scissor.y ||
+ updated.scissor.width != current.scissor.width ||
+ updated.scissor.height != current.scissor.height)) {
+ glScissorIndexed(i, updated.scissor.x, updated.scissor.y, updated.scissor.width,
+ updated.scissor.height);
+ }
}
} else {
const auto& current = cur_state.viewports[0];
@@ -272,6 +308,21 @@ void OpenGLState::ApplyViewport() const {
updated.depth_range_far != current.depth_range_far) {
glDepthRange(updated.depth_range_near, updated.depth_range_far);
}
+ const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled;
+ if (scissor_changed) {
+ if (updated.scissor.enabled) {
+ glEnable(GL_SCISSOR_TEST);
+ } else {
+ glDisable(GL_SCISSOR_TEST);
+ }
+ }
+ if (updated.scissor.enabled && (scissor_changed || updated.scissor.x != current.scissor.x ||
+ updated.scissor.y != current.scissor.y ||
+ updated.scissor.width != current.scissor.width ||
+ updated.scissor.height != current.scissor.height)) {
+ glScissor(updated.scissor.x, updated.scissor.y, updated.scissor.width,
+ updated.scissor.height);
+ }
}
}
@@ -289,31 +340,20 @@ void OpenGLState::ApplyGlobalBlending() const {
if (!updated.enabled) {
return;
}
- if (updated.separate_alpha) {
- if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
- updated.dst_rgb_func != current.dst_rgb_func ||
- updated.src_a_func != current.src_a_func || updated.dst_a_func != current.dst_a_func) {
- glBlendFuncSeparate(updated.src_rgb_func, updated.dst_rgb_func, updated.src_a_func,
- updated.dst_a_func);
- }
-
- if (blend_changed || updated.rgb_equation != current.rgb_equation ||
- updated.a_equation != current.a_equation) {
- glBlendEquationSeparate(updated.rgb_equation, updated.a_equation);
- }
- } else {
- if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
- updated.dst_rgb_func != current.dst_rgb_func) {
- glBlendFunc(updated.src_rgb_func, updated.dst_rgb_func);
- }
+ if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
+ updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func ||
+ updated.dst_a_func != current.dst_a_func) {
+ glBlendFuncSeparate(updated.src_rgb_func, updated.dst_rgb_func, updated.src_a_func,
+ updated.dst_a_func);
+ }
- if (blend_changed || updated.rgb_equation != current.rgb_equation) {
- glBlendEquation(updated.rgb_equation);
- }
+ if (blend_changed || updated.rgb_equation != current.rgb_equation ||
+ updated.a_equation != current.a_equation) {
+ glBlendEquationSeparate(updated.rgb_equation, updated.a_equation);
}
}
-void OpenGLState::ApplyTargetBlending(int target, bool force) const {
+void OpenGLState::ApplyTargetBlending(std::size_t target, bool force) const {
const Blend& updated = blend[target];
const Blend& current = cur_state.blend[target];
const bool blend_changed = updated.enabled != current.enabled || force;
@@ -327,29 +367,17 @@ void OpenGLState::ApplyTargetBlending(int target, bool force) const {
if (!updated.enabled) {
return;
}
- if (updated.separate_alpha) {
- if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
- updated.dst_rgb_func != current.dst_rgb_func ||
- updated.src_a_func != current.src_a_func || updated.dst_a_func != current.dst_a_func) {
- glBlendFuncSeparateiARB(static_cast<GLuint>(target), updated.src_rgb_func,
- updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func);
- }
-
- if (blend_changed || updated.rgb_equation != current.rgb_equation ||
- updated.a_equation != current.a_equation) {
- glBlendEquationSeparateiARB(static_cast<GLuint>(target), updated.rgb_equation,
- updated.a_equation);
- }
- } else {
- if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
- updated.dst_rgb_func != current.dst_rgb_func) {
- glBlendFunciARB(static_cast<GLuint>(target), updated.src_rgb_func,
- updated.dst_rgb_func);
- }
+ if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
+ updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func ||
+ updated.dst_a_func != current.dst_a_func) {
+ glBlendFuncSeparatei(static_cast<GLuint>(target), updated.src_rgb_func,
+ updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func);
+ }
- if (blend_changed || updated.rgb_equation != current.rgb_equation) {
- glBlendEquationiARB(static_cast<GLuint>(target), updated.rgb_equation);
- }
+ if (blend_changed || updated.rgb_equation != current.rgb_equation ||
+ updated.a_equation != current.a_equation) {
+ glBlendEquationSeparatei(static_cast<GLuint>(target), updated.rgb_equation,
+ updated.a_equation);
}
}
@@ -386,6 +414,55 @@ void OpenGLState::ApplyLogicOp() const {
}
}
+void OpenGLState::ApplyPolygonOffset() const {
+
+ const bool fill_enable_changed =
+ polygon_offset.fill_enable != cur_state.polygon_offset.fill_enable;
+ const bool line_enable_changed =
+ polygon_offset.line_enable != cur_state.polygon_offset.line_enable;
+ const bool point_enable_changed =
+ polygon_offset.point_enable != cur_state.polygon_offset.point_enable;
+ const bool factor_changed = polygon_offset.factor != cur_state.polygon_offset.factor;
+ const bool units_changed = polygon_offset.units != cur_state.polygon_offset.units;
+ const bool clamp_changed = polygon_offset.clamp != cur_state.polygon_offset.clamp;
+
+ if (fill_enable_changed) {
+ if (polygon_offset.fill_enable) {
+ glEnable(GL_POLYGON_OFFSET_FILL);
+ } else {
+ glDisable(GL_POLYGON_OFFSET_FILL);
+ }
+ }
+
+ if (line_enable_changed) {
+ if (polygon_offset.line_enable) {
+ glEnable(GL_POLYGON_OFFSET_LINE);
+ } else {
+ glDisable(GL_POLYGON_OFFSET_LINE);
+ }
+ }
+
+ if (point_enable_changed) {
+ if (polygon_offset.point_enable) {
+ glEnable(GL_POLYGON_OFFSET_POINT);
+ } else {
+ glDisable(GL_POLYGON_OFFSET_POINT);
+ }
+ }
+
+ if ((polygon_offset.fill_enable || polygon_offset.line_enable || polygon_offset.point_enable) &&
+ (factor_changed || units_changed || clamp_changed)) {
+
+ if (GLAD_GL_EXT_polygon_offset_clamp && polygon_offset.clamp != 0) {
+ glPolygonOffsetClamp(polygon_offset.factor, polygon_offset.units, polygon_offset.clamp);
+ } else {
+ glPolygonOffset(polygon_offset.factor, polygon_offset.units);
+ UNIMPLEMENTED_IF_MSG(polygon_offset.clamp != 0,
+ "Unimplemented Depth polygon offset clamp.");
+ }
+ }
+}
+
void OpenGLState::ApplyTextures() const {
for (std::size_t i = 0; i < std::size(texture_units); ++i) {
const auto& texture_unit = texture_units[i];
@@ -449,6 +526,21 @@ void OpenGLState::ApplyVertexBufferState() const {
}
}
+void OpenGLState::ApplyDepthClamp() const {
+ if (depth_clamp.far_plane == cur_state.depth_clamp.far_plane &&
+ depth_clamp.near_plane == cur_state.depth_clamp.near_plane) {
+ return;
+ }
+ if (depth_clamp.far_plane != depth_clamp.near_plane) {
+ UNIMPLEMENTED_MSG("Unimplemented Depth Clamp Separation!");
+ }
+ if (depth_clamp.far_plane || depth_clamp.near_plane) {
+ glEnable(GL_DEPTH_CLAMP);
+ } else {
+ glDisable(GL_DEPTH_CLAMP);
+ }
+}
+
void OpenGLState::Apply() const {
ApplyFramebufferState();
ApplyVertexBufferState();
@@ -480,9 +572,27 @@ void OpenGLState::Apply() const {
if (point.size != cur_state.point.size) {
glPointSize(point.size);
}
+ if (fragment_color_clamp.enabled != cur_state.fragment_color_clamp.enabled) {
+ glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB,
+ fragment_color_clamp.enabled ? GL_TRUE : GL_FALSE);
+ }
+ if (multisample_control.alpha_to_coverage != cur_state.multisample_control.alpha_to_coverage) {
+ if (multisample_control.alpha_to_coverage) {
+ glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+ } else {
+ glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+ }
+ }
+ if (multisample_control.alpha_to_one != cur_state.multisample_control.alpha_to_one) {
+ if (multisample_control.alpha_to_one) {
+ glEnable(GL_SAMPLE_ALPHA_TO_ONE);
+ } else {
+ glDisable(GL_SAMPLE_ALPHA_TO_ONE);
+ }
+ }
+ ApplyDepthClamp();
ApplyColorMask();
ApplyViewport();
- ApplyScissor();
ApplyStencilTest();
ApplySRgb();
ApplyCulling();
@@ -492,6 +602,7 @@ void OpenGLState::Apply() const {
ApplyLogicOp();
ApplyTextures();
ApplySamplers();
+ ApplyPolygonOffset();
cur_state = *this;
}
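EmulateViewportWithScissor exists because glClearBuffer* ignores the viewport rectangle but does honour the scissor test. A sketch of the expected call site follows; the real caller is in gl_rasterizer.cpp, outside this excerpt, and current_state/clear_color are illustrative names:

    // Illustrative only:
    OpenGLState clear_state{current_state};    // copy whatever state the rasterizer tracks
    clear_state.EmulateViewportWithScissor();  // fold viewports[0] into viewports[0].scissor
    clear_state.Apply();                       // emits glEnable(GL_SCISSOR_TEST) + glScissor(...)
    glClearBufferfv(GL_COLOR, 0, clear_color); // the clear is now clipped to the viewport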
diff --git a/src/video_core/renderer_opengl/gl_state.h b/src/video_core/renderer_opengl/gl_state.h
index eacca0b9c..439bfbc98 100644
--- a/src/video_core/renderer_opengl/gl_state.h
+++ b/src/video_core/renderer_opengl/gl_state.h
@@ -40,6 +40,24 @@ public:
} framebuffer_srgb;
struct {
+ bool alpha_to_coverage; // GL_ALPHA_TO_COVERAGE
+ bool alpha_to_one; // GL_ALPHA_TO_ONE
+ } multisample_control;
+
+ struct {
+ bool enabled; // GL_CLAMP_FRAGMENT_COLOR_ARB
+ } fragment_color_clamp;
+
+ struct {
+ bool far_plane;
+ bool near_plane;
+ } depth_clamp; // GL_DEPTH_CLAMP
+
+ struct {
+ bool enabled; // viewport arrays are only supported when geometry shaders are enabled.
+ } geometry_shaders;
+
+ struct {
bool enabled; // GL_CULL_FACE
GLenum mode; // GL_CULL_FACE_MODE
GLenum front_face; // GL_FRONT_FACE
@@ -79,7 +97,6 @@ public:
struct Blend {
bool enabled; // GL_BLEND
- bool separate_alpha; // Independent blend enabled
GLenum rgb_equation; // GL_BLEND_EQUATION_RGB
GLenum a_equation; // GL_BLEND_EQUATION_ALPHA
GLenum src_rgb_func; // GL_BLEND_SRC_RGB
@@ -144,28 +161,36 @@ public:
} draw;
struct viewport {
- GLfloat x;
- GLfloat y;
- GLfloat width;
- GLfloat height;
+ GLint x;
+ GLint y;
+ GLint width;
+ GLint height;
GLfloat depth_range_near; // GL_DEPTH_RANGE
GLfloat depth_range_far; // GL_DEPTH_RANGE
+ struct {
+ bool enabled; // GL_SCISSOR_TEST
+ GLint x;
+ GLint y;
+ GLsizei width;
+ GLsizei height;
+ } scissor;
};
- std::array<viewport, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> viewports;
-
- struct {
- bool enabled; // GL_SCISSOR_TEST
- GLint x;
- GLint y;
- GLsizei width;
- GLsizei height;
- } scissor;
+ std::array<viewport, Tegra::Engines::Maxwell3D::Regs::NumViewports> viewports;
struct {
float size; // GL_POINT_SIZE
} point;
- std::array<bool, 2> clip_distance; // GL_CLIP_DISTANCE
+ struct {
+ bool point_enable;
+ bool line_enable;
+ bool fill_enable;
+ GLfloat units;
+ GLfloat factor;
+ GLfloat clamp;
+ } polygon_offset;
+
+ std::array<bool, 8> clip_distance; // GL_CLIP_DISTANCE
OpenGLState();
@@ -195,6 +220,7 @@ public:
OpenGLState& ResetBuffer(GLuint handle);
OpenGLState& ResetVertexArray(GLuint handle);
OpenGLState& ResetFramebuffer(GLuint handle);
+ void EmulateViewportWithScissor();
private:
static OpenGLState cur_state;
@@ -208,13 +234,14 @@ private:
void ApplyPrimitiveRestart() const;
void ApplyStencilTest() const;
void ApplyViewport() const;
- void ApplyTargetBlending(int target, bool force) const;
+ void ApplyTargetBlending(std::size_t target, bool force) const;
void ApplyGlobalBlending() const;
void ApplyBlending() const;
void ApplyLogicOp() const;
void ApplyTextures() const;
void ApplySamplers() const;
- void ApplyScissor() const;
+ void ApplyDepthClamp() const;
+ void ApplyPolygonOffset() const;
};
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index 3ce2cc6d2..a8833c06e 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
@@ -180,6 +180,12 @@ inline GLenum WrapMode(Tegra::Texture::WrapMode wrap_mode) {
return GL_CLAMP_TO_BORDER;
case Tegra::Texture::WrapMode::MirrorOnceClampToEdge:
return GL_MIRROR_CLAMP_TO_EDGE;
+ case Tegra::Texture::WrapMode::MirrorOnceBorder:
+ if (GLAD_GL_EXT_texture_mirror_clamp) {
+ return GL_MIRROR_CLAMP_TO_BORDER_EXT;
+ } else {
+ return GL_MIRROR_CLAMP_TO_EDGE;
+ }
}
LOG_ERROR(Render_OpenGL, "Unimplemented texture wrap mode={}", static_cast<u32>(wrap_mode));
return GL_REPEAT;
@@ -212,14 +218,19 @@ inline GLenum DepthCompareFunc(Tegra::Texture::DepthCompareFunc func) {
inline GLenum BlendEquation(Maxwell::Blend::Equation equation) {
switch (equation) {
case Maxwell::Blend::Equation::Add:
+ case Maxwell::Blend::Equation::AddGL:
return GL_FUNC_ADD;
case Maxwell::Blend::Equation::Subtract:
+ case Maxwell::Blend::Equation::SubtractGL:
return GL_FUNC_SUBTRACT;
case Maxwell::Blend::Equation::ReverseSubtract:
+ case Maxwell::Blend::Equation::ReverseSubtractGL:
return GL_FUNC_REVERSE_SUBTRACT;
case Maxwell::Blend::Equation::Min:
+ case Maxwell::Blend::Equation::MinGL:
return GL_MIN;
case Maxwell::Blend::Equation::Max:
+ case Maxwell::Blend::Equation::MaxGL:
return GL_MAX;
}
LOG_ERROR(Render_OpenGL, "Unimplemented blend equation={}", static_cast<u32>(equation));
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index ea38da932..4fd0d66c5 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -19,9 +19,9 @@
#include "core/settings.h"
#include "core/telemetry_session.h"
#include "core/tracer/recorder.h"
+#include "video_core/morton.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/renderer_opengl/renderer_opengl.h"
-#include "video_core/utils.h"
namespace OpenGL {
@@ -304,6 +304,12 @@ void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture,
gl_framebuffer_data.resize(texture.width * texture.height * 4);
break;
default:
+ internal_format = GL_RGBA;
+ texture.gl_format = GL_RGBA;
+ texture.gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;
+ gl_framebuffer_data.resize(texture.width * texture.height * 4);
+ LOG_CRITICAL(Render_OpenGL, "Unknown framebuffer pixel format: {}",
+ static_cast<u32>(framebuffer.pixel_format));
UNREACHABLE();
}
@@ -484,7 +490,7 @@ bool RendererOpenGL::Init() {
Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model);
Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_OpenGL_Version", gl_version);
- if (!GLAD_GL_VERSION_3_3) {
+ if (!GLAD_GL_VERSION_4_3) {
return false;
}