From 1e65da971bf6edd5611e6e409ba1cc4f99e58655 Mon Sep 17 00:00:00 2001
From: Morph <39850852+Morph1984@users.noreply.github.com>
Date: Sat, 20 Jun 2020 07:41:55 -0400
Subject: gl_device: Check for GL_EXT_texture_shadow_lod

---
 src/video_core/renderer_opengl/gl_device.cpp | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'src/video_core/renderer_opengl/gl_device.cpp')

diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index b31d604e4..1011c7738 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -216,6 +216,7 @@ Device::Device()
     has_shader_ballot = GLAD_GL_ARB_shader_ballot;
     has_vertex_viewport_layer = GLAD_GL_ARB_shader_viewport_layer_array;
     has_image_load_formatted = HasExtension(extensions, "GL_EXT_shader_image_load_formatted");
+    has_texture_shadow_lod = HasExtension(extensions, "GL_EXT_texture_shadow_lod");
     has_astc = IsASTCSupported();
     has_variable_aoffi = TestVariableAoffi();
     has_component_indexing_bug = is_amd;
@@ -245,6 +246,7 @@ Device::Device(std::nullptr_t) {
     has_shader_ballot = true;
     has_vertex_viewport_layer = true;
     has_image_load_formatted = true;
+    has_texture_shadow_lod = true;
     has_variable_aoffi = true;
 }

--
cgit v1.2.3


From 73fb3a304b215abce3cfb1c0c5eb2b43740b65ed Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Thu, 18 Jun 2020 03:54:13 -0300
Subject: gl_device: Expose NV_vertex_buffer_unified_memory except on Turing

Expose NV_vertex_buffer_unified_memory when the driver supports it.

This commit adds a function to determine if a GL_RENDERER is a Turing GPU.
This is required because on Turing GPUs Nvidia's driver crashes when the
buffer is marked as resident or on DeleteBuffers. Without synchronous debug
output (a single-threaded driver), it's likely that the driver will crash
on the first blocking call.
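Illustrative aside, not part of the patch series: "resident" above refers to the
NV_shader_buffer_load / NV_vertex_buffer_unified_memory model, in which a buffer
is pinned and then addressed through a raw GPU pointer instead of a bound buffer
name. A minimal sketch of the calls involved follows; the helper name and
parameters are invented for illustration and assume glad has loaded the NV entry
points.

    // Sketch: upload vertex data, make the buffer resident and query the raw
    // GPU address the vertex puller will read from. The residency call and the
    // eventual glDeleteBuffers on such a buffer are the operations reported to
    // crash on Turing.
    GLuint64EXT MakeResidentVertexBuffer(const void* data, GLsizeiptr size, GLuint& buffer) {
        glCreateBuffers(1, &buffer);
        glNamedBufferData(buffer, size, data, GL_STATIC_DRAW);
        glMakeNamedBufferResidentNV(buffer, GL_READ_ONLY);
        GLuint64EXT gpu_address = 0;
        glGetNamedBufferParameterui64vNV(buffer, GL_BUFFER_GPU_ADDRESS_NV, &gpu_address);
        return gpu_address;
    }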
---
 src/video_core/renderer_opengl/gl_device.cpp | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

(limited to 'src/video_core/renderer_opengl/gl_device.cpp')

diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 1011c7738..447a19595 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -188,16 +188,32 @@ bool IsASTCSupported() {
     return true;
 }
 
+/// @brief Returns true when a GL_RENDERER is a Turing GPU
+/// @param renderer GL_RENDERER string
+bool IsTuring(std::string_view renderer) {
+    static constexpr std::array TURING_GPUS = {
+        "GTX 1650", "GTX 1660", "RTX 2060", "RTX 2070",
+        "RTX 2080", "TITAN RTX", "Quadro RTX 3000", "Quadro RTX 4000",
+        "Quadro RTX 5000", "Quadro RTX 6000", "Quadro RTX 8000", "Tesla T4",
+    };
+    return std::any_of(TURING_GPUS.begin(), TURING_GPUS.end(),
+                       [renderer](std::string_view candidate) {
+                           return renderer.find(candidate) != std::string_view::npos;
+                       });
+}
+
 } // Anonymous namespace
 
 Device::Device()
     : max_uniform_buffers{BuildMaxUniformBuffers()}, base_bindings{BuildBaseBindings()} {
     const std::string_view vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
+    const std::string_view renderer = reinterpret_cast<const char*>(glGetString(GL_RENDERER));
     const std::string_view version = reinterpret_cast<const char*>(glGetString(GL_VERSION));
     const std::vector extensions = GetExtensions();
 
     const bool is_nvidia = vendor == "NVIDIA Corporation";
     const bool is_amd = vendor == "ATI Technologies Inc.";
+    const bool is_turing = is_nvidia && IsTuring(renderer);
 
     bool disable_fast_buffer_sub_data = false;
     if (is_nvidia && version == "4.6.0 NVIDIA 443.24") {
@@ -221,8 +237,16 @@ Device::Device()
     has_variable_aoffi = TestVariableAoffi();
     has_component_indexing_bug = is_amd;
     has_precise_bug = TestPreciseBug();
-    has_fast_buffer_sub_data = is_nvidia && !disable_fast_buffer_sub_data;
     has_nv_viewport_array2 = GLAD_GL_NV_viewport_array2;
+
+    // At the moment of writing this, only Nvidia's driver optimizes BufferSubData on exclusive
+    // uniform buffers as "push constants"
+    has_fast_buffer_sub_data = is_nvidia && !disable_fast_buffer_sub_data;
+
+    // Nvidia's driver on Turing GPUs randomly crashes when the buffer is made resident, or on
+    // DeleteBuffers. Disable unified memory on these devices.
+    has_vertex_buffer_unified_memory = GLAD_GL_NV_vertex_buffer_unified_memory && !is_turing;
+
     use_assembly_shaders = Settings::values.use_assembly_shaders && GLAD_GL_NV_gpu_program5 &&
                            GLAD_GL_NV_compute_program5 && GLAD_GL_NV_transform_feedback &&
                            GLAD_GL_NV_transform_feedback2;
--
cgit v1.2.3


From bc8d3b8f82c06e5d0b5a7c1640ef00b83e826dbf Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Thu, 25 Jun 2020 01:28:45 -0300
Subject: gl_device: Enable NV_vertex_buffer_unified_memory on Turing devices

Once we make sure not to corrupt Nvidia's driver, we can safely use
resident buffers on Turing devices.

See GitHub pull request #4156
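Illustrative aside, not part of the patch series: a typical consumer of the flag
exposed here would select between the unified-memory path and classic buffer
binding roughly as sketched below. The helper, the Device getter name and the
binding index are assumptions for illustration only, not code from this patch.

    // Sketch: use the raw GPU address when unified memory is available,
    // otherwise fall back to binding the buffer by name. On the unified path
    // the stride comes from the vertex format state, not from this call.
    void BindVertexBuffer(const Device& device, GLuint vao, GLuint buffer,
                          GLuint64EXT gpu_address, GLsizeiptr size, GLsizei stride) {
        if (device.HasVertexBufferUnifiedMemory()) {
            // In practice the client state is enabled once at initialization.
            glEnableClientState(GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV);
            glBufferAddressRangeNV(GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV, 0, gpu_address, size);
        } else {
            glVertexArrayVertexBuffer(vao, 0, buffer, 0, stride);
        }
    }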
---
 src/video_core/renderer_opengl/gl_device.cpp | 20 +-------------------
 1 file changed, 1 insertion(+), 19 deletions(-)

(limited to 'src/video_core/renderer_opengl/gl_device.cpp')

diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 447a19595..bb1375f82 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -188,20 +188,6 @@ bool IsASTCSupported() {
     return true;
 }
 
-/// @brief Returns true when a GL_RENDERER is a Turing GPU
-/// @param renderer GL_RENDERER string
-bool IsTuring(std::string_view renderer) {
-    static constexpr std::array TURING_GPUS = {
-        "GTX 1650", "GTX 1660", "RTX 2060", "RTX 2070",
-        "RTX 2080", "TITAN RTX", "Quadro RTX 3000", "Quadro RTX 4000",
-        "Quadro RTX 5000", "Quadro RTX 6000", "Quadro RTX 8000", "Tesla T4",
-    };
-    return std::any_of(TURING_GPUS.begin(), TURING_GPUS.end(),
-                       [renderer](std::string_view candidate) {
-                           return renderer.find(candidate) != std::string_view::npos;
-                       });
-}
-
 } // Anonymous namespace
 
 Device::Device()
@@ -213,7 +199,6 @@ Device::Device()
 
     const bool is_nvidia = vendor == "NVIDIA Corporation";
     const bool is_amd = vendor == "ATI Technologies Inc.";
-    const bool is_turing = is_nvidia && IsTuring(renderer);
 
     bool disable_fast_buffer_sub_data = false;
     if (is_nvidia && version == "4.6.0 NVIDIA 443.24") {
@@ -238,15 +223,12 @@ Device::Device()
     has_component_indexing_bug = is_amd;
     has_precise_bug = TestPreciseBug();
     has_nv_viewport_array2 = GLAD_GL_NV_viewport_array2;
+    has_vertex_buffer_unified_memory = GLAD_GL_NV_vertex_buffer_unified_memory;
 
     // At the moment of writing this, only Nvidia's driver optimizes BufferSubData on exclusive
     // uniform buffers as "push constants"
     has_fast_buffer_sub_data = is_nvidia && !disable_fast_buffer_sub_data;
 
-    // Nvidia's driver on Turing GPUs randomly crashes when the buffer is made resident, or on
-    // DeleteBuffers. Disable unified memory on these devices.
-    has_vertex_buffer_unified_memory = GLAD_GL_NV_vertex_buffer_unified_memory && !is_turing;
-
     use_assembly_shaders = Settings::values.use_assembly_shaders && GLAD_GL_NV_gpu_program5 &&
                            GLAD_GL_NV_compute_program5 && GLAD_GL_NV_transform_feedback &&
                            GLAD_GL_NV_transform_feedback2;
--
cgit v1.2.3


From a927d8be52c343bc1025e5df822c56470eb27919 Mon Sep 17 00:00:00 2001
From: David Marcec
Date: Thu, 25 Jun 2020 19:12:56 +1000
Subject: gl_device: Fix IsASTCSupported

Other targets were never actually checked
---
 src/video_core/renderer_opengl/gl_device.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/video_core/renderer_opengl/gl_device.cpp')

diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 447a19595..b6b6659c1 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -178,7 +178,7 @@ bool IsASTCSupported() {
         for (const GLenum format : formats) {
             for (const GLenum support : required_support) {
                 GLint value;
-                glGetInternalformativ(GL_TEXTURE_2D, format, support, 1, &value);
+                glGetInternalformativ(target, format, support, 1, &value);
                 if (value != GL_FULL_SUPPORT) {
                     return false;
                 }
--
cgit v1.2.3
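Illustrative aside, not part of the patch series: the query pattern that the last
fix repairs probes every target rather than only GL_TEXTURE_2D, roughly as
sketched below. The format and pname tables are trimmed to a few entries for
illustration and are not yuzu's exact lists.

    // Sketch: report ASTC as usable only when every probed target fully
    // supports the format for the queried shader stages. Passing the loop's
    // target instead of a hardcoded GL_TEXTURE_2D is the point of the fix.
    bool IsAstcFullySupported() {
        static constexpr GLenum targets[] = {GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY};
        static constexpr GLenum formats[] = {GL_COMPRESSED_RGBA_ASTC_4x4_KHR,
                                             GL_COMPRESSED_RGBA_ASTC_8x8_KHR};
        static constexpr GLenum queries[] = {GL_VERTEX_TEXTURE, GL_FRAGMENT_TEXTURE};
        for (const GLenum target : targets) {
            for (const GLenum format : formats) {
                for (const GLenum query : queries) {
                    GLint value = GL_NONE;
                    glGetInternalformativ(target, format, query, 1, &value);
                    if (value != GL_FULL_SUPPORT) {
                        return false;
                    }
                }
            }
        }
        return true;
    }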