From d31dbb1bc12505d503cb97b57518a3e48cb2da11 Mon Sep 17 00:00:00 2001
From: Kelebek1
Date: Wed, 24 Feb 2021 22:04:51 +0000
Subject: Implement glDepthRangeIndexeddNV

---
 src/video_core/renderer_opengl/gl_device.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 48d5c4a5e..1ae5f1d62 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -239,6 +239,7 @@ Device::Device() {
     has_nv_viewport_array2 = GLAD_GL_NV_viewport_array2;
     has_vertex_buffer_unified_memory = GLAD_GL_NV_vertex_buffer_unified_memory;
     has_debugging_tool_attached = IsDebugToolAttached(extensions);
+    has_depth_buffer_float = HasExtension(extensions, "GL_NV_depth_buffer_float");
 
     // At the moment of writing this, only Nvidia's driver optimizes BufferSubData on exclusive
     // uniform buffers as "push constants"
@@ -275,6 +276,7 @@ Device::Device(std::nullptr_t) {
     has_image_load_formatted = true;
     has_texture_shadow_lod = true;
     has_variable_aoffi = true;
+    has_depth_buffer_float = true;
 }
 
 bool Device::TestVariableAoffi() {
-- 
cgit v1.2.3
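
A minimal sketch of how a capability flag like has_depth_buffer_float is typically consumed on the rasterizer side, assuming the GL loader exposes the glDepthRangeIndexeddNV entry point named in the commit subject. The helper function, its name, and its parameters are illustrative only and are not part of this patch.

    #include <glad/glad.h>

    // Illustrative helper (not from this patch): use the unclamped NV depth
    // range call when GL_NV_depth_buffer_float was detected, otherwise fall
    // back to the core GL 4.1 call, which clamps near/far to [0, 1].
    void SetViewportDepthRange(bool has_depth_buffer_float, GLuint viewport_index,
                               GLdouble depth_near, GLdouble depth_far) {
        if (has_depth_buffer_float) {
            // Extension path: near/far are passed through without clamping.
            glDepthRangeIndexeddNV(viewport_index, depth_near, depth_far);
        } else {
            // Core fallback: the driver clamps the range to [0, 1].
            glDepthRangeIndexed(viewport_index, depth_near, depth_far);
        }
    }

Gating the call on a Device flag keeps the extension query in one place, which matches how the surrounding has_* members in gl_device.cpp are used.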