Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/CMakeLists.txt | 25
-rw-r--r--  src/video_core/dma_pusher.cpp | 57
-rw-r--r--  src/video_core/dma_pusher.h | 5
-rw-r--r--  src/video_core/engines/fermi_2d.cpp | 15
-rw-r--r--  src/video_core/engines/fermi_2d.h | 2
-rw-r--r--  src/video_core/engines/kepler_compute.cpp | 3
-rw-r--r--  src/video_core/engines/kepler_compute.h | 3
-rw-r--r--  src/video_core/engines/kepler_memory.cpp | 11
-rw-r--r--  src/video_core/engines/kepler_memory.h | 10
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 83
-rw-r--r--  src/video_core/engines/maxwell_3d.h | 26
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp | 16
-rw-r--r--  src/video_core/engines/maxwell_dma.h | 11
-rw-r--r--  src/video_core/engines/shader_bytecode.h | 32
-rw-r--r--  src/video_core/engines/shader_header.h | 41
-rw-r--r--  src/video_core/gpu.cpp | 11
-rw-r--r--  src/video_core/gpu.h | 69
-rw-r--r--  src/video_core/gpu_asynch.cpp | 37
-rw-r--r--  src/video_core/gpu_asynch.h | 37
-rw-r--r--  src/video_core/gpu_synch.cpp | 37
-rw-r--r--  src/video_core/gpu_synch.h | 29
-rw-r--r--  src/video_core/gpu_thread.cpp | 98
-rw-r--r--  src/video_core/gpu_thread.h | 185
-rw-r--r--  src/video_core/morton.cpp | 315
-rw-r--r--  src/video_core/morton.h | 6
-rw-r--r--  src/video_core/rasterizer_cache.h | 88
-rw-r--r--  src/video_core/rasterizer_interface.h | 12
-rw-r--r--  src/video_core/renderer_base.cpp | 1
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp | 24
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.h | 31
-rw-r--r--  src/video_core/renderer_opengl/gl_global_cache.cpp | 20
-rw-r--r--  src/video_core/renderer_opengl/gl_global_cache.h | 17
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 243
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 18
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | 334
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.h | 126
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 43
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h | 19
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 100
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_disk_cache.cpp | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_gen.cpp | 4
-rw-r--r--  src/video_core/renderer_opengl/gl_state.cpp | 148
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp | 39
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.h | 5
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 483
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.h | 58
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 123
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h | 104
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp | 13
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.cpp | 252
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.h | 87
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.cpp | 285
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.h | 180
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.cpp | 81
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.h | 56
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 60
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h | 69
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.cpp | 90
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.h | 72
-rw-r--r--  src/video_core/shader/decode.cpp | 1
-rw-r--r--  src/video_core/shader/decode/memory.cpp | 495
-rw-r--r--  src/video_core/shader/decode/other.cpp | 15
-rw-r--r--  src/video_core/shader/decode/texture.cpp | 534
-rw-r--r--  src/video_core/shader/shader_ir.h | 5
-rw-r--r--  src/video_core/shader/track.cpp | 10
-rw-r--r--  src/video_core/surface.cpp | 2
-rw-r--r--  src/video_core/textures/astc.cpp | 80
-rw-r--r--  src/video_core/textures/astc.h | 2
-rw-r--r--  src/video_core/textures/convert.cpp | 92
-rw-r--r--  src/video_core/textures/convert.h | 18
-rw-r--r--  src/video_core/textures/decoders.cpp | 6
-rw-r--r--  src/video_core/textures/decoders.h | 18
-rw-r--r--  src/video_core/textures/texture.h | 83
73 files changed, 4333 insertions(+), 1479 deletions(-)
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index d35a738d5..14b76680f 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -17,6 +17,12 @@ add_library(video_core STATIC
engines/shader_header.h
gpu.cpp
gpu.h
+ gpu_asynch.cpp
+ gpu_asynch.h
+ gpu_synch.cpp
+ gpu_synch.h
+ gpu_thread.cpp
+ gpu_thread.h
macro_interpreter.cpp
macro_interpreter.h
memory_manager.cpp
@@ -74,6 +80,7 @@ add_library(video_core STATIC
shader/decode/hfma2.cpp
shader/decode/conversion.cpp
shader/decode/memory.cpp
+ shader/decode/texture.cpp
shader/decode/float_set_predicate.cpp
shader/decode/integer_set_predicate.cpp
shader/decode/half_set_predicate.cpp
@@ -94,6 +101,8 @@ add_library(video_core STATIC
surface.h
textures/astc.cpp
textures/astc.h
+ textures/convert.cpp
+ textures/convert.h
textures/decoders.cpp
textures/decoders.h
textures/texture.h
@@ -104,8 +113,22 @@ add_library(video_core STATIC
if (ENABLE_VULKAN)
target_sources(video_core PRIVATE
renderer_vulkan/declarations.h
+ renderer_vulkan/maxwell_to_vk.cpp
+ renderer_vulkan/maxwell_to_vk.h
+ renderer_vulkan/vk_buffer_cache.cpp
+ renderer_vulkan/vk_buffer_cache.h
renderer_vulkan/vk_device.cpp
- renderer_vulkan/vk_device.h)
+ renderer_vulkan/vk_device.h
+ renderer_vulkan/vk_memory_manager.cpp
+ renderer_vulkan/vk_memory_manager.h
+ renderer_vulkan/vk_resource_manager.cpp
+ renderer_vulkan/vk_resource_manager.h
+ renderer_vulkan/vk_sampler_cache.cpp
+ renderer_vulkan/vk_sampler_cache.h
+ renderer_vulkan/vk_scheduler.cpp
+ renderer_vulkan/vk_scheduler.h
+ renderer_vulkan/vk_stream_buffer.cpp
+ renderer_vulkan/vk_stream_buffer.h)
target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include)
target_compile_definitions(video_core PRIVATE HAS_VULKAN)
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index eb9bf1878..bff1a37ff 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -33,18 +33,36 @@ void DmaPusher::DispatchCalls() {
}
bool DmaPusher::Step() {
- if (dma_get != dma_put) {
- // Push buffer non-empty, read a word
- const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get);
- ASSERT_MSG(address, "Invalid GPU address");
+ if (!ib_enable || dma_pushbuffer.empty()) {
+ // pushbuffer empty and IB empty or nonexistent - nothing to do
+ return false;
+ }
- const CommandHeader command_header{Memory::Read32(*address)};
+ const CommandList& command_list{dma_pushbuffer.front()};
+ const CommandListHeader command_list_header{command_list[dma_pushbuffer_subindex++]};
+ GPUVAddr dma_get = command_list_header.addr;
+ GPUVAddr dma_put = dma_get + command_list_header.size * sizeof(u32);
+ bool non_main = command_list_header.is_non_main;
- dma_get += sizeof(u32);
+ if (dma_pushbuffer_subindex >= command_list.size()) {
+ // We've gone through the current list, remove it from the queue
+ dma_pushbuffer.pop();
+ dma_pushbuffer_subindex = 0;
+ }
- if (!non_main) {
- dma_mget = dma_get;
- }
+ if (command_list_header.size == 0) {
+ return true;
+ }
+
+ // Push buffer non-empty, read a word
+ const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get);
+ ASSERT_MSG(address, "Invalid GPU address");
+
+ command_headers.resize(command_list_header.size);
+
+ Memory::ReadBlock(*address, command_headers.data(), command_list_header.size * sizeof(u32));
+
+ for (const CommandHeader& command_header : command_headers) {
// now, see if we're in the middle of a command
if (dma_state.length_pending) {
@@ -91,22 +109,11 @@ bool DmaPusher::Step() {
break;
}
}
- } else if (ib_enable && !dma_pushbuffer.empty()) {
- // Current pushbuffer empty, but we have more IB entries to read
- const CommandList& command_list{dma_pushbuffer.front()};
- const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]};
- dma_get = command_list_header.addr;
- dma_put = dma_get + command_list_header.size * sizeof(u32);
- non_main = command_list_header.is_non_main;
-
- if (dma_pushbuffer_subindex >= command_list.size()) {
- // We've gone through the current list, remove it from the queue
- dma_pushbuffer.pop();
- dma_pushbuffer_subindex = 0;
- }
- } else {
- // Otherwise, pushbuffer empty and IB empty or nonexistent - nothing to do
- return {};
+ }
+
+ if (!non_main) {
+ // TODO (degasus): This is dead code, as dma_mget is never read.
+ dma_mget = dma_put;
}
return true;
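
The rewritten Step() above replaces per-word guest reads with one bulk fetch of the whole command list. A minimal sketch of that batching pattern follows; the FetchCommands helper and its names are hypothetical stand-ins, not the emulator's real memory API:

    // Batched fetch: size a scratch vector once and copy the entire command
    // list with a single block read instead of one 32-bit read per word.
    #include <cstdint>
    #include <cstring>
    #include <vector>

    using u32 = std::uint32_t;

    std::vector<u32> FetchCommands(const u32* guest_ptr, std::size_t word_count) {
        std::vector<u32> command_headers(word_count);
        std::memcpy(command_headers.data(), guest_ptr, word_count * sizeof(u32));
        return command_headers; // iterate locally, as the loop above does
    }
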
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index 1097e5c49..27a36348c 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -75,6 +75,8 @@ private:
GPU& gpu;
+ std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
+
std::queue<CommandList> dma_pushbuffer; ///< Queue of command lists to be processed
std::size_t dma_pushbuffer_subindex{}; ///< Index within a command list within the pushbuffer
@@ -89,11 +91,8 @@ private:
DmaState dma_state{};
bool dma_increment_once{};
- GPUVAddr dma_put{}; ///< pushbuffer current end address
- GPUVAddr dma_get{}; ///< pushbuffer current read address
GPUVAddr dma_mget{}; ///< main pushbuffer last read address
bool ib_enable{true}; ///< IB mode enabled
- bool non_main{}; ///< non-main pushbuffer active
};
} // namespace Tegra
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index ec1a57226..03b7ee5d8 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -2,12 +2,11 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#include "core/core.h"
-#include "core/memory.h"
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "common/math_util.h"
#include "video_core/engines/fermi_2d.h"
-#include "video_core/engines/maxwell_3d.h"
#include "video_core/rasterizer_interface.h"
-#include "video_core/textures/decoders.h"
namespace Tegra::Engines {
@@ -44,10 +43,10 @@ void Fermi2D::HandleSurfaceCopy() {
const u32 src_blit_y2{
static_cast<u32>((regs.blit_src_y + (regs.blit_dst_height * regs.blit_dv_dy)) >> 32)};
- const MathUtil::Rectangle<u32> src_rect{src_blit_x1, src_blit_y1, src_blit_x2, src_blit_y2};
- const MathUtil::Rectangle<u32> dst_rect{regs.blit_dst_x, regs.blit_dst_y,
- regs.blit_dst_x + regs.blit_dst_width,
- regs.blit_dst_y + regs.blit_dst_height};
+ const Common::Rectangle<u32> src_rect{src_blit_x1, src_blit_y1, src_blit_x2, src_blit_y2};
+ const Common::Rectangle<u32> dst_rect{regs.blit_dst_x, regs.blit_dst_y,
+ regs.blit_dst_x + regs.blit_dst_width,
+ regs.blit_dst_y + regs.blit_dst_height};
if (!rasterizer.AccelerateSurfaceCopy(regs.src, regs.dst, src_rect, dst_rect)) {
UNIMPLEMENTED();
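
For context on the src_blit_* math above: the blit deltas (blit_du_dx, blit_dv_dy) are 32.32 fixed-point values, so the source coordinates come from 64-bit multiplies with a right shift by 32 recovering the integer part. A hedged, self-contained sketch with illustrative values:

    // 32.32 fixed-point: the high 32 bits hold the integer part.
    #include <cstdint>
    #include <iostream>

    int main() {
        const std::uint64_t blit_dv_dy = 0x180000000ULL; // 1.5 in 32.32 fixed point
        const std::uint64_t dst_height = 100;
        const std::uint32_t src_extent =
            static_cast<std::uint32_t>((dst_height * blit_dv_dy) >> 32);
        std::cout << src_extent << '\n'; // prints 150
        return 0;
    }
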
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index c69f74cc5..80523e320 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -5,7 +5,7 @@
#pragma once
#include <array>
-#include "common/assert.h"
+#include <cstddef>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 4ca856b6b..b1d950460 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -2,9 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/assert.h"
#include "common/logging/log.h"
-#include "core/core.h"
-#include "core/memory.h"
#include "video_core/engines/kepler_compute.h"
#include "video_core/memory_manager.h"
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index df0a32e0f..6575afd0f 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -5,8 +5,7 @@
#pragma once
#include <array>
-#include "common/assert.h"
-#include "common/bit_field.h"
+#include <cstddef>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/gpu.h"
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 5c1029ddf..daefa43a6 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -2,18 +2,20 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/memory.h"
#include "video_core/engines/kepler_memory.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/rasterizer_interface.h"
+#include "video_core/renderer_base.h"
namespace Tegra::Engines {
-KeplerMemory::KeplerMemory(VideoCore::RasterizerInterface& rasterizer,
+KeplerMemory::KeplerMemory(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
MemoryManager& memory_manager)
- : memory_manager(memory_manager), rasterizer{rasterizer} {}
+ : system{system}, memory_manager(memory_manager), rasterizer{rasterizer} {}
KeplerMemory::~KeplerMemory() = default;
@@ -47,10 +49,11 @@ void KeplerMemory::ProcessData(u32 data) {
// We have to invalidate the destination region to evict any outdated surfaces from the cache.
// We do this before actually writing the new data because the destination address might contain
// a dirty surface that will have to be written back to memory.
- rasterizer.InvalidateRegion(*dest_address, sizeof(u32));
+ system.Renderer().Rasterizer().InvalidateRegion(ToCacheAddr(Memory::GetPointer(*dest_address)),
+ sizeof(u32));
Memory::Write32(*dest_address, data);
- Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
+ system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
state.write_offset++;
}
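
The ordering in ProcessData above matters: the destination is invalidated before the write so a dirty cached surface is written back first and cannot later clobber the new data. A minimal sketch of that invalidate-then-write pattern; the Cache type here is a hypothetical stand-in for the rasterizer cache:

    #include <cstdint>
    #include <cstring>

    struct Cache {
        // Evicts (and flushes, if dirty) any entry overlapping [addr, addr + size).
        void InvalidateRegion(std::uintptr_t addr, std::uint64_t size) {}
    };

    void WriteGuestWord(Cache& cache, std::uint8_t* host_ptr, std::uint32_t value) {
        // Invalidate first, then write: no stale surface can survive the store.
        cache.InvalidateRegion(reinterpret_cast<std::uintptr_t>(host_ptr), sizeof(value));
        std::memcpy(host_ptr, &value, sizeof(value));
    }
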
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index fe9ebc5b9..9181e9d80 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -5,13 +5,17 @@
#pragma once
#include <array>
-#include "common/assert.h"
+#include <cstddef>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
+namespace Core {
+class System;
+}
+
namespace VideoCore {
class RasterizerInterface;
}
@@ -23,7 +27,8 @@ namespace Tegra::Engines {
class KeplerMemory final {
public:
- KeplerMemory(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
+ KeplerMemory(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ MemoryManager& memory_manager);
~KeplerMemory();
/// Write the value to the register identified by method.
@@ -76,6 +81,7 @@ public:
} state{};
private:
+ Core::System& system;
MemoryManager& memory_manager;
VideoCore::RasterizerInterface& rasterizer;
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 86ede5faa..49979694e 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -19,8 +19,10 @@ namespace Tegra::Engines {
/// First register id that is actually a Macro call.
constexpr u32 MacroRegistersStart = 0xE00;
-Maxwell3D::Maxwell3D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
- : memory_manager(memory_manager), rasterizer{rasterizer}, macro_interpreter(*this) {
+Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ MemoryManager& memory_manager)
+ : memory_manager(memory_manager), system{system}, rasterizer{rasterizer},
+ macro_interpreter(*this) {
InitializeRegisterDefaults();
}
@@ -103,23 +105,25 @@ void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) {
}
void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
- auto debug_context = Core::System::GetInstance().GetGPUDebugContext();
+ auto debug_context = system.GetGPUDebugContext();
+
+ const u32 method = method_call.method;
// It is an error to write to a register other than the current macro's ARG register before it
// has finished execution.
if (executing_macro != 0) {
- ASSERT(method_call.method == executing_macro + 1);
+ ASSERT(method == executing_macro + 1);
}
// Methods after 0xE00 are special, they're actually triggers for some microcode that was
// uploaded to the GPU during initialization.
- if (method_call.method >= MacroRegistersStart) {
+ if (method >= MacroRegistersStart) {
// We're trying to execute a macro
if (executing_macro == 0) {
// A macro call must begin by writing the macro method's register, not its argument.
- ASSERT_MSG((method_call.method % 2) == 0,
+ ASSERT_MSG((method % 2) == 0,
"Can't start macro execution by writing to the ARGS register");
- executing_macro = method_call.method;
+ executing_macro = method;
}
macro_params.push_back(method_call.argument);
@@ -131,66 +135,62 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
return;
}
- ASSERT_MSG(method_call.method < Regs::NUM_REGS,
+ ASSERT_MSG(method < Regs::NUM_REGS,
"Invalid Maxwell3D register, increase the size of the Regs structure");
if (debug_context) {
debug_context->OnEvent(Tegra::DebugContext::Event::MaxwellCommandLoaded, nullptr);
}
- if (regs.reg_array[method_call.method] != method_call.argument) {
- regs.reg_array[method_call.method] = method_call.argument;
+ if (regs.reg_array[method] != method_call.argument) {
+ regs.reg_array[method] = method_call.argument;
// Color buffers
constexpr u32 first_rt_reg = MAXWELL3D_REG_INDEX(rt);
constexpr u32 registers_per_rt = sizeof(regs.rt[0]) / sizeof(u32);
- if (method_call.method >= first_rt_reg &&
- method_call.method < first_rt_reg + registers_per_rt * Regs::NumRenderTargets) {
- const std::size_t rt_index = (method_call.method - first_rt_reg) / registers_per_rt;
- dirty_flags.color_buffer |= 1u << static_cast<u32>(rt_index);
+ if (method >= first_rt_reg &&
+ method < first_rt_reg + registers_per_rt * Regs::NumRenderTargets) {
+ const std::size_t rt_index = (method - first_rt_reg) / registers_per_rt;
+ dirty_flags.color_buffer.set(rt_index);
}
// Zeta buffer
constexpr u32 registers_in_zeta = sizeof(regs.zeta) / sizeof(u32);
- if (method_call.method == MAXWELL3D_REG_INDEX(zeta_enable) ||
- method_call.method == MAXWELL3D_REG_INDEX(zeta_width) ||
- method_call.method == MAXWELL3D_REG_INDEX(zeta_height) ||
- (method_call.method >= MAXWELL3D_REG_INDEX(zeta) &&
- method_call.method < MAXWELL3D_REG_INDEX(zeta) + registers_in_zeta)) {
+ if (method == MAXWELL3D_REG_INDEX(zeta_enable) ||
+ method == MAXWELL3D_REG_INDEX(zeta_width) ||
+ method == MAXWELL3D_REG_INDEX(zeta_height) ||
+ (method >= MAXWELL3D_REG_INDEX(zeta) &&
+ method < MAXWELL3D_REG_INDEX(zeta) + registers_in_zeta)) {
dirty_flags.zeta_buffer = true;
}
// Shader
constexpr u32 shader_registers_count =
sizeof(regs.shader_config[0]) * Regs::MaxShaderProgram / sizeof(u32);
- if (method_call.method >= MAXWELL3D_REG_INDEX(shader_config[0]) &&
- method_call.method < MAXWELL3D_REG_INDEX(shader_config[0]) + shader_registers_count) {
+ if (method >= MAXWELL3D_REG_INDEX(shader_config[0]) &&
+ method < MAXWELL3D_REG_INDEX(shader_config[0]) + shader_registers_count) {
dirty_flags.shaders = true;
}
// Vertex format
- if (method_call.method >= MAXWELL3D_REG_INDEX(vertex_attrib_format) &&
- method_call.method <
- MAXWELL3D_REG_INDEX(vertex_attrib_format) + regs.vertex_attrib_format.size()) {
+ if (method >= MAXWELL3D_REG_INDEX(vertex_attrib_format) &&
+ method < MAXWELL3D_REG_INDEX(vertex_attrib_format) + regs.vertex_attrib_format.size()) {
dirty_flags.vertex_attrib_format = true;
}
// Vertex buffer
- if (method_call.method >= MAXWELL3D_REG_INDEX(vertex_array) &&
- method_call.method < MAXWELL3D_REG_INDEX(vertex_array) + 4 * 32) {
- dirty_flags.vertex_array |=
- 1u << ((method_call.method - MAXWELL3D_REG_INDEX(vertex_array)) >> 2);
- } else if (method_call.method >= MAXWELL3D_REG_INDEX(vertex_array_limit) &&
- method_call.method < MAXWELL3D_REG_INDEX(vertex_array_limit) + 2 * 32) {
- dirty_flags.vertex_array |=
- 1u << ((method_call.method - MAXWELL3D_REG_INDEX(vertex_array_limit)) >> 1);
- } else if (method_call.method >= MAXWELL3D_REG_INDEX(instanced_arrays) &&
- method_call.method < MAXWELL3D_REG_INDEX(instanced_arrays) + 32) {
- dirty_flags.vertex_array |=
- 1u << (method_call.method - MAXWELL3D_REG_INDEX(instanced_arrays));
+ if (method >= MAXWELL3D_REG_INDEX(vertex_array) &&
+ method < MAXWELL3D_REG_INDEX(vertex_array) + 4 * 32) {
+ dirty_flags.vertex_array.set((method - MAXWELL3D_REG_INDEX(vertex_array)) >> 2);
+ } else if (method >= MAXWELL3D_REG_INDEX(vertex_array_limit) &&
+ method < MAXWELL3D_REG_INDEX(vertex_array_limit) + 2 * 32) {
+ dirty_flags.vertex_array.set((method - MAXWELL3D_REG_INDEX(vertex_array_limit)) >> 1);
+ } else if (method >= MAXWELL3D_REG_INDEX(instanced_arrays) &&
+ method < MAXWELL3D_REG_INDEX(instanced_arrays) + 32) {
+ dirty_flags.vertex_array.set(method - MAXWELL3D_REG_INDEX(instanced_arrays));
}
}
- switch (method_call.method) {
+ switch (method) {
case MAXWELL3D_REG_INDEX(macros.data): {
ProcessMacroUpload(method_call.argument);
break;
@@ -317,7 +317,7 @@ void Maxwell3D::ProcessQueryGet() {
LongQueryResult query_result{};
query_result.value = result;
// TODO(Subv): Generate a real GPU timestamp and write it here instead of CoreTiming
- query_result.timestamp = Core::System::GetInstance().CoreTiming().GetTicks();
+ query_result.timestamp = system.CoreTiming().GetTicks();
Memory::WriteBlock(*address, &query_result, sizeof(query_result));
}
dirty_flags.OnMemoryWrite();
@@ -334,7 +334,7 @@ void Maxwell3D::DrawArrays() {
regs.vertex_buffer.count);
ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
- auto debug_context = Core::System::GetInstance().GetGPUDebugContext();
+ auto debug_context = system.GetGPUDebugContext();
if (debug_context) {
debug_context->OnEvent(Tegra::DebugContext::Event::IncomingPrimitiveBatch, nullptr);
@@ -396,7 +396,10 @@ void Maxwell3D::ProcessCBData(u32 value) {
const auto address = memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);
ASSERT_MSG(address, "Invalid GPU address");
- Memory::Write32(*address, value);
+ u8* ptr{Memory::GetPointer(*address)};
+ rasterizer.InvalidateRegion(ToCacheAddr(ptr), sizeof(u32));
+ std::memcpy(ptr, &value, sizeof(u32));
+
dirty_flags.OnMemoryWrite();
// Increment the current buffer position.
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 1f76aa670..7fbf1026e 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -5,8 +5,10 @@
#pragma once
#include <array>
+#include <bitset>
#include <unordered_map>
#include <vector>
+
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/common_funcs.h"
@@ -17,6 +19,10 @@
#include "video_core/memory_manager.h"
#include "video_core/textures/texture.h"
+namespace Core {
+class System;
+}
+
namespace VideoCore {
class RasterizerInterface;
}
@@ -28,7 +34,8 @@ namespace Tegra::Engines {
class Maxwell3D final {
public:
- explicit Maxwell3D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
+ explicit Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ MemoryManager& memory_manager);
~Maxwell3D() = default;
/// Register structure of the Maxwell3D engine.
@@ -498,7 +505,7 @@ public:
f32 translate_z;
INSERT_PADDING_WORDS(2);
- MathUtil::Rectangle<s32> GetRect() const {
+ Common::Rectangle<s32> GetRect() const {
return {
GetX(), // left
GetY() + GetHeight(), // top
@@ -1089,19 +1096,18 @@ public:
MemoryManager& memory_manager;
struct DirtyFlags {
- u8 color_buffer = 0xFF;
- bool zeta_buffer = true;
-
- bool shaders = true;
+ std::bitset<8> color_buffer{0xFF};
+ std::bitset<32> vertex_array{0xFFFFFFFF};
bool vertex_attrib_format = true;
- u32 vertex_array = 0xFFFFFFFF;
+ bool zeta_buffer = true;
+ bool shaders = true;
void OnMemoryWrite() {
- color_buffer = 0xFF;
zeta_buffer = true;
shaders = true;
- vertex_array = 0xFFFFFFFF;
+ color_buffer.set();
+ vertex_array.set();
}
};
@@ -1131,6 +1137,8 @@ public:
private:
void InitializeRegisterDefaults();
+ Core::System& system;
+
VideoCore::RasterizerInterface& rasterizer;
/// Start offsets of each macro in macro_memory
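
The DirtyFlags change above swaps raw u8/u32 masks for std::bitset, which encodes the width in the type and replaces manual shift-and-or with set()/test(). A small self-contained sketch of the same usage:

    #include <bitset>
    #include <cstddef>
    #include <iostream>

    int main() {
        std::bitset<8> color_buffer{0xFF}; // all 8 render targets start dirty
        color_buffer.reset();              // everything re-configured
        color_buffer.set(3);               // a register write touched RT 3
        for (std::size_t rt = 0; rt < color_buffer.size(); ++rt) {
            if (color_buffer.test(rt)) {
                std::cout << "re-configure render target " << rt << '\n';
            }
        }
        return 0;
    }
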
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index d6c41a5ae..415a6319a 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -2,17 +2,21 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/assert.h"
+#include "common/logging/log.h"
#include "core/core.h"
#include "core/memory.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/rasterizer_interface.h"
+#include "video_core/renderer_base.h"
#include "video_core/textures/decoders.h"
namespace Tegra::Engines {
-MaxwellDMA::MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
- : memory_manager(memory_manager), rasterizer{rasterizer} {}
+MaxwellDMA::MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ MemoryManager& memory_manager)
+ : memory_manager(memory_manager), system{system}, rasterizer{rasterizer} {}
void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
ASSERT_MSG(method_call.method < Regs::NUM_REGS,
@@ -59,7 +63,7 @@ void MaxwellDMA::HandleCopy() {
}
// All copies here update the main memory, so mark all rasterizer states as invalid.
- Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
+ system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite();
if (regs.exec.is_dst_linear && regs.exec.is_src_linear) {
// When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D
@@ -89,12 +93,14 @@ void MaxwellDMA::HandleCopy() {
const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) {
// TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
// copying.
- rasterizer.FlushRegion(*source_cpu, src_size);
+ Core::System::GetInstance().Renderer().Rasterizer().FlushRegion(
+ ToCacheAddr(Memory::GetPointer(*source_cpu)), src_size);
// We have to invalidate the destination region to evict any outdated surfaces from the
// cache. We do this before actually writing the new data because the destination address
// might contain a dirty surface that will have to be written back to memory.
- rasterizer.InvalidateRegion(*dest_cpu, dst_size);
+ Core::System::GetInstance().Renderer().Rasterizer().InvalidateRegion(
+ ToCacheAddr(Memory::GetPointer(*dest_cpu)), dst_size);
};
if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 1f8cd65d2..34c369320 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -5,13 +5,17 @@
#pragma once
#include <array>
-#include "common/assert.h"
+#include <cstddef>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
+namespace Core {
+class System;
+}
+
namespace VideoCore {
class RasterizerInterface;
}
@@ -20,7 +24,8 @@ namespace Tegra::Engines {
class MaxwellDMA final {
public:
- explicit MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
+ explicit MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+ MemoryManager& memory_manager);
~MaxwellDMA() = default;
/// Write the value to the register identified by method.
@@ -137,6 +142,8 @@ public:
MemoryManager& memory_manager;
private:
+ Core::System& system;
+
VideoCore::RasterizerInterface& rasterizer;
/// Performs the copy from the source buffer to the destination buffer as configured in the
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 1f425f90b..7f613370b 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -6,7 +6,6 @@
#include <bitset>
#include <optional>
-#include <string>
#include <tuple>
#include <vector>
@@ -325,11 +324,11 @@ enum class TextureQueryType : u64 {
enum class TextureProcessMode : u64 {
None = 0,
- LZ = 1, // Unknown, appears to be the same as none.
+ LZ = 1, // Load LOD of zero.
LB = 2, // Load Bias.
- LL = 3, // Load LOD (LevelOfDetail)
- LBA = 6, // Load Bias. The A is unknown, does not appear to differ with LB
- LLA = 7 // Load LOD. The A is unknown, does not appear to differ with LL
+ LL = 3, // Load LOD.
+ LBA = 6, // Load Bias. The A is unknown, does not appear to differ with LB.
+ LLA = 7 // Load LOD. The A is unknown, does not appear to differ with LL.
};
enum class TextureMiscMode : u64 {
@@ -376,9 +375,9 @@ enum class R2pMode : u64 {
};
enum class IpaInterpMode : u64 {
- Linear = 0,
- Perspective = 1,
- Flat = 2,
+ Pass = 0,
+ Multiply = 1,
+ Constant = 2,
Sc = 3,
};
@@ -1446,6 +1445,7 @@ public:
Flow,
Synch,
Memory,
+ Texture,
FloatSet,
FloatSetPredicate,
IntegerSet,
@@ -1576,14 +1576,14 @@ private:
INST("1110111101010---", Id::ST_L, Type::Memory, "ST_L"),
INST("1110111011010---", Id::LDG, Type::Memory, "LDG"),
INST("1110111011011---", Id::STG, Type::Memory, "STG"),
- INST("110000----111---", Id::TEX, Type::Memory, "TEX"),
- INST("1101111101001---", Id::TXQ, Type::Memory, "TXQ"),
- INST("1101-00---------", Id::TEXS, Type::Memory, "TEXS"),
- INST("1101101---------", Id::TLDS, Type::Memory, "TLDS"),
- INST("110010----111---", Id::TLD4, Type::Memory, "TLD4"),
- INST("1101111100------", Id::TLD4S, Type::Memory, "TLD4S"),
- INST("110111110110----", Id::TMML_B, Type::Memory, "TMML_B"),
- INST("1101111101011---", Id::TMML, Type::Memory, "TMML"),
+ INST("110000----111---", Id::TEX, Type::Texture, "TEX"),
+ INST("1101111101001---", Id::TXQ, Type::Texture, "TXQ"),
+ INST("1101-00---------", Id::TEXS, Type::Texture, "TEXS"),
+ INST("1101101---------", Id::TLDS, Type::Texture, "TLDS"),
+ INST("110010----111---", Id::TLD4, Type::Texture, "TLD4"),
+ INST("1101111100------", Id::TLD4S, Type::Texture, "TLD4S"),
+ INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"),
+ INST("1101111101011---", Id::TMML, Type::Texture, "TMML"),
INST("111000110000----", Id::EXIT, Type::Trivial, "EXIT"),
INST("11100000--------", Id::IPA, Type::Trivial, "IPA"),
INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
diff --git a/src/video_core/engines/shader_header.h b/src/video_core/engines/shader_header.h
index cf2b76ff6..e86a7f04a 100644
--- a/src/video_core/engines/shader_header.h
+++ b/src/video_core/engines/shader_header.h
@@ -16,6 +16,13 @@ enum class OutputTopology : u32 {
TriangleStrip = 7,
};
+enum class AttributeUse : u8 {
+ Unused = 0,
+ Constant = 1,
+ Perspective = 2,
+ ScreenLinear = 3,
+};
+
// Documentation in:
// http://download.nvidia.com/open-gpu-doc/Shader-Program-Header/1/Shader-Program-Header.html#ImapTexture
struct Header {
@@ -84,9 +91,15 @@ struct Header {
} vtg;
struct {
- INSERT_PADDING_BYTES(3); // ImapSystemValuesA
- INSERT_PADDING_BYTES(1); // ImapSystemValuesB
- INSERT_PADDING_BYTES(32); // ImapGenericVector[32]
+ INSERT_PADDING_BYTES(3); // ImapSystemValuesA
+ INSERT_PADDING_BYTES(1); // ImapSystemValuesB
+ union {
+ BitField<0, 2, AttributeUse> x;
+ BitField<2, 2, AttributeUse> y;
+ BitField<4, 2, AttributeUse> w;
+ BitField<6, 2, AttributeUse> z;
+ u8 raw;
+ } imap_generic_vector[32];
INSERT_PADDING_BYTES(2); // ImapColor
INSERT_PADDING_BYTES(2); // ImapSystemValuesC
INSERT_PADDING_BYTES(10); // ImapFixedFncTexture[10]
@@ -103,6 +116,28 @@ struct Header {
const u32 bit = render_target * 4 + component;
return omap.target & (1 << bit);
}
+ AttributeUse GetAttributeIndexUse(u32 attribute, u32 index) const {
+ return static_cast<AttributeUse>(
+ (imap_generic_vector[attribute].raw >> (index * 2)) & 0x03);
+ }
+ AttributeUse GetAttributeUse(u32 attribute) const {
+ AttributeUse result = AttributeUse::Unused;
+ for (u32 i = 0; i < 4; i++) {
+ const auto index = GetAttributeIndexUse(attribute, i);
+ if (index == AttributeUse::Unused) {
+ continue;
+ }
+ if (result == AttributeUse::Unused || result == index) {
+ result = index;
+ continue;
+ }
+ LOG_CRITICAL(HW_GPU, "Generic Attribute Conflict in Interpolation Mode");
+ if (index == AttributeUse::Perspective) {
+ result = index;
+ }
+ }
+ return result;
+ }
} ps;
};
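
GetAttributeIndexUse above extracts one of four 2-bit AttributeUse values packed into each byte of imap_generic_vector (component i lives at bit offset 2 * i). A minimal standalone sketch of that unpacking, with an illustrative raw byte:

    #include <cstdint>
    #include <iostream>

    enum class AttributeUse : std::uint8_t {
        Unused = 0, Constant = 1, Perspective = 2, ScreenLinear = 3,
    };

    AttributeUse GetComponentUse(std::uint8_t raw, unsigned component) {
        return static_cast<AttributeUse>((raw >> (component * 2)) & 0x03);
    }

    int main() {
        const std::uint8_t raw = 0b00001010; // components 0 and 1 are Perspective
        for (unsigned i = 0; i < 4; ++i) {
            std::cout << "component " << i << " -> "
                      << static_cast<int>(GetComponentUse(raw, i)) << '\n';
        }
        return 0;
    }
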
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index b86265dfe..08abf8ac9 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -12,7 +12,7 @@
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/gpu.h"
-#include "video_core/rasterizer_interface.h"
+#include "video_core/renderer_base.h"
namespace Tegra {
@@ -28,14 +28,15 @@ u32 FramebufferConfig::BytesPerPixel(PixelFormat format) {
UNREACHABLE();
}
-GPU::GPU(VideoCore::RasterizerInterface& rasterizer) {
+GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer) : renderer{renderer} {
+ auto& rasterizer{renderer.Rasterizer()};
memory_manager = std::make_unique<Tegra::MemoryManager>();
dma_pusher = std::make_unique<Tegra::DmaPusher>(*this);
- maxwell_3d = std::make_unique<Engines::Maxwell3D>(rasterizer, *memory_manager);
+ maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager);
fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager);
kepler_compute = std::make_unique<Engines::KeplerCompute>(*memory_manager);
- maxwell_dma = std::make_unique<Engines::MaxwellDMA>(rasterizer, *memory_manager);
- kepler_memory = std::make_unique<Engines::KeplerMemory>(rasterizer, *memory_manager);
+ maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, rasterizer, *memory_manager);
+ kepler_memory = std::make_unique<Engines::KeplerMemory>(system, rasterizer, *memory_manager);
}
GPU::~GPU() = default;
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index a482196ea..a14b95c30 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -6,16 +6,24 @@
#include <array>
#include <memory>
-#include <vector>
#include "common/common_types.h"
#include "core/hle/service/nvflinger/buffer_queue.h"
#include "video_core/dma_pusher.h"
#include "video_core/memory_manager.h"
-namespace VideoCore {
-class RasterizerInterface;
+using CacheAddr = std::uintptr_t;
+inline CacheAddr ToCacheAddr(const void* host_ptr) {
+ return reinterpret_cast<CacheAddr>(host_ptr);
+}
+
+namespace Core {
+class System;
}
+namespace VideoCore {
+class RendererBase;
+} // namespace VideoCore
+
namespace Tegra {
enum class RenderTargetFormat : u32 {
@@ -97,7 +105,7 @@ struct FramebufferConfig {
using TransformFlags = Service::NVFlinger::BufferQueue::BufferTransformFlags;
TransformFlags transform_flags;
- MathUtil::Rectangle<int> crop_rect;
+ Common::Rectangle<int> crop_rect;
};
namespace Engines {
@@ -116,10 +124,11 @@ enum class EngineID {
MAXWELL_DMA_COPY_A = 0xB0B5,
};
-class GPU final {
+class GPU {
public:
- explicit GPU(VideoCore::RasterizerInterface& rasterizer);
- ~GPU();
+ explicit GPU(Core::System& system, VideoCore::RendererBase& renderer);
+
+ virtual ~GPU();
struct MethodCall {
u32 method{};
@@ -197,8 +206,42 @@ public:
};
} regs{};
+ /// Push GPU command entries to be processed
+ virtual void PushGPUEntries(Tegra::CommandList&& entries) = 0;
+
+ /// Swap buffers (render frame)
+ virtual void SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) = 0;
+
+ /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
+ virtual void FlushRegion(CacheAddr addr, u64 size) = 0;
+
+ /// Notify rasterizer that any caches of the specified region should be invalidated
+ virtual void InvalidateRegion(CacheAddr addr, u64 size) = 0;
+
+ /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
+ virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0;
+
private:
+ void ProcessBindMethod(const MethodCall& method_call);
+ void ProcessSemaphoreTriggerMethod();
+ void ProcessSemaphoreRelease();
+ void ProcessSemaphoreAcquire();
+
+ /// Calls a GPU puller method.
+ void CallPullerMethod(const MethodCall& method_call);
+
+ /// Calls a GPU engine method.
+ void CallEngineMethod(const MethodCall& method_call);
+
+ /// Determines where the method should be executed.
+ bool ExecuteMethodOnEngine(const MethodCall& method_call);
+
+protected:
std::unique_ptr<Tegra::DmaPusher> dma_pusher;
+ VideoCore::RendererBase& renderer;
+
+private:
std::unique_ptr<Tegra::MemoryManager> memory_manager;
/// Mapping of command subchannels to their bound engine ids.
@@ -214,18 +257,6 @@ private:
std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
/// Inline memory engine
std::unique_ptr<Engines::KeplerMemory> kepler_memory;
-
- void ProcessBindMethod(const MethodCall& method_call);
- void ProcessSemaphoreTriggerMethod();
- void ProcessSemaphoreRelease();
- void ProcessSemaphoreAcquire();
-
- // Calls a GPU puller method.
- void CallPullerMethod(const MethodCall& method_call);
- // Calls a GPU engine method.
- void CallEngineMethod(const MethodCall& method_call);
- // Determines where the method should be executed.
- bool ExecuteMethodOnEngine(const MethodCall& method_call);
};
#define ASSERT_REG_POSITION(field_name, position) \
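
The new CacheAddr alias above re-keys the caches by host pointer instead of guest CPU address: convert once with ToCacheAddr, then every lookup is a plain integer compare. A small sketch of the idea; the surface_ids map is illustrative, not the real cache:

    #include <cstdint>
    #include <unordered_map>

    using CacheAddr = std::uintptr_t;

    inline CacheAddr ToCacheAddr(const void* host_ptr) {
        return reinterpret_cast<CacheAddr>(host_ptr);
    }

    int main() {
        std::unordered_map<CacheAddr, int> surface_ids; // hypothetical cache
        unsigned char guest_backing[64]{};              // stand-in for emulated RAM
        surface_ids[ToCacheAddr(guest_backing)] = 1;
        return surface_ids.count(ToCacheAddr(guest_backing)) ? 0 : 1;
    }
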
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
new file mode 100644
index 000000000..8b355cf7b
--- /dev/null
+++ b/src/video_core/gpu_asynch.cpp
@@ -0,0 +1,37 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "video_core/gpu_asynch.h"
+#include "video_core/gpu_thread.h"
+#include "video_core/renderer_base.h"
+
+namespace VideoCommon {
+
+GPUAsynch::GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer)
+ : Tegra::GPU(system, renderer), gpu_thread{renderer, *dma_pusher} {}
+
+GPUAsynch::~GPUAsynch() = default;
+
+void GPUAsynch::PushGPUEntries(Tegra::CommandList&& entries) {
+ gpu_thread.SubmitList(std::move(entries));
+}
+
+void GPUAsynch::SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
+ gpu_thread.SwapBuffers(std::move(framebuffer));
+}
+
+void GPUAsynch::FlushRegion(CacheAddr addr, u64 size) {
+ gpu_thread.FlushRegion(addr, size);
+}
+
+void GPUAsynch::InvalidateRegion(CacheAddr addr, u64 size) {
+ gpu_thread.InvalidateRegion(addr, size);
+}
+
+void GPUAsynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
+ gpu_thread.FlushAndInvalidateRegion(addr, size);
+}
+
+} // namespace VideoCommon
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h
new file mode 100644
index 000000000..1dcc61a6c
--- /dev/null
+++ b/src/video_core/gpu_asynch.h
@@ -0,0 +1,37 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "video_core/gpu.h"
+#include "video_core/gpu_thread.h"
+
+namespace VideoCore {
+class RendererBase;
+} // namespace VideoCore
+
+namespace VideoCommon {
+
+namespace GPUThread {
+class ThreadManager;
+} // namespace GPUThread
+
+/// Implementation of GPU interface that runs the GPU asynchronously
+class GPUAsynch : public Tegra::GPU {
+public:
+ explicit GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer);
+ ~GPUAsynch() override;
+
+ void PushGPUEntries(Tegra::CommandList&& entries) override;
+ void SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
+ void FlushRegion(CacheAddr addr, u64 size) override;
+ void InvalidateRegion(CacheAddr addr, u64 size) override;
+ void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+
+private:
+ GPUThread::ThreadManager gpu_thread;
+};
+
+} // namespace VideoCommon
diff --git a/src/video_core/gpu_synch.cpp b/src/video_core/gpu_synch.cpp
new file mode 100644
index 000000000..2cfc900ed
--- /dev/null
+++ b/src/video_core/gpu_synch.cpp
@@ -0,0 +1,37 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "video_core/gpu_synch.h"
+#include "video_core/renderer_base.h"
+
+namespace VideoCommon {
+
+GPUSynch::GPUSynch(Core::System& system, VideoCore::RendererBase& renderer)
+ : Tegra::GPU(system, renderer) {}
+
+GPUSynch::~GPUSynch() = default;
+
+void GPUSynch::PushGPUEntries(Tegra::CommandList&& entries) {
+ dma_pusher->Push(std::move(entries));
+ dma_pusher->DispatchCalls();
+}
+
+void GPUSynch::SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
+ renderer.SwapBuffers(std::move(framebuffer));
+}
+
+void GPUSynch::FlushRegion(CacheAddr addr, u64 size) {
+ renderer.Rasterizer().FlushRegion(addr, size);
+}
+
+void GPUSynch::InvalidateRegion(CacheAddr addr, u64 size) {
+ renderer.Rasterizer().InvalidateRegion(addr, size);
+}
+
+void GPUSynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
+ renderer.Rasterizer().FlushAndInvalidateRegion(addr, size);
+}
+
+} // namespace VideoCommon
diff --git a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h
new file mode 100644
index 000000000..766b5631c
--- /dev/null
+++ b/src/video_core/gpu_synch.h
@@ -0,0 +1,29 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "video_core/gpu.h"
+
+namespace VideoCore {
+class RendererBase;
+} // namespace VideoCore
+
+namespace VideoCommon {
+
+/// Implementation of GPU interface that runs the GPU synchronously
+class GPUSynch : public Tegra::GPU {
+public:
+ explicit GPUSynch(Core::System& system, VideoCore::RendererBase& renderer);
+ ~GPUSynch() override;
+
+ void PushGPUEntries(Tegra::CommandList&& entries) override;
+ void SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) override;
+ void FlushRegion(CacheAddr addr, u64 size) override;
+ void InvalidateRegion(CacheAddr addr, u64 size) override;
+ void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+};
+
+} // namespace VideoCommon
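
Both subclasses sit behind the common Tegra::GPU interface, so a frontend can pick one at construction time. A hedged sketch of that selection; the MakeGPU helper and the use_asynchronous_gpu flag are illustrative, not the emulator's actual wiring:

    #include <memory>

    #include "video_core/gpu_asynch.h"
    #include "video_core/gpu_synch.h"

    std::unique_ptr<Tegra::GPU> MakeGPU(Core::System& system,
                                        VideoCore::RendererBase& renderer,
                                        bool use_asynchronous_gpu) {
        if (use_asynchronous_gpu) {
            return std::make_unique<VideoCommon::GPUAsynch>(system, renderer);
        }
        return std::make_unique<VideoCommon::GPUSynch>(system, renderer);
    }
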
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
new file mode 100644
index 000000000..086b2f625
--- /dev/null
+++ b/src/video_core/gpu_thread.cpp
@@ -0,0 +1,98 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/microprofile.h"
+#include "core/frontend/scope_acquire_window_context.h"
+#include "video_core/dma_pusher.h"
+#include "video_core/gpu.h"
+#include "video_core/gpu_thread.h"
+#include "video_core/renderer_base.h"
+
+namespace VideoCommon::GPUThread {
+
+/// Runs the GPU thread
+static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher,
+ SynchState& state) {
+ MicroProfileOnThreadCreate("GpuThread");
+
+ // Wait for first GPU command before acquiring the window context
+ state.WaitForCommands();
+
+ // If emulation was stopped during disk shader loading, abort before trying to acquire context
+ if (!state.is_running) {
+ return;
+ }
+
+ Core::Frontend::ScopeAcquireWindowContext acquire_context{renderer.GetRenderWindow()};
+
+ CommandDataContainer next;
+ while (state.is_running) {
+ state.WaitForCommands();
+ while (!state.queue.Empty()) {
+ state.queue.Pop(next);
+ if (const auto submit_list = std::get_if<SubmitListCommand>(&next.data)) {
+ dma_pusher.Push(std::move(submit_list->entries));
+ dma_pusher.DispatchCalls();
+ } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) {
+ state.DecrementFramesCounter();
+ renderer.SwapBuffers(std::move(data->framebuffer));
+ } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) {
+ renderer.Rasterizer().FlushRegion(data->addr, data->size);
+ } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) {
+ renderer.Rasterizer().InvalidateRegion(data->addr, data->size);
+ } else if (const auto data = std::get_if<EndProcessingCommand>(&next.data)) {
+ return;
+ } else {
+ UNREACHABLE();
+ }
+ }
+ }
+}
+
+ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher)
+ : renderer{renderer}, dma_pusher{dma_pusher}, thread{RunThread, std::ref(renderer),
+ std::ref(dma_pusher), std::ref(state)} {}
+
+ThreadManager::~ThreadManager() {
+ // Notify GPU thread that a shutdown is pending
+ PushCommand(EndProcessingCommand());
+ thread.join();
+}
+
+void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
+ PushCommand(SubmitListCommand(std::move(entries)));
+}
+
+void ThreadManager::SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer) {
+ state.IncrementFramesCounter();
+ PushCommand(SwapBuffersCommand(std::move(framebuffer)));
+ state.WaitForFrames();
+}
+
+void ThreadManager::FlushRegion(CacheAddr addr, u64 size) {
+ PushCommand(FlushRegionCommand(addr, size));
+}
+
+void ThreadManager::InvalidateRegion(CacheAddr addr, u64 size) {
+ if (state.queue.Empty()) {
+ // It's quicker to invalidate a single region on the CPU if the queue is already empty
+ renderer.Rasterizer().InvalidateRegion(addr, size);
+ } else {
+ PushCommand(InvalidateRegionCommand(addr, size));
+ }
+}
+
+void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
+ // Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important
+ InvalidateRegion(addr, size);
+}
+
+void ThreadManager::PushCommand(CommandData&& command_data) {
+ state.queue.Push(CommandDataContainer(std::move(command_data)));
+ state.SignalCommands();
+}
+
+} // namespace VideoCommon::GPUThread
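
RunThread above dispatches on a std::variant via std::get_if, one branch per command type. A self-contained sketch of the same pattern with simplified stand-in commands:

    #include <cstdint>
    #include <iostream>
    #include <variant>

    struct Flush { std::uintptr_t addr; std::uint64_t size; };
    struct Stop {};
    using Command = std::variant<Flush, Stop>;

    // Returns false when the consumer should shut down.
    bool Execute(const Command& command) {
        if (const auto* flush = std::get_if<Flush>(&command)) {
            std::cout << "flush " << flush->size << " bytes\n";
            return true;
        }
        return std::get_if<Stop>(&command) == nullptr;
    }

    int main() {
        Execute(Command{Flush{0x1000, 64}});
        return Execute(Command{Stop{}}) ? 1 : 0; // Stop -> false
    }
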
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
new file mode 100644
index 000000000..8cd7db1c6
--- /dev/null
+++ b/src/video_core/gpu_thread.h
@@ -0,0 +1,185 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <atomic>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <thread>
+#include <variant>
+
+#include "common/threadsafe_queue.h"
+#include "video_core/gpu.h"
+
+namespace Tegra {
+struct FramebufferConfig;
+class DmaPusher;
+} // namespace Tegra
+
+namespace VideoCore {
+class RendererBase;
+} // namespace VideoCore
+
+namespace VideoCommon::GPUThread {
+
+/// Command to signal to the GPU thread that processing has ended
+struct EndProcessingCommand final {};
+
+/// Command to signal to the GPU thread that a command list is ready for processing
+struct SubmitListCommand final {
+ explicit SubmitListCommand(Tegra::CommandList&& entries) : entries{std::move(entries)} {}
+
+ Tegra::CommandList entries;
+};
+
+/// Command to signal to the GPU thread that a swap buffers is pending
+struct SwapBuffersCommand final {
+ explicit SwapBuffersCommand(std::optional<const Tegra::FramebufferConfig> framebuffer)
+ : framebuffer{std::move(framebuffer)} {}
+
+ std::optional<Tegra::FramebufferConfig> framebuffer;
+};
+
+/// Command to signal to the GPU thread to flush a region
+struct FlushRegionCommand final {
+ explicit constexpr FlushRegionCommand(CacheAddr addr, u64 size) : addr{addr}, size{size} {}
+
+ CacheAddr addr;
+ u64 size;
+};
+
+/// Command to signal to the GPU thread to invalidate a region
+struct InvalidateRegionCommand final {
+ explicit constexpr InvalidateRegionCommand(CacheAddr addr, u64 size) : addr{addr}, size{size} {}
+
+ CacheAddr addr;
+ u64 size;
+};
+
+/// Command to signal to the GPU thread to flush and invalidate a region
+struct FlushAndInvalidateRegionCommand final {
+ explicit constexpr FlushAndInvalidateRegionCommand(CacheAddr addr, u64 size)
+ : addr{addr}, size{size} {}
+
+ CacheAddr addr;
+ u64 size;
+};
+
+using CommandData =
+ std::variant<EndProcessingCommand, SubmitListCommand, SwapBuffersCommand, FlushRegionCommand,
+ InvalidateRegionCommand, FlushAndInvalidateRegionCommand>;
+
+struct CommandDataContainer {
+ CommandDataContainer() = default;
+
+ CommandDataContainer(CommandData&& data) : data{std::move(data)} {}
+
+ CommandDataContainer& operator=(const CommandDataContainer& t) {
+ data = std::move(t.data);
+ return *this;
+ }
+
+ CommandData data;
+};
+
+/// Struct used to synchronize the GPU thread
+struct SynchState final {
+ std::atomic_bool is_running{true};
+ std::atomic_int queued_frame_count{};
+ std::mutex frames_mutex;
+ std::mutex commands_mutex;
+ std::condition_variable commands_condition;
+ std::condition_variable frames_condition;
+
+ void IncrementFramesCounter() {
+ std::lock_guard<std::mutex> lock{frames_mutex};
+ ++queued_frame_count;
+ }
+
+ void DecrementFramesCounter() {
+ {
+ std::lock_guard<std::mutex> lock{frames_mutex};
+ --queued_frame_count;
+
+ if (queued_frame_count) {
+ return;
+ }
+ }
+ frames_condition.notify_one();
+ }
+
+ void WaitForFrames() {
+ {
+ std::lock_guard<std::mutex> lock{frames_mutex};
+ if (!queued_frame_count) {
+ return;
+ }
+ }
+
+ // Wait for the GPU to be idle (all commands to be executed)
+ {
+ std::unique_lock<std::mutex> lock{frames_mutex};
+ frames_condition.wait(lock, [this] { return !queued_frame_count; });
+ }
+ }
+
+ void SignalCommands() {
+ {
+ std::unique_lock<std::mutex> lock{commands_mutex};
+ if (queue.Empty()) {
+ return;
+ }
+ }
+
+ commands_condition.notify_one();
+ }
+
+ void WaitForCommands() {
+ std::unique_lock<std::mutex> lock{commands_mutex};
+ commands_condition.wait(lock, [this] { return !queue.Empty(); });
+ }
+
+ using CommandQueue = Common::SPSCQueue<CommandDataContainer>;
+ CommandQueue queue;
+};
+
+/// Class used to manage the GPU thread
+class ThreadManager final {
+public:
+ explicit ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher);
+ ~ThreadManager();
+
+ /// Push GPU command entries to be processed
+ void SubmitList(Tegra::CommandList&& entries);
+
+ /// Swap buffers (render frame)
+ void SwapBuffers(
+ std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer);
+
+ /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
+ void FlushRegion(CacheAddr addr, u64 size);
+
+ /// Notify rasterizer that any caches of the specified region should be invalidated
+ void InvalidateRegion(CacheAddr addr, u64 size);
+
+ /// Notify rasterizer that any caches of the specified region should be flushed and invalidated
+ void FlushAndInvalidateRegion(CacheAddr addr, u64 size);
+
+private:
+ /// Pushes a command to be executed by the GPU thread
+ void PushCommand(CommandData&& command_data);
+
+private:
+ SynchState state;
+ VideoCore::RendererBase& renderer;
+ Tegra::DmaPusher& dma_pusher;
+ std::thread thread;
+ std::thread::id thread_id;
+};
+
+} // namespace VideoCommon::GPUThread
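
SynchState above pairs a predicate-guarded condition_variable wait with a notify after each push. A minimal sketch of that producer/consumer handshake, using simplified stand-in types rather than the SPSCQueue:

    #include <condition_variable>
    #include <mutex>
    #include <queue>
    #include <thread>

    struct Channel {
        std::mutex mutex;
        std::condition_variable cv;
        std::queue<int> queue;

        void Push(int value) {
            {
                std::lock_guard<std::mutex> lock{mutex};
                queue.push(value);
            }
            cv.notify_one(); // wake the consumer after releasing the lock
        }

        int Pop() {
            std::unique_lock<std::mutex> lock{mutex};
            cv.wait(lock, [this] { return !queue.empty(); }); // guards spurious wakeups
            const int value = queue.front();
            queue.pop();
            return value;
        }
    };

    int main() {
        Channel channel;
        std::thread producer{[&] { channel.Push(42); }};
        const int value = channel.Pop();
        producer.join();
        return value == 42 ? 0 : 1;
    }
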
diff --git a/src/video_core/morton.cpp b/src/video_core/morton.cpp
index b68f4fb13..9692ce143 100644
--- a/src/video_core/morton.cpp
+++ b/src/video_core/morton.cpp
@@ -16,12 +16,12 @@ namespace VideoCore {
using Surface::GetBytesPerPixel;
using Surface::PixelFormat;
-using MortonCopyFn = void (*)(u32, u32, u32, u32, u32, u32, u8*, std::size_t, VAddr);
+using MortonCopyFn = void (*)(u32, u32, u32, u32, u32, u32, u8*, VAddr);
using ConversionArray = std::array<MortonCopyFn, Surface::MaxPixelFormat>;
template <bool morton_to_linear, PixelFormat format>
static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth, u32 depth,
- u32 tile_width_spacing, u8* buffer, std::size_t buffer_size, VAddr addr) {
+ u32 tile_width_spacing, u8* buffer, VAddr addr) {
constexpr u32 bytes_per_pixel = GetBytesPerPixel(format);
// With the BCn formats (DXT and DXN), each 4x4 tile is swizzled instead of just individual
@@ -42,142 +42,138 @@ static void MortonCopy(u32 stride, u32 block_height, u32 height, u32 block_depth
}
static constexpr ConversionArray morton_to_linear_fns = {
- // clang-format off
- MortonCopy<true, PixelFormat::ABGR8U>,
- MortonCopy<true, PixelFormat::ABGR8S>,
- MortonCopy<true, PixelFormat::ABGR8UI>,
- MortonCopy<true, PixelFormat::B5G6R5U>,
- MortonCopy<true, PixelFormat::A2B10G10R10U>,
- MortonCopy<true, PixelFormat::A1B5G5R5U>,
- MortonCopy<true, PixelFormat::R8U>,
- MortonCopy<true, PixelFormat::R8UI>,
- MortonCopy<true, PixelFormat::RGBA16F>,
- MortonCopy<true, PixelFormat::RGBA16U>,
- MortonCopy<true, PixelFormat::RGBA16UI>,
- MortonCopy<true, PixelFormat::R11FG11FB10F>,
- MortonCopy<true, PixelFormat::RGBA32UI>,
- MortonCopy<true, PixelFormat::DXT1>,
- MortonCopy<true, PixelFormat::DXT23>,
- MortonCopy<true, PixelFormat::DXT45>,
- MortonCopy<true, PixelFormat::DXN1>,
- MortonCopy<true, PixelFormat::DXN2UNORM>,
- MortonCopy<true, PixelFormat::DXN2SNORM>,
- MortonCopy<true, PixelFormat::BC7U>,
- MortonCopy<true, PixelFormat::BC6H_UF16>,
- MortonCopy<true, PixelFormat::BC6H_SF16>,
- MortonCopy<true, PixelFormat::ASTC_2D_4X4>,
- MortonCopy<true, PixelFormat::BGRA8>,
- MortonCopy<true, PixelFormat::RGBA32F>,
- MortonCopy<true, PixelFormat::RG32F>,
- MortonCopy<true, PixelFormat::R32F>,
- MortonCopy<true, PixelFormat::R16F>,
- MortonCopy<true, PixelFormat::R16U>,
- MortonCopy<true, PixelFormat::R16S>,
- MortonCopy<true, PixelFormat::R16UI>,
- MortonCopy<true, PixelFormat::R16I>,
- MortonCopy<true, PixelFormat::RG16>,
- MortonCopy<true, PixelFormat::RG16F>,
- MortonCopy<true, PixelFormat::RG16UI>,
- MortonCopy<true, PixelFormat::RG16I>,
- MortonCopy<true, PixelFormat::RG16S>,
- MortonCopy<true, PixelFormat::RGB32F>,
- MortonCopy<true, PixelFormat::RGBA8_SRGB>,
- MortonCopy<true, PixelFormat::RG8U>,
- MortonCopy<true, PixelFormat::RG8S>,
- MortonCopy<true, PixelFormat::RG32UI>,
- MortonCopy<true, PixelFormat::R32UI>,
- MortonCopy<true, PixelFormat::ASTC_2D_8X8>,
- MortonCopy<true, PixelFormat::ASTC_2D_8X5>,
- MortonCopy<true, PixelFormat::ASTC_2D_5X4>,
- MortonCopy<true, PixelFormat::BGRA8_SRGB>,
- MortonCopy<true, PixelFormat::DXT1_SRGB>,
- MortonCopy<true, PixelFormat::DXT23_SRGB>,
- MortonCopy<true, PixelFormat::DXT45_SRGB>,
- MortonCopy<true, PixelFormat::BC7U_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_5X4_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_5X5>,
- MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>,
- MortonCopy<true, PixelFormat::ASTC_2D_10X8>,
- MortonCopy<true, PixelFormat::ASTC_2D_10X8_SRGB>,
- MortonCopy<true, PixelFormat::Z32F>,
- MortonCopy<true, PixelFormat::Z16>,
- MortonCopy<true, PixelFormat::Z24S8>,
- MortonCopy<true, PixelFormat::S8Z24>,
- MortonCopy<true, PixelFormat::Z32FS8>,
- // clang-format on
+ MortonCopy<true, PixelFormat::ABGR8U>,
+ MortonCopy<true, PixelFormat::ABGR8S>,
+ MortonCopy<true, PixelFormat::ABGR8UI>,
+ MortonCopy<true, PixelFormat::B5G6R5U>,
+ MortonCopy<true, PixelFormat::A2B10G10R10U>,
+ MortonCopy<true, PixelFormat::A1B5G5R5U>,
+ MortonCopy<true, PixelFormat::R8U>,
+ MortonCopy<true, PixelFormat::R8UI>,
+ MortonCopy<true, PixelFormat::RGBA16F>,
+ MortonCopy<true, PixelFormat::RGBA16U>,
+ MortonCopy<true, PixelFormat::RGBA16UI>,
+ MortonCopy<true, PixelFormat::R11FG11FB10F>,
+ MortonCopy<true, PixelFormat::RGBA32UI>,
+ MortonCopy<true, PixelFormat::DXT1>,
+ MortonCopy<true, PixelFormat::DXT23>,
+ MortonCopy<true, PixelFormat::DXT45>,
+ MortonCopy<true, PixelFormat::DXN1>,
+ MortonCopy<true, PixelFormat::DXN2UNORM>,
+ MortonCopy<true, PixelFormat::DXN2SNORM>,
+ MortonCopy<true, PixelFormat::BC7U>,
+ MortonCopy<true, PixelFormat::BC6H_UF16>,
+ MortonCopy<true, PixelFormat::BC6H_SF16>,
+ MortonCopy<true, PixelFormat::ASTC_2D_4X4>,
+ MortonCopy<true, PixelFormat::BGRA8>,
+ MortonCopy<true, PixelFormat::RGBA32F>,
+ MortonCopy<true, PixelFormat::RG32F>,
+ MortonCopy<true, PixelFormat::R32F>,
+ MortonCopy<true, PixelFormat::R16F>,
+ MortonCopy<true, PixelFormat::R16U>,
+ MortonCopy<true, PixelFormat::R16S>,
+ MortonCopy<true, PixelFormat::R16UI>,
+ MortonCopy<true, PixelFormat::R16I>,
+ MortonCopy<true, PixelFormat::RG16>,
+ MortonCopy<true, PixelFormat::RG16F>,
+ MortonCopy<true, PixelFormat::RG16UI>,
+ MortonCopy<true, PixelFormat::RG16I>,
+ MortonCopy<true, PixelFormat::RG16S>,
+ MortonCopy<true, PixelFormat::RGB32F>,
+ MortonCopy<true, PixelFormat::RGBA8_SRGB>,
+ MortonCopy<true, PixelFormat::RG8U>,
+ MortonCopy<true, PixelFormat::RG8S>,
+ MortonCopy<true, PixelFormat::RG32UI>,
+ MortonCopy<true, PixelFormat::R32UI>,
+ MortonCopy<true, PixelFormat::ASTC_2D_8X8>,
+ MortonCopy<true, PixelFormat::ASTC_2D_8X5>,
+ MortonCopy<true, PixelFormat::ASTC_2D_5X4>,
+ MortonCopy<true, PixelFormat::BGRA8_SRGB>,
+ MortonCopy<true, PixelFormat::DXT1_SRGB>,
+ MortonCopy<true, PixelFormat::DXT23_SRGB>,
+ MortonCopy<true, PixelFormat::DXT45_SRGB>,
+ MortonCopy<true, PixelFormat::BC7U_SRGB>,
+ MortonCopy<true, PixelFormat::ASTC_2D_4X4_SRGB>,
+ MortonCopy<true, PixelFormat::ASTC_2D_8X8_SRGB>,
+ MortonCopy<true, PixelFormat::ASTC_2D_8X5_SRGB>,
+ MortonCopy<true, PixelFormat::ASTC_2D_5X4_SRGB>,
+ MortonCopy<true, PixelFormat::ASTC_2D_5X5>,
+ MortonCopy<true, PixelFormat::ASTC_2D_5X5_SRGB>,
+ MortonCopy<true, PixelFormat::ASTC_2D_10X8>,
+ MortonCopy<true, PixelFormat::ASTC_2D_10X8_SRGB>,
+ MortonCopy<true, PixelFormat::Z32F>,
+ MortonCopy<true, PixelFormat::Z16>,
+ MortonCopy<true, PixelFormat::Z24S8>,
+ MortonCopy<true, PixelFormat::S8Z24>,
+ MortonCopy<true, PixelFormat::Z32FS8>,
};
static constexpr ConversionArray linear_to_morton_fns = {
- // clang-format off
- MortonCopy<false, PixelFormat::ABGR8U>,
- MortonCopy<false, PixelFormat::ABGR8S>,
- MortonCopy<false, PixelFormat::ABGR8UI>,
- MortonCopy<false, PixelFormat::B5G6R5U>,
- MortonCopy<false, PixelFormat::A2B10G10R10U>,
- MortonCopy<false, PixelFormat::A1B5G5R5U>,
- MortonCopy<false, PixelFormat::R8U>,
- MortonCopy<false, PixelFormat::R8UI>,
- MortonCopy<false, PixelFormat::RGBA16F>,
- MortonCopy<false, PixelFormat::RGBA16U>,
- MortonCopy<false, PixelFormat::RGBA16UI>,
- MortonCopy<false, PixelFormat::R11FG11FB10F>,
- MortonCopy<false, PixelFormat::RGBA32UI>,
- MortonCopy<false, PixelFormat::DXT1>,
- MortonCopy<false, PixelFormat::DXT23>,
- MortonCopy<false, PixelFormat::DXT45>,
- MortonCopy<false, PixelFormat::DXN1>,
- MortonCopy<false, PixelFormat::DXN2UNORM>,
- MortonCopy<false, PixelFormat::DXN2SNORM>,
- MortonCopy<false, PixelFormat::BC7U>,
- MortonCopy<false, PixelFormat::BC6H_UF16>,
- MortonCopy<false, PixelFormat::BC6H_SF16>,
- // TODO(Subv): Swizzling ASTC formats are not supported
- nullptr,
- MortonCopy<false, PixelFormat::BGRA8>,
- MortonCopy<false, PixelFormat::RGBA32F>,
- MortonCopy<false, PixelFormat::RG32F>,
- MortonCopy<false, PixelFormat::R32F>,
- MortonCopy<false, PixelFormat::R16F>,
- MortonCopy<false, PixelFormat::R16U>,
- MortonCopy<false, PixelFormat::R16S>,
- MortonCopy<false, PixelFormat::R16UI>,
- MortonCopy<false, PixelFormat::R16I>,
- MortonCopy<false, PixelFormat::RG16>,
- MortonCopy<false, PixelFormat::RG16F>,
- MortonCopy<false, PixelFormat::RG16UI>,
- MortonCopy<false, PixelFormat::RG16I>,
- MortonCopy<false, PixelFormat::RG16S>,
- MortonCopy<false, PixelFormat::RGB32F>,
- MortonCopy<false, PixelFormat::RGBA8_SRGB>,
- MortonCopy<false, PixelFormat::RG8U>,
- MortonCopy<false, PixelFormat::RG8S>,
- MortonCopy<false, PixelFormat::RG32UI>,
- MortonCopy<false, PixelFormat::R32UI>,
- nullptr,
- nullptr,
- nullptr,
- MortonCopy<false, PixelFormat::BGRA8_SRGB>,
- MortonCopy<false, PixelFormat::DXT1_SRGB>,
- MortonCopy<false, PixelFormat::DXT23_SRGB>,
- MortonCopy<false, PixelFormat::DXT45_SRGB>,
- MortonCopy<false, PixelFormat::BC7U_SRGB>,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- nullptr,
- MortonCopy<false, PixelFormat::Z32F>,
- MortonCopy<false, PixelFormat::Z16>,
- MortonCopy<false, PixelFormat::Z24S8>,
- MortonCopy<false, PixelFormat::S8Z24>,
- MortonCopy<false, PixelFormat::Z32FS8>,
- // clang-format on
+ MortonCopy<false, PixelFormat::ABGR8U>,
+ MortonCopy<false, PixelFormat::ABGR8S>,
+ MortonCopy<false, PixelFormat::ABGR8UI>,
+ MortonCopy<false, PixelFormat::B5G6R5U>,
+ MortonCopy<false, PixelFormat::A2B10G10R10U>,
+ MortonCopy<false, PixelFormat::A1B5G5R5U>,
+ MortonCopy<false, PixelFormat::R8U>,
+ MortonCopy<false, PixelFormat::R8UI>,
+ MortonCopy<false, PixelFormat::RGBA16F>,
+ MortonCopy<false, PixelFormat::RGBA16U>,
+ MortonCopy<false, PixelFormat::RGBA16UI>,
+ MortonCopy<false, PixelFormat::R11FG11FB10F>,
+ MortonCopy<false, PixelFormat::RGBA32UI>,
+ MortonCopy<false, PixelFormat::DXT1>,
+ MortonCopy<false, PixelFormat::DXT23>,
+ MortonCopy<false, PixelFormat::DXT45>,
+ MortonCopy<false, PixelFormat::DXN1>,
+ MortonCopy<false, PixelFormat::DXN2UNORM>,
+ MortonCopy<false, PixelFormat::DXN2SNORM>,
+ MortonCopy<false, PixelFormat::BC7U>,
+ MortonCopy<false, PixelFormat::BC6H_UF16>,
+ MortonCopy<false, PixelFormat::BC6H_SF16>,
+    // TODO(Subv): Swizzling ASTC formats is not supported
+ nullptr,
+ MortonCopy<false, PixelFormat::BGRA8>,
+ MortonCopy<false, PixelFormat::RGBA32F>,
+ MortonCopy<false, PixelFormat::RG32F>,
+ MortonCopy<false, PixelFormat::R32F>,
+ MortonCopy<false, PixelFormat::R16F>,
+ MortonCopy<false, PixelFormat::R16U>,
+ MortonCopy<false, PixelFormat::R16S>,
+ MortonCopy<false, PixelFormat::R16UI>,
+ MortonCopy<false, PixelFormat::R16I>,
+ MortonCopy<false, PixelFormat::RG16>,
+ MortonCopy<false, PixelFormat::RG16F>,
+ MortonCopy<false, PixelFormat::RG16UI>,
+ MortonCopy<false, PixelFormat::RG16I>,
+ MortonCopy<false, PixelFormat::RG16S>,
+ MortonCopy<false, PixelFormat::RGB32F>,
+ MortonCopy<false, PixelFormat::RGBA8_SRGB>,
+ MortonCopy<false, PixelFormat::RG8U>,
+ MortonCopy<false, PixelFormat::RG8S>,
+ MortonCopy<false, PixelFormat::RG32UI>,
+ MortonCopy<false, PixelFormat::R32UI>,
+ nullptr,
+ nullptr,
+ nullptr,
+ MortonCopy<false, PixelFormat::BGRA8_SRGB>,
+ MortonCopy<false, PixelFormat::DXT1_SRGB>,
+ MortonCopy<false, PixelFormat::DXT23_SRGB>,
+ MortonCopy<false, PixelFormat::DXT45_SRGB>,
+ MortonCopy<false, PixelFormat::BC7U_SRGB>,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ MortonCopy<false, PixelFormat::Z32F>,
+ MortonCopy<false, PixelFormat::Z16>,
+ MortonCopy<false, PixelFormat::Z24S8>,
+ MortonCopy<false, PixelFormat::S8Z24>,
+ MortonCopy<false, PixelFormat::Z32FS8>,
};
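Both conversion tables are indexed directly by pixel format (see GetSwizzleFunction below), so their entries must stay in PixelFormat enum order. A minimal guard sketch, assuming the enum exposes a trailing Max member (hypothetical name):

    static_assert(morton_to_linear_fns.size() == static_cast<std::size_t>(PixelFormat::Max),
                  "ConversionArray must have one entry per PixelFormat, in enum order");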
static MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFormat format) {
@@ -191,45 +187,6 @@ static MortonCopyFn GetSwizzleFunction(MortonSwizzleMode mode, Surface::PixelFor
return morton_to_linear_fns[static_cast<std::size_t>(format)];
}
-/// 8x8 Z-Order coordinate from 2D coordinates
-static u32 MortonInterleave(u32 x, u32 y) {
- static const u32 xlut[] = {0x00, 0x01, 0x04, 0x05, 0x10, 0x11, 0x14, 0x15};
- static const u32 ylut[] = {0x00, 0x02, 0x08, 0x0a, 0x20, 0x22, 0x28, 0x2a};
- return xlut[x % 8] + ylut[y % 8];
-}
-
-/// Calculates the offset of the position of the pixel in Morton order
-static u32 GetMortonOffset(u32 x, u32 y, u32 bytes_per_pixel) {
- // Images are split into 8x8 tiles. Each tile is composed of four 4x4 subtiles each
- // of which is composed of four 2x2 subtiles each of which is composed of four texels.
- // Each structure is embedded into the next-bigger one in a diagonal pattern, e.g.
- // texels are laid out in a 2x2 subtile like this:
- // 2 3
- // 0 1
- //
- // The full 8x8 tile has the texels arranged like this:
- //
- // 42 43 46 47 58 59 62 63
- // 40 41 44 45 56 57 60 61
- // 34 35 38 39 50 51 54 55
- // 32 33 36 37 48 49 52 53
- // 10 11 14 15 26 27 30 31
- // 08 09 12 13 24 25 28 29
- // 02 03 06 07 18 19 22 23
- // 00 01 04 05 16 17 20 21
- //
- // This pattern is what's called Z-order curve, or Morton order.
-
- const unsigned int block_height = 8;
- const unsigned int coarse_x = x & ~7;
-
- u32 i = MortonInterleave(x, y);
-
- const unsigned int offset = coarse_x * block_height;
-
- return (i + offset) * bytes_per_pixel;
-}
-
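For reference, the LUT-based helpers removed above compute a plain bit interleave of the low three bits of x and y; an equivalent sketch (the helper name is illustrative):

    static u32 MortonInterleave8x8(u32 x, u32 y) {
        u32 result = 0;
        for (u32 bit = 0; bit < 3; ++bit) {
            result |= ((x >> bit) & 1) << (2 * bit);     // x bits land on even positions
            result |= ((y >> bit) & 1) << (2 * bit + 1); // y bits land on odd positions
        }
        return result; // equals xlut[x % 8] + ylut[y % 8] from the removed code
    }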
static u32 MortonInterleave128(u32 x, u32 y) {
// 128x128 Z-Order coordinate from 2D coordinates
static constexpr u32 xlut[] = {
@@ -325,14 +282,14 @@ static u32 GetMortonOffset128(u32 x, u32 y, u32 bytes_per_pixel) {
void MortonSwizzle(MortonSwizzleMode mode, Surface::PixelFormat format, u32 stride,
u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing,
- u8* buffer, std::size_t buffer_size, VAddr addr) {
-
+ u8* buffer, VAddr addr) {
GetSwizzleFunction(mode, format)(stride, block_height, height, block_depth, depth,
- tile_width_spacing, buffer, buffer_size, addr);
+ tile_width_spacing, buffer, addr);
}
-void MortonCopyPixels128(u32 width, u32 height, u32 bytes_per_pixel, u32 linear_bytes_per_pixel,
- u8* morton_data, u8* linear_data, bool morton_to_linear) {
+void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel,
+ u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data) {
+ const bool morton_to_linear = mode == MortonSwizzleMode::MortonToLinear;
u8* data_ptrs[2];
for (u32 y = 0; y < height; ++y) {
for (u32 x = 0; x < width; ++x) {
diff --git a/src/video_core/morton.h b/src/video_core/morton.h
index 065f59ce3..b565204b5 100644
--- a/src/video_core/morton.h
+++ b/src/video_core/morton.h
@@ -13,9 +13,9 @@ enum class MortonSwizzleMode { MortonToLinear, LinearToMorton };
void MortonSwizzle(MortonSwizzleMode mode, VideoCore::Surface::PixelFormat format, u32 stride,
u32 block_height, u32 height, u32 block_depth, u32 depth, u32 tile_width_spacing,
- u8* buffer, std::size_t buffer_size, VAddr addr);
+ u8* buffer, VAddr addr);
-void MortonCopyPixels128(u32 width, u32 height, u32 bytes_per_pixel, u32 linear_bytes_per_pixel,
- u8* morton_data, u8* linear_data, bool morton_to_linear);
+void MortonCopyPixels128(MortonSwizzleMode mode, u32 width, u32 height, u32 bytes_per_pixel,
+ u32 linear_bytes_per_pixel, u8* morton_data, u8* linear_data);
} // namespace VideoCore
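Under the new signature the copy direction is selected by the mode argument rather than a trailing bool; a call-site sketch (buffer names are hypothetical):

    // Deswizzle a 128x128-tiled RGBA8 image (4 bytes per pixel) into a linear buffer.
    VideoCore::MortonCopyPixels128(VideoCore::MortonSwizzleMode::MortonToLinear, width, height,
                                   4, 4, morton_data, linear_data);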
diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h
index bcf0c15a4..ecd9986a0 100644
--- a/src/video_core/rasterizer_cache.h
+++ b/src/video_core/rasterizer_cache.h
@@ -4,6 +4,7 @@
#pragma once
+#include <mutex>
#include <set>
#include <unordered_map>
@@ -12,14 +13,26 @@
#include "common/common_types.h"
#include "core/settings.h"
+#include "video_core/gpu.h"
#include "video_core/rasterizer_interface.h"
class RasterizerCacheObject {
public:
+ explicit RasterizerCacheObject(const u8* host_ptr)
+ : host_ptr{host_ptr}, cache_addr{ToCacheAddr(host_ptr)} {}
+
virtual ~RasterizerCacheObject();
+ CacheAddr GetCacheAddr() const {
+ return cache_addr;
+ }
+
+ const u8* GetHostPtr() const {
+ return host_ptr;
+ }
+
/// Gets the address of the shader in guest memory, required for cache management
- virtual VAddr GetAddr() const = 0;
+ virtual VAddr GetCpuAddr() const = 0;
/// Gets the size of the shader in guest memory, required for cache management
virtual std::size_t GetSizeInBytes() const = 0;
@@ -58,6 +71,8 @@ private:
bool is_registered{}; ///< Whether the object is currently registered with the cache
bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory)
u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing
+    CacheAddr cache_addr{};    ///< Cache address for this region, distinct from the emulated virtual address space
+ const u8* host_ptr{}; ///< Pointer to the memory backing this cached region
};
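ToCacheAddr comes from the video_core/gpu.h include added above; it is presumably a thin pointer-to-integer mapping along these lines (a sketch, assuming CacheAddr is an integral alias such as uintptr_t):

    inline CacheAddr ToCacheAddr(const void* host_ptr) {
        return reinterpret_cast<CacheAddr>(host_ptr);
    }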
template <class T>
@@ -68,7 +83,9 @@ public:
explicit RasterizerCache(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {}
/// Write any cached resources overlapping the specified region back to memory
- void FlushRegion(Tegra::GPUVAddr addr, size_t size) {
+ void FlushRegion(CacheAddr addr, std::size_t size) {
+ std::lock_guard<std::recursive_mutex> lock{mutex};
+
const auto& objects{GetSortedObjectsFromRegion(addr, size)};
for (auto& object : objects) {
FlushObject(object);
@@ -76,7 +93,9 @@ public:
}
/// Mark the specified region as being invalidated
- void InvalidateRegion(VAddr addr, u64 size) {
+ void InvalidateRegion(CacheAddr addr, u64 size) {
+ std::lock_guard<std::recursive_mutex> lock{mutex};
+
const auto& objects{GetSortedObjectsFromRegion(addr, size)};
for (auto& object : objects) {
if (!object->IsRegistered()) {
@@ -89,49 +108,70 @@ public:
/// Invalidates everything in the cache
void InvalidateAll() {
+ std::lock_guard<std::recursive_mutex> lock{mutex};
+
while (interval_cache.begin() != interval_cache.end()) {
Unregister(*interval_cache.begin()->second.begin());
}
}
protected:
- /// Tries to get an object from the cache with the specified address
- T TryGet(VAddr addr) const {
+ /// Tries to get an object from the cache with the specified cache address
+ T TryGet(CacheAddr addr) const {
const auto iter = map_cache.find(addr);
if (iter != map_cache.end())
return iter->second;
return nullptr;
}
+ T TryGet(const void* addr) const {
+ const auto iter = map_cache.find(ToCacheAddr(addr));
+ if (iter != map_cache.end())
+ return iter->second;
+ return nullptr;
+ }
+
/// Register an object into the cache
void Register(const T& object) {
+ std::lock_guard<std::recursive_mutex> lock{mutex};
+
object->SetIsRegistered(true);
interval_cache.add({GetInterval(object), ObjectSet{object}});
- map_cache.insert({object->GetAddr(), object});
- rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), 1);
+ map_cache.insert({object->GetCacheAddr(), object});
+ rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), 1);
}
/// Unregisters an object from the cache
void Unregister(const T& object) {
- object->SetIsRegistered(false);
- rasterizer.UpdatePagesCachedCount(object->GetAddr(), object->GetSizeInBytes(), -1);
- // Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit
- if (Settings::values.use_accurate_gpu_emulation) {
- FlushObject(object);
- }
+        std::lock_guard<std::recursive_mutex> lock{mutex};
+
+ object->SetIsRegistered(false);
+ rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1);
interval_cache.subtract({GetInterval(object), ObjectSet{object}});
- map_cache.erase(object->GetAddr());
+ map_cache.erase(object->GetCacheAddr());
}
/// Returns a ticks counter used for tracking when cached objects were last modified
u64 GetModifiedTicks() {
+ std::lock_guard<std::recursive_mutex> lock{mutex};
+
return ++modified_ticks;
}
+ /// Flushes the specified object, updating appropriate cache state as needed
+ void FlushObject(const T& object) {
+ std::lock_guard<std::recursive_mutex> lock{mutex};
+
+ if (!object->IsDirty()) {
+ return;
+ }
+ object->Flush();
+ object->MarkAsModified(false, *this);
+ }
+
private:
/// Returns a list of cached objects from the specified memory region, ordered by access time
- std::vector<T> GetSortedObjectsFromRegion(VAddr addr, u64 size) {
+ std::vector<T> GetSortedObjectsFromRegion(CacheAddr addr, u64 size) {
if (size == 0) {
return {};
}
@@ -154,27 +194,19 @@ private:
return objects;
}
- /// Flushes the specified object, updating appropriate cache state as needed
- void FlushObject(const T& object) {
- if (!object->IsDirty()) {
- return;
- }
- object->Flush();
- object->MarkAsModified(false, *this);
- }
-
using ObjectSet = std::set<T>;
- using ObjectCache = std::unordered_map<VAddr, T>;
- using IntervalCache = boost::icl::interval_map<VAddr, ObjectSet>;
+ using ObjectCache = std::unordered_map<CacheAddr, T>;
+ using IntervalCache = boost::icl::interval_map<CacheAddr, ObjectSet>;
using ObjectInterval = typename IntervalCache::interval_type;
static auto GetInterval(const T& object) {
- return ObjectInterval::right_open(object->GetAddr(),
- object->GetAddr() + object->GetSizeInBytes());
+ return ObjectInterval::right_open(object->GetCacheAddr(),
+ object->GetCacheAddr() + object->GetSizeInBytes());
}
ObjectCache map_cache;
IntervalCache interval_cache; ///< Cache of objects
u64 modified_ticks{}; ///< Counter of cache state ticks, used for in-order flushing
VideoCore::RasterizerInterface& rasterizer;
+ std::recursive_mutex mutex;
};
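The guard is a std::recursive_mutex rather than std::mutex because the public entry points re-enter other locking members on the same thread; a sketch of the path that would otherwise deadlock:

    void FlushRegion(CacheAddr addr, std::size_t size) {
        std::lock_guard<std::recursive_mutex> lock{mutex}; // first acquisition
        for (auto& object : GetSortedObjectsFromRegion(addr, size)) {
            FlushObject(object); // locks the same mutex again on this thread
        }
    }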
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index b2a223705..76e292e87 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -35,20 +35,20 @@ public:
virtual void FlushAll() = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
- virtual void FlushRegion(VAddr addr, u64 size) = 0;
+ virtual void FlushRegion(CacheAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be invalidated
- virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
+ virtual void InvalidateRegion(CacheAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
/// and invalidated
- virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
+ virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0;
/// Attempt to use a faster method to perform a surface copy
virtual bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
- const MathUtil::Rectangle<u32>& src_rect,
- const MathUtil::Rectangle<u32>& dst_rect) {
+ const Common::Rectangle<u32>& src_rect,
+ const Common::Rectangle<u32>& dst_rect) {
return false;
}
@@ -63,7 +63,7 @@ public:
}
/// Increase/decrease the number of object in pages touching the specified region
- virtual void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {}
+ virtual void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {}
/// Initialize disk cached resources for the game being emulated
virtual void LoadDiskResources(const std::atomic_bool& stop_loading = false,
diff --git a/src/video_core/renderer_base.cpp b/src/video_core/renderer_base.cpp
index 94223f45f..919d1f2d4 100644
--- a/src/video_core/renderer_base.cpp
+++ b/src/video_core/renderer_base.cpp
@@ -2,6 +2,7 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "common/logging/log.h"
#include "core/frontend/emu_window.h"
#include "core/settings.h"
#include "video_core/renderer_base.h"
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index b3062e5ba..a4eea61a6 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -13,6 +13,11 @@
namespace OpenGL {
+CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset,
+ std::size_t alignment, u8* host_ptr)
+    : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset},
+      alignment{alignment} {}
+
OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size)
: RasterizerCache{rasterizer}, stream_buffer(size, true) {}
@@ -26,11 +31,12 @@ GLintptr OGLBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size
// TODO: Figure out which size is the best for given games.
cache &= size >= 2048;
+ const auto& host_ptr{Memory::GetPointer(*cpu_addr)};
if (cache) {
- auto entry = TryGet(*cpu_addr);
+ auto entry = TryGet(host_ptr);
if (entry) {
- if (entry->size >= size && entry->alignment == alignment) {
- return entry->offset;
+ if (entry->GetSize() >= size && entry->GetAlignment() == alignment) {
+ return entry->GetOffset();
}
Unregister(entry);
}
@@ -39,17 +45,17 @@ GLintptr OGLBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size
AlignBuffer(alignment);
const GLintptr uploaded_offset = buffer_offset;
- Memory::ReadBlock(*cpu_addr, buffer_ptr, size);
+ if (!host_ptr) {
+ return uploaded_offset;
+ }
+ std::memcpy(buffer_ptr, host_ptr, size);
buffer_ptr += size;
buffer_offset += size;
if (cache) {
- auto entry = std::make_shared<CachedBufferEntry>();
- entry->offset = uploaded_offset;
- entry->size = size;
- entry->alignment = alignment;
- entry->addr = *cpu_addr;
+ auto entry = std::make_shared<CachedBufferEntry>(*cpu_addr, size, uploaded_offset,
+ alignment, host_ptr);
Register(entry);
}
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index c11acfb79..1de1f84ae 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -17,22 +17,39 @@ namespace OpenGL {
class RasterizerOpenGL;
-struct CachedBufferEntry final : public RasterizerCacheObject {
- VAddr GetAddr() const override {
- return addr;
+class CachedBufferEntry final : public RasterizerCacheObject {
+public:
+ explicit CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset,
+ std::size_t alignment, u8* host_ptr);
+
+ VAddr GetCpuAddr() const override {
+ return cpu_addr;
}
std::size_t GetSizeInBytes() const override {
return size;
}
+ std::size_t GetSize() const {
+ return size;
+ }
+
+ GLintptr GetOffset() const {
+ return offset;
+ }
+
+ std::size_t GetAlignment() const {
+ return alignment;
+ }
+
// We do not have to flush this cache as things in it are never modified by us.
void Flush() override {}
- VAddr addr;
- std::size_t size;
- GLintptr offset;
- std::size_t alignment;
+private:
+ VAddr cpu_addr{};
+ std::size_t size{};
+ GLintptr offset{};
+ std::size_t alignment{};
};
class OGLBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> {
diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp
index c7f32feaa..a2c509c24 100644
--- a/src/video_core/renderer_opengl/gl_global_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_global_cache.cpp
@@ -15,12 +15,13 @@
namespace OpenGL {
-CachedGlobalRegion::CachedGlobalRegion(VAddr addr, u32 size) : addr{addr}, size{size} {
+CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr)
+    : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size} {
buffer.Create();
// Bind and unbind the buffer so it gets allocated by the driver
glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
- LabelGLObject(GL_BUFFER, buffer.handle, addr, "GlobalMemory");
+ LabelGLObject(GL_BUFFER, buffer.handle, cpu_addr, "GlobalMemory");
}
void CachedGlobalRegion::Reload(u32 size_) {
@@ -35,7 +36,7 @@ void CachedGlobalRegion::Reload(u32 size_) {
// TODO(Rodrigo): Get rid of Memory::GetPointer with a staging buffer
glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle);
- glBufferData(GL_SHADER_STORAGE_BUFFER, size, Memory::GetPointer(addr), GL_DYNAMIC_DRAW);
+ glBufferData(GL_SHADER_STORAGE_BUFFER, size, GetHostPtr(), GL_DYNAMIC_DRAW);
}
GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(VAddr addr, u32 size) const {
@@ -46,19 +47,19 @@ GlobalRegion GlobalRegionCacheOpenGL::TryGetReservedGlobalRegion(VAddr addr, u32
return search->second;
}
-GlobalRegion GlobalRegionCacheOpenGL::GetUncachedGlobalRegion(VAddr addr, u32 size) {
+GlobalRegion GlobalRegionCacheOpenGL::GetUncachedGlobalRegion(VAddr addr, u32 size, u8* host_ptr) {
GlobalRegion region{TryGetReservedGlobalRegion(addr, size)};
if (!region) {
// No reserved surface available, create a new one and reserve it
- region = std::make_shared<CachedGlobalRegion>(addr, size);
+ region = std::make_shared<CachedGlobalRegion>(addr, size, host_ptr);
ReserveGlobalRegion(region);
}
region->Reload(size);
return region;
}
-void GlobalRegionCacheOpenGL::ReserveGlobalRegion(const GlobalRegion& region) {
- reserve[region->GetAddr()] = region;
+void GlobalRegionCacheOpenGL::ReserveGlobalRegion(GlobalRegion region) {
+ reserve.insert_or_assign(region->GetCpuAddr(), std::move(region));
}
GlobalRegionCacheOpenGL::GlobalRegionCacheOpenGL(RasterizerOpenGL& rasterizer)
@@ -80,11 +81,12 @@ GlobalRegion GlobalRegionCacheOpenGL::GetGlobalRegion(
ASSERT(actual_addr);
// Look up global region in the cache based on address
- GlobalRegion region = TryGet(*actual_addr);
+ const auto& host_ptr{Memory::GetPointer(*actual_addr)};
+ GlobalRegion region{TryGet(host_ptr)};
if (!region) {
// No global region found - create a new one
- region = GetUncachedGlobalRegion(*actual_addr, size);
+ region = GetUncachedGlobalRegion(*actual_addr, size, host_ptr);
Register(region);
}
diff --git a/src/video_core/renderer_opengl/gl_global_cache.h b/src/video_core/renderer_opengl/gl_global_cache.h
index 37830bb7c..e497a0619 100644
--- a/src/video_core/renderer_opengl/gl_global_cache.h
+++ b/src/video_core/renderer_opengl/gl_global_cache.h
@@ -27,15 +27,13 @@ using GlobalRegion = std::shared_ptr<CachedGlobalRegion>;
class CachedGlobalRegion final : public RasterizerCacheObject {
public:
- explicit CachedGlobalRegion(VAddr addr, u32 size);
+ explicit CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr);
- /// Gets the address of the shader in guest memory, required for cache management
- VAddr GetAddr() const {
- return addr;
+ VAddr GetCpuAddr() const override {
+ return cpu_addr;
}
- /// Gets the size of the shader in guest memory, required for cache management
- std::size_t GetSizeInBytes() const {
+ std::size_t GetSizeInBytes() const override {
return size;
}
@@ -53,9 +51,8 @@ public:
}
private:
- VAddr addr{};
+ VAddr cpu_addr{};
u32 size{};
-
OGLBuffer buffer;
};
@@ -69,8 +66,8 @@ public:
private:
GlobalRegion TryGetReservedGlobalRegion(VAddr addr, u32 size) const;
- GlobalRegion GetUncachedGlobalRegion(VAddr addr, u32 size);
- void ReserveGlobalRegion(const GlobalRegion& region);
+ GlobalRegion GetUncachedGlobalRegion(VAddr addr, u32 size, u8* host_ptr);
+ void ReserveGlobalRegion(GlobalRegion region);
std::unordered_map<VAddr, GlobalRegion> reserve;
};
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 12d876120..bb6de5477 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -102,8 +102,9 @@ struct FramebufferCacheKey {
RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system,
ScreenInfo& info)
- : res_cache{*this}, shader_cache{*this, system}, emu_window{window}, screen_info{info},
- buffer_cache(*this, STREAM_BUFFER_SIZE), global_cache{*this} {
+ : res_cache{*this}, shader_cache{*this, system}, global_cache{*this},
+ emu_window{window}, system{system}, screen_info{info},
+ buffer_cache(*this, STREAM_BUFFER_SIZE) {
// Create sampler objects
for (std::size_t i = 0; i < texture_samplers.size(); ++i) {
texture_samplers[i].Create();
@@ -118,7 +119,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::Syst
glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &uniform_buffer_alignment);
- LOG_CRITICAL(Render_OpenGL, "Sync fixed function OpenGL state here!");
+ LOG_DEBUG(Render_OpenGL, "Sync fixed function OpenGL state here");
CheckExtensions();
}
@@ -138,7 +139,7 @@ void RasterizerOpenGL::CheckExtensions() {
}
GLuint RasterizerOpenGL::SetupVertexFormat() {
- auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
+ auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
if (!gpu.dirty_flags.vertex_attrib_format) {
@@ -177,7 +178,7 @@ GLuint RasterizerOpenGL::SetupVertexFormat() {
continue;
const auto& buffer = regs.vertex_array[attrib.buffer];
- LOG_TRACE(HW_GPU,
+ LOG_TRACE(Render_OpenGL,
"vertex attrib {}, count={}, size={}, type={}, offset={}, normalize={}",
index, attrib.ComponentCount(), attrib.SizeString(), attrib.TypeString(),
attrib.offset.Value(), attrib.IsNormalized());
@@ -200,24 +201,24 @@ GLuint RasterizerOpenGL::SetupVertexFormat() {
}
// Rebinding the VAO invalidates the vertex buffer bindings.
- gpu.dirty_flags.vertex_array = 0xFFFFFFFF;
+ gpu.dirty_flags.vertex_array.set();
state.draw.vertex_array = vao_entry.handle;
return vao_entry.handle;
}
void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) {
- auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
+ auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
- if (!gpu.dirty_flags.vertex_array)
+ if (gpu.dirty_flags.vertex_array.none())
return;
MICROPROFILE_SCOPE(OpenGL_VB);
// Upload all guest vertex arrays sequentially to our buffer
for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
- if (~gpu.dirty_flags.vertex_array & (1u << index))
+ if (!gpu.dirty_flags.vertex_array[index])
continue;
const auto& vertex_array = regs.vertex_array[index];
@@ -244,11 +245,11 @@ void RasterizerOpenGL::SetupVertexBuffer(GLuint vao) {
}
}
- gpu.dirty_flags.vertex_array = 0;
+ gpu.dirty_flags.vertex_array.reset();
}
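These hunks migrate dirty_flags.vertex_array from a raw u32 mask to a std::bitset; the operations map one-to-one (a sketch, assuming a declaration like the first line):

    std::bitset<Maxwell::NumVertexArrays> vertex_array; // previously: u32 vertex_array
    vertex_array.set();          // previously: vertex_array = 0xFFFFFFFF
    vertex_array.reset();        // previously: vertex_array = 0
    if (vertex_array.none()) {}  // previously: if (!vertex_array)
    if (!vertex_array[index]) {} // previously: if (~vertex_array & (1u << index))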
DrawParameters RasterizerOpenGL::SetupDraw() {
- const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
+ const auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
const bool is_indexed = accelerate_draw == AccelDraw::Indexed;
@@ -297,7 +298,7 @@ DrawParameters RasterizerOpenGL::SetupDraw() {
void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
MICROPROFILE_SCOPE(OpenGL_Shader);
- auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
+ auto& gpu = system.GPU().Maxwell3D();
BaseBindings base_bindings;
std::array<bool, Maxwell::NumClipDistances> clip_distances{};
@@ -343,9 +344,8 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
shader_program_manager->UseProgrammableFragmentShader(program_handle);
break;
default:
- LOG_CRITICAL(HW_GPU, "Unimplemented shader index={}, enable={}, offset=0x{:08X}", index,
- shader_config.enable.Value(), shader_config.offset);
- UNREACHABLE();
+ UNIMPLEMENTED_MSG("Unimplemented shader index={}, enable={}, offset=0x{:08X}", index,
+ shader_config.enable.Value(), shader_config.offset);
}
const auto stage_enum = static_cast<Maxwell::ShaderStage>(stage);
@@ -414,7 +414,7 @@ void RasterizerOpenGL::SetupCachedFramebuffer(const FramebufferCacheKey& fbkey,
}
std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
std::size_t size = 0;
for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
@@ -432,7 +432,7 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
}
std::size_t RasterizerOpenGL::CalculateIndexBufferSize() const {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
return static_cast<std::size_t>(regs.index_array.count) *
static_cast<std::size_t>(regs.index_array.FormatSizeInBytes());
@@ -449,7 +449,7 @@ static constexpr auto RangeFromInterval(Map& map, const Interval& interval) {
return boost::make_iterator_range(map.equal_range(interval));
}
-void RasterizerOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {
+void RasterizerOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
const u64 page_start{addr >> Memory::PAGE_BITS};
const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS};
@@ -488,13 +488,13 @@ std::pair<bool, bool> RasterizerOpenGL::ConfigureFramebuffers(
OpenGLState& current_state, bool using_color_fb, bool using_depth_fb, bool preserve_contents,
std::optional<std::size_t> single_color_target) {
MICROPROFILE_SCOPE(OpenGL_Framebuffer);
- const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
+ auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
const FramebufferConfigState fb_config_state{using_color_fb, using_depth_fb, preserve_contents,
single_color_target};
- if (fb_config_state == current_framebuffer_config_state && gpu.dirty_flags.color_buffer == 0 &&
- !gpu.dirty_flags.zeta_buffer) {
+ if (fb_config_state == current_framebuffer_config_state &&
+ gpu.dirty_flags.color_buffer.none() && !gpu.dirty_flags.zeta_buffer) {
// Only skip if the previous ConfigureFramebuffers call was from the same kind (multiple or
// single color targets). This is done because the guest registers may not change but the
// host framebuffer may contain different attachments
@@ -582,7 +582,7 @@ void RasterizerOpenGL::Clear() {
const auto prev_state{state};
SCOPE_EXIT({ prev_state.Apply(); });
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
bool use_color{};
bool use_depth{};
bool use_stencil{};
@@ -673,7 +673,7 @@ void RasterizerOpenGL::DrawArrays() {
return;
MICROPROFILE_SCOPE(OpenGL_Drawing);
- auto& gpu = Core::System::GetInstance().GPU().Maxwell3D();
+ auto& gpu = system.GPU().Maxwell3D();
const auto& regs = gpu.regs;
ConfigureFramebuffers(state);
@@ -721,10 +721,10 @@ void RasterizerOpenGL::DrawArrays() {
// Add space for at least 18 constant buffers
buffer_size += Maxwell::MaxConstBuffers * (MaxConstbufferSize + uniform_buffer_alignment);
- bool invalidate = buffer_cache.Map(buffer_size);
+ const bool invalidate = buffer_cache.Map(buffer_size);
if (invalidate) {
// As all cached buffers are invalidated, we need to recheck their state.
- gpu.dirty_flags.vertex_array = 0xFFFFFFFF;
+ gpu.dirty_flags.vertex_array.set();
}
const GLuint vao = SetupVertexFormat();
@@ -738,33 +738,21 @@ void RasterizerOpenGL::DrawArrays() {
shader_program_manager->ApplyTo(state);
state.Apply();
- // Execute draw call
+ res_cache.SignalPreDrawCall();
params.DispatchDraw();
-
- // Disable scissor test
- state.viewports[0].scissor.enabled = false;
+ res_cache.SignalPostDrawCall();
accelerate_draw = AccelDraw::Disabled;
-
- // Unbind textures for potential future use as framebuffer attachments
- for (auto& texture_unit : state.texture_units) {
- texture_unit.Unbind();
- }
- state.Apply();
}
void RasterizerOpenGL::FlushAll() {}
-void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
+void RasterizerOpenGL::FlushRegion(CacheAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
-
- if (Settings::values.use_accurate_gpu_emulation) {
- // Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit
- res_cache.FlushRegion(addr, size);
- }
+ res_cache.FlushRegion(addr, size);
}
-void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
+void RasterizerOpenGL::InvalidateRegion(CacheAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
res_cache.InvalidateRegion(addr, size);
shader_cache.InvalidateRegion(addr, size);
@@ -772,15 +760,15 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
buffer_cache.InvalidateRegion(addr, size);
}
-void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
+void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
FlushRegion(addr, size);
InvalidateRegion(addr, size);
}
bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
- const MathUtil::Rectangle<u32>& src_rect,
- const MathUtil::Rectangle<u32>& dst_rect) {
+ const Common::Rectangle<u32>& src_rect,
+ const Common::Rectangle<u32>& dst_rect) {
MICROPROFILE_SCOPE(OpenGL_Blits);
res_cache.FermiCopySurface(src, dst, src_rect, dst_rect);
return true;
@@ -794,7 +782,7 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
- const auto& surface{res_cache.TryFindFramebufferSurface(framebuffer_addr)};
+ const auto& surface{res_cache.TryFindFramebufferSurface(Memory::GetPointer(framebuffer_addr))};
if (!surface) {
return {};
}
@@ -805,7 +793,10 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
VideoCore::Surface::PixelFormatFromGPUPixelFormat(config.pixel_format)};
ASSERT_MSG(params.width == config.width, "Framebuffer width is different");
ASSERT_MSG(params.height == config.height, "Framebuffer height is different");
- ASSERT_MSG(params.pixel_format == pixel_format, "Framebuffer pixel_format is different");
+
+ if (params.pixel_format != pixel_format) {
+ LOG_WARNING(Render_OpenGL, "Framebuffer pixel_format is different");
+ }
screen_info.display_texture = surface->Texture().handle;
@@ -814,104 +805,87 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
void RasterizerOpenGL::SamplerInfo::Create() {
sampler.Create();
- mag_filter = min_filter = Tegra::Texture::TextureFilter::Linear;
- wrap_u = wrap_v = wrap_p = Tegra::Texture::WrapMode::Wrap;
- uses_depth_compare = false;
+ mag_filter = Tegra::Texture::TextureFilter::Linear;
+ min_filter = Tegra::Texture::TextureFilter::Linear;
+ wrap_u = Tegra::Texture::WrapMode::Wrap;
+ wrap_v = Tegra::Texture::WrapMode::Wrap;
+ wrap_p = Tegra::Texture::WrapMode::Wrap;
+ use_depth_compare = false;
depth_compare_func = Tegra::Texture::DepthCompareFunc::Never;
- // default is GL_LINEAR_MIPMAP_LINEAR
+ // OpenGL's default is GL_LINEAR_MIPMAP_LINEAR
glSamplerParameteri(sampler.handle, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- // Other attributes have correct defaults
glSamplerParameteri(sampler.handle, GL_TEXTURE_COMPARE_FUNC, GL_NEVER);
+
+ // Other attributes have correct defaults
}
void RasterizerOpenGL::SamplerInfo::SyncWithConfig(const Tegra::Texture::TSCEntry& config) {
- const GLuint s = sampler.handle;
+ const GLuint sampler_id = sampler.handle;
if (mag_filter != config.mag_filter) {
mag_filter = config.mag_filter;
glSamplerParameteri(
- s, GL_TEXTURE_MAG_FILTER,
+ sampler_id, GL_TEXTURE_MAG_FILTER,
MaxwellToGL::TextureFilterMode(mag_filter, Tegra::Texture::TextureMipmapFilter::None));
}
- if (min_filter != config.min_filter || mip_filter != config.mip_filter) {
+ if (min_filter != config.min_filter || mipmap_filter != config.mipmap_filter) {
min_filter = config.min_filter;
- mip_filter = config.mip_filter;
- glSamplerParameteri(s, GL_TEXTURE_MIN_FILTER,
- MaxwellToGL::TextureFilterMode(min_filter, mip_filter));
+ mipmap_filter = config.mipmap_filter;
+ glSamplerParameteri(sampler_id, GL_TEXTURE_MIN_FILTER,
+ MaxwellToGL::TextureFilterMode(min_filter, mipmap_filter));
}
if (wrap_u != config.wrap_u) {
wrap_u = config.wrap_u;
- glSamplerParameteri(s, GL_TEXTURE_WRAP_S, MaxwellToGL::WrapMode(wrap_u));
+ glSamplerParameteri(sampler_id, GL_TEXTURE_WRAP_S, MaxwellToGL::WrapMode(wrap_u));
}
if (wrap_v != config.wrap_v) {
wrap_v = config.wrap_v;
- glSamplerParameteri(s, GL_TEXTURE_WRAP_T, MaxwellToGL::WrapMode(wrap_v));
+ glSamplerParameteri(sampler_id, GL_TEXTURE_WRAP_T, MaxwellToGL::WrapMode(wrap_v));
}
if (wrap_p != config.wrap_p) {
wrap_p = config.wrap_p;
- glSamplerParameteri(s, GL_TEXTURE_WRAP_R, MaxwellToGL::WrapMode(wrap_p));
+ glSamplerParameteri(sampler_id, GL_TEXTURE_WRAP_R, MaxwellToGL::WrapMode(wrap_p));
}
- if (uses_depth_compare != (config.depth_compare_enabled == 1)) {
- uses_depth_compare = (config.depth_compare_enabled == 1);
- if (uses_depth_compare) {
- glSamplerParameteri(s, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
- } else {
- glSamplerParameteri(s, GL_TEXTURE_COMPARE_MODE, GL_NONE);
- }
+ if (const bool enabled = config.depth_compare_enabled == 1; use_depth_compare != enabled) {
+ use_depth_compare = enabled;
+ glSamplerParameteri(sampler_id, GL_TEXTURE_COMPARE_MODE,
+ use_depth_compare ? GL_COMPARE_REF_TO_TEXTURE : GL_NONE);
}
if (depth_compare_func != config.depth_compare_func) {
depth_compare_func = config.depth_compare_func;
- glSamplerParameteri(s, GL_TEXTURE_COMPARE_FUNC,
+ glSamplerParameteri(sampler_id, GL_TEXTURE_COMPARE_FUNC,
MaxwellToGL::DepthCompareFunc(depth_compare_func));
}
- GLvec4 new_border_color;
- if (config.srgb_conversion) {
- new_border_color[0] = config.srgb_border_color_r / 255.0f;
- new_border_color[1] = config.srgb_border_color_g / 255.0f;
- new_border_color[2] = config.srgb_border_color_g / 255.0f;
- } else {
- new_border_color[0] = config.border_color_r;
- new_border_color[1] = config.border_color_g;
- new_border_color[2] = config.border_color_b;
- }
- new_border_color[3] = config.border_color_a;
-
- if (border_color != new_border_color) {
+ if (const auto new_border_color = config.GetBorderColor(); border_color != new_border_color) {
border_color = new_border_color;
- glSamplerParameterfv(s, GL_TEXTURE_BORDER_COLOR, border_color.data());
+ glSamplerParameterfv(sampler_id, GL_TEXTURE_BORDER_COLOR, border_color.data());
}
- const float anisotropic_max = static_cast<float>(1 << config.max_anisotropy.Value());
- if (anisotropic_max != max_anisotropic) {
- max_anisotropic = anisotropic_max;
+ if (const float anisotropic = config.GetMaxAnisotropy(); max_anisotropic != anisotropic) {
+ max_anisotropic = anisotropic;
if (GLAD_GL_ARB_texture_filter_anisotropic) {
- glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY, max_anisotropic);
+ glSamplerParameterf(sampler_id, GL_TEXTURE_MAX_ANISOTROPY, max_anisotropic);
} else if (GLAD_GL_EXT_texture_filter_anisotropic) {
- glSamplerParameterf(s, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_anisotropic);
+ glSamplerParameterf(sampler_id, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_anisotropic);
}
}
- const float lod_min = static_cast<float>(config.min_lod_clamp.Value()) / 256.0f;
- if (lod_min != min_lod) {
- min_lod = lod_min;
- glSamplerParameterf(s, GL_TEXTURE_MIN_LOD, min_lod);
- }
- const float lod_max = static_cast<float>(config.max_lod_clamp.Value()) / 256.0f;
- if (lod_max != max_lod) {
- max_lod = lod_max;
- glSamplerParameterf(s, GL_TEXTURE_MAX_LOD, max_lod);
+ if (const float min = config.GetMinLod(); min_lod != min) {
+ min_lod = min;
+ glSamplerParameterf(sampler_id, GL_TEXTURE_MIN_LOD, min_lod);
}
- const u32 bias = config.mip_lod_bias.Value();
- // Sign extend the 13-bit value.
- constexpr u32 mask = 1U << (13 - 1);
- const float bias_lod = static_cast<s32>((bias ^ mask) - mask) / 256.f;
- if (lod_bias != bias_lod) {
- lod_bias = bias_lod;
- glSamplerParameterf(s, GL_TEXTURE_LOD_BIAS, lod_bias);
+ if (const float max = config.GetMaxLod(); max_lod != max) {
+ max_lod = max;
+ glSamplerParameterf(sampler_id, GL_TEXTURE_MAX_LOD, max_lod);
+ }
+
+ if (const float bias = config.GetLodBias(); lod_bias != bias) {
+ lod_bias = bias;
+ glSamplerParameterf(sampler_id, GL_TEXTURE_LOD_BIAS, lod_bias);
}
}
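The LOD parameters now come from TSCEntry getters; the removed 13-bit sign extension is worth a worked reference, since (bias ^ mask) - mask is easy to misread (a sketch; the function name is illustrative):

    constexpr float LodBiasFromRaw(u32 raw) {
        constexpr u32 mask = 1U << (13 - 1); // sign bit of the 13-bit field
        // XOR clears/flips the sign bit and the subtraction restores it with
        // sign extension: raw = 0x1FFF (all 13 bits set) -> s32 -1 -> -1/256.
        const s32 value = static_cast<s32>((raw ^ mask) - mask);
        return static_cast<float>(value) / 256.0f;
    }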
@@ -919,7 +893,7 @@ void RasterizerOpenGL::SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::Shader
const Shader& shader, GLuint program_handle,
BaseBindings base_bindings) {
MICROPROFILE_SCOPE(OpenGL_UBO);
- const auto& gpu = Core::System::GetInstance().GPU();
+ const auto& gpu = system.GPU();
const auto& maxwell3d = gpu.Maxwell3D();
const auto& shader_stage = maxwell3d.state.shader_stages[static_cast<std::size_t>(stage)];
const auto& entries = shader->GetShaderEntries().const_buffers;
@@ -951,8 +925,8 @@ void RasterizerOpenGL::SetupConstBuffers(Tegra::Engines::Maxwell3D::Regs::Shader
size = buffer.size;
if (size > MaxConstbufferSize) {
- LOG_CRITICAL(HW_GPU, "indirect constbuffer size {} exceeds maximum {}", size,
- MaxConstbufferSize);
+ LOG_WARNING(Render_OpenGL, "Indirect constbuffer size {} exceeds maximum {}", size,
+ MaxConstbufferSize);
size = MaxConstbufferSize;
}
} else {
@@ -998,7 +972,7 @@ void RasterizerOpenGL::SetupGlobalRegions(Tegra::Engines::Maxwell3D::Regs::Shade
void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& shader,
GLuint program_handle, BaseBindings base_bindings) {
MICROPROFILE_SCOPE(OpenGL_Texture);
- const auto& gpu = Core::System::GetInstance().GPU();
+ const auto& gpu = system.GPU();
const auto& maxwell3d = gpu.Maxwell3D();
const auto& entries = shader->GetShaderEntries().samplers;
@@ -1012,10 +986,9 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s
texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc);
- Surface surface = res_cache.GetTextureSurface(texture, entry);
- if (surface != nullptr) {
+ if (Surface surface = res_cache.GetTextureSurface(texture, entry); surface) {
state.texture_units[current_bindpoint].texture =
- entry.IsArray() ? surface->TextureLayer().handle : surface->Texture().handle;
+ surface->Texture(entry.IsArray()).handle;
surface->UpdateSwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source,
texture.tic.w_source);
} else {
@@ -1026,7 +999,7 @@ void RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, const Shader& s
}
void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
const bool geometry_shaders_enabled =
regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry));
const std::size_t viewport_count =
@@ -1034,7 +1007,7 @@ void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) {
for (std::size_t i = 0; i < viewport_count; i++) {
auto& viewport = current_state.viewports[i];
const auto& src = regs.viewports[i];
- const MathUtil::Rectangle<s32> viewport_rect{regs.viewport_transform[i].GetRect()};
+ const Common::Rectangle<s32> viewport_rect{regs.viewport_transform[i].GetRect()};
viewport.x = viewport_rect.left;
viewport.y = viewport_rect.bottom;
viewport.width = viewport_rect.GetWidth();
@@ -1049,7 +1022,7 @@ void RasterizerOpenGL::SyncViewport(OpenGLState& current_state) {
void RasterizerOpenGL::SyncClipEnabled(
const std::array<bool, Maxwell::Regs::NumClipDistances>& clip_mask) {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
const std::array<bool, Maxwell::Regs::NumClipDistances> reg_state{
regs.clip_distance_enabled.c0 != 0, regs.clip_distance_enabled.c1 != 0,
regs.clip_distance_enabled.c2 != 0, regs.clip_distance_enabled.c3 != 0,
@@ -1066,7 +1039,7 @@ void RasterizerOpenGL::SyncClipCoef() {
}
void RasterizerOpenGL::SyncCullMode() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.cull.enabled = regs.cull.enabled != 0;
@@ -1090,14 +1063,14 @@ void RasterizerOpenGL::SyncCullMode() {
}
void RasterizerOpenGL::SyncPrimitiveRestart() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.primitive_restart.enabled = regs.primitive_restart.enabled;
state.primitive_restart.index = regs.primitive_restart.index;
}
void RasterizerOpenGL::SyncDepthTestState() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.depth.test_enabled = regs.depth_test_enable != 0;
state.depth.write_mask = regs.depth_write_enabled ? GL_TRUE : GL_FALSE;
@@ -1109,7 +1082,7 @@ void RasterizerOpenGL::SyncDepthTestState() {
}
void RasterizerOpenGL::SyncStencilTestState() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.stencil.test_enabled = regs.stencil_enable != 0;
if (!regs.stencil_enable) {
@@ -1143,7 +1116,7 @@ void RasterizerOpenGL::SyncStencilTestState() {
}
void RasterizerOpenGL::SyncColorMask() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
const std::size_t count =
regs.independent_blend_enable ? Tegra::Engines::Maxwell3D::Regs::NumRenderTargets : 1;
for (std::size_t i = 0; i < count; i++) {
@@ -1157,18 +1130,18 @@ void RasterizerOpenGL::SyncColorMask() {
}
void RasterizerOpenGL::SyncMultiSampleState() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.multisample_control.alpha_to_coverage = regs.multisample_control.alpha_to_coverage != 0;
state.multisample_control.alpha_to_one = regs.multisample_control.alpha_to_one != 0;
}
void RasterizerOpenGL::SyncFragmentColorClampState() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.fragment_color_clamp.enabled = regs.frag_color_clamp != 0;
}
void RasterizerOpenGL::SyncBlendState() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.blend_color.red = regs.blend_color.r;
state.blend_color.green = regs.blend_color.g;
@@ -1210,7 +1183,7 @@ void RasterizerOpenGL::SyncBlendState() {
}
void RasterizerOpenGL::SyncLogicOpState() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.logic_op.enabled = regs.logic_op.enable != 0;
@@ -1224,7 +1197,7 @@ void RasterizerOpenGL::SyncLogicOpState() {
}
void RasterizerOpenGL::SyncScissorTest(OpenGLState& current_state) {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
const bool geometry_shaders_enabled =
regs.IsShaderConfigEnabled(static_cast<size_t>(Maxwell::ShaderProgram::Geometry));
const std::size_t viewport_count =
@@ -1246,21 +1219,17 @@ void RasterizerOpenGL::SyncScissorTest(OpenGLState& current_state) {
}
void RasterizerOpenGL::SyncTransformFeedback() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
-
- if (regs.tfb_enabled != 0) {
- LOG_CRITICAL(Render_OpenGL, "Transform feedbacks are not implemented");
- UNREACHABLE();
- }
+ const auto& regs = system.GPU().Maxwell3D().regs;
+ UNIMPLEMENTED_IF_MSG(regs.tfb_enabled != 0, "Transform feedbacks are not implemented");
}
void RasterizerOpenGL::SyncPointState() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.point.size = regs.point_size;
}
void RasterizerOpenGL::SyncPolygonOffset() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
+ const auto& regs = system.GPU().Maxwell3D().regs;
state.polygon_offset.fill_enable = regs.polygon_offset_fill_enable != 0;
state.polygon_offset.line_enable = regs.polygon_offset_line_enable != 0;
state.polygon_offset.point_enable = regs.polygon_offset_point_enable != 0;
@@ -1270,13 +1239,9 @@ void RasterizerOpenGL::SyncPolygonOffset() {
}
void RasterizerOpenGL::CheckAlphaTests() {
- const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
-
- if (regs.alpha_test_enabled != 0 && regs.rt_control.count > 1) {
- LOG_CRITICAL(Render_OpenGL, "Alpha Testing is enabled with Multiple Render Targets, "
- "this behavior is undefined.");
- UNREACHABLE();
- }
+ const auto& regs = system.GPU().Maxwell3D().regs;
+ UNIMPLEMENTED_IF_MSG(regs.alpha_test_enabled != 0 && regs.rt_control.count > 1,
+ "Alpha Testing is enabled with more than one rendertarget");
}
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 258d62259..30f3e8acb 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -57,17 +57,17 @@ public:
void DrawArrays() override;
void Clear() override;
void FlushAll() override;
- void FlushRegion(VAddr addr, u64 size) override;
- void InvalidateRegion(VAddr addr, u64 size) override;
- void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
+ void FlushRegion(CacheAddr addr, u64 size) override;
+ void InvalidateRegion(CacheAddr addr, u64 size) override;
+ void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
- const MathUtil::Rectangle<u32>& src_rect,
- const MathUtil::Rectangle<u32>& dst_rect) override;
+ const Common::Rectangle<u32>& src_rect,
+ const Common::Rectangle<u32>& dst_rect) override;
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
u32 pixel_stride) override;
bool AccelerateDrawBatch(bool is_indexed) override;
- void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) override;
+ void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override;
void LoadDiskResources(const std::atomic_bool& stop_loading,
const VideoCore::DiskResourceLoadCallback& callback) override;
@@ -94,11 +94,12 @@ private:
private:
Tegra::Texture::TextureFilter mag_filter = Tegra::Texture::TextureFilter::Nearest;
Tegra::Texture::TextureFilter min_filter = Tegra::Texture::TextureFilter::Nearest;
- Tegra::Texture::TextureMipmapFilter mip_filter = Tegra::Texture::TextureMipmapFilter::None;
+ Tegra::Texture::TextureMipmapFilter mipmap_filter =
+ Tegra::Texture::TextureMipmapFilter::None;
Tegra::Texture::WrapMode wrap_u = Tegra::Texture::WrapMode::ClampToEdge;
Tegra::Texture::WrapMode wrap_v = Tegra::Texture::WrapMode::ClampToEdge;
Tegra::Texture::WrapMode wrap_p = Tegra::Texture::WrapMode::ClampToEdge;
- bool uses_depth_compare = false;
+ bool use_depth_compare = false;
Tegra::Texture::DepthCompareFunc depth_compare_func =
Tegra::Texture::DepthCompareFunc::Always;
GLvec4 border_color = {};
@@ -214,6 +215,7 @@ private:
GlobalRegionCacheOpenGL global_cache;
Core::Frontend::EmuWindow& emu_window;
+ Core::System& system;
ScreenInfo& screen_info;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
index 59f671048..451de00e8 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
@@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include <algorithm>
+#include <optional>
#include <glad/glad.h>
#include "common/alignment.h"
@@ -20,7 +21,7 @@
#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
#include "video_core/renderer_opengl/utils.h"
#include "video_core/surface.h"
-#include "video_core/textures/astc.h"
+#include "video_core/textures/convert.h"
#include "video_core/textures/decoders.h"
namespace OpenGL {
@@ -60,6 +61,7 @@ void SurfaceParams::InitCacheParameters(Tegra::GPUVAddr gpu_addr_) {
addr = cpu_addr ? *cpu_addr : 0;
gpu_addr = gpu_addr_;
+ host_ptr = Memory::GetPointer(addr);
size_in_bytes = SizeInBytesRaw();
if (IsPixelFormatASTC(pixel_format)) {
@@ -399,7 +401,28 @@ static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType
return format;
}
-MathUtil::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const {
+/// Returns the array counterpart of a non-array target and vice versa, or GL_NONE if none exists
+constexpr GLenum GetArrayDiscrepantTarget(SurfaceTarget target) {
+ switch (target) {
+ case SurfaceTarget::Texture1D:
+ return GL_TEXTURE_1D_ARRAY;
+ case SurfaceTarget::Texture2D:
+ return GL_TEXTURE_2D_ARRAY;
+ case SurfaceTarget::Texture3D:
+ return GL_NONE;
+ case SurfaceTarget::Texture1DArray:
+ return GL_TEXTURE_1D;
+ case SurfaceTarget::Texture2DArray:
+ return GL_TEXTURE_2D;
+ case SurfaceTarget::TextureCubemap:
+ return GL_TEXTURE_CUBE_MAP_ARRAY;
+ case SurfaceTarget::TextureCubeArray:
+ return GL_TEXTURE_CUBE_MAP;
+ }
+ return GL_NONE;
+}
+
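A plausible consumer of this mapping is texture view creation, sampling a cached surface with the opposite arrayness (an illustrative sketch, not taken from the diff):

    const GLenum view_target = GetArrayDiscrepantTarget(params.target);
    if (view_target != GL_NONE) {
        GLuint view;
        glGenTextures(1, &view);
        // One layer of the original texture, reinterpreted under the flipped target.
        glTextureView(view, view_target, texture.handle, gl_internal_format, 0, 1, 0, 1);
    }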
+Common::Rectangle<u32> SurfaceParams::GetRect(u32 mip_level) const {
u32 actual_height{std::max(1U, unaligned_height >> mip_level)};
if (IsPixelFormatASTC(pixel_format)) {
        // ASTC formats must stop at the ASTC block size boundary
@@ -423,8 +446,8 @@ void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params,
for (u32 i = 0; i < params.depth; i++) {
MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
- params.MipBlockDepth(mip_level), params.tile_width_spacing, 1,
- gl_buffer.data() + offset_gl, gl_size, params.addr + offset);
+ params.MipBlockDepth(mip_level), 1, params.tile_width_spacing,
+ gl_buffer.data() + offset_gl, params.addr + offset);
offset += layer_size;
offset_gl += gl_size;
}
@@ -433,7 +456,7 @@ void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params,
MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level),
params.MipBlockHeight(mip_level), params.MipHeight(mip_level),
params.MipBlockDepth(mip_level), depth, params.tile_width_spacing,
- gl_buffer.data(), gl_buffer.size(), params.addr + offset);
+ gl_buffer.data(), params.addr + offset);
}
}
@@ -541,14 +564,16 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac
}
CachedSurface::CachedSurface(const SurfaceParams& params)
- : params(params), gl_target(SurfaceTargetToGL(params.target)),
- cached_size_in_bytes(params.size_in_bytes) {
+    : RasterizerCacheObject{params.host_ptr}, params{params},
+      gl_target{SurfaceTargetToGL(params.target)}, cached_size_in_bytes{params.size_in_bytes} {
texture.Create(gl_target);
// TODO(Rodrigo): Using params.GetRect() returns a different size than using its Mip*(0)
// alternatives. This signals a bug on those functions.
const auto width = static_cast<GLsizei>(params.MipWidth(0));
const auto height = static_cast<GLsizei>(params.MipHeight(0));
+ memory_size = params.MemorySize();
+ reinterpreted = false;
const auto& format_tuple = GetFormatTuple(params.pixel_format, params.component_type);
gl_internal_format = format_tuple.internal_format;
@@ -594,103 +619,6 @@ CachedSurface::CachedSurface(const SurfaceParams& params)
}
}
-static void ConvertS8Z24ToZ24S8(std::vector<u8>& data, u32 width, u32 height, bool reverse) {
- union S8Z24 {
- BitField<0, 24, u32> z24;
- BitField<24, 8, u32> s8;
- };
- static_assert(sizeof(S8Z24) == 4, "S8Z24 is incorrect size");
-
- union Z24S8 {
- BitField<0, 8, u32> s8;
- BitField<8, 24, u32> z24;
- };
- static_assert(sizeof(Z24S8) == 4, "Z24S8 is incorrect size");
-
- S8Z24 s8z24_pixel{};
- Z24S8 z24s8_pixel{};
- constexpr auto bpp{GetBytesPerPixel(PixelFormat::S8Z24)};
- for (std::size_t y = 0; y < height; ++y) {
- for (std::size_t x = 0; x < width; ++x) {
- const std::size_t offset{bpp * (y * width + x)};
- if (reverse) {
- std::memcpy(&z24s8_pixel, &data[offset], sizeof(Z24S8));
- s8z24_pixel.s8.Assign(z24s8_pixel.s8);
- s8z24_pixel.z24.Assign(z24s8_pixel.z24);
- std::memcpy(&data[offset], &s8z24_pixel, sizeof(S8Z24));
- } else {
- std::memcpy(&s8z24_pixel, &data[offset], sizeof(S8Z24));
- z24s8_pixel.s8.Assign(s8z24_pixel.s8);
- z24s8_pixel.z24.Assign(s8z24_pixel.z24);
- std::memcpy(&data[offset], &z24s8_pixel, sizeof(Z24S8));
- }
- }
- }
-}
-
-/**
- * Helper function to perform software conversion (as needed) when loading a buffer from Switch
- * memory. This is for Maxwell pixel formats that cannot be represented as-is in OpenGL or with
- * typical desktop GPUs.
- */
-static void ConvertFormatAsNeeded_LoadGLBuffer(std::vector<u8>& data, PixelFormat pixel_format,
- u32 width, u32 height, u32 depth) {
- switch (pixel_format) {
- case PixelFormat::ASTC_2D_4X4:
- case PixelFormat::ASTC_2D_8X8:
- case PixelFormat::ASTC_2D_8X5:
- case PixelFormat::ASTC_2D_5X4:
- case PixelFormat::ASTC_2D_5X5:
- case PixelFormat::ASTC_2D_4X4_SRGB:
- case PixelFormat::ASTC_2D_8X8_SRGB:
- case PixelFormat::ASTC_2D_8X5_SRGB:
- case PixelFormat::ASTC_2D_5X4_SRGB:
- case PixelFormat::ASTC_2D_5X5_SRGB:
- case PixelFormat::ASTC_2D_10X8:
- case PixelFormat::ASTC_2D_10X8_SRGB: {
- // Convert ASTC pixel formats to RGBA8, as most desktop GPUs do not support ASTC.
- u32 block_width{};
- u32 block_height{};
- std::tie(block_width, block_height) = GetASTCBlockSize(pixel_format);
- data =
- Tegra::Texture::ASTC::Decompress(data, width, height, depth, block_width, block_height);
- break;
- }
- case PixelFormat::S8Z24:
- // Convert the S8Z24 depth format to Z24S8, as OpenGL does not support S8Z24.
- ConvertS8Z24ToZ24S8(data, width, height, false);
- break;
- }
-}
-
-/**
- * Helper function to perform software conversion (as needed) when flushing a buffer from OpenGL to
- * Switch memory. This is for Maxwell pixel formats that cannot be represented as-is in OpenGL or
- * with typical desktop GPUs.
- */
-static void ConvertFormatAsNeeded_FlushGLBuffer(std::vector<u8>& data, PixelFormat pixel_format,
- u32 width, u32 height) {
- switch (pixel_format) {
- case PixelFormat::ASTC_2D_4X4:
- case PixelFormat::ASTC_2D_8X8:
- case PixelFormat::ASTC_2D_4X4_SRGB:
- case PixelFormat::ASTC_2D_8X8_SRGB:
- case PixelFormat::ASTC_2D_5X5:
- case PixelFormat::ASTC_2D_5X5_SRGB:
- case PixelFormat::ASTC_2D_10X8:
- case PixelFormat::ASTC_2D_10X8_SRGB: {
- LOG_CRITICAL(HW_GPU, "Conversion of format {} after texture flushing is not implemented",
- static_cast<u32>(pixel_format));
- UNREACHABLE();
- break;
- }
- case PixelFormat::S8Z24:
- // Convert the Z24S8 depth format to S8Z24, as OpenGL does not support S8Z24.
- ConvertS8Z24ToZ24S8(data, width, height, true);
- break;
- }
-}
-
MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 192, 64));
void CachedSurface::LoadGLBuffer() {
MICROPROFILE_SCOPE(OpenGL_SurfaceLoad);
@@ -706,10 +634,9 @@ void CachedSurface::LoadGLBuffer() {
const u32 bpp = params.GetFormatBpp() / 8;
const u32 copy_size = params.width * bpp;
if (params.pitch == copy_size) {
- std::memcpy(gl_buffer[0].data(), Memory::GetPointer(params.addr),
- params.size_in_bytes_gl);
+ std::memcpy(gl_buffer[0].data(), params.host_ptr, params.size_in_bytes_gl);
} else {
- const u8* start = Memory::GetPointer(params.addr);
+ const u8* start{params.host_ptr};
u8* write_to = gl_buffer[0].data();
for (u32 h = params.height; h > 0; h--) {
std::memcpy(write_to, start, copy_size);
@@ -719,8 +646,16 @@ void CachedSurface::LoadGLBuffer() {
}
}
for (u32 i = 0; i < params.max_mip_level; i++) {
- ConvertFormatAsNeeded_LoadGLBuffer(gl_buffer[i], params.pixel_format, params.MipWidth(i),
- params.MipHeight(i), params.MipDepth(i));
+ const u32 width = params.MipWidth(i);
+ const u32 height = params.MipHeight(i);
+ const u32 depth = params.MipDepth(i);
+ if (VideoCore::Surface::IsPixelFormatASTC(params.pixel_format)) {
+ // Reserve size for RGBA8 conversion
+ constexpr std::size_t rgba_bpp = 4;
+ gl_buffer[i].resize(std::max(gl_buffer[i].size(), width * height * depth * rgba_bpp));
+ }
+ Tegra::Texture::ConvertFromGuestToHost(gl_buffer[i].data(), params.pixel_format, width,
+ height, depth, true, true);
}
}
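
[Editorial note — illustrative arithmetic for the reservation above: a 64x64x1 ASTC mip decompresses to RGBA8 at 4 bytes per texel, so gl_buffer[i] must hold at least 64 * 64 * 1 * 4 = 16384 bytes before ConvertFromGuestToHost decodes in place.]
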
@@ -743,10 +678,8 @@ void CachedSurface::FlushGLBuffer() {
glGetTextureImage(texture.handle, 0, tuple.format, tuple.type,
static_cast<GLsizei>(gl_buffer[0].size()), gl_buffer[0].data());
glPixelStorei(GL_PACK_ROW_LENGTH, 0);
- ConvertFormatAsNeeded_FlushGLBuffer(gl_buffer[0], params.pixel_format, params.width,
- params.height);
- const u8* const texture_src_data = Memory::GetPointer(params.addr);
- ASSERT(texture_src_data);
+ Tegra::Texture::ConvertFromHostToGuest(gl_buffer[0].data(), params.pixel_format, params.width,
+ params.height, params.depth, true, true);
if (params.is_tiled) {
ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture type {}",
params.block_width, static_cast<u32>(params.target));
@@ -756,9 +689,9 @@ void CachedSurface::FlushGLBuffer() {
const u32 bpp = params.GetFormatBpp() / 8;
const u32 copy_size = params.width * bpp;
if (params.pitch == copy_size) {
- std::memcpy(Memory::GetPointer(params.addr), gl_buffer[0].data(), GetSizeInBytes());
+ std::memcpy(params.host_ptr, gl_buffer[0].data(), GetSizeInBytes());
} else {
- u8* start = Memory::GetPointer(params.addr);
+ u8* start{params.host_ptr};
const u8* read_to = gl_buffer[0].data();
for (u32 h = params.height; h > 0; h--) {
std::memcpy(start, read_to, copy_size);
@@ -881,20 +814,22 @@ void CachedSurface::UploadGLMipmapTexture(u32 mip_map, GLuint read_fb_handle,
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
}
-void CachedSurface::EnsureTextureView() {
- if (texture_view.handle != 0)
+void CachedSurface::EnsureTextureDiscrepantView() {
+ if (discrepant_view.handle != 0)
return;
- const GLenum target{TargetLayer()};
+ const GLenum target{GetArrayDiscrepantTarget(params.target)};
+ ASSERT(target != GL_NONE);
+
const GLuint num_layers{target == GL_TEXTURE_CUBE_MAP_ARRAY ? 6u : 1u};
constexpr GLuint min_layer = 0;
constexpr GLuint min_level = 0;
- glGenTextures(1, &texture_view.handle);
- glTextureView(texture_view.handle, target, texture.handle, gl_internal_format, min_level,
+ glGenTextures(1, &discrepant_view.handle);
+ glTextureView(discrepant_view.handle, target, texture.handle, gl_internal_format, min_level,
params.max_mip_level, min_layer, num_layers);
- ApplyTextureDefaults(texture_view.handle, params.max_mip_level);
- glTextureParameteriv(texture_view.handle, GL_TEXTURE_SWIZZLE_RGBA,
+ ApplyTextureDefaults(discrepant_view.handle, params.max_mip_level);
+ glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA,
reinterpret_cast<const GLint*>(swizzle.data()));
}
@@ -920,8 +855,8 @@ void CachedSurface::UpdateSwizzle(Tegra::Texture::SwizzleSource swizzle_x,
swizzle = {new_x, new_y, new_z, new_w};
const auto swizzle_data = reinterpret_cast<const GLint*>(swizzle.data());
glTextureParameteriv(texture.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data);
- if (texture_view.handle != 0) {
- glTextureParameteriv(texture_view.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data);
+ if (discrepant_view.handle != 0) {
+ glTextureParameteriv(discrepant_view.handle, GL_TEXTURE_SWIZZLE_RGBA, swizzle_data);
}
}
@@ -962,30 +897,31 @@ Surface RasterizerCacheOpenGL::GetColorBufferSurface(std::size_t index, bool pre
auto& gpu{Core::System::GetInstance().GPU().Maxwell3D()};
const auto& regs{gpu.regs};
- if ((gpu.dirty_flags.color_buffer & (1u << static_cast<u32>(index))) == 0) {
- return last_color_buffers[index];
+ if (!gpu.dirty_flags.color_buffer[index]) {
+ return current_color_buffers[index];
}
- gpu.dirty_flags.color_buffer &= ~(1u << static_cast<u32>(index));
+ gpu.dirty_flags.color_buffer.reset(index);
ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
if (index >= regs.rt_control.count) {
- return last_color_buffers[index] = {};
+ return current_color_buffers[index] = {};
}
if (regs.rt[index].Address() == 0 || regs.rt[index].format == Tegra::RenderTargetFormat::NONE) {
- return last_color_buffers[index] = {};
+ return current_color_buffers[index] = {};
}
const SurfaceParams color_params{SurfaceParams::CreateForFramebuffer(index)};
- return last_color_buffers[index] = GetSurface(color_params, preserve_contents);
+ return current_color_buffers[index] = GetSurface(color_params, preserve_contents);
}
void RasterizerCacheOpenGL::LoadSurface(const Surface& surface) {
surface->LoadGLBuffer();
surface->UploadGLTexture(read_framebuffer.handle, draw_framebuffer.handle);
surface->MarkAsModified(false, *this);
+ surface->MarkForReload(false);
}
Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool preserve_contents) {
@@ -994,21 +930,26 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, bool pres
}
// Look up surface in the cache based on address
- Surface surface{TryGet(params.addr)};
+ Surface surface{TryGet(params.host_ptr)};
if (surface) {
if (surface->GetSurfaceParams().IsCompatibleSurface(params)) {
- // Use the cached surface as-is
+ // Use the cached surface as-is, reloading it first if it is out of sync with memory
+ if (surface->MustReload()) {
+ LoadSurface(surface);
+ }
return surface;
} else if (preserve_contents) {
// If surface parameters changed and we care about keeping the previous data, recreate
// the surface from the old one
Surface new_surface{RecreateSurface(surface, params)};
- Unregister(surface);
+ UnregisterSurface(surface);
Register(new_surface);
+ if (new_surface->IsUploaded()) {
+ RegisterReinterpretSurface(new_surface);
+ }
return new_surface;
} else {
// Delete the old surface before creating a new one to prevent collisions.
- Unregister(surface);
+ UnregisterSurface(surface);
}
}
@@ -1043,7 +984,7 @@ void RasterizerCacheOpenGL::FastLayeredCopySurface(const Surface& src_surface,
for (u32 layer = 0; layer < dst_params.depth; layer++) {
for (u32 mipmap = 0; mipmap < dst_params.max_mip_level; mipmap++) {
const VAddr sub_address = address + dst_params.GetMipmapLevelOffset(mipmap);
- const Surface& copy = TryGet(sub_address);
+ const Surface& copy = TryGet(Memory::GetPointer(sub_address));
if (!copy)
continue;
const auto& src_params{copy->GetSurfaceParams()};
@@ -1062,8 +1003,8 @@ void RasterizerCacheOpenGL::FastLayeredCopySurface(const Surface& src_surface,
}
static bool BlitSurface(const Surface& src_surface, const Surface& dst_surface,
- const MathUtil::Rectangle<u32>& src_rect,
- const MathUtil::Rectangle<u32>& dst_rect, GLuint read_fb_handle,
+ const Common::Rectangle<u32>& src_rect,
+ const Common::Rectangle<u32>& dst_rect, GLuint read_fb_handle,
GLuint draw_fb_handle, GLenum src_attachment = 0, GLenum dst_attachment = 0,
std::size_t cubemap_face = 0) {
@@ -1193,7 +1134,7 @@ static bool BlitSurface(const Surface& src_surface, const Surface& dst_surface,
void RasterizerCacheOpenGL::FermiCopySurface(
const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
const Tegra::Engines::Fermi2D::Regs::Surface& dst_config,
- const MathUtil::Rectangle<u32>& src_rect, const MathUtil::Rectangle<u32>& dst_rect) {
+ const Common::Rectangle<u32>& src_rect, const Common::Rectangle<u32>& dst_rect) {
const auto& src_params = SurfaceParams::CreateForFermiCopySurface(src_config);
const auto& dst_params = SurfaceParams::CreateForFermiCopySurface(dst_config);
@@ -1220,7 +1161,8 @@ void RasterizerCacheOpenGL::AccurateCopySurface(const Surface& src_surface,
const auto& dst_params{dst_surface->GetSurfaceParams()};
// Flush enough memory for both the source and destination surface
- FlushRegion(src_params.addr, std::max(src_params.MemorySize(), dst_params.MemorySize()));
+ FlushRegion(ToCacheAddr(src_params.host_ptr),
+ std::max(src_params.MemorySize(), dst_params.MemorySize()));
LoadSurface(dst_surface);
}
@@ -1257,7 +1199,11 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface,
case SurfaceTarget::TextureCubemap:
case SurfaceTarget::Texture2DArray:
case SurfaceTarget::TextureCubeArray:
- FastLayeredCopySurface(old_surface, new_surface);
+ if (old_params.pixel_format == new_params.pixel_format) {
+ FastLayeredCopySurface(old_surface, new_surface);
+ } else {
+ AccurateCopySurface(old_surface, new_surface);
+ }
break;
default:
LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}",
@@ -1268,8 +1214,8 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface,
return new_surface;
}
-Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(VAddr addr) const {
- return TryGet(addr);
+Surface RasterizerCacheOpenGL::TryFindFramebufferSurface(const u8* host_ptr) const {
+ return TryGet(host_ptr);
}
void RasterizerCacheOpenGL::ReserveSurface(const Surface& surface) {
@@ -1286,4 +1232,108 @@ Surface RasterizerCacheOpenGL::TryGetReservedSurface(const SurfaceParams& params
return {};
}
+static std::optional<u32> TryFindBestMipMap(std::size_t memory, const SurfaceParams& params,
+ u32 height) {
+ for (u32 i = 0; i < params.max_mip_level; i++) {
+ if (memory == params.GetMipmapSingleSize(i) && params.MipHeight(i) == height) {
+ return {i};
+ }
+ }
+ return {};
+}
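
[Editorial note — worked example with illustrative sizes: if level 2 of the destination stores 64x64 RGBA8 texels in a single layer, a render surface of 64 * 64 * 4 = 16384 bytes with height 64 matches that level, so TryFindBestMipMap returns 2.]
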
+
+static std::optional<u32> TryFindBestLayer(VAddr addr, const SurfaceParams& params, u32 mipmap) {
+ const std::size_t size = params.LayerMemorySize();
+ VAddr start = params.addr + params.GetMipmapLevelOffset(mipmap);
+ for (u32 i = 0; i < params.depth; i++) {
+ if (start == addr) {
+ return {i};
+ }
+ start += size;
+ }
+ return {};
+}
+
+static bool LayerFitReinterpretSurface(RasterizerCacheOpenGL& cache, const Surface render_surface,
+ const Surface blitted_surface) {
+ const auto& dst_params = blitted_surface->GetSurfaceParams();
+ const auto& src_params = render_surface->GetSurfaceParams();
+ const std::size_t src_memory_size = src_params.size_in_bytes;
+ const std::optional<u32> level =
+ TryFindBestMipMap(src_memory_size, dst_params, src_params.height);
+ if (level.has_value()) {
+ if (src_params.width == dst_params.MipWidthGobAligned(*level) &&
+ src_params.height == dst_params.MipHeight(*level) &&
+ src_params.block_height >= dst_params.MipBlockHeight(*level)) {
+ const std::optional<u32> slot =
+ TryFindBestLayer(render_surface->GetCpuAddr(), dst_params, *level);
+ if (slot.has_value()) {
+ glCopyImageSubData(render_surface->Texture().handle,
+ SurfaceTargetToGL(src_params.target), 0, 0, 0, 0,
+ blitted_surface->Texture().handle,
+ SurfaceTargetToGL(dst_params.target), *level, 0, 0, *slot,
+ dst_params.MipWidth(*level), dst_params.MipHeight(*level), 1);
+ blitted_surface->MarkAsModified(true, cache);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static bool IsReinterpretInvalid(const Surface render_surface, const Surface blitted_surface) {
+ const VAddr bound1 = blitted_surface->GetCpuAddr() + blitted_surface->GetMemorySize();
+ const VAddr bound2 = render_surface->GetCpuAddr() + render_surface->GetMemorySize();
+ if (bound2 > bound1)
+ return true;
+ const auto& dst_params = blitted_surface->GetSurfaceParams();
+ const auto& src_params = render_surface->GetSurfaceParams();
+ return (dst_params.component_type != src_params.component_type);
+}
+
+static bool IsReinterpretInvalidSecond(const Surface render_surface,
+ const Surface blitted_surface) {
+ const auto& dst_params = blitted_surface->GetSurfaceParams();
+ const auto& src_params = render_surface->GetSurfaceParams();
+ return (dst_params.height > src_params.height && dst_params.width > src_params.width);
+}
+
+bool RasterizerCacheOpenGL::PartialReinterpretSurface(Surface triggering_surface,
+ Surface intersect) {
+ if (IsReinterpretInvalid(triggering_surface, intersect)) {
+ UnregisterSurface(intersect);
+ return false;
+ }
+ if (!LayerFitReinterpretSurface(*this, triggering_surface, intersect)) {
+ if (IsReinterpretInvalidSecond(triggering_surface, intersect)) {
+ UnregisterSurface(intersect);
+ return false;
+ }
+ FlushObject(intersect);
+ FlushObject(triggering_surface);
+ intersect->MarkForReload(true);
+ }
+ return true;
+}
+
+void RasterizerCacheOpenGL::SignalPreDrawCall() {
+ if (texception && GLAD_GL_ARB_texture_barrier) {
+ glTextureBarrier();
+ }
+ texception = false;
+}
+
+void RasterizerCacheOpenGL::SignalPostDrawCall() {
+ for (u32 i = 0; i < Maxwell::NumRenderTargets; i++) {
+ if (current_color_buffers[i] != nullptr) {
+ Surface intersect =
+ CollideOnReinterpretedSurface(current_color_buffers[i]->GetCacheAddr());
+ if (intersect != nullptr) {
+ PartialReinterpretSurface(current_color_buffers[i], intersect);
+ texception = true;
+ }
+ }
+ }
+}
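
[Editorial sketch — assumed call pattern on the rasterizer side, which is not shown in this diff:]

    res_cache.SignalPreDrawCall();   // emits glTextureBarrier() if the previous draw set texception
    // ... issue the draw call ...
    res_cache.SignalPostDrawCall();  // collides bound color buffers against reinterpreted_surfaces
                                     // and flags texception for the next draw
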
+
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
index 89d733c50..b3afad139 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
@@ -28,12 +28,13 @@ namespace OpenGL {
class CachedSurface;
using Surface = std::shared_ptr<CachedSurface>;
-using SurfaceSurfaceRect_Tuple = std::tuple<Surface, Surface, MathUtil::Rectangle<u32>>;
+using SurfaceSurfaceRect_Tuple = std::tuple<Surface, Surface, Common::Rectangle<u32>>;
using SurfaceTarget = VideoCore::Surface::SurfaceTarget;
using SurfaceType = VideoCore::Surface::SurfaceType;
using PixelFormat = VideoCore::Surface::PixelFormat;
using ComponentType = VideoCore::Surface::ComponentType;
+using Maxwell = Tegra::Engines::Maxwell3D::Regs;
struct SurfaceParams {
enum class SurfaceClass {
@@ -71,7 +72,7 @@ struct SurfaceParams {
}
/// Returns the rectangle corresponding to this surface
- MathUtil::Rectangle<u32> GetRect(u32 mip_level = 0) const;
+ Common::Rectangle<u32> GetRect(u32 mip_level = 0) const;
/// Returns the total size of this surface in bytes, adjusted for compression
std::size_t SizeInBytesRaw(bool ignore_tiled = false) const {
@@ -140,10 +141,18 @@ struct SurfaceParams {
return offset;
}
+ std::size_t GetMipmapSingleSize(u32 mip_level) const {
+ return InnerMipmapMemorySize(mip_level, false, is_layered);
+ }
+
u32 MipWidth(u32 mip_level) const {
return std::max(1U, width >> mip_level);
}
+ u32 MipWidthGobAligned(u32 mip_level) const {
+ return Common::AlignUp(std::max(1U, width >> mip_level), 64U * 8U / GetFormatBpp());
+ }
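
[Editorial note — illustrative arithmetic: a GOB row is 64 bytes wide, so the alignment unit is 64 * 8 / bpp texels; with a 32 bpp format, widths are rounded up to multiples of 64 * 8 / 32 = 16 texels.]
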
+
u32 MipHeight(u32 mip_level) const {
return std::max(1U, height >> mip_level);
}
@@ -288,6 +297,7 @@ struct SurfaceParams {
bool srgb_conversion;
// Parameters used for caching
VAddr addr;
+ u8* host_ptr;
Tegra::GPUVAddr gpu_addr;
std::size_t size_in_bytes;
std::size_t size_in_bytes_gl;
@@ -336,9 +346,9 @@ class RasterizerOpenGL;
class CachedSurface final : public RasterizerCacheObject {
public:
- CachedSurface(const SurfaceParams& params);
+ explicit CachedSurface(const SurfaceParams& params);
- VAddr GetAddr() const override {
+ VAddr GetCpuAddr() const override {
return params.addr;
}
@@ -346,6 +356,10 @@ public:
return cached_size_in_bytes;
}
+ std::size_t GetMemorySize() const {
+ return memory_size;
+ }
+
void Flush() override {
FlushGLBuffer();
}
@@ -354,31 +368,19 @@ public:
return texture;
}
- const OGLTexture& TextureLayer() {
- if (params.is_array) {
- return Texture();
+ const OGLTexture& Texture(bool as_array) {
+ if (params.is_array == as_array) {
+ return texture;
+ } else {
+ EnsureTextureDiscrepantView();
+ return discrepant_view;
}
- EnsureTextureView();
- return texture_view;
}
GLenum Target() const {
return gl_target;
}
- GLenum TargetLayer() const {
- using VideoCore::Surface::SurfaceTarget;
- switch (params.target) {
- case SurfaceTarget::Texture1D:
- return GL_TEXTURE_1D_ARRAY;
- case SurfaceTarget::Texture2D:
- return GL_TEXTURE_2D_ARRAY;
- case SurfaceTarget::TextureCubemap:
- return GL_TEXTURE_CUBE_MAP_ARRAY;
- }
- return Target();
- }
-
const SurfaceParams& GetSurfaceParams() const {
return params;
}
@@ -395,19 +397,42 @@ public:
Tegra::Texture::SwizzleSource swizzle_z,
Tegra::Texture::SwizzleSource swizzle_w);
+ void MarkReinterpreted() {
+ reinterpreted = true;
+ }
+
+ bool IsReinterpreted() const {
+ return reinterpreted;
+ }
+
+ void MarkForReload(bool reload) {
+ must_reload = reload;
+ }
+
+ bool MustReload() const {
+ return must_reload;
+ }
+
+ bool IsUploaded() const {
+ return params.identity == SurfaceParams::SurfaceClass::Uploaded;
+ }
+
private:
void UploadGLMipmapTexture(u32 mip_map, GLuint read_fb_handle, GLuint draw_fb_handle);
- void EnsureTextureView();
+ void EnsureTextureDiscrepantView();
OGLTexture texture;
- OGLTexture texture_view;
+ OGLTexture discrepant_view;
std::vector<std::vector<u8>> gl_buffer;
SurfaceParams params{};
GLenum gl_target{};
GLenum gl_internal_format{};
std::size_t cached_size_in_bytes{};
std::array<GLenum, 4> swizzle{GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA};
+ std::size_t memory_size;
+ bool reinterpreted = false;
+ bool must_reload = false;
};
class RasterizerCacheOpenGL final : public RasterizerCache<Surface> {
@@ -425,13 +450,16 @@ public:
Surface GetColorBufferSurface(std::size_t index, bool preserve_contents);
 /// Tries to find a framebuffer using the provided CPU address
- Surface TryFindFramebufferSurface(VAddr addr) const;
+ Surface TryFindFramebufferSurface(const u8* host_ptr) const;
/// Copies the contents of one surface to another
void FermiCopySurface(const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
const Tegra::Engines::Fermi2D::Regs::Surface& dst_config,
- const MathUtil::Rectangle<u32>& src_rect,
- const MathUtil::Rectangle<u32>& dst_rect);
+ const Common::Rectangle<u32>& src_rect,
+ const Common::Rectangle<u32>& dst_rect);
+
+ void SignalPreDrawCall();
+ void SignalPostDrawCall();
private:
void LoadSurface(const Surface& surface);
@@ -449,6 +477,10 @@ private:
/// Tries to get a reserved surface for the specified parameters
Surface TryGetReservedSurface(const SurfaceParams& params);
+ /// Partially reinterprets a surface based on a triggering_surface that collides with it.
+ /// Returns true if the reinterpret was successful, false otherwise.
+ bool PartialReinterpretSurface(Surface triggering_surface, Surface intersect);
+
/// Performs a slow but accurate surface copy, flushing to RAM and reinterpreting the data
void AccurateCopySurface(const Surface& src_surface, const Surface& dst_surface);
void FastLayeredCopySurface(const Surface& src_surface, const Surface& dst_surface);
@@ -465,12 +497,50 @@ private:
OGLFramebuffer read_framebuffer;
OGLFramebuffer draw_framebuffer;
+ bool texception = false;
+
/// Use a Pixel Buffer Object to download the previous texture and then upload it to the new one
/// using the new format.
OGLBuffer copy_pbo;
- std::array<Surface, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> last_color_buffers;
+ std::array<Surface, Maxwell::NumRenderTargets> last_color_buffers;
+ std::array<Surface, Maxwell::NumRenderTargets> current_color_buffers;
Surface last_depth_buffer;
+
+ using SurfaceIntervalCache = boost::icl::interval_map<CacheAddr, Surface>;
+ using SurfaceInterval = typename SurfaceIntervalCache::interval_type;
+
+ static auto GetReinterpretInterval(const Surface& object) {
+ return SurfaceInterval::right_open(object->GetCacheAddr() + 1,
+ object->GetCacheAddr() + object->GetMemorySize() - 1);
+ }
+
+ // Reinterpreted surfaces are very fragile, as the game may keep rendering into them.
+ SurfaceIntervalCache reinterpreted_surfaces;
+
+ void RegisterReinterpretSurface(Surface reinterpret_surface) {
+ auto interval = GetReinterpretInterval(reinterpret_surface);
+ reinterpreted_surfaces.insert({interval, reinterpret_surface});
+ reinterpret_surface->MarkReinterpreted();
+ }
+
+ Surface CollideOnReinterpretedSurface(CacheAddr addr) const {
+ const SurfaceInterval interval{addr};
+ for (auto& pair :
+ boost::make_iterator_range(reinterpreted_surfaces.equal_range(interval))) {
+ return pair.second;
+ }
+ return nullptr;
+ }
+
+ /// Unregisters an object from the cache
+ void UnregisterSurface(const Surface& object) {
+ if (object->IsReinterpreted()) {
+ auto interval = GetReinterpretInterval(object);
+ reinterpreted_surfaces.erase(interval);
+ }
+ Unregister(object);
+ }
};
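
[Editorial sketch — a self-contained, minimal version of the interval-map collision query used above, with illustrative addresses; it mirrors the same boost::icl calls as the cache:]

    #include <boost/icl/interval_map.hpp>
    #include <boost/range/iterator_range.hpp>
    #include <cstdint>
    #include <iostream>

    int main() {
        using Cache = boost::icl::interval_map<std::uintptr_t, int>;
        using Interval = Cache::interval_type;

        Cache reinterpreted;
        // Mirrors GetReinterpretInterval: both end points of the surface span are excluded.
        reinterpreted.insert({Interval::right_open(0x1001, 0x1fff), 1});

        // A point query is a one-element interval, as in CollideOnReinterpretedSurface.
        const Interval probe{0x1800};
        for (const auto& pair : boost::make_iterator_range(reinterpreted.equal_range(probe))) {
            std::cout << "collides with surface #" << pair.second << '\n';
        }
    }
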
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 4883e4f62..60a04e146 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -42,9 +42,9 @@ VAddr GetShaderAddress(Maxwell::ShaderProgram program) {
}
/// Gets the shader program code from memory for the specified address
-ProgramCode GetShaderCode(VAddr addr) {
+ProgramCode GetShaderCode(const u8* host_ptr) {
ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
- Memory::ReadBlock(addr, program_code.data(), program_code.size() * sizeof(u64));
+ std::memcpy(program_code.data(), host_ptr, program_code.size() * sizeof(u64));
return program_code;
}
@@ -214,12 +214,13 @@ std::set<GLenum> GetSupportedFormats() {
} // namespace
-CachedShader::CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type,
- ShaderDiskCacheOpenGL& disk_cache,
+CachedShader::CachedShader(VAddr guest_addr, u64 unique_identifier,
+ Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
const PrecompiledPrograms& precompiled_programs,
- ProgramCode&& program_code, ProgramCode&& program_code_b)
- : addr{addr}, unique_identifier{unique_identifier}, program_type{program_type},
- disk_cache{disk_cache}, precompiled_programs{precompiled_programs} {
+ ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr)
+ : RasterizerCacheObject{host_ptr}, host_ptr{host_ptr}, guest_addr{guest_addr},
+ unique_identifier{unique_identifier}, program_type{program_type}, disk_cache{disk_cache},
+ precompiled_programs{precompiled_programs} {
const std::size_t code_size = CalculateProgramSize(program_code);
const std::size_t code_size_b =
@@ -243,12 +244,13 @@ CachedShader::CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderPro
disk_cache.SaveRaw(raw);
}
-CachedShader::CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type,
- ShaderDiskCacheOpenGL& disk_cache,
+CachedShader::CachedShader(VAddr guest_addr, u64 unique_identifier,
+ Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
const PrecompiledPrograms& precompiled_programs,
- GLShader::ProgramResult result)
- : addr{addr}, unique_identifier{unique_identifier}, program_type{program_type},
- disk_cache{disk_cache}, precompiled_programs{precompiled_programs} {
+ GLShader::ProgramResult result, u8* host_ptr)
+ : RasterizerCacheObject{host_ptr}, guest_addr{guest_addr},
+ unique_identifier{unique_identifier}, program_type{program_type}, disk_cache{disk_cache},
+ precompiled_programs{precompiled_programs} {
code = std::move(result.first);
entries = result.second;
@@ -271,7 +273,7 @@ std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(GLenum primitive
disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings));
}
- LabelGLObject(GL_PROGRAM, program->handle, addr);
+ LabelGLObject(GL_PROGRAM, program->handle, guest_addr);
}
handle = program->handle;
@@ -323,7 +325,7 @@ GLuint CachedShader::LazyGeometryProgram(CachedProgram& target_program, BaseBind
disk_cache.SaveUsage(GetUsage(primitive_mode, base_bindings));
}
- LabelGLObject(GL_PROGRAM, target_program->handle, addr, debug_name);
+ LabelGLObject(GL_PROGRAM, target_program->handle, guest_addr, debug_name);
return target_program->handle;
};
@@ -489,14 +491,17 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
const VAddr program_addr{GetShaderAddress(program)};
// Look up shader in the cache based on address
- Shader shader{TryGet(program_addr)};
+ const auto host_ptr{Memory::GetPointer(program_addr)};
+ Shader shader{TryGet(host_ptr)};
if (!shader) {
// No shader found - create a new one
- ProgramCode program_code = GetShaderCode(program_addr);
+ ProgramCode program_code{GetShaderCode(host_ptr)};
ProgramCode program_code_b;
if (program == Maxwell::ShaderProgram::VertexA) {
- program_code_b = GetShaderCode(GetShaderAddress(Maxwell::ShaderProgram::VertexB));
+ program_code_b = GetShaderCode(
+ Memory::GetPointer(GetShaderAddress(Maxwell::ShaderProgram::VertexB)));
}
const u64 unique_identifier = GetUniqueIdentifier(program, program_code, program_code_b);
@@ -504,11 +509,11 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
if (found != precompiled_shaders.end()) {
shader =
std::make_shared<CachedShader>(program_addr, unique_identifier, program, disk_cache,
- precompiled_programs, found->second);
+ precompiled_programs, found->second, host_ptr);
} else {
shader = std::make_shared<CachedShader>(
program_addr, unique_identifier, program, disk_cache, precompiled_programs,
- std::move(program_code), std::move(program_code_b));
+ std::move(program_code), std::move(program_code_b), host_ptr);
}
Register(shader);
}
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index 97eed192f..81fe716b4 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -39,18 +39,18 @@ using PrecompiledShaders = std::unordered_map<u64, GLShader::ProgramResult>;
class CachedShader final : public RasterizerCacheObject {
public:
- explicit CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type,
- ShaderDiskCacheOpenGL& disk_cache,
+ explicit CachedShader(VAddr guest_addr, u64 unique_identifier,
+ Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
const PrecompiledPrograms& precompiled_programs,
- ProgramCode&& program_code, ProgramCode&& program_code_b);
+ ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr);
- explicit CachedShader(VAddr addr, u64 unique_identifier, Maxwell::ShaderProgram program_type,
- ShaderDiskCacheOpenGL& disk_cache,
+ explicit CachedShader(VAddr guest_addr, u64 unique_identifier,
+ Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
const PrecompiledPrograms& precompiled_programs,
- GLShader::ProgramResult result);
+ GLShader::ProgramResult result, u8* host_ptr);
- VAddr GetAddr() const override {
- return addr;
+ VAddr GetCpuAddr() const override {
+ return guest_addr;
}
std::size_t GetSizeInBytes() const override {
@@ -91,7 +91,8 @@ private:
ShaderDiskCacheUsage GetUsage(GLenum primitive_mode, BaseBindings base_bindings) const;
- VAddr addr{};
+ u8* host_ptr{};
+ VAddr guest_addr{};
u64 unique_identifier{};
Maxwell::ShaderProgram program_type{};
ShaderDiskCacheOpenGL& disk_cache;
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index db18f4dbe..11d1169f0 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -5,7 +5,9 @@
#include <array>
#include <string>
#include <string_view>
+#include <utility>
#include <variant>
+#include <vector>
#include <fmt/format.h>
@@ -20,6 +22,7 @@
namespace OpenGL::GLShader {
using Tegra::Shader::Attribute;
+using Tegra::Shader::AttributeUse;
using Tegra::Shader::Header;
using Tegra::Shader::IpaInterpMode;
using Tegra::Shader::IpaMode;
@@ -288,34 +291,22 @@ private:
code.AddNewLine();
}
- std::string GetInputFlags(const IpaMode& input_mode) {
- const IpaSampleMode sample_mode = input_mode.sampling_mode;
- const IpaInterpMode interp_mode = input_mode.interpolation_mode;
+ std::string GetInputFlags(AttributeUse attribute) {
std::string out;
- switch (interp_mode) {
- case IpaInterpMode::Flat:
+ switch (attribute) {
+ case AttributeUse::Constant:
out += "flat ";
break;
- case IpaInterpMode::Linear:
+ case AttributeUse::ScreenLinear:
out += "noperspective ";
break;
- case IpaInterpMode::Perspective:
+ case AttributeUse::Perspective:
// Default, Smooth
break;
default:
- UNIMPLEMENTED_MSG("Unhandled IPA interp mode: {}", static_cast<u32>(interp_mode));
- }
- switch (sample_mode) {
- case IpaSampleMode::Centroid:
- // It can be implemented with the "centroid " keyword in GLSL
- UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode centroid");
- break;
- case IpaSampleMode::Default:
- // Default, n/a
- break;
- default:
- UNIMPLEMENTED_MSG("Unimplemented IPA sampler mode: {}", static_cast<u32>(sample_mode));
+ LOG_CRITICAL(HW_GPU, "Unused attribute being fetched");
+ UNREACHABLE();
}
return out;
}
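
[Editorial illustration — these qualifiers feed the declarations emitted below; attribute names and locations here are hypothetical:]

    layout (location = 0) flat in vec4 attr0;          // AttributeUse::Constant
    layout (location = 1) noperspective in vec4 attr1; // AttributeUse::ScreenLinear
    layout (location = 2) in vec4 attr2;               // AttributeUse::Perspective (smooth default)
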
@@ -324,16 +315,11 @@ private:
const auto& attributes = ir.GetInputAttributes();
for (const auto element : attributes) {
const Attribute::Index index = element.first;
- const IpaMode& input_mode = *element.second.begin();
if (index < Attribute::Index::Attribute_0 || index > Attribute::Index::Attribute_31) {
// Skip when it's not a generic attribute
continue;
}
- ASSERT(element.second.size() > 0);
- UNIMPLEMENTED_IF_MSG(element.second.size() > 1,
- "Multiple input flag modes are not supported in GLSL");
-
// TODO(bunnei): Use proper number of elements for these
u32 idx = static_cast<u32>(index) - static_cast<u32>(Attribute::Index::Attribute_0);
if (stage != ShaderStage::Vertex) {
@@ -345,8 +331,14 @@ private:
if (stage == ShaderStage::Geometry) {
attr = "gs_" + attr + "[]";
}
- code.AddLine("layout (location = " + std::to_string(idx) + ") " +
- GetInputFlags(input_mode) + "in vec4 " + attr + ';');
+ std::string suffix;
+ if (stage == ShaderStage::Fragment) {
+ const auto input_mode =
+ header.ps.GetAttributeUse(idx - GENERIC_VARYING_START_LOCATION);
+ suffix = GetInputFlags(input_mode);
+ }
+ code.AddLine("layout (location = " + std::to_string(idx) + ") " + suffix + "in vec4 " +
+ attr + ';');
}
if (!attributes.empty())
code.AddNewLine();
@@ -727,7 +719,7 @@ private:
}
std::string GenerateTexture(Operation operation, const std::string& func,
- bool is_extra_int = false) {
+ const std::vector<std::pair<Type, Node>>& extras) {
constexpr std::array<const char*, 4> coord_constructors = {"float", "vec2", "vec3", "vec4"};
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
@@ -748,36 +740,47 @@ private:
expr += Visit(operation[i]);
const std::size_t next = i + 1;
- if (next < count || has_array || has_shadow)
+ if (next < count)
expr += ", ";
}
if (has_array) {
- expr += "float(ftoi(" + Visit(meta->array) + "))";
+ expr += ", float(ftoi(" + Visit(meta->array) + "))";
}
if (has_shadow) {
- if (has_array)
- expr += ", ";
- expr += Visit(meta->depth_compare);
+ expr += ", " + Visit(meta->depth_compare);
}
expr += ')';
- for (const Node extra : meta->extras) {
+ for (const auto& extra_pair : extras) {
+ const auto [type, operand] = extra_pair;
+ if (operand == nullptr) {
+ continue;
+ }
expr += ", ";
- if (is_extra_int) {
- if (const auto immediate = std::get_if<ImmediateNode>(extra)) {
+
+ switch (type) {
+ case Type::Int:
+ if (const auto immediate = std::get_if<ImmediateNode>(operand)) {
// Inline the string as an immediate integer in GLSL (some extra arguments are
// required to be constant)
expr += std::to_string(static_cast<s32>(immediate->GetValue()));
} else {
- expr += "ftoi(" + Visit(extra) + ')';
+ expr += "ftoi(" + Visit(operand) + ')';
}
- } else {
- expr += Visit(extra);
+ break;
+ case Type::Float:
+ expr += Visit(operand);
+ break;
+ default: {
+ const auto type_int = static_cast<u32>(type);
+ UNIMPLEMENTED_MSG("Unimplemented extra type={}", type_int);
+ expr += '0';
+ break;
+ }
}
}
- expr += ')';
- return expr;
+ return expr + ')';
}
std::string Assign(Operation operation) {
@@ -1156,7 +1159,7 @@ private:
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
- std::string expr = GenerateTexture(operation, "texture");
+ std::string expr = GenerateTexture(operation, "texture", {{Type::Float, meta->bias}});
if (meta->sampler.IsShadow()) {
expr = "vec4(" + expr + ')';
}
@@ -1167,7 +1170,7 @@ private:
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
- std::string expr = GenerateTexture(operation, "textureLod");
+ std::string expr = GenerateTexture(operation, "textureLod", {{Type::Float, meta->lod}});
if (meta->sampler.IsShadow()) {
expr = "vec4(" + expr + ')';
}
@@ -1178,7 +1181,8 @@ private:
const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
ASSERT(meta);
- return GenerateTexture(operation, "textureGather", !meta->sampler.IsShadow()) +
+ const auto type = meta->sampler.IsShadow() ? Type::Float : Type::Int;
+ return GenerateTexture(operation, "textureGather", {{type, meta->component}}) +
GetSwizzle(meta->element);
}
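
[Editorial illustration — GLSL these paths generate; sampler and operand names are hypothetical:]

    texture(sampler0, coords, bias_value)     // Type::Float extra: bias
    textureLod(sampler0, coords, lod_value)   // Type::Float extra: explicit LOD
    textureGather(sampler0, coords, 2)        // Type::Int extra: component inlined as an immediate
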
@@ -1207,8 +1211,8 @@ private:
ASSERT(meta);
if (meta->element < 2) {
- return "itof(int((" + GenerateTexture(operation, "textureQueryLod") + " * vec2(256))" +
- GetSwizzle(meta->element) + "))";
+ return "itof(int((" + GenerateTexture(operation, "textureQueryLod", {}) +
+ " * vec2(256))" + GetSwizzle(meta->element) + "))";
}
return "0";
}
@@ -1234,9 +1238,9 @@ private:
else if (next < count)
expr += ", ";
}
- for (std::size_t i = 0; i < meta->extras.size(); ++i) {
+ if (meta->lod) {
expr += ", ";
- expr += CastOperand(Visit(meta->extras.at(i)), Type::Int);
+ expr += CastOperand(Visit(meta->lod), Type::Int);
}
expr += ')';
@@ -1584,4 +1588,4 @@ ProgramResult Decompile(const ShaderIR& ir, Maxwell::ShaderStage stage, const st
return {decompiler.GetResult(), decompiler.GetShaderEntries()};
}
-} // namespace OpenGL::GLShader \ No newline at end of file
+} // namespace OpenGL::GLShader
diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
index 81882822b..82fc4d44b 100644
--- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
@@ -2,8 +2,6 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
-#pragma once
-
#include <cstring>
#include <fmt/format.h>
#include <lz4.h>
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp
index 04e1db911..7d96649af 100644
--- a/src/video_core/renderer_opengl/gl_shader_gen.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp
@@ -124,7 +124,7 @@ layout (location = 5) out vec4 FragColor5;
layout (location = 6) out vec4 FragColor6;
layout (location = 7) out vec4 FragColor7;
-layout (location = 0) in vec4 position;
+layout (location = 0) in noperspective vec4 position;
layout (std140, binding = EMULATION_UBO_BINDING) uniform fs_config {
vec4 viewport_flip;
@@ -172,4 +172,4 @@ void main() {
return {out, program.second};
}
-} // namespace OpenGL::GLShader \ No newline at end of file
+} // namespace OpenGL::GLShader
diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp
index 81af803bc..9419326a3 100644
--- a/src/video_core/renderer_opengl/gl_state.cpp
+++ b/src/video_core/renderer_opengl/gl_state.cpp
@@ -11,7 +11,9 @@
namespace OpenGL {
OpenGLState OpenGLState::cur_state;
+
bool OpenGLState::s_rgb_used;
+
OpenGLState::OpenGLState() {
// These all match default OpenGL values
geometry_shaders.enabled = false;
@@ -112,7 +114,6 @@ void OpenGLState::ApplyDefaultState() {
}
void OpenGLState::ApplySRgb() const {
- // sRGB
if (framebuffer_srgb.enabled != cur_state.framebuffer_srgb.enabled) {
if (framebuffer_srgb.enabled) {
// Track if sRGB is used
@@ -125,23 +126,20 @@ void OpenGLState::ApplySRgb() const {
}
void OpenGLState::ApplyCulling() const {
- // Culling
- const bool cull_changed = cull.enabled != cur_state.cull.enabled;
- if (cull_changed) {
+ if (cull.enabled != cur_state.cull.enabled) {
if (cull.enabled) {
glEnable(GL_CULL_FACE);
} else {
glDisable(GL_CULL_FACE);
}
}
- if (cull.enabled) {
- if (cull_changed || cull.mode != cur_state.cull.mode) {
- glCullFace(cull.mode);
- }
- if (cull_changed || cull.front_face != cur_state.cull.front_face) {
- glFrontFace(cull.front_face);
- }
+ if (cull.mode != cur_state.cull.mode) {
+ glCullFace(cull.mode);
+ }
+
+ if (cull.front_face != cur_state.cull.front_face) {
+ glFrontFace(cull.front_face);
}
}
@@ -172,72 +170,63 @@ void OpenGLState::ApplyColorMask() const {
}
void OpenGLState::ApplyDepth() const {
- // Depth test
- const bool depth_test_changed = depth.test_enabled != cur_state.depth.test_enabled;
- if (depth_test_changed) {
+ if (depth.test_enabled != cur_state.depth.test_enabled) {
if (depth.test_enabled) {
glEnable(GL_DEPTH_TEST);
} else {
glDisable(GL_DEPTH_TEST);
}
}
- if (depth.test_enabled &&
- (depth_test_changed || depth.test_func != cur_state.depth.test_func)) {
+
+ if (depth.test_func != cur_state.depth.test_func) {
glDepthFunc(depth.test_func);
}
- // Depth mask
+
if (depth.write_mask != cur_state.depth.write_mask) {
glDepthMask(depth.write_mask);
}
}
void OpenGLState::ApplyPrimitiveRestart() const {
- const bool primitive_restart_changed =
- primitive_restart.enabled != cur_state.primitive_restart.enabled;
- if (primitive_restart_changed) {
+ if (primitive_restart.enabled != cur_state.primitive_restart.enabled) {
if (primitive_restart.enabled) {
glEnable(GL_PRIMITIVE_RESTART);
} else {
glDisable(GL_PRIMITIVE_RESTART);
}
}
- if (primitive_restart_changed ||
- (primitive_restart.enabled &&
- primitive_restart.index != cur_state.primitive_restart.index)) {
+
+ if (primitive_restart.index != cur_state.primitive_restart.index) {
glPrimitiveRestartIndex(primitive_restart.index);
}
}
void OpenGLState::ApplyStencilTest() const {
- const bool stencil_test_changed = stencil.test_enabled != cur_state.stencil.test_enabled;
- if (stencil_test_changed) {
+ if (stencil.test_enabled != cur_state.stencil.test_enabled) {
if (stencil.test_enabled) {
glEnable(GL_STENCIL_TEST);
} else {
glDisable(GL_STENCIL_TEST);
}
}
- if (stencil.test_enabled) {
- auto config_stencil = [stencil_test_changed](GLenum face, const auto& config,
- const auto& prev_config) {
- if (stencil_test_changed || config.test_func != prev_config.test_func ||
- config.test_ref != prev_config.test_ref ||
- config.test_mask != prev_config.test_mask) {
- glStencilFuncSeparate(face, config.test_func, config.test_ref, config.test_mask);
- }
- if (stencil_test_changed || config.action_depth_fail != prev_config.action_depth_fail ||
- config.action_depth_pass != prev_config.action_depth_pass ||
- config.action_stencil_fail != prev_config.action_stencil_fail) {
- glStencilOpSeparate(face, config.action_stencil_fail, config.action_depth_fail,
- config.action_depth_pass);
- }
- if (config.write_mask != prev_config.write_mask) {
- glStencilMaskSeparate(face, config.write_mask);
- }
- };
- config_stencil(GL_FRONT, stencil.front, cur_state.stencil.front);
- config_stencil(GL_BACK, stencil.back, cur_state.stencil.back);
- }
+
+ const auto ConfigStencil = [](GLenum face, const auto& config, const auto& prev_config) {
+ if (config.test_func != prev_config.test_func || config.test_ref != prev_config.test_ref ||
+ config.test_mask != prev_config.test_mask) {
+ glStencilFuncSeparate(face, config.test_func, config.test_ref, config.test_mask);
+ }
+ if (config.action_depth_fail != prev_config.action_depth_fail ||
+ config.action_depth_pass != prev_config.action_depth_pass ||
+ config.action_stencil_fail != prev_config.action_stencil_fail) {
+ glStencilOpSeparate(face, config.action_stencil_fail, config.action_depth_fail,
+ config.action_depth_pass);
+ }
+ if (config.write_mask != prev_config.write_mask) {
+ glStencilMaskSeparate(face, config.write_mask);
+ }
+ };
+ ConfigStencil(GL_FRONT, stencil.front, cur_state.stencil.front);
+ ConfigStencil(GL_BACK, stencil.back, cur_state.stencil.back);
}
 // Viewport does not affect glClearBuffer, so emulate the viewport using the scissor test
void OpenGLState::EmulateViewportWithScissor() {
@@ -278,19 +267,18 @@ void OpenGLState::ApplyViewport() const {
updated.depth_range_far != current.depth_range_far) {
glDepthRangeIndexed(i, updated.depth_range_near, updated.depth_range_far);
}
- const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled;
- if (scissor_changed) {
+
+ if (updated.scissor.enabled != current.scissor.enabled) {
if (updated.scissor.enabled) {
glEnablei(GL_SCISSOR_TEST, i);
} else {
glDisablei(GL_SCISSOR_TEST, i);
}
}
- if (updated.scissor.enabled &&
- (scissor_changed || updated.scissor.x != current.scissor.x ||
- updated.scissor.y != current.scissor.y ||
- updated.scissor.width != current.scissor.width ||
- updated.scissor.height != current.scissor.height)) {
+
+ if (updated.scissor.x != current.scissor.x || updated.scissor.y != current.scissor.y ||
+ updated.scissor.width != current.scissor.width ||
+ updated.scissor.height != current.scissor.height) {
glScissorIndexed(i, updated.scissor.x, updated.scissor.y, updated.scissor.width,
updated.scissor.height);
}
@@ -302,22 +290,23 @@ void OpenGLState::ApplyViewport() const {
updated.height != current.height) {
glViewport(updated.x, updated.y, updated.width, updated.height);
}
+
if (updated.depth_range_near != current.depth_range_near ||
updated.depth_range_far != current.depth_range_far) {
glDepthRange(updated.depth_range_near, updated.depth_range_far);
}
- const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled;
- if (scissor_changed) {
+
+ if (updated.scissor.enabled != current.scissor.enabled) {
if (updated.scissor.enabled) {
glEnable(GL_SCISSOR_TEST);
} else {
glDisable(GL_SCISSOR_TEST);
}
}
- if (updated.scissor.enabled && (scissor_changed || updated.scissor.x != current.scissor.x ||
- updated.scissor.y != current.scissor.y ||
- updated.scissor.width != current.scissor.width ||
- updated.scissor.height != current.scissor.height)) {
+
+ if (updated.scissor.x != current.scissor.x || updated.scissor.y != current.scissor.y ||
+ updated.scissor.width != current.scissor.width ||
+ updated.scissor.height != current.scissor.height) {
glScissor(updated.scissor.x, updated.scissor.y, updated.scissor.width,
updated.scissor.height);
}
@@ -327,8 +316,7 @@ void OpenGLState::ApplyViewport() const {
void OpenGLState::ApplyGlobalBlending() const {
const Blend& current = cur_state.blend[0];
const Blend& updated = blend[0];
- const bool blend_changed = updated.enabled != current.enabled;
- if (blend_changed) {
+ if (updated.enabled != current.enabled) {
if (updated.enabled) {
glEnable(GL_BLEND);
} else {
@@ -338,15 +326,14 @@ void OpenGLState::ApplyGlobalBlending() const {
if (!updated.enabled) {
return;
}
- if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
+ if (updated.src_rgb_func != current.src_rgb_func ||
updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func ||
updated.dst_a_func != current.dst_a_func) {
glBlendFuncSeparate(updated.src_rgb_func, updated.dst_rgb_func, updated.src_a_func,
updated.dst_a_func);
}
- if (blend_changed || updated.rgb_equation != current.rgb_equation ||
- updated.a_equation != current.a_equation) {
+ if (updated.rgb_equation != current.rgb_equation || updated.a_equation != current.a_equation) {
glBlendEquationSeparate(updated.rgb_equation, updated.a_equation);
}
}
@@ -354,26 +341,22 @@ void OpenGLState::ApplyGlobalBlending() const {
void OpenGLState::ApplyTargetBlending(std::size_t target, bool force) const {
const Blend& updated = blend[target];
const Blend& current = cur_state.blend[target];
- const bool blend_changed = updated.enabled != current.enabled || force;
- if (blend_changed) {
+ if (updated.enabled != current.enabled || force) {
if (updated.enabled) {
glEnablei(GL_BLEND, static_cast<GLuint>(target));
} else {
glDisablei(GL_BLEND, static_cast<GLuint>(target));
}
}
- if (!updated.enabled) {
- return;
- }
- if (blend_changed || updated.src_rgb_func != current.src_rgb_func ||
+
+ if (updated.src_rgb_func != current.src_rgb_func ||
updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func ||
updated.dst_a_func != current.dst_a_func) {
glBlendFuncSeparatei(static_cast<GLuint>(target), updated.src_rgb_func,
updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func);
}
- if (blend_changed || updated.rgb_equation != current.rgb_equation ||
- updated.a_equation != current.a_equation) {
+ if (updated.rgb_equation != current.rgb_equation || updated.a_equation != current.a_equation) {
glBlendEquationSeparatei(static_cast<GLuint>(target), updated.rgb_equation,
updated.a_equation);
}
@@ -397,8 +380,7 @@ void OpenGLState::ApplyBlending() const {
}
void OpenGLState::ApplyLogicOp() const {
- const bool logic_op_changed = logic_op.enabled != cur_state.logic_op.enabled;
- if (logic_op_changed) {
+ if (logic_op.enabled != cur_state.logic_op.enabled) {
if (logic_op.enabled) {
glEnable(GL_COLOR_LOGIC_OP);
} else {
@@ -406,14 +388,12 @@ void OpenGLState::ApplyLogicOp() const {
}
}
- if (logic_op.enabled &&
- (logic_op_changed || logic_op.operation != cur_state.logic_op.operation)) {
+ if (logic_op.operation != cur_state.logic_op.operation) {
glLogicOp(logic_op.operation);
}
}
void OpenGLState::ApplyPolygonOffset() const {
-
const bool fill_enable_changed =
polygon_offset.fill_enable != cur_state.polygon_offset.fill_enable;
const bool line_enable_changed =
@@ -448,9 +428,7 @@ void OpenGLState::ApplyPolygonOffset() const {
}
}
- if ((polygon_offset.fill_enable || polygon_offset.line_enable || polygon_offset.point_enable) &&
- (factor_changed || units_changed || clamp_changed)) {
-
+ if (factor_changed || units_changed || clamp_changed) {
if (GLAD_GL_EXT_polygon_offset_clamp && polygon_offset.clamp != 0) {
glPolygonOffsetClamp(polygon_offset.factor, polygon_offset.units, polygon_offset.clamp);
} else {
@@ -483,7 +461,7 @@ void OpenGLState::ApplyTextures() const {
if (has_delta) {
glBindTextures(static_cast<GLuint>(first), static_cast<GLsizei>(last - first + 1),
- textures.data());
+ textures.data() + first);
}
}
@@ -504,7 +482,7 @@ void OpenGLState::ApplySamplers() const {
}
if (has_delta) {
glBindSamplers(static_cast<GLuint>(first), static_cast<GLsizei>(last - first + 1),
- samplers.data());
+ samplers.data() + first);
}
}
@@ -528,9 +506,9 @@ void OpenGLState::ApplyDepthClamp() const {
depth_clamp.near_plane == cur_state.depth_clamp.near_plane) {
return;
}
- if (depth_clamp.far_plane != depth_clamp.near_plane) {
- UNIMPLEMENTED_MSG("Unimplemented Depth Clamp Separation!");
- }
+ UNIMPLEMENTED_IF_MSG(depth_clamp.far_plane != depth_clamp.near_plane,
+ "Unimplemented Depth Clamp Separation!");
+
if (depth_clamp.far_plane || depth_clamp.near_plane) {
glEnable(GL_DEPTH_CLAMP);
} else {
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index d40666ac6..b97576309 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -167,9 +167,11 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf
Memory::RasterizerFlushVirtualRegion(framebuffer_addr, size_in_bytes,
Memory::FlushMode::Flush);
- VideoCore::MortonCopyPixels128(framebuffer.width, framebuffer.height, bytes_per_pixel, 4,
- Memory::GetPointer(framebuffer_addr),
- gl_framebuffer_data.data(), true);
+ constexpr u32 linear_bpp = 4;
+ VideoCore::MortonCopyPixels128(VideoCore::MortonSwizzleMode::MortonToLinear,
+ framebuffer.width, framebuffer.height, bytes_per_pixel,
+ linear_bpp, Memory::GetPointer(framebuffer_addr),
+ gl_framebuffer_data.data());
glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(framebuffer.stride));
@@ -244,6 +246,21 @@ void RendererOpenGL::InitOpenGLObjects() {
LoadColorToActiveGLTexture(0, 0, 0, 0, screen_info.texture);
}
+void RendererOpenGL::AddTelemetryFields() {
+ const char* const gl_version{reinterpret_cast<char const*>(glGetString(GL_VERSION))};
+ const char* const gpu_vendor{reinterpret_cast<char const*>(glGetString(GL_VENDOR))};
+ const char* const gpu_model{reinterpret_cast<char const*>(glGetString(GL_RENDERER))};
+
+ LOG_INFO(Render_OpenGL, "GL_VERSION: {}", gl_version);
+ LOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor);
+ LOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model);
+
+ auto& telemetry_session = system.TelemetrySession();
+ telemetry_session.AddField(Telemetry::FieldType::UserSystem, "GPU_Vendor", gpu_vendor);
+ telemetry_session.AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model);
+ telemetry_session.AddField(Telemetry::FieldType::UserSystem, "GPU_OpenGL_Version", gl_version);
+}
+
void RendererOpenGL::CreateRasterizer() {
if (rasterizer) {
return;
@@ -257,6 +274,7 @@ void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture,
const Tegra::FramebufferConfig& framebuffer) {
texture.width = framebuffer.width;
texture.height = framebuffer.height;
+ texture.pixel_format = framebuffer.pixel_format;
GLint internal_format;
switch (framebuffer.pixel_format) {
@@ -380,7 +398,8 @@ void RendererOpenGL::CaptureScreenshot() {
GLuint renderbuffer;
glGenRenderbuffers(1, &renderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer);
- glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB8, layout.width, layout.height);
+ glRenderbufferStorage(GL_RENDERBUFFER, state.GetsRGBUsed() ? GL_SRGB8 : GL_RGB8, layout.width,
+ layout.height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbuffer);
DrawScreen(layout);
@@ -464,17 +483,7 @@ bool RendererOpenGL::Init() {
glDebugMessageCallback(DebugHandler, nullptr);
}
- const char* gl_version{reinterpret_cast<char const*>(glGetString(GL_VERSION))};
- const char* gpu_vendor{reinterpret_cast<char const*>(glGetString(GL_VENDOR))};
- const char* gpu_model{reinterpret_cast<char const*>(glGetString(GL_RENDERER))};
-
- LOG_INFO(Render_OpenGL, "GL_VERSION: {}", gl_version);
- LOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor);
- LOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model);
-
- Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Vendor", gpu_vendor);
- Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model);
- Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_OpenGL_Version", gl_version);
+ AddTelemetryFields();
if (!GLAD_GL_VERSION_4_3) {
return false;
diff --git a/src/video_core/renderer_opengl/renderer_opengl.h b/src/video_core/renderer_opengl/renderer_opengl.h
index 7e13e566b..6cbf9d2cb 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.h
+++ b/src/video_core/renderer_opengl/renderer_opengl.h
@@ -39,7 +39,7 @@ struct TextureInfo {
/// Structure used for storing information about the display target for the Switch screen
struct ScreenInfo {
GLuint display_texture;
- const MathUtil::Rectangle<float> display_texcoords{0.0f, 0.0f, 1.0f, 1.0f};
+ const Common::Rectangle<float> display_texcoords{0.0f, 0.0f, 1.0f, 1.0f};
TextureInfo texture;
};
@@ -60,6 +60,7 @@ public:
private:
void InitOpenGLObjects();
+ void AddTelemetryFields();
void CreateRasterizer();
void ConfigureFramebufferTexture(TextureInfo& texture,
@@ -102,7 +103,7 @@ private:
/// Used for transforming the framebuffer orientation
Tegra::FramebufferConfig::TransformFlags framebuffer_transform_flags;
- MathUtil::Rectangle<int> framebuffer_crop_rect;
+ Common::Rectangle<int> framebuffer_crop_rect;
};
} // namespace OpenGL
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
new file mode 100644
index 000000000..34bf26ff2
--- /dev/null
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -0,0 +1,483 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/maxwell_to_vk.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/surface.h"
+
+namespace Vulkan::MaxwellToVK {
+
+namespace Sampler {
+
+vk::Filter Filter(Tegra::Texture::TextureFilter filter) {
+ switch (filter) {
+ case Tegra::Texture::TextureFilter::Linear:
+ return vk::Filter::eLinear;
+ case Tegra::Texture::TextureFilter::Nearest:
+ return vk::Filter::eNearest;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented sampler filter={}", static_cast<u32>(filter));
+ return {};
+}
+
+vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) {
+ switch (mipmap_filter) {
+ case Tegra::Texture::TextureMipmapFilter::None:
+ // TODO(Rodrigo): None seems to be mapped to OpenGL's mag and min filters without mipmapping
+ // (e.g. GL_NEAREST and GL_LINEAR). Vulkan doesn't have such a thing; find out if we have to
+ // use an image view with a single mipmap level to emulate this.
+ return vk::SamplerMipmapMode::eLinear;
+ case Tegra::Texture::TextureMipmapFilter::Linear:
+ return vk::SamplerMipmapMode::eLinear;
+ case Tegra::Texture::TextureMipmapFilter::Nearest:
+ return vk::SamplerMipmapMode::eNearest;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented sampler mipmap mode={}", static_cast<u32>(mipmap_filter));
+ return {};
+}
+
+vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode) {
+ switch (wrap_mode) {
+ case Tegra::Texture::WrapMode::Wrap:
+ return vk::SamplerAddressMode::eRepeat;
+ case Tegra::Texture::WrapMode::Mirror:
+ return vk::SamplerAddressMode::eMirroredRepeat;
+ case Tegra::Texture::WrapMode::ClampToEdge:
+ return vk::SamplerAddressMode::eClampToEdge;
+ case Tegra::Texture::WrapMode::Border:
+ return vk::SamplerAddressMode::eClampToBorder;
+ case Tegra::Texture::WrapMode::ClampOGL:
+        // TODO(Rodrigo): GL_CLAMP was removed in OpenGL 3.1. To emulate it, we can use
+        // eClampToBorder to get the border color of the texture and then sample the edge to
+        // manually mix them. However, the shader part of this is not yet implemented.
+ return vk::SamplerAddressMode::eClampToBorder;
+ case Tegra::Texture::WrapMode::MirrorOnceClampToEdge:
+ return vk::SamplerAddressMode::eMirrorClampToEdge;
+ case Tegra::Texture::WrapMode::MirrorOnceBorder:
+ UNIMPLEMENTED();
+ return vk::SamplerAddressMode::eMirrorClampToEdge;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode));
+ return {};
+}
+
+vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) {
+ switch (depth_compare_func) {
+ case Tegra::Texture::DepthCompareFunc::Never:
+ return vk::CompareOp::eNever;
+ case Tegra::Texture::DepthCompareFunc::Less:
+ return vk::CompareOp::eLess;
+ case Tegra::Texture::DepthCompareFunc::LessEqual:
+ return vk::CompareOp::eLessOrEqual;
+ case Tegra::Texture::DepthCompareFunc::Equal:
+ return vk::CompareOp::eEqual;
+ case Tegra::Texture::DepthCompareFunc::NotEqual:
+ return vk::CompareOp::eNotEqual;
+ case Tegra::Texture::DepthCompareFunc::Greater:
+ return vk::CompareOp::eGreater;
+ case Tegra::Texture::DepthCompareFunc::GreaterEqual:
+ return vk::CompareOp::eGreaterOrEqual;
+ case Tegra::Texture::DepthCompareFunc::Always:
+ return vk::CompareOp::eAlways;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented sampler depth compare function={}",
+ static_cast<u32>(depth_compare_func));
+ return {};
+}
+
+} // namespace Sampler
+
+struct FormatTuple {
+ vk::Format format; ///< Vulkan format
+ ComponentType component_type; ///< Abstracted component type
+ bool attachable; ///< True when this format can be used as an attachment
+};
+
+static constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{
+ {vk::Format::eA8B8G8R8UnormPack32, ComponentType::UNorm, true}, // ABGR8U
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ABGR8S
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ABGR8UI
+ {vk::Format::eB5G6R5UnormPack16, ComponentType::UNorm, false}, // B5G6R5U
+ {vk::Format::eA2B10G10R10UnormPack32, ComponentType::UNorm, true}, // A2B10G10R10U
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // A1B5G5R5U
+ {vk::Format::eR8Unorm, ComponentType::UNorm, true}, // R8U
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // R8UI
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA16F
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA16U
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA16UI
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // R11FG11FB10F
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA32UI
+ {vk::Format::eBc1RgbaUnormBlock, ComponentType::UNorm, false}, // DXT1
+ {vk::Format::eBc2UnormBlock, ComponentType::UNorm, false}, // DXT23
+ {vk::Format::eBc3UnormBlock, ComponentType::UNorm, false}, // DXT45
+ {vk::Format::eBc4UnormBlock, ComponentType::UNorm, false}, // DXN1
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXN2UNORM
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXN2SNORM
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC7U
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC6H_UF16
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC6H_SF16
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_4X4
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // BGRA8
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGBA32F
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG32F
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // R32F
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16F
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16U
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16S
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16UI
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // R16I
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16F
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16UI
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16I
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG16S
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RGB32F
+ {vk::Format::eA8B8G8R8SrgbPack32, ComponentType::UNorm, true}, // RGBA8_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG8U
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG8S
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // RG32UI
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // R32UI
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X8
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X5
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X4
+
+ // Compressed sRGB formats
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // BGRA8_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXT1_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXT23_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // DXT45_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // BC7U_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_4X4_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X8_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_8X5_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X4_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X5
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_5X5_SRGB
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_10X8
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // ASTC_2D_10X8_SRGB
+
+ // Depth formats
+ {vk::Format::eD32Sfloat, ComponentType::Float, true}, // Z32F
+ {vk::Format::eD16Unorm, ComponentType::UNorm, true}, // Z16
+
+ // DepthStencil formats
+ {vk::Format::eD24UnormS8Uint, ComponentType::UNorm, true}, // Z24S8
+ {vk::Format::eD24UnormS8Uint, ComponentType::UNorm, true}, // S8Z24 (emulated)
+ {vk::Format::eUndefined, ComponentType::Invalid, false}, // Z32FS8
+}};
+
+static constexpr bool IsZetaFormat(PixelFormat pixel_format) {
+ return pixel_format >= PixelFormat::MaxColorFormat &&
+ pixel_format < PixelFormat::MaxDepthStencilFormat;
+}
+
+std::pair<vk::Format, bool> SurfaceFormat(const VKDevice& device, FormatType format_type,
+ PixelFormat pixel_format, ComponentType component_type) {
+ ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size());
+
+ const auto tuple = tex_format_tuples[static_cast<u32>(pixel_format)];
+ UNIMPLEMENTED_IF_MSG(tuple.format == vk::Format::eUndefined,
+ "Unimplemented texture format with pixel format={} and component type={}",
+ static_cast<u32>(pixel_format), static_cast<u32>(component_type));
+ ASSERT_MSG(component_type == tuple.component_type, "Component type mismatch");
+
+ auto usage = vk::FormatFeatureFlagBits::eSampledImage |
+ vk::FormatFeatureFlagBits::eTransferDst | vk::FormatFeatureFlagBits::eTransferSrc;
+ if (tuple.attachable) {
+ usage |= IsZetaFormat(pixel_format) ? vk::FormatFeatureFlagBits::eDepthStencilAttachment
+ : vk::FormatFeatureFlagBits::eColorAttachment;
+ }
+ return {device.GetSupportedFormat(tuple.format, usage, format_type), tuple.attachable};
+}
+
+vk::ShaderStageFlagBits ShaderStage(Maxwell::ShaderStage stage) {
+ switch (stage) {
+ case Maxwell::ShaderStage::Vertex:
+ return vk::ShaderStageFlagBits::eVertex;
+ case Maxwell::ShaderStage::TesselationControl:
+ return vk::ShaderStageFlagBits::eTessellationControl;
+ case Maxwell::ShaderStage::TesselationEval:
+ return vk::ShaderStageFlagBits::eTessellationEvaluation;
+ case Maxwell::ShaderStage::Geometry:
+ return vk::ShaderStageFlagBits::eGeometry;
+ case Maxwell::ShaderStage::Fragment:
+ return vk::ShaderStageFlagBits::eFragment;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented shader stage={}", static_cast<u32>(stage));
+ return {};
+}
+
+vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology) {
+ switch (topology) {
+ case Maxwell::PrimitiveTopology::Points:
+ return vk::PrimitiveTopology::ePointList;
+ case Maxwell::PrimitiveTopology::Lines:
+ return vk::PrimitiveTopology::eLineList;
+ case Maxwell::PrimitiveTopology::LineStrip:
+ return vk::PrimitiveTopology::eLineStrip;
+ case Maxwell::PrimitiveTopology::Triangles:
+ return vk::PrimitiveTopology::eTriangleList;
+ case Maxwell::PrimitiveTopology::TriangleStrip:
+ return vk::PrimitiveTopology::eTriangleStrip;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology));
+ return {};
+}
+
+vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) {
+ switch (type) {
+ case Maxwell::VertexAttribute::Type::SignedNorm:
+ break;
+ case Maxwell::VertexAttribute::Type::UnsignedNorm:
+ switch (size) {
+ case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
+ return vk::Format::eR8G8B8A8Unorm;
+ default:
+ break;
+ }
+ break;
+ case Maxwell::VertexAttribute::Type::SignedInt:
+ break;
+ case Maxwell::VertexAttribute::Type::UnsignedInt:
+ switch (size) {
+ case Maxwell::VertexAttribute::Size::Size_32:
+ return vk::Format::eR32Uint;
+ default:
+ break;
+        }
+        break;
+    case Maxwell::VertexAttribute::Type::UnsignedScaled:
+ case Maxwell::VertexAttribute::Type::SignedScaled:
+ break;
+ case Maxwell::VertexAttribute::Type::Float:
+ switch (size) {
+ case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
+ return vk::Format::eR32G32B32A32Sfloat;
+ case Maxwell::VertexAttribute::Size::Size_32_32_32:
+ return vk::Format::eR32G32B32Sfloat;
+ case Maxwell::VertexAttribute::Size::Size_32_32:
+ return vk::Format::eR32G32Sfloat;
+ case Maxwell::VertexAttribute::Size::Size_32:
+ return vk::Format::eR32Sfloat;
+ default:
+ break;
+ }
+ break;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented vertex format of type={} and size={}", static_cast<u32>(type),
+ static_cast<u32>(size));
+ return {};
+}
+
+vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison) {
+ switch (comparison) {
+ case Maxwell::ComparisonOp::Never:
+ case Maxwell::ComparisonOp::NeverOld:
+ return vk::CompareOp::eNever;
+ case Maxwell::ComparisonOp::Less:
+ case Maxwell::ComparisonOp::LessOld:
+ return vk::CompareOp::eLess;
+ case Maxwell::ComparisonOp::Equal:
+ case Maxwell::ComparisonOp::EqualOld:
+ return vk::CompareOp::eEqual;
+ case Maxwell::ComparisonOp::LessEqual:
+ case Maxwell::ComparisonOp::LessEqualOld:
+ return vk::CompareOp::eLessOrEqual;
+ case Maxwell::ComparisonOp::Greater:
+ case Maxwell::ComparisonOp::GreaterOld:
+ return vk::CompareOp::eGreater;
+ case Maxwell::ComparisonOp::NotEqual:
+ case Maxwell::ComparisonOp::NotEqualOld:
+ return vk::CompareOp::eNotEqual;
+ case Maxwell::ComparisonOp::GreaterEqual:
+ case Maxwell::ComparisonOp::GreaterEqualOld:
+ return vk::CompareOp::eGreaterOrEqual;
+ case Maxwell::ComparisonOp::Always:
+ case Maxwell::ComparisonOp::AlwaysOld:
+ return vk::CompareOp::eAlways;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented comparison op={}", static_cast<u32>(comparison));
+ return {};
+}
+
+vk::IndexType IndexFormat(Maxwell::IndexFormat index_format) {
+ switch (index_format) {
+ case Maxwell::IndexFormat::UnsignedByte:
+ UNIMPLEMENTED_MSG("Vulkan does not support native u8 index format");
+ return vk::IndexType::eUint16;
+ case Maxwell::IndexFormat::UnsignedShort:
+ return vk::IndexType::eUint16;
+ case Maxwell::IndexFormat::UnsignedInt:
+ return vk::IndexType::eUint32;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented index_format={}", static_cast<u32>(index_format));
+ return {};
+}
+
+vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op) {
+ switch (stencil_op) {
+ case Maxwell::StencilOp::Keep:
+ case Maxwell::StencilOp::KeepOGL:
+ return vk::StencilOp::eKeep;
+ case Maxwell::StencilOp::Zero:
+ case Maxwell::StencilOp::ZeroOGL:
+ return vk::StencilOp::eZero;
+ case Maxwell::StencilOp::Replace:
+ case Maxwell::StencilOp::ReplaceOGL:
+ return vk::StencilOp::eReplace;
+ case Maxwell::StencilOp::Incr:
+ case Maxwell::StencilOp::IncrOGL:
+ return vk::StencilOp::eIncrementAndClamp;
+ case Maxwell::StencilOp::Decr:
+ case Maxwell::StencilOp::DecrOGL:
+ return vk::StencilOp::eDecrementAndClamp;
+ case Maxwell::StencilOp::Invert:
+ case Maxwell::StencilOp::InvertOGL:
+ return vk::StencilOp::eInvert;
+ case Maxwell::StencilOp::IncrWrap:
+ case Maxwell::StencilOp::IncrWrapOGL:
+ return vk::StencilOp::eIncrementAndWrap;
+ case Maxwell::StencilOp::DecrWrap:
+ case Maxwell::StencilOp::DecrWrapOGL:
+ return vk::StencilOp::eDecrementAndWrap;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented stencil op={}", static_cast<u32>(stencil_op));
+ return {};
+}
+
+vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation) {
+ switch (equation) {
+ case Maxwell::Blend::Equation::Add:
+ case Maxwell::Blend::Equation::AddGL:
+ return vk::BlendOp::eAdd;
+ case Maxwell::Blend::Equation::Subtract:
+ case Maxwell::Blend::Equation::SubtractGL:
+ return vk::BlendOp::eSubtract;
+ case Maxwell::Blend::Equation::ReverseSubtract:
+ case Maxwell::Blend::Equation::ReverseSubtractGL:
+ return vk::BlendOp::eReverseSubtract;
+ case Maxwell::Blend::Equation::Min:
+ case Maxwell::Blend::Equation::MinGL:
+ return vk::BlendOp::eMin;
+ case Maxwell::Blend::Equation::Max:
+ case Maxwell::Blend::Equation::MaxGL:
+ return vk::BlendOp::eMax;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented blend equation={}", static_cast<u32>(equation));
+ return {};
+}
+
+vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor) {
+ switch (factor) {
+ case Maxwell::Blend::Factor::Zero:
+ case Maxwell::Blend::Factor::ZeroGL:
+ return vk::BlendFactor::eZero;
+ case Maxwell::Blend::Factor::One:
+ case Maxwell::Blend::Factor::OneGL:
+ return vk::BlendFactor::eOne;
+ case Maxwell::Blend::Factor::SourceColor:
+ case Maxwell::Blend::Factor::SourceColorGL:
+ return vk::BlendFactor::eSrcColor;
+ case Maxwell::Blend::Factor::OneMinusSourceColor:
+ case Maxwell::Blend::Factor::OneMinusSourceColorGL:
+ return vk::BlendFactor::eOneMinusSrcColor;
+ case Maxwell::Blend::Factor::SourceAlpha:
+ case Maxwell::Blend::Factor::SourceAlphaGL:
+ return vk::BlendFactor::eSrcAlpha;
+ case Maxwell::Blend::Factor::OneMinusSourceAlpha:
+ case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
+ return vk::BlendFactor::eOneMinusSrcAlpha;
+ case Maxwell::Blend::Factor::DestAlpha:
+ case Maxwell::Blend::Factor::DestAlphaGL:
+ return vk::BlendFactor::eDstAlpha;
+ case Maxwell::Blend::Factor::OneMinusDestAlpha:
+ case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
+ return vk::BlendFactor::eOneMinusDstAlpha;
+ case Maxwell::Blend::Factor::DestColor:
+ case Maxwell::Blend::Factor::DestColorGL:
+ return vk::BlendFactor::eDstColor;
+ case Maxwell::Blend::Factor::OneMinusDestColor:
+ case Maxwell::Blend::Factor::OneMinusDestColorGL:
+ return vk::BlendFactor::eOneMinusDstColor;
+ case Maxwell::Blend::Factor::SourceAlphaSaturate:
+ case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
+ return vk::BlendFactor::eSrcAlphaSaturate;
+ case Maxwell::Blend::Factor::Source1Color:
+ case Maxwell::Blend::Factor::Source1ColorGL:
+ return vk::BlendFactor::eSrc1Color;
+ case Maxwell::Blend::Factor::OneMinusSource1Color:
+ case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
+ return vk::BlendFactor::eOneMinusSrc1Color;
+ case Maxwell::Blend::Factor::Source1Alpha:
+ case Maxwell::Blend::Factor::Source1AlphaGL:
+ return vk::BlendFactor::eSrc1Alpha;
+ case Maxwell::Blend::Factor::OneMinusSource1Alpha:
+ case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
+ return vk::BlendFactor::eOneMinusSrc1Alpha;
+ case Maxwell::Blend::Factor::ConstantColor:
+ case Maxwell::Blend::Factor::ConstantColorGL:
+ return vk::BlendFactor::eConstantColor;
+ case Maxwell::Blend::Factor::OneMinusConstantColor:
+ case Maxwell::Blend::Factor::OneMinusConstantColorGL:
+ return vk::BlendFactor::eOneMinusConstantColor;
+ case Maxwell::Blend::Factor::ConstantAlpha:
+ case Maxwell::Blend::Factor::ConstantAlphaGL:
+ return vk::BlendFactor::eConstantAlpha;
+ case Maxwell::Blend::Factor::OneMinusConstantAlpha:
+ case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
+ return vk::BlendFactor::eOneMinusConstantAlpha;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented blend factor={}", static_cast<u32>(factor));
+ return {};
+}
+
+vk::FrontFace FrontFace(Maxwell::Cull::FrontFace front_face) {
+ switch (front_face) {
+ case Maxwell::Cull::FrontFace::ClockWise:
+ return vk::FrontFace::eClockwise;
+ case Maxwell::Cull::FrontFace::CounterClockWise:
+ return vk::FrontFace::eCounterClockwise;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented front face={}", static_cast<u32>(front_face));
+ return {};
+}
+
+vk::CullModeFlags CullFace(Maxwell::Cull::CullFace cull_face) {
+ switch (cull_face) {
+ case Maxwell::Cull::CullFace::Front:
+ return vk::CullModeFlagBits::eFront;
+ case Maxwell::Cull::CullFace::Back:
+ return vk::CullModeFlagBits::eBack;
+ case Maxwell::Cull::CullFace::FrontAndBack:
+ return vk::CullModeFlagBits::eFrontAndBack;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented cull face={}", static_cast<u32>(cull_face));
+ return {};
+}
+
+vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
+ switch (swizzle) {
+ case Tegra::Texture::SwizzleSource::Zero:
+ return vk::ComponentSwizzle::eZero;
+ case Tegra::Texture::SwizzleSource::R:
+ return vk::ComponentSwizzle::eR;
+ case Tegra::Texture::SwizzleSource::G:
+ return vk::ComponentSwizzle::eG;
+ case Tegra::Texture::SwizzleSource::B:
+ return vk::ComponentSwizzle::eB;
+ case Tegra::Texture::SwizzleSource::A:
+ return vk::ComponentSwizzle::eA;
+ case Tegra::Texture::SwizzleSource::OneInt:
+ case Tegra::Texture::SwizzleSource::OneFloat:
+ return vk::ComponentSwizzle::eOne;
+ }
+ UNIMPLEMENTED_MSG("Unimplemented swizzle source={}", static_cast<u32>(swizzle));
+ return {};
+}
+
+} // namespace Vulkan::MaxwellToVK
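
Every helper in this translation unit is a pure enum-to-enum mapping, which keeps call sites declarative. A hypothetical consumer building fixed-function state might look like the sketch below; the regs field names are assumptions about Maxwell3D's register layout, not part of this patch:

    // Sketch only: translating a few Maxwell register fields into Vulkan state.
    const auto& regs = gpu.Maxwell3D().regs; // assumed accessor
    const vk::PrimitiveTopology topology = MaxwellToVK::PrimitiveTopology(regs.draw.topology);
    const vk::CompareOp depth_func = MaxwellToVK::ComparisonOp(regs.depth_test_func);
    const vk::CullModeFlags cull_mode = MaxwellToVK::CullFace(regs.cull.cull_face);
    const vk::FrontFace front_face = MaxwellToVK::FrontFace(regs.cull.front_face);

Unknown enum values fall through to UNIMPLEMENTED_MSG and return a zero-initialized value, so a missing translation logs loudly instead of crashing the host.
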
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h
new file mode 100644
index 000000000..4cadc0721
--- /dev/null
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h
@@ -0,0 +1,58 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <utility>
+#include "common/common_types.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/surface.h"
+#include "video_core/textures/texture.h"
+
+namespace Vulkan::MaxwellToVK {
+
+using Maxwell = Tegra::Engines::Maxwell3D::Regs;
+using PixelFormat = VideoCore::Surface::PixelFormat;
+using ComponentType = VideoCore::Surface::ComponentType;
+
+namespace Sampler {
+
+vk::Filter Filter(Tegra::Texture::TextureFilter filter);
+
+vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter);
+
+vk::SamplerAddressMode WrapMode(Tegra::Texture::WrapMode wrap_mode);
+
+vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func);
+
+} // namespace Sampler
+
+std::pair<vk::Format, bool> SurfaceFormat(const VKDevice& device, FormatType format_type,
+ PixelFormat pixel_format, ComponentType component_type);
+
+vk::ShaderStageFlagBits ShaderStage(Maxwell::ShaderStage stage);
+
+vk::PrimitiveTopology PrimitiveTopology(Maxwell::PrimitiveTopology topology);
+
+vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size);
+
+vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison);
+
+vk::IndexType IndexFormat(Maxwell::IndexFormat index_format);
+
+vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op);
+
+vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation);
+
+vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor);
+
+vk::FrontFace FrontFace(Maxwell::Cull::FrontFace front_face);
+
+vk::CullModeFlags CullFace(Maxwell::Cull::CullFace cull_face);
+
+vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);
+
+} // namespace Vulkan::MaxwellToVK
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
new file mode 100644
index 000000000..95eab3fec
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -0,0 +1,123 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <cstring>
+#include <memory>
+#include <optional>
+#include <tuple>
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "core/memory.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_stream_buffer.h"
+
+namespace Vulkan {
+
+CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset,
+ std::size_t alignment, u8* host_ptr)
+    : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset},
+      alignment{alignment} {}
+
+VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager,
+ VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
+ VKMemoryManager& memory_manager, VKScheduler& scheduler, u64 size)
+ : RasterizerCache{rasterizer}, tegra_memory_manager{tegra_memory_manager} {
+ const auto usage = vk::BufferUsageFlagBits::eVertexBuffer |
+ vk::BufferUsageFlagBits::eIndexBuffer |
+ vk::BufferUsageFlagBits::eUniformBuffer;
+ const auto access = vk::AccessFlagBits::eVertexAttributeRead | vk::AccessFlagBits::eIndexRead |
+ vk::AccessFlagBits::eUniformRead;
+ stream_buffer =
+ std::make_unique<VKStreamBuffer>(device, memory_manager, scheduler, size, usage, access,
+ vk::PipelineStageFlagBits::eAllCommands);
+ buffer_handle = stream_buffer->GetBuffer();
+}
+
+VKBufferCache::~VKBufferCache() = default;
+
+u64 VKBufferCache::UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size, u64 alignment,
+ bool cache) {
+ const auto cpu_addr{tegra_memory_manager.GpuToCpuAddress(gpu_addr)};
+ ASSERT_MSG(cpu_addr, "Invalid GPU address");
+
+    // Cache management is a big overhead, so only cache entries above a certain size.
+ // TODO: Figure out which size is the best for given games.
+ cache &= size >= 2048;
+
+ const auto& host_ptr{Memory::GetPointer(*cpu_addr)};
+ if (cache) {
+ auto entry = TryGet(host_ptr);
+ if (entry) {
+ if (entry->GetSize() >= size && entry->GetAlignment() == alignment) {
+ return entry->GetOffset();
+ }
+ Unregister(entry);
+ }
+ }
+
+ AlignBuffer(alignment);
+ const u64 uploaded_offset = buffer_offset;
+
+ if (!host_ptr) {
+ return uploaded_offset;
+ }
+
+ std::memcpy(buffer_ptr, host_ptr, size);
+ buffer_ptr += size;
+ buffer_offset += size;
+
+ if (cache) {
+ auto entry = std::make_shared<CachedBufferEntry>(*cpu_addr, size, uploaded_offset,
+ alignment, host_ptr);
+ Register(entry);
+ }
+
+ return uploaded_offset;
+}
+
+u64 VKBufferCache::UploadHostMemory(const u8* raw_pointer, std::size_t size, u64 alignment) {
+ AlignBuffer(alignment);
+ std::memcpy(buffer_ptr, raw_pointer, size);
+ const u64 uploaded_offset = buffer_offset;
+
+ buffer_ptr += size;
+ buffer_offset += size;
+ return uploaded_offset;
+}
+
+std::tuple<u8*, u64> VKBufferCache::ReserveMemory(std::size_t size, u64 alignment) {
+ AlignBuffer(alignment);
+ u8* const uploaded_ptr = buffer_ptr;
+ const u64 uploaded_offset = buffer_offset;
+
+ buffer_ptr += size;
+ buffer_offset += size;
+ return {uploaded_ptr, uploaded_offset};
+}
+
+void VKBufferCache::Reserve(std::size_t max_size) {
+ bool invalidate;
+ std::tie(buffer_ptr, buffer_offset_base, invalidate) = stream_buffer->Reserve(max_size);
+ buffer_offset = buffer_offset_base;
+
+ if (invalidate) {
+ InvalidateAll();
+ }
+}
+
+VKExecutionContext VKBufferCache::Send(VKExecutionContext exctx) {
+ return stream_buffer->Send(exctx, buffer_offset - buffer_offset_base);
+}
+
+void VKBufferCache::AlignBuffer(std::size_t alignment) {
+ // Align the offset, not the mapped pointer
+ const u64 offset_aligned = Common::AlignUp(buffer_offset, alignment);
+ buffer_ptr += offset_aligned - buffer_offset;
+ buffer_offset = offset_aligned;
+}
+
+} // namespace Vulkan
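
A minimal sketch of the per-pass flow this cache is built for, assuming a vulkan-hpp command buffer cmdbuf and guest addresses resolved elsewhere (all local names here are illustrative): Reserve() maps a stream buffer region, the upload calls write into it sequentially, and Send() flushes the written span to the device.

    buffer_cache.Reserve(max_size_for_this_pass);
    const u64 vertex_offset = buffer_cache.UploadMemory(vertex_gpu_addr, vertex_size);
    const u64 index_offset = buffer_cache.UploadMemory(index_gpu_addr, index_size, 4, false);
    exctx = buffer_cache.Send(exctx);
    cmdbuf.bindVertexBuffers(0, {buffer_cache.GetBuffer()}, {vertex_offset}, dld);
    cmdbuf.bindIndexBuffer(buffer_cache.GetBuffer(), index_offset, vk::IndexType::eUint16, dld);
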
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
new file mode 100644
index 000000000..8b415744b
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -0,0 +1,104 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <tuple>
+
+#include "common/common_types.h"
+#include "video_core/gpu.h"
+#include "video_core/rasterizer_cache.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+
+namespace Tegra {
+class MemoryManager;
+}
+
+namespace Vulkan {
+
+class VKDevice;
+class VKFence;
+class VKMemoryManager;
+class VKStreamBuffer;
+
+class CachedBufferEntry final : public RasterizerCacheObject {
+public:
+ explicit CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, std::size_t alignment,
+ u8* host_ptr);
+
+ VAddr GetCpuAddr() const override {
+ return cpu_addr;
+ }
+
+ std::size_t GetSizeInBytes() const override {
+ return size;
+ }
+
+ std::size_t GetSize() const {
+ return size;
+ }
+
+ u64 GetOffset() const {
+ return offset;
+ }
+
+ std::size_t GetAlignment() const {
+ return alignment;
+ }
+
+ // We do not have to flush this cache as things in it are never modified by us.
+ void Flush() override {}
+
+private:
+ VAddr cpu_addr{};
+ std::size_t size{};
+ u64 offset{};
+ std::size_t alignment{};
+};
+
+class VKBufferCache final : public RasterizerCache<std::shared_ptr<CachedBufferEntry>> {
+public:
+ explicit VKBufferCache(Tegra::MemoryManager& tegra_memory_manager,
+ VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
+ VKMemoryManager& memory_manager, VKScheduler& scheduler, u64 size);
+ ~VKBufferCache();
+
+    /// Uploads data from a guest GPU address. Returns the offset within the host buffer where
+    /// the data has been allocated.
+ u64 UploadMemory(Tegra::GPUVAddr gpu_addr, std::size_t size, u64 alignment = 4,
+ bool cache = true);
+
+    /// Uploads data from host memory. Returns the offset within the host buffer where the data
+    /// has been allocated.
+ u64 UploadHostMemory(const u8* raw_pointer, std::size_t size, u64 alignment = 4);
+
+    /// Reserves memory to be used by the host CPU. Returns the mapped address and offset.
+ std::tuple<u8*, u64> ReserveMemory(std::size_t size, u64 alignment = 4);
+
+ /// Reserves a region of memory to be used in subsequent upload/reserve operations.
+ void Reserve(std::size_t max_size);
+
+ /// Ensures that the set data is sent to the device.
+ [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx);
+
+ /// Returns the buffer cache handle.
+ vk::Buffer GetBuffer() const {
+ return buffer_handle;
+ }
+
+private:
+ void AlignBuffer(std::size_t alignment);
+
+ Tegra::MemoryManager& tegra_memory_manager;
+
+ std::unique_ptr<VKStreamBuffer> stream_buffer;
+ vk::Buffer buffer_handle;
+
+ u8* buffer_ptr = nullptr;
+ u64 buffer_offset = 0;
+ u64 buffer_offset_base = 0;
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 78a4e5f0e..00242ecbe 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -122,8 +122,7 @@ bool VKDevice::IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlag
FormatType format_type) const {
const auto it = format_properties.find(wanted_format);
if (it == format_properties.end()) {
- LOG_CRITICAL(Render_Vulkan, "Unimplemented format query={}",
- static_cast<u32>(wanted_format));
+ LOG_CRITICAL(Render_Vulkan, "Unimplemented format query={}", vk::to_string(wanted_format));
UNREACHABLE();
return true;
}
@@ -219,11 +218,19 @@ std::map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties(
format_properties.emplace(format, physical.getFormatProperties(format, dldi));
};
AddFormatQuery(vk::Format::eA8B8G8R8UnormPack32);
- AddFormatQuery(vk::Format::eR5G6B5UnormPack16);
+ AddFormatQuery(vk::Format::eB5G6R5UnormPack16);
+ AddFormatQuery(vk::Format::eA2B10G10R10UnormPack32);
+ AddFormatQuery(vk::Format::eR8G8B8A8Srgb);
+ AddFormatQuery(vk::Format::eR8Unorm);
AddFormatQuery(vk::Format::eD32Sfloat);
+ AddFormatQuery(vk::Format::eD16Unorm);
AddFormatQuery(vk::Format::eD16UnormS8Uint);
AddFormatQuery(vk::Format::eD24UnormS8Uint);
AddFormatQuery(vk::Format::eD32SfloatS8Uint);
+ AddFormatQuery(vk::Format::eBc1RgbaUnormBlock);
+ AddFormatQuery(vk::Format::eBc2UnormBlock);
+ AddFormatQuery(vk::Format::eBc3UnormBlock);
+ AddFormatQuery(vk::Format::eBc4UnormBlock);
return format_properties;
}
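
The added queries are load-bearing rather than cosmetic: every format that maxwell_to_vk.cpp can now hand back has to be present in the format_properties map, because IsFormatSupported() treats an unknown format as unreachable instead of falling back. A sketch of the dependency, assuming the FormatType enumeration and the GetSupportedFormat() wrapper referenced from SurfaceFormat():

    // SurfaceFormat() ends in device.GetSupportedFormat(tuple.format, usage, format_type),
    // which consults format_properties through IsFormatSupported(); a format that was never
    // registered in GetFormatProperties() trips UNREACHABLE() rather than degrading.
    const auto [format, attachable] = MaxwellToVK::SurfaceFormat(
        device, FormatType::Optimal, PixelFormat::Z24S8, ComponentType::UNorm);
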
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
new file mode 100644
index 000000000..0451babbf
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -0,0 +1,252 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <optional>
+#include <tuple>
+#include <vector>
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_memory_manager.h"
+
+namespace Vulkan {
+
+// TODO(Rodrigo): Fine tune this number
+constexpr u64 ALLOC_CHUNK_SIZE = 64 * 1024 * 1024;
+
+class VKMemoryAllocation final {
+public:
+ explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory,
+ vk::MemoryPropertyFlags properties, u64 alloc_size, u32 type)
+ : device{device}, memory{memory}, properties{properties}, alloc_size{alloc_size},
+ shifted_type{ShiftType(type)}, is_mappable{properties &
+ vk::MemoryPropertyFlagBits::eHostVisible} {
+ if (is_mappable) {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ base_address = static_cast<u8*>(dev.mapMemory(memory, 0, alloc_size, {}, dld));
+ }
+ }
+
+ ~VKMemoryAllocation() {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ if (is_mappable)
+ dev.unmapMemory(memory, dld);
+ dev.free(memory, nullptr, dld);
+ }
+
+ VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) {
+ auto found = TryFindFreeSection(free_iterator, alloc_size, static_cast<u64>(commit_size),
+ static_cast<u64>(alignment));
+ if (!found) {
+ found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
+ static_cast<u64>(alignment));
+ if (!found) {
+                // Signal out of memory; the caller will allocate more chunks and retry.
+ return nullptr;
+ }
+ }
+ u8* address = is_mappable ? base_address + *found : nullptr;
+ auto commit = std::make_unique<VKMemoryCommitImpl>(this, memory, address, *found,
+ *found + commit_size);
+ commits.push_back(commit.get());
+
+        // The memory after the last commit is likely to be free.
+ free_iterator = *found + commit_size;
+
+ return commit;
+ }
+
+ void Free(const VKMemoryCommitImpl* commit) {
+ ASSERT(commit);
+ const auto it =
+ std::find_if(commits.begin(), commits.end(),
+ [&](const auto& stored_commit) { return stored_commit == commit; });
+ if (it == commits.end()) {
+ LOG_CRITICAL(Render_Vulkan, "Freeing unallocated commit!");
+ UNREACHABLE();
+ return;
+ }
+ commits.erase(it);
+ }
+
+ /// Returns whether this allocation is compatible with the arguments.
+ bool IsCompatible(vk::MemoryPropertyFlags wanted_properties, u32 type_mask) const {
+ return (wanted_properties & properties) != vk::MemoryPropertyFlagBits(0) &&
+ (type_mask & shifted_type) != 0;
+ }
+
+private:
+ static constexpr u32 ShiftType(u32 type) {
+ return 1U << type;
+ }
+
+    /// Searches for a free region between "start" and "end" that satisfies the requested size
+    /// and alignment. Returns an empty optional when no region is found.
+ std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
+ u64 iterator = start;
+ while (iterator + size < end) {
+ const u64 try_left = Common::AlignUp(iterator, alignment);
+ const u64 try_right = try_left + size;
+
+ bool overlap = false;
+ for (const auto& commit : commits) {
+ const auto [commit_left, commit_right] = commit->interval;
+ if (try_left < commit_right && commit_left < try_right) {
+ // There's an overlap, continue the search where the overlapping commit ends.
+ iterator = commit_right;
+ overlap = true;
+ break;
+ }
+ }
+ if (!overlap) {
+ // A free address has been found.
+ return try_left;
+ }
+ }
+    // No free regions were found, return an empty optional.
+ return std::nullopt;
+ }
+
+ const VKDevice& device; ///< Vulkan device.
+ const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
+ const vk::MemoryPropertyFlags properties; ///< Vulkan properties.
+ const u64 alloc_size; ///< Size of this allocation.
+ const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted.
+ const bool is_mappable; ///< Whether the allocation is mappable.
+
+ /// Base address of the mapped pointer.
+ u8* base_address{};
+
+ /// Hints where the next free region is likely going to be.
+ u64 free_iterator{};
+
+ /// Stores all commits done from this allocation.
+ std::vector<const VKMemoryCommitImpl*> commits;
+};
+
+VKMemoryManager::VKMemoryManager(const VKDevice& device)
+ : device{device}, props{device.GetPhysical().getMemoryProperties(device.GetDispatchLoader())},
+ is_memory_unified{GetMemoryUnified(props)} {}
+
+VKMemoryManager::~VKMemoryManager() = default;
+
+VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool host_visible) {
+ ASSERT(reqs.size < ALLOC_CHUNK_SIZE);
+
+ // When a host visible commit is asked, search for host visible and coherent, otherwise search
+ // for a fast device local type.
+ const vk::MemoryPropertyFlags wanted_properties =
+ host_visible
+ ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
+ : vk::MemoryPropertyFlagBits::eDeviceLocal;
+
+ const auto TryCommit = [&]() -> VKMemoryCommit {
+ for (auto& alloc : allocs) {
+ if (!alloc->IsCompatible(wanted_properties, reqs.memoryTypeBits))
+ continue;
+
+ if (auto commit = alloc->Commit(reqs.size, reqs.alignment); commit) {
+ return commit;
+ }
+ }
+ return {};
+ };
+
+ if (auto commit = TryCommit(); commit) {
+ return commit;
+ }
+
+ // Commit has failed, allocate more memory.
+ if (!AllocMemory(wanted_properties, reqs.memoryTypeBits, ALLOC_CHUNK_SIZE)) {
+ // TODO(Rodrigo): Try to use host memory.
+ LOG_CRITICAL(Render_Vulkan, "Ran out of memory!");
+ UNREACHABLE();
+ }
+
+ // Commit again, this time it won't fail since there's a fresh allocation above. If it does,
+ // there's a bug.
+ auto commit = TryCommit();
+ ASSERT(commit);
+ return commit;
+}
+
+VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+    const auto requirements = dev.getBufferMemoryRequirements(buffer, dld);
+    auto commit = Commit(requirements, host_visible);
+ dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld);
+ return commit;
+}
+
+VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+    const auto requirements = dev.getImageMemoryRequirements(image, dld);
+    auto commit = Commit(requirements, host_visible);
+ dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld);
+ return commit;
+}
+
+bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask,
+ u64 size) {
+ const u32 type = [&]() {
+ for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
+ const auto flags = props.memoryTypes[type_index].propertyFlags;
+ if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) {
+ // The type matches in type and in the wanted properties.
+ return type_index;
+ }
+ }
+ LOG_CRITICAL(Render_Vulkan, "Couldn't find a compatible memory type!");
+ UNREACHABLE();
+ return 0u;
+ }();
+
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+
+ // Try to allocate found type.
+ const vk::MemoryAllocateInfo memory_ai(size, type);
+ vk::DeviceMemory memory;
+ if (const vk::Result res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld);
+ res != vk::Result::eSuccess) {
+ LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res));
+ return false;
+ }
+ allocs.push_back(
+ std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type));
+ return true;
+}
+
+/*static*/ bool VKMemoryManager::GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props) {
+ for (u32 heap_index = 0; heap_index < props.memoryHeapCount; ++heap_index) {
+ if (!(props.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
+            // Memory is considered unified only when every heap is device local.
+ return false;
+ }
+ }
+ return true;
+}
+
+VKMemoryCommitImpl::VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory,
+ u8* data, u64 begin, u64 end)
+ : interval(std::make_pair(begin, end)), memory{memory}, allocation{allocation}, data{data} {}
+
+VKMemoryCommitImpl::~VKMemoryCommitImpl() {
+ allocation->Free(this);
+}
+
+u8* VKMemoryCommitImpl::GetData() const {
+ ASSERT_MSG(data != nullptr, "Trying to access an unmapped commit.");
+ return data;
+}
+
+} // namespace Vulkan
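
A minimal usage sketch, assuming a logical device dev, its dispatch loader dld, and a VKMemoryManager instance named memory_manager (source_data and size are illustrative names):

    const vk::BufferCreateInfo buffer_ci({}, size, vk::BufferUsageFlagBits::eUniformBuffer);
    UniqueBuffer buffer = dev.createBufferUnique(buffer_ci, nullptr, dld);

    // Commit() finds a compatible chunk (or allocates a fresh one) and binds the buffer to it.
    VKMemoryCommit commit = memory_manager.Commit(*buffer, /*host_visible=*/true);
    std::memcpy(commit->GetData(), source_data, size); // GetData() is only valid when mappable

    // Destroying the commit returns its interval to the owning VKMemoryAllocation.
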
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
new file mode 100644
index 000000000..073597b35
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -0,0 +1,87 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <utility>
+#include <vector>
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKMemoryAllocation;
+class VKMemoryCommitImpl;
+
+using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>;
+
+class VKMemoryManager final {
+public:
+ explicit VKMemoryManager(const VKDevice& device);
+ ~VKMemoryManager();
+
+ /**
+     * Commits memory with the specified requirements.
+     * @param reqs Requirements returned from a Vulkan call.
+ * @param host_visible Signals the allocator that it *must* use host visible and coherent
+ * memory. When passing false, it will try to allocate device local memory.
+ * @returns A memory commit.
+ */
+ VKMemoryCommit Commit(const vk::MemoryRequirements& reqs, bool host_visible);
+
+ /// Commits memory required by the buffer and binds it.
+ VKMemoryCommit Commit(vk::Buffer buffer, bool host_visible);
+
+ /// Commits memory required by the image and binds it.
+ VKMemoryCommit Commit(vk::Image image, bool host_visible);
+
+    /// Returns true if memory allocations are always done in host visible and coherent memory.
+ bool IsMemoryUnified() const {
+ return is_memory_unified;
+ }
+
+private:
+ /// Allocates a chunk of memory.
+ bool AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
+
+    /// Returns true if the device uses a unified memory model.
+ static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props);
+
+ const VKDevice& device; ///< Device handler.
+ const vk::PhysicalDeviceMemoryProperties props; ///< Physical device properties.
+ const bool is_memory_unified; ///< True if memory model is unified.
+ std::vector<std::unique_ptr<VKMemoryAllocation>> allocs; ///< Current allocations.
+};
+
+class VKMemoryCommitImpl final {
+ friend VKMemoryAllocation;
+
+public:
+ explicit VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory, u8* data,
+ u64 begin, u64 end);
+ ~VKMemoryCommitImpl();
+
+ /// Returns the writeable memory map. The commit has to be mappable.
+ u8* GetData() const;
+
+ /// Returns the Vulkan memory handler.
+ vk::DeviceMemory GetMemory() const {
+ return memory;
+ }
+
+ /// Returns the start position of the commit relative to the allocation.
+ vk::DeviceSize GetOffset() const {
+ return static_cast<vk::DeviceSize>(interval.first);
+ }
+
+private:
+ std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
+ vk::DeviceMemory memory; ///< Vulkan device memory handler.
+ VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
+ u8* data{}; ///< Pointer to the host mapped memory, it has the commit offset included.
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
new file mode 100644
index 000000000..a1e117443
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -0,0 +1,285 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <optional>
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+
+namespace Vulkan {
+
+// TODO(Rodrigo): Fine tune these numbers.
+constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
+constexpr std::size_t FENCES_GROW_STEP = 0x40;
+
+class CommandBufferPool final : public VKFencedPool {
+public:
+ CommandBufferPool(const VKDevice& device)
+ : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
+
+    void Allocate(std::size_t begin, std::size_t end) override {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ const u32 graphics_family = device.GetGraphicsFamily();
+
+ auto pool = std::make_unique<Pool>();
+
+        // Command buffers are going to be committed, recorded, and executed every single usage
+        // cycle. They are also going to be reset when committed.
+ const auto pool_flags = vk::CommandPoolCreateFlagBits::eTransient |
+ vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
+ const vk::CommandPoolCreateInfo cmdbuf_pool_ci(pool_flags, graphics_family);
+ pool->handle = dev.createCommandPoolUnique(cmdbuf_pool_ci, nullptr, dld);
+
+ const vk::CommandBufferAllocateInfo cmdbuf_ai(*pool->handle,
+ vk::CommandBufferLevel::ePrimary,
+ static_cast<u32>(COMMAND_BUFFER_POOL_SIZE));
+ pool->cmdbufs =
+ dev.allocateCommandBuffersUnique<std::allocator<UniqueCommandBuffer>>(cmdbuf_ai, dld);
+
+ pools.push_back(std::move(pool));
+ }
+
+ vk::CommandBuffer Commit(VKFence& fence) {
+ const std::size_t index = CommitResource(fence);
+ const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
+ const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
+ return *pools[pool_index]->cmdbufs[sub_index];
+ }
+
+private:
+ struct Pool {
+ UniqueCommandPool handle;
+ std::vector<UniqueCommandBuffer> cmdbufs;
+ };
+
+ const VKDevice& device;
+
+ std::vector<std::unique_ptr<Pool>> pools;
+};
+
+VKResource::VKResource() = default;
+
+VKResource::~VKResource() = default;
+
+VKFence::VKFence(const VKDevice& device, UniqueFence handle)
+ : device{device}, handle{std::move(handle)} {}
+
+VKFence::~VKFence() = default;
+
+void VKFence::Wait() {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
+}
+
+void VKFence::Release() {
+ is_owned = false;
+}
+
+void VKFence::Commit() {
+ is_owned = true;
+ is_used = true;
+}
+
+bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
+ if (!is_used) {
+ // If a fence is not used it's always free.
+ return true;
+ }
+ if (is_owned && !owner_wait) {
+ // The fence is still being owned (Release has not been called) and ownership wait has
+ // not been asked.
+ return false;
+ }
+
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ if (gpu_wait) {
+ // Wait for the fence if it has been requested.
+ dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
+ } else {
+ if (dev.getFenceStatus(*handle, dld) != vk::Result::eSuccess) {
+            // The Vulkan fence is not ready; there's not much we can do here.
+ return false;
+ }
+ }
+
+ // Broadcast resources their free state.
+ for (auto* resource : protected_resources) {
+ resource->OnFenceRemoval(this);
+ }
+ protected_resources.clear();
+
+    // Prepare the fence for reuse.
+ dev.resetFences({*handle}, dld);
+ is_used = false;
+ return true;
+}
+
+void VKFence::Protect(VKResource* resource) {
+ protected_resources.push_back(resource);
+}
+
+void VKFence::Unprotect(VKResource* resource) {
+ const auto it = std::find(protected_resources.begin(), protected_resources.end(), resource);
+ ASSERT(it != protected_resources.end());
+
+ resource->OnFenceRemoval(this);
+ protected_resources.erase(it);
+}
+
+VKFenceWatch::VKFenceWatch() = default;
+
+VKFenceWatch::~VKFenceWatch() {
+ if (fence) {
+ fence->Unprotect(this);
+ }
+}
+
+void VKFenceWatch::Wait() {
+ if (fence == nullptr) {
+ return;
+ }
+ fence->Wait();
+ fence->Unprotect(this);
+}
+
+void VKFenceWatch::Watch(VKFence& new_fence) {
+ Wait();
+ fence = &new_fence;
+ fence->Protect(this);
+}
+
+bool VKFenceWatch::TryWatch(VKFence& new_fence) {
+ if (fence) {
+ return false;
+ }
+ fence = &new_fence;
+ fence->Protect(this);
+ return true;
+}
+
+void VKFenceWatch::OnFenceRemoval(VKFence* signaling_fence) {
+ ASSERT_MSG(signaling_fence == fence, "Removing the wrong fence");
+ fence = nullptr;
+}
+
+VKFencedPool::VKFencedPool(std::size_t grow_step) : grow_step{grow_step} {}
+
+VKFencedPool::~VKFencedPool() = default;
+
+std::size_t VKFencedPool::CommitResource(VKFence& fence) {
+ const auto Search = [&](std::size_t begin, std::size_t end) -> std::optional<std::size_t> {
+ for (std::size_t iterator = begin; iterator < end; ++iterator) {
+ if (watches[iterator]->TryWatch(fence)) {
+ // The resource is now being watched, a free resource was successfully found.
+ return iterator;
+ }
+ }
+ return {};
+ };
+ // Try to find a free resource from the hinted position to the end.
+ auto found = Search(free_iterator, watches.size());
+ if (!found) {
+ // Search from beginning to the hinted position.
+ found = Search(0, free_iterator);
+ if (!found) {
+ // Both searches failed, the pool is full; handle it.
+ const std::size_t free_resource = ManageOverflow();
+
+ // Watch will wait for the resource to be free.
+ watches[free_resource]->Watch(fence);
+ found = free_resource;
+ }
+ }
+    // Hint the free iterator to the resource after the one that has just been committed.
+ free_iterator = (*found + 1) % watches.size();
+ return *found;
+}
+
+std::size_t VKFencedPool::ManageOverflow() {
+ const std::size_t old_capacity = watches.size();
+ Grow();
+
+    // The last entry is guaranteed to be free, since it's the first element of the freshly
+ // allocated resources.
+ return old_capacity;
+}
+
+void VKFencedPool::Grow() {
+ const std::size_t old_capacity = watches.size();
+ watches.resize(old_capacity + grow_step);
+ std::generate(watches.begin() + old_capacity, watches.end(),
+ []() { return std::make_unique<VKFenceWatch>(); });
+ Allocate(old_capacity, old_capacity + grow_step);
+}
+
+VKResourceManager::VKResourceManager(const VKDevice& device) : device{device} {
+ GrowFences(FENCES_GROW_STEP);
+ command_buffer_pool = std::make_unique<CommandBufferPool>(device);
+}
+
+VKResourceManager::~VKResourceManager() = default;
+
+VKFence& VKResourceManager::CommitFence() {
+ const auto StepFences = [&](bool gpu_wait, bool owner_wait) -> VKFence* {
+ const auto Tick = [=](auto& fence) { return fence->Tick(gpu_wait, owner_wait); };
+ const auto hinted = fences.begin() + fences_iterator;
+
+ auto it = std::find_if(hinted, fences.end(), Tick);
+ if (it == fences.end()) {
+ it = std::find_if(fences.begin(), hinted, Tick);
+ if (it == hinted) {
+ return nullptr;
+ }
+ }
+ fences_iterator = std::distance(fences.begin(), it) + 1;
+ if (fences_iterator >= fences.size())
+ fences_iterator = 0;
+
+ auto& fence = *it;
+ fence->Commit();
+ return fence.get();
+ };
+
+ VKFence* found_fence = StepFences(false, false);
+ if (!found_fence) {
+ // Try again, this time waiting.
+ found_fence = StepFences(true, false);
+
+ if (!found_fence) {
+ // Allocate new fences and try again.
+ LOG_INFO(Render_Vulkan, "Allocating new fences {} -> {}", fences.size(),
+ fences.size() + FENCES_GROW_STEP);
+
+ GrowFences(FENCES_GROW_STEP);
+ found_fence = StepFences(true, false);
+ ASSERT(found_fence != nullptr);
+ }
+ }
+ return *found_fence;
+}
+
+vk::CommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
+ return command_buffer_pool->Commit(fence);
+}
+
+void VKResourceManager::GrowFences(std::size_t new_fences_count) {
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ const vk::FenceCreateInfo fence_ci;
+
+ const std::size_t previous_size = fences.size();
+ fences.resize(previous_size + new_fences_count);
+
+ std::generate(fences.begin() + previous_size, fences.end(), [&]() {
+ return std::make_unique<VKFence>(device, dev.createFenceUnique(fence_ci, nullptr, dld));
+ });
+}
+
+} // namespace Vulkan
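
A sketch of the fence lifecycle these classes implement, assuming a vulkan-hpp queue handle and a recorded submission (the submit_info construction is abbreviated and illustrative):

    VKFence& fence = resource_manager.CommitFence();
    const vk::CommandBuffer cmdbuf = resource_manager.CommitCommandBuffer(fence);
    // ... record commands; call fence.Protect(&resource) on anything the GPU will touch ...
    const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, &cmdbuf, 0, nullptr);
    queue.submit({submit_info}, fence, dld);
    fence.Release(); // give up ownership once the fence has been sent to the queue

    // A later CommitFence() call Ticks this fence; when the driver reports it as signaled,
    // every protected resource gets OnFenceRemoval() and the fence is recycled.
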
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
new file mode 100644
index 000000000..5bfe4cead
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.h
@@ -0,0 +1,180 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <memory>
+#include <vector>
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKFence;
+class VKResourceManager;
+
+class CommandBufferPool;
+
+/// Interface for a Vulkan resource
+class VKResource {
+public:
+ explicit VKResource();
+ virtual ~VKResource();
+
+ /**
+ * Signals the object that an owning fence has been signaled.
+ * @param signaling_fence Fence that signals its usage end.
+ */
+ virtual void OnFenceRemoval(VKFence* signaling_fence) = 0;
+};
+
+/**
+ * Fences take ownership of objects, protecting them from GPU-side or driver-side concurrent access.
+ * They must be committed from the resource manager. Their usage flow is: commit the fence from the
+ * resource manager, protect resources with it and use them, send the fence to an execution queue
+ * and Wait for it if needed and then call Release. Used resources will automatically be signaled
+ * when they are free to be reused.
+ * @brief Protects resources for concurrent usage and signals its release.
+ */
+class VKFence {
+ friend class VKResourceManager;
+
+public:
+ explicit VKFence(const VKDevice& device, UniqueFence handle);
+ ~VKFence();
+
+ /**
+ * Waits for the fence to be signaled.
+ * @warning You must have ownership of the fence and it has to be previously sent to a queue to
+ * call this function.
+ */
+ void Wait();
+
+ /**
+ * Releases ownership of the fence. Pass after it has been sent to an execution queue.
+ * Unmanaged usage of the fence after the call will result in undefined behavior because it may
+ * be being used for something else.
+ */
+ void Release();
+
+ /// Protects a resource with this fence.
+ void Protect(VKResource* resource);
+
+ /// Removes protection for a resource.
+ void Unprotect(VKResource* resource);
+
+    /// Retrieves the fence.
+ operator vk::Fence() const {
+ return *handle;
+ }
+
+private:
+ /// Take ownership of the fence.
+ void Commit();
+
+ /**
+ * Updates the fence status.
+ * @warning Waiting for the owner might soft lock the execution.
+ * @param gpu_wait Wait for the fence to be signaled by the driver.
+ * @param owner_wait Wait for the owner to signal its freedom.
+     * @returns True if the fence is free. Waiting for both GPU and owner always returns true.
+ */
+ bool Tick(bool gpu_wait, bool owner_wait);
+
+ const VKDevice& device; ///< Device handler
+ UniqueFence handle; ///< Vulkan fence
+ std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence
+    bool is_owned = false; ///< The fence has been committed but not released yet.
+    bool is_used = false;  ///< The fence has been committed but it has not been checked to be free.
+};
+
+/**
+ * A fence watch is used to keep track of the usage of a fence and protect a resource or set of
+ * resources without having to inherit VKResource from their handlers.
+ */
+class VKFenceWatch final : public VKResource {
+public:
+ explicit VKFenceWatch();
+ ~VKFenceWatch();
+
+ /// Waits for the fence to be released.
+ void Wait();
+
+ /**
+ * Waits for a previous fence and watches a new one.
+     * @param new_fence New fence to watch.
+ */
+ void Watch(VKFence& new_fence);
+
+ /**
+     * Tries to start watching a new fence if the watch is currently free.
+     * @returns True if it started watching; false if a fence is already being watched.
+ */
+ bool TryWatch(VKFence& new_fence);
+
+ void OnFenceRemoval(VKFence* signaling_fence) override;
+
+private:
+ VKFence* fence{}; ///< Fence watching this resource. nullptr when the watch is free.
+};
+
+/**
+ * Handles a pool of resources protected by fences. Manages resource overflow allocating more
+ * resources.
+ */
+class VKFencedPool {
+public:
+ explicit VKFencedPool(std::size_t grow_step);
+ virtual ~VKFencedPool();
+
+protected:
+ /**
+ * Commits a free resource and protects it with a fence. It may allocate new resources.
+     * @param fence Fence that protects the committed resource.
+     * @returns Index of the committed resource.
+ */
+ std::size_t CommitResource(VKFence& fence);
+
+    /// Called when a chunk of resources has to be allocated.
+ virtual void Allocate(std::size_t begin, std::size_t end) = 0;
+
+private:
+ /// Manages pool overflow allocating new resources.
+ std::size_t ManageOverflow();
+
+ /// Allocates a new page of resources.
+ void Grow();
+
+ std::size_t grow_step = 0; ///< Number of new resources created after an overflow
+    std::size_t free_iterator = 0; ///< Hint to where the next free resource is likely to be found
+ std::vector<std::unique_ptr<VKFenceWatch>> watches; ///< Set of watched resources
+};
+
+/**
+ * The resource manager handles all resources that can be protected with a fence avoiding
+ * driver-side or GPU-side concurrent usage. Usage is documented in VKFence.
+ */
+class VKResourceManager final {
+public:
+ explicit VKResourceManager(const VKDevice& device);
+ ~VKResourceManager();
+
+ /// Commits a fence. It has to be sent to a queue and released.
+ VKFence& CommitFence();
+
+ /// Commits an unused command buffer and protects it with a fence.
+ vk::CommandBuffer CommitCommandBuffer(VKFence& fence);
+
+private:
+ /// Allocates new fences.
+ void GrowFences(std::size_t new_fences_count);
+
+ const VKDevice& device; ///< Device handler.
+ std::size_t fences_iterator = 0; ///< Index where a free fence is likely to be found.
+ std::vector<std::unique_ptr<VKFence>> fences; ///< Pool of fences.
+ std::unique_ptr<CommandBufferPool> command_buffer_pool; ///< Pool of command buffers.
+};
+
+} // namespace Vulkan
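
VKFencedPool only asks subclasses for an Allocate() hook, so new pooled resource types stay small. A hypothetical subclass, sketched here to show the pattern; descriptor sets are an invented example, not part of this patch:

    class DescriptorSetPool final : public VKFencedPool {
    public:
        DescriptorSetPool() : VKFencedPool(GROW_STEP) {}

        vk::DescriptorSet Commit(VKFence& fence) {
            // CommitResource() returns the index of a watch now guarding this fence.
            return sets[CommitResource(fence)];
        }

    protected:
        void Allocate(std::size_t begin, std::size_t end) override {
            // A real pool would allocate sets for [begin, end) here; this sketch just grows
            // the backing storage.
            sets.resize(end);
        }

    private:
        static constexpr std::size_t GROW_STEP = 0x40;
        std::vector<vk::DescriptorSet> sets;
    };
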
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
new file mode 100644
index 000000000..ed3178f09
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -0,0 +1,81 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <cstring>
+#include <optional>
+#include <unordered_map>
+
+#include "common/assert.h"
+#include "common/cityhash.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/maxwell_to_vk.h"
+#include "video_core/renderer_vulkan/vk_sampler_cache.h"
+#include "video_core/textures/texture.h"
+
+namespace Vulkan {
+
+static std::optional<vk::BorderColor> TryConvertBorderColor(std::array<float, 4> color) {
+ // TODO(Rodrigo): Manage integer border colors
+ if (color == std::array<float, 4>{0, 0, 0, 0}) {
+ return vk::BorderColor::eFloatTransparentBlack;
+ } else if (color == std::array<float, 4>{0, 0, 0, 1}) {
+ return vk::BorderColor::eFloatOpaqueBlack;
+ } else if (color == std::array<float, 4>{1, 1, 1, 1}) {
+ return vk::BorderColor::eFloatOpaqueWhite;
+ } else {
+ return {};
+ }
+}
+
+std::size_t SamplerCacheKey::Hash() const {
+ static_assert(sizeof(raw) % sizeof(u64) == 0);
+ // CityHash64 takes the length in bytes, so hash the entire raw TSC data.
+ return static_cast<std::size_t>(
+ Common::CityHash64(reinterpret_cast<const char*>(raw.data()), sizeof(raw)));
+}
+
+bool SamplerCacheKey::operator==(const SamplerCacheKey& rhs) const {
+ return raw == rhs.raw;
+}
+
+VKSamplerCache::VKSamplerCache(const VKDevice& device) : device{device} {}
+
+VKSamplerCache::~VKSamplerCache() = default;
+
+vk::Sampler VKSamplerCache::GetSampler(const Tegra::Texture::TSCEntry& tsc) {
+ const auto [entry, is_cache_miss] = cache.try_emplace(SamplerCacheKey{tsc});
+ auto& sampler = entry->second;
+ if (is_cache_miss) {
+ sampler = CreateSampler(tsc);
+ }
+ return *sampler;
+}
+
+UniqueSampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) {
+ const float max_anisotropy = tsc.GetMaxAnisotropy();
+ const bool has_anisotropy = max_anisotropy > 1.0f;
+
+ const auto border_color = tsc.GetBorderColor();
+ const auto vk_border_color = TryConvertBorderColor(border_color);
+ UNIMPLEMENTED_IF_MSG(!vk_border_color, "Unimplemented border color {} {} {} {}",
+ border_color[0], border_color[1], border_color[2], border_color[3]);
+
+ constexpr bool unnormalized_coords = false;
+
+ const vk::SamplerCreateInfo sampler_ci(
+ {}, MaxwellToVK::Sampler::Filter(tsc.mag_filter),
+ MaxwellToVK::Sampler::Filter(tsc.min_filter),
+ MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter),
+ MaxwellToVK::Sampler::WrapMode(tsc.wrap_u), MaxwellToVK::Sampler::WrapMode(tsc.wrap_v),
+ MaxwellToVK::Sampler::WrapMode(tsc.wrap_p), tsc.GetLodBias(), has_anisotropy,
+ max_anisotropy, tsc.depth_compare_enabled,
+ MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func), tsc.GetMinLod(),
+ tsc.GetMaxLod(), vk_border_color.value_or(vk::BorderColor::eFloatTransparentBlack),
+ unnormalized_coords);
+
+ const auto& dld = device.GetDispatchLoader();
+ const auto dev = device.GetLogical();
+ return dev.createSamplerUnique(sampler_ci, nullptr, dld);
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.h b/src/video_core/renderer_vulkan/vk_sampler_cache.h
new file mode 100644
index 000000000..c6394dc87
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.h
@@ -0,0 +1,56 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <unordered_map>
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/textures/texture.h"
+
+namespace Vulkan {
+
+class VKDevice;
+
+struct SamplerCacheKey final : public Tegra::Texture::TSCEntry {
+ std::size_t Hash() const;
+
+ bool operator==(const SamplerCacheKey& rhs) const;
+
+ bool operator!=(const SamplerCacheKey& rhs) const {
+ return !operator==(rhs);
+ }
+};
+
+} // namespace Vulkan
+
+namespace std {
+
+template <>
+struct hash<Vulkan::SamplerCacheKey> {
+ std::size_t operator()(const Vulkan::SamplerCacheKey& k) const noexcept {
+ return k.Hash();
+ }
+};
+
+} // namespace std
+
+namespace Vulkan {
+
+class VKSamplerCache {
+public:
+ explicit VKSamplerCache(const VKDevice& device);
+ ~VKSamplerCache();
+
+ vk::Sampler GetSampler(const Tegra::Texture::TSCEntry& tsc);
+
+private:
+ UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc);
+
+ const VKDevice& device;
+ std::unordered_map<SamplerCacheKey, UniqueSampler> cache;
+};
+
+} // namespace Vulkan
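
A short usage sketch of the cache; constructing the TSC entry from guest memory is out of scope here, so MakeTscEntry is a hypothetical helper, while GetSampler and the hit/miss behavior follow the implementation above:

    VKSamplerCache sampler_cache(device);

    const Tegra::Texture::TSCEntry tsc = MakeTscEntry(); // hypothetical helper
    const vk::Sampler first = sampler_cache.GetSampler(tsc);  // miss: creates and caches the sampler
    const vk::Sampler second = sampler_cache.GetSampler(tsc); // hit: returns the cached handle
    ASSERT(first == second); // identical TSC entries share one vk::Sampler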
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
new file mode 100644
index 000000000..f1fea1871
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -0,0 +1,60 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+
+namespace Vulkan {
+
+VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager)
+ : device{device}, resource_manager{resource_manager} {
+ next_fence = &resource_manager.CommitFence();
+ AllocateNewContext();
+}
+
+VKScheduler::~VKScheduler() = default;
+
+VKExecutionContext VKScheduler::GetExecutionContext() const {
+ return VKExecutionContext(current_fence, current_cmdbuf);
+}
+
+VKExecutionContext VKScheduler::Flush(vk::Semaphore semaphore) {
+ SubmitExecution(semaphore);
+ current_fence->Release();
+ AllocateNewContext();
+ return GetExecutionContext();
+}
+
+VKExecutionContext VKScheduler::Finish(vk::Semaphore semaphore) {
+ SubmitExecution(semaphore);
+ current_fence->Wait();
+ current_fence->Release();
+ AllocateNewContext();
+ return GetExecutionContext();
+}
+
+void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {
+ const auto& dld = device.GetDispatchLoader();
+ current_cmdbuf.end(dld);
+
+ const auto queue = device.GetGraphicsQueue();
+ const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, &current_cmdbuf, semaphore ? 1u : 0u,
+ &semaphore);
+ queue.submit({submit_info}, *current_fence, dld);
+}
+
+void VKScheduler::AllocateNewContext() {
+ current_fence = next_fence;
+ current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence);
+ next_fence = &resource_manager.CommitFence();
+
+ const auto& dld = device.GetDispatchLoader();
+ current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit}, dld);
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
new file mode 100644
index 000000000..cfaf5376f
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -0,0 +1,69 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKExecutionContext;
+class VKFence;
+class VKResourceManager;
+
+/// The scheduler abstracts command buffer and fence management with an interface that's able to do
+/// OpenGL-like operations on Vulkan command buffers.
+class VKScheduler {
+public:
+ explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager);
+ ~VKScheduler();
+
+ /// Gets the current execution context.
+ [[nodiscard]] VKExecutionContext GetExecutionContext() const;
+
+ /// Sends the current execution context to the GPU. It invalidates the current execution context
+ /// and returns a new one.
+ VKExecutionContext Flush(vk::Semaphore semaphore = nullptr);
+
+ /// Sends the current execution context to the GPU and waits for it to complete. It invalidates
+ /// the current execution context and returns a new one.
+ VKExecutionContext Finish(vk::Semaphore semaphore = nullptr);
+
+private:
+ void SubmitExecution(vk::Semaphore semaphore);
+
+ void AllocateNewContext();
+
+ const VKDevice& device;
+ VKResourceManager& resource_manager;
+ vk::CommandBuffer current_cmdbuf;
+ VKFence* current_fence = nullptr;
+ VKFence* next_fence = nullptr;
+};
+
+class VKExecutionContext {
+ friend class VKScheduler;
+
+public:
+ VKExecutionContext() = default;
+
+ VKFence& GetFence() const {
+ return *fence;
+ }
+
+ vk::CommandBuffer GetCommandBuffer() const {
+ return cmdbuf;
+ }
+
+private:
+ explicit VKExecutionContext(VKFence* fence, vk::CommandBuffer cmdbuf)
+ : fence{fence}, cmdbuf{cmdbuf} {}
+
+ VKFence* fence{};
+ vk::CommandBuffer cmdbuf;
+};
+
+} // namespace Vulkan
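
A sketch of the intended call pattern, assuming a constructed scheduler; the recorded commands are illustrative:

    VKExecutionContext exctx = scheduler.GetExecutionContext();
    vk::CommandBuffer cmdbuf = exctx.GetCommandBuffer();
    // ... record Vulkan commands into cmdbuf ...

    // Submit without blocking; the old context is invalidated and a new one returned.
    exctx = scheduler.Flush();

    // ... record more work ...

    // Submit and wait for completion, e.g. before reading results back on the CPU.
    exctx = scheduler.Finish();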
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
new file mode 100644
index 000000000..58ffa42f2
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -0,0 +1,90 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <memory>
+#include <optional>
+#include <vector>
+
+#include "common/assert.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_memory_manager.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_stream_buffer.h"
+
+namespace Vulkan {
+
+constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000;
+constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;
+
+VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKMemoryManager& memory_manager,
+ VKScheduler& scheduler, u64 size, vk::BufferUsageFlags usage,
+ vk::AccessFlags access, vk::PipelineStageFlags pipeline_stage)
+ : device{device}, scheduler{scheduler}, buffer_size{size}, access{access}, pipeline_stage{
+ pipeline_stage} {
+ CreateBuffers(memory_manager, usage);
+ ReserveWatches(WATCHES_INITIAL_RESERVE);
+}
+
+VKStreamBuffer::~VKStreamBuffer() = default;
+
+std::tuple<u8*, u64, bool> VKStreamBuffer::Reserve(u64 size) {
+ ASSERT(size <= buffer_size);
+ mapped_size = size;
+
+ if (offset + size > buffer_size) {
+ // The buffer would overflow; save the number of used watches, signal an invalidation and
+ // reset the state.
+ invalidation_mark = used_watches;
+ used_watches = 0;
+ offset = 0;
+ }
+
+ return {mapped_pointer + offset, offset, invalidation_mark.has_value()};
+}
+
+VKExecutionContext VKStreamBuffer::Send(VKExecutionContext exctx, u64 size) {
+ ASSERT_MSG(size <= mapped_size, "Reserved size is too small");
+
+ if (invalidation_mark) {
+ // TODO(Rodrigo): Find a better way to invalidate than waiting for all watches to finish.
+ exctx = scheduler.Flush();
+ std::for_each(watches.begin(), watches.begin() + *invalidation_mark,
+ [&](auto& resource) { resource->Wait(); });
+ invalidation_mark = std::nullopt;
+ }
+
+ if (used_watches + 1 >= watches.size()) {
+ // Ensure that there are enough watches.
+ ReserveWatches(WATCHES_RESERVE_CHUNK);
+ }
+ // Add a watch for this allocation.
+ watches[used_watches++]->Watch(exctx.GetFence());
+
+ offset += size;
+
+ return exctx;
+}
+
+void VKStreamBuffer::CreateBuffers(VKMemoryManager& memory_manager, vk::BufferUsageFlags usage) {
+ const vk::BufferCreateInfo buffer_ci({}, buffer_size, usage, vk::SharingMode::eExclusive, 0,
+ nullptr);
+
+ const auto dev = device.GetLogical();
+ const auto& dld = device.GetDispatchLoader();
+ buffer = dev.createBufferUnique(buffer_ci, nullptr, dld);
+ commit = memory_manager.Commit(*buffer, true);
+ mapped_pointer = commit->GetData();
+}
+
+void VKStreamBuffer::ReserveWatches(std::size_t grow_size) {
+ const std::size_t previous_size = watches.size();
+ watches.resize(previous_size + grow_size);
+ std::generate(watches.begin() + previous_size, watches.end(),
+ []() { return std::make_unique<VKFenceWatch>(); });
+}
+
+} // namespace Vulkan
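
Putting Reserve and Send together, a sketch of the upload loop a caller is expected to run; the payload and its size are illustrative, and exctx comes from the scheduler:

    const u64 upload_size = 64;
    const u8 payload[64] = {}; // illustrative data to upload

    auto [pointer, buffer_offset, invalidated] = stream_buffer.Reserve(upload_size);
    if (invalidated) {
        // Every previously returned offset is stale; cached state referencing
        // the buffer has to be rebuilt.
    }
    std::memcpy(pointer, payload, upload_size);
    exctx = stream_buffer.Send(exctx, upload_size);
    // buffer_offset can now be used in bindings; the region stays protected
    // until the fence watched in Send is released.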
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
new file mode 100644
index 000000000..69d036ccd
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -0,0 +1,72 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <optional>
+#include <tuple>
+#include <vector>
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_memory_manager.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKFence;
+class VKFenceWatch;
+class VKResourceManager;
+class VKScheduler;
+
+class VKStreamBuffer {
+public:
+ explicit VKStreamBuffer(const VKDevice& device, VKMemoryManager& memory_manager,
+ VKScheduler& scheduler, u64 size, vk::BufferUsageFlags usage,
+ vk::AccessFlags access, vk::PipelineStageFlags pipeline_stage);
+ ~VKStreamBuffer();
+
+ /**
+ * Reserves a region of memory from the stream buffer.
+ * @param size Size to reserve.
+ * @returns A tuple in the following order: raw memory pointer (with the offset added), buffer
+ * offset, and a boolean that is true when the buffer has been invalidated.
+ */
+ std::tuple<u8*, u64, bool> Reserve(u64 size);
+
+ /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy.
+ [[nodiscard]] VKExecutionContext Send(VKExecutionContext exctx, u64 size);
+
+ vk::Buffer GetBuffer() const {
+ return *buffer;
+ }
+
+private:
+ /// Creates Vulkan buffer handles, committing the required memory.
+ void CreateBuffers(VKMemoryManager& memory_manager, vk::BufferUsageFlags usage);
+
+ /// Increases the amount of watches available.
+ void ReserveWatches(std::size_t grow_size);
+
+ const VKDevice& device; ///< Vulkan device manager.
+ VKScheduler& scheduler; ///< Command scheduler.
+ const u64 buffer_size; ///< Total size of the stream buffer.
+ const vk::AccessFlags access; ///< Access usage of this stream buffer.
+ const vk::PipelineStageFlags pipeline_stage; ///< Pipeline usage of this stream buffer.
+
+ UniqueBuffer buffer; ///< Mapped buffer.
+ VKMemoryCommit commit; ///< Memory commit.
+ u8* mapped_pointer{}; ///< Pointer to the host-visible memory commit.
+
+ u64 offset{}; ///< Buffer iterator.
+ u64 mapped_size{}; ///< Size reserved for the current copy.
+
+ std::vector<std::unique_ptr<VKFenceWatch>> watches; ///< Pool of fence watches.
+ std::size_t used_watches{}; ///< Count of watches, reset on invalidation.
+ std::optional<std::size_t>
+ invalidation_mark{}; ///< Number of watches used in the current invalidation.
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index 740ac3118..e4c438792 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -165,6 +165,7 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) {
{OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2},
{OpCode::Type::Conversion, &ShaderIR::DecodeConversion},
{OpCode::Type::Memory, &ShaderIR::DecodeMemory},
+ {OpCode::Type::Texture, &ShaderIR::DecodeTexture},
{OpCode::Type::FloatSetPredicate, &ShaderIR::DecodeFloatSetPredicate},
{OpCode::Type::IntegerSetPredicate, &ShaderIR::DecodeIntegerSetPredicate},
{OpCode::Type::HalfSetPredicate, &ShaderIR::DecodeHalfSetPredicate},
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index 55ec601ff..ea3c71eed 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -17,24 +17,6 @@ using Tegra::Shader::Attribute;
using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
using Tegra::Shader::Register;
-using Tegra::Shader::TextureMiscMode;
-using Tegra::Shader::TextureProcessMode;
-using Tegra::Shader::TextureType;
-
-static std::size_t GetCoordCount(TextureType texture_type) {
- switch (texture_type) {
- case TextureType::Texture1D:
- return 1;
- case TextureType::Texture2D:
- return 2;
- case TextureType::Texture3D:
- case TextureType::TextureCube:
- return 3;
- default:
- UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type));
- return 0;
- }
-}
u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
@@ -48,7 +30,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
UNIMPLEMENTED_IF_MSG((instr.attribute.fmt20.immediate.Value() % sizeof(u32)) != 0,
"Unaligned attribute loads are not supported");
- Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Perspective,
+ Tegra::Shader::IpaMode input_mode{Tegra::Shader::IpaInterpMode::Pass,
Tegra::Shader::IpaSampleMode::Default};
u64 next_element = instr.attribute.fmt20.element;
@@ -247,194 +229,6 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
}
break;
}
- case OpCode::Id::TEX: {
- UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI),
- "AOFFI is not implemented");
-
- if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) {
- LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete");
- }
-
- const TextureType texture_type{instr.tex.texture_type};
- const bool is_array = instr.tex.array != 0;
- const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC);
- const auto process_mode = instr.tex.GetTextureProcessMode();
- WriteTexInstructionFloat(
- bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array));
- break;
- }
- case OpCode::Id::TEXS: {
- const TextureType texture_type{instr.texs.GetTextureType()};
- const bool is_array{instr.texs.IsArrayTexture()};
- const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC);
- const auto process_mode = instr.texs.GetTextureProcessMode();
-
- if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) {
- LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete");
- }
-
- const Node4 components =
- GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array);
-
- if (instr.texs.fp32_flag) {
- WriteTexsInstructionFloat(bb, instr, components);
- } else {
- WriteTexsInstructionHalfFloat(bb, instr, components);
- }
- break;
- }
- case OpCode::Id::TLD4: {
- ASSERT(instr.tld4.array == 0);
- UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI),
- "AOFFI is not implemented");
- UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV),
- "NDV is not implemented");
- UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP),
- "PTP is not implemented");
-
- if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) {
- LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete");
- }
-
- const auto texture_type = instr.tld4.texture_type.Value();
- const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC);
- const bool is_array = instr.tld4.array != 0;
- WriteTexInstructionFloat(bb, instr,
- GetTld4Code(instr, texture_type, depth_compare, is_array));
- break;
- }
- case OpCode::Id::TLD4S: {
- UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI),
- "AOFFI is not implemented");
- if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) {
- LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete");
- }
-
- const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC);
- const Node op_a = GetRegister(instr.gpr8);
- const Node op_b = GetRegister(instr.gpr20);
-
- // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction.
- std::vector<Node> coords;
- if (depth_compare) {
- // Note: TLD4S coordinate encoding works just like TEXS's
- const Node op_y = GetRegister(instr.gpr8.Value() + 1);
- coords.push_back(op_a);
- coords.push_back(op_y);
- coords.push_back(op_b);
- } else {
- coords.push_back(op_a);
- coords.push_back(op_b);
- }
- std::vector<Node> extras;
- extras.push_back(Immediate(static_cast<u32>(instr.tld4s.component)));
-
- const auto& sampler =
- GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare);
-
- Node4 values;
- for (u32 element = 0; element < values.size(); ++element) {
- auto coords_copy = coords;
- MetaTexture meta{sampler, {}, {}, extras, element};
- values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
- }
-
- WriteTexsInstructionFloat(bb, instr, values);
- break;
- }
- case OpCode::Id::TXQ: {
- if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) {
- LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete");
- }
-
- // TODO: The new commits on the texture refactor, change the way samplers work.
- // Sadly, not all texture instructions specify the type of texture their sampler
- // uses. This must be fixed at a later instance.
- const auto& sampler =
- GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false);
-
- u32 indexer = 0;
- switch (instr.txq.query_type) {
- case Tegra::Shader::TextureQueryType::Dimension: {
- for (u32 element = 0; element < 4; ++element) {
- if (!instr.txq.IsComponentEnabled(element)) {
- continue;
- }
- MetaTexture meta{sampler, {}, {}, {}, element};
- const Node value =
- Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8));
- SetTemporal(bb, indexer++, value);
- }
- for (u32 i = 0; i < indexer; ++i) {
- SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
- }
- break;
- }
- default:
- UNIMPLEMENTED_MSG("Unhandled texture query type: {}",
- static_cast<u32>(instr.txq.query_type.Value()));
- }
- break;
- }
- case OpCode::Id::TMML: {
- UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
- "NDV is not implemented");
-
- if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) {
- LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete");
- }
-
- auto texture_type = instr.tmml.texture_type.Value();
- const bool is_array = instr.tmml.array != 0;
- const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);
-
- std::vector<Node> coords;
-
- // TODO: Add coordinates for different samplers once other texture types are implemented.
- switch (texture_type) {
- case TextureType::Texture1D:
- coords.push_back(GetRegister(instr.gpr8));
- break;
- case TextureType::Texture2D:
- coords.push_back(GetRegister(instr.gpr8.Value() + 0));
- coords.push_back(GetRegister(instr.gpr8.Value() + 1));
- break;
- default:
- UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type));
-
- // Fallback to interpreting as a 2D texture for now
- coords.push_back(GetRegister(instr.gpr8.Value() + 0));
- coords.push_back(GetRegister(instr.gpr8.Value() + 1));
- texture_type = TextureType::Texture2D;
- }
-
- for (u32 element = 0; element < 2; ++element) {
- auto params = coords;
- MetaTexture meta{sampler, {}, {}, {}, element};
- const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params));
- SetTemporal(bb, element, value);
- }
- for (u32 element = 0; element < 2; ++element) {
- SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element));
- }
-
- break;
- }
- case OpCode::Id::TLDS: {
- const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()};
- const bool is_array{instr.tlds.IsArrayTexture()};
-
- UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI),
- "AOFFI is not implemented");
- UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented");
-
- if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) {
- LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete");
- }
-
- WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array));
- break;
- }
default:
UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName());
}
@@ -442,291 +236,4 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
return pc;
}
-const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type,
- bool is_array, bool is_shadow) {
- const auto offset = static_cast<std::size_t>(sampler.index.Value());
-
- // If this sampler has already been used, return the existing mapping.
- const auto itr =
- std::find_if(used_samplers.begin(), used_samplers.end(),
- [&](const Sampler& entry) { return entry.GetOffset() == offset; });
- if (itr != used_samplers.end()) {
- ASSERT(itr->GetType() == type && itr->IsArray() == is_array &&
- itr->IsShadow() == is_shadow);
- return *itr;
- }
-
- // Otherwise create a new mapping for this sampler
- const std::size_t next_index = used_samplers.size();
- const Sampler entry{offset, next_index, type, is_array, is_shadow};
- return *used_samplers.emplace(entry).first;
-}
-
-void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
- u32 dest_elem = 0;
- for (u32 elem = 0; elem < 4; ++elem) {
- if (!instr.tex.IsComponentEnabled(elem)) {
- // Skip disabled components
- continue;
- }
- SetTemporal(bb, dest_elem++, components[elem]);
- }
- // After writing values in temporals, move them to the real registers
- for (u32 i = 0; i < dest_elem; ++i) {
- SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
- }
-}
-
-void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr,
- const Node4& components) {
- // TEXS has two destination registers and a swizzle. The first two elements in the swizzle
- // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1
-
- u32 dest_elem = 0;
- for (u32 component = 0; component < 4; ++component) {
- if (!instr.texs.IsComponentEnabled(component))
- continue;
- SetTemporal(bb, dest_elem++, components[component]);
- }
-
- for (u32 i = 0; i < dest_elem; ++i) {
- if (i < 2) {
- // Write the first two swizzle components to gpr0 and gpr0+1
- SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i));
- } else {
- ASSERT(instr.texs.HasTwoDestinations());
- // Write the rest of the swizzle components to gpr28 and gpr28+1
- SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i));
- }
- }
-}
-
-void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr,
- const Node4& components) {
- // TEXS.F16 destionation registers are packed in two registers in pairs (just like any half
- // float instruction).
-
- Node4 values;
- u32 dest_elem = 0;
- for (u32 component = 0; component < 4; ++component) {
- if (!instr.texs.IsComponentEnabled(component))
- continue;
- values[dest_elem++] = components[component];
- }
- if (dest_elem == 0)
- return;
-
- std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); });
-
- const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]);
- if (dest_elem <= 2) {
- SetRegister(bb, instr.gpr0, first_value);
- return;
- }
-
- SetTemporal(bb, 0, first_value);
- SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3]));
-
- SetRegister(bb, instr.gpr0, GetTemporal(0));
- SetRegister(bb, instr.gpr28, GetTemporal(1));
-}
-
-Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
- TextureProcessMode process_mode, std::vector<Node> coords,
- Node array, Node depth_compare, u32 bias_offset) {
- const bool is_array = array;
- const bool is_shadow = depth_compare;
-
- UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) ||
- (texture_type == TextureType::TextureCube && is_array && is_shadow),
- "This method is not supported.");
-
- const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, is_shadow);
-
- const bool lod_needed = process_mode == TextureProcessMode::LZ ||
- process_mode == TextureProcessMode::LL ||
- process_mode == TextureProcessMode::LLA;
-
- // LOD selection (either via bias or explicit textureLod) not supported in GL for
- // sampler2DArrayShadow and samplerCubeArrayShadow.
- const bool gl_lod_supported =
- !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) ||
- (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow));
-
- const OperationCode read_method =
- lod_needed && gl_lod_supported ? OperationCode::TextureLod : OperationCode::Texture;
-
- UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported);
-
- std::vector<Node> extras;
- if (process_mode != TextureProcessMode::None && gl_lod_supported) {
- if (process_mode == TextureProcessMode::LZ) {
- extras.push_back(Immediate(0.0f));
- } else {
- // If present, lod or bias are always stored in the register indexed by the gpr20
- // field with an offset depending on the usage of the other registers
- extras.push_back(GetRegister(instr.gpr20.Value() + bias_offset));
- }
- }
-
- Node4 values;
- for (u32 element = 0; element < values.size(); ++element) {
- auto copy_coords = coords;
- MetaTexture meta{sampler, array, depth_compare, extras, element};
- values[element] = Operation(read_method, meta, std::move(copy_coords));
- }
-
- return values;
-}
-
-Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
- TextureProcessMode process_mode, bool depth_compare, bool is_array) {
- const bool lod_bias_enabled =
- (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);
-
- const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
- texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5);
- // If enabled arrays index is always stored in the gpr8 field
- const u64 array_register = instr.gpr8.Value();
- // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used
- const u64 coord_register = array_register + (is_array ? 1 : 0);
-
- std::vector<Node> coords;
- for (std::size_t i = 0; i < coord_count; ++i) {
- coords.push_back(GetRegister(coord_register + i));
- }
- // 1D.DC in OpenGL the 2nd component is ignored.
- if (depth_compare && !is_array && texture_type == TextureType::Texture1D) {
- coords.push_back(Immediate(0.0f));
- }
-
- const Node array = is_array ? GetRegister(array_register) : nullptr;
-
- Node dc{};
- if (depth_compare) {
- // Depth is always stored in the register signaled by gpr20 or in the next register if lod
- // or bias are used
- const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
- dc = GetRegister(depth_register);
- }
-
- return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, 0);
-}
-
-Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
- TextureProcessMode process_mode, bool depth_compare, bool is_array) {
- const bool lod_bias_enabled =
- (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);
-
- const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
- texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4);
- // If enabled arrays index is always stored in the gpr8 field
- const u64 array_register = instr.gpr8.Value();
- // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used
- const u64 coord_register = array_register + (is_array ? 1 : 0);
- const u64 last_coord_register =
- (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2))
- ? static_cast<u64>(instr.gpr20.Value())
- : coord_register + 1;
- const u32 bias_offset = coord_count > 2 ? 1 : 0;
-
- std::vector<Node> coords;
- for (std::size_t i = 0; i < coord_count; ++i) {
- const bool last = (i == (coord_count - 1)) && (coord_count > 1);
- coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
- }
-
- const Node array = is_array ? GetRegister(array_register) : nullptr;
-
- Node dc{};
- if (depth_compare) {
- // Depth is always stored in the register signaled by gpr20 or in the next register if lod
- // or bias are used
- const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
- dc = GetRegister(depth_register);
- }
-
- return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset);
-}
-
-Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare,
- bool is_array) {
- const std::size_t coord_count = GetCoordCount(texture_type);
- const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0);
- const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0);
-
- // If enabled arrays index is always stored in the gpr8 field
- const u64 array_register = instr.gpr8.Value();
- // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used
- const u64 coord_register = array_register + (is_array ? 1 : 0);
-
- std::vector<Node> coords;
- for (size_t i = 0; i < coord_count; ++i)
- coords.push_back(GetRegister(coord_register + i));
-
- const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare);
-
- Node4 values;
- for (u32 element = 0; element < values.size(); ++element) {
- auto coords_copy = coords;
- MetaTexture meta{sampler, GetRegister(array_register), {}, {}, element};
- values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
- }
-
- return values;
-}
-
-Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
- const std::size_t type_coord_count = GetCoordCount(texture_type);
- const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;
-
- // If enabled arrays index is always stored in the gpr8 field
- const u64 array_register = instr.gpr8.Value();
- // if is array gpr20 is used
- const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value();
-
- const u64 last_coord_register =
- ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array
- ? static_cast<u64>(instr.gpr20.Value())
- : coord_register + 1;
-
- std::vector<Node> coords;
- for (std::size_t i = 0; i < type_coord_count; ++i) {
- const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1);
- coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
- }
-
- const Node array = is_array ? GetRegister(array_register) : nullptr;
- // When lod is used always is in gpr20
- const Node lod = lod_enabled ? GetRegister(instr.gpr20) : Immediate(0);
-
- const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);
-
- Node4 values;
- for (u32 element = 0; element < values.size(); ++element) {
- auto coords_copy = coords;
- MetaTexture meta{sampler, array, {}, {lod}, element};
- values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
- }
- return values;
-}
-
-std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement(
- TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled,
- std::size_t max_coords, std::size_t max_inputs) {
- const std::size_t coord_count = GetCoordCount(texture_type);
-
- std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0);
- const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0);
- if (total_coord_count > max_coords || total_reg_count > max_inputs) {
- UNIMPLEMENTED_MSG("Unsupported Texture operation");
- total_coord_count = std::min(total_coord_count, max_coords);
- }
- // 1D.DC OpenGL is using a vec3 but 2nd component is ignored later.
- total_coord_count +=
- (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 1 : 0;
-
- return {coord_count, total_coord_count};
-}
-
} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp
index f9502e3d0..d750a2936 100644
--- a/src/video_core/shader/decode/other.cpp
+++ b/src/video_core/shader/decode/other.cpp
@@ -135,7 +135,18 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
instr.ipa.sample_mode.Value()};
const Node attr = GetInputAttribute(attribute.index, attribute.element, input_mode);
- const Node value = GetSaturatedFloat(attr, instr.ipa.saturate);
+ Node value = attr;
+ const Tegra::Shader::Attribute::Index index = attribute.index.Value();
+ if (index >= Tegra::Shader::Attribute::Index::Attribute_0 &&
+ index <= Tegra::Shader::Attribute::Index::Attribute_31) {
+ // TODO(Blinkhawk): There are cases where a perspective attribute uses PASS.
+ // In theory, by setting them as perspective, OpenGL does the perspective correction.
+ // A way must be found to reverse the last step of it.
+ if (input_mode.interpolation_mode == Tegra::Shader::IpaInterpMode::Multiply) {
+ value = Operation(OperationCode::FMul, PRECISE, value, GetRegister(instr.gpr20));
+ }
+ }
+ value = GetSaturatedFloat(value, instr.ipa.saturate);
SetRegister(bb, instr.gpr0, value);
break;
@@ -175,4 +186,4 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
return pc;
}
-} // namespace VideoCommon::Shader \ No newline at end of file
+} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
new file mode 100644
index 000000000..a99ae19bf
--- /dev/null
+++ b/src/video_core/shader/decode/texture.cpp
@@ -0,0 +1,534 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <vector>
+#include <fmt/format.h>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+
+using Tegra::Shader::Instruction;
+using Tegra::Shader::OpCode;
+using Tegra::Shader::Register;
+using Tegra::Shader::TextureMiscMode;
+using Tegra::Shader::TextureProcessMode;
+using Tegra::Shader::TextureType;
+
+static std::size_t GetCoordCount(TextureType texture_type) {
+ switch (texture_type) {
+ case TextureType::Texture1D:
+ return 1;
+ case TextureType::Texture2D:
+ return 2;
+ case TextureType::Texture3D:
+ case TextureType::TextureCube:
+ return 3;
+ default:
+ UNIMPLEMENTED_MSG("Unhandled texture type: {}", static_cast<u32>(texture_type));
+ return 0;
+ }
+}
+
+u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
+ const Instruction instr = {program_code[pc]};
+ const auto opcode = OpCode::Decode(instr);
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::TEX: {
+ UNIMPLEMENTED_IF_MSG(instr.tex.UsesMiscMode(TextureMiscMode::AOFFI),
+ "AOFFI is not implemented");
+
+ if (instr.tex.UsesMiscMode(TextureMiscMode::NODEP)) {
+ LOG_WARNING(HW_GPU, "TEX.NODEP implementation is incomplete");
+ }
+
+ const TextureType texture_type{instr.tex.texture_type};
+ const bool is_array = instr.tex.array != 0;
+ const bool depth_compare = instr.tex.UsesMiscMode(TextureMiscMode::DC);
+ const auto process_mode = instr.tex.GetTextureProcessMode();
+ WriteTexInstructionFloat(
+ bb, instr, GetTexCode(instr, texture_type, process_mode, depth_compare, is_array));
+ break;
+ }
+ case OpCode::Id::TEXS: {
+ const TextureType texture_type{instr.texs.GetTextureType()};
+ const bool is_array{instr.texs.IsArrayTexture()};
+ const bool depth_compare = instr.texs.UsesMiscMode(TextureMiscMode::DC);
+ const auto process_mode = instr.texs.GetTextureProcessMode();
+
+ if (instr.texs.UsesMiscMode(TextureMiscMode::NODEP)) {
+ LOG_WARNING(HW_GPU, "TEXS.NODEP implementation is incomplete");
+ }
+
+ const Node4 components =
+ GetTexsCode(instr, texture_type, process_mode, depth_compare, is_array);
+
+ if (instr.texs.fp32_flag) {
+ WriteTexsInstructionFloat(bb, instr, components);
+ } else {
+ WriteTexsInstructionHalfFloat(bb, instr, components);
+ }
+ break;
+ }
+ case OpCode::Id::TLD4: {
+ ASSERT(instr.tld4.array == 0);
+ UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::AOFFI),
+ "AOFFI is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::NDV),
+ "NDV is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tld4.UsesMiscMode(TextureMiscMode::PTP),
+ "PTP is not implemented");
+
+ if (instr.tld4.UsesMiscMode(TextureMiscMode::NODEP)) {
+ LOG_WARNING(HW_GPU, "TLD4.NODEP implementation is incomplete");
+ }
+
+ const auto texture_type = instr.tld4.texture_type.Value();
+ const bool depth_compare = instr.tld4.UsesMiscMode(TextureMiscMode::DC);
+ const bool is_array = instr.tld4.array != 0;
+ WriteTexInstructionFloat(bb, instr,
+ GetTld4Code(instr, texture_type, depth_compare, is_array));
+ break;
+ }
+ case OpCode::Id::TLD4S: {
+ UNIMPLEMENTED_IF_MSG(instr.tld4s.UsesMiscMode(TextureMiscMode::AOFFI),
+ "AOFFI is not implemented");
+ if (instr.tld4s.UsesMiscMode(TextureMiscMode::NODEP)) {
+ LOG_WARNING(HW_GPU, "TLD4S.NODEP implementation is incomplete");
+ }
+
+ const bool depth_compare = instr.tld4s.UsesMiscMode(TextureMiscMode::DC);
+ const Node op_a = GetRegister(instr.gpr8);
+ const Node op_b = GetRegister(instr.gpr20);
+
+ // TODO(Subv): Figure out how the sampler type is encoded in the TLD4S instruction.
+ std::vector<Node> coords;
+ if (depth_compare) {
+ // Note: TLD4S coordinate encoding works just like TEXS's
+ const Node op_y = GetRegister(instr.gpr8.Value() + 1);
+ coords.push_back(op_a);
+ coords.push_back(op_y);
+ coords.push_back(op_b);
+ } else {
+ coords.push_back(op_a);
+ coords.push_back(op_b);
+ }
+ const Node component = Immediate(static_cast<u32>(instr.tld4s.component));
+
+ const auto& sampler =
+ GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare);
+
+ Node4 values;
+ for (u32 element = 0; element < values.size(); ++element) {
+ auto coords_copy = coords;
+ MetaTexture meta{sampler, {}, {}, {}, {}, component, element};
+ values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
+ }
+
+ WriteTexsInstructionFloat(bb, instr, values);
+ break;
+ }
+ case OpCode::Id::TXQ: {
+ if (instr.txq.UsesMiscMode(TextureMiscMode::NODEP)) {
+ LOG_WARNING(HW_GPU, "TXQ.NODEP implementation is incomplete");
+ }
+
+ // TODO: The new commits on the texture refactor, change the way samplers work.
+ // Sadly, not all texture instructions specify the type of texture their sampler
+ // uses. This must be fixed at a later instance.
+ const auto& sampler =
+ GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false);
+
+ u32 indexer = 0;
+ switch (instr.txq.query_type) {
+ case Tegra::Shader::TextureQueryType::Dimension: {
+ for (u32 element = 0; element < 4; ++element) {
+ if (!instr.txq.IsComponentEnabled(element)) {
+ continue;
+ }
+ MetaTexture meta{sampler, {}, {}, {}, {}, {}, element};
+ const Node value =
+ Operation(OperationCode::TextureQueryDimensions, meta, GetRegister(instr.gpr8));
+ SetTemporal(bb, indexer++, value);
+ }
+ for (u32 i = 0; i < indexer; ++i) {
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
+ }
+ break;
+ }
+ default:
+ UNIMPLEMENTED_MSG("Unhandled texture query type: {}",
+ static_cast<u32>(instr.txq.query_type.Value()));
+ }
+ break;
+ }
+ case OpCode::Id::TMML: {
+ UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
+ "NDV is not implemented");
+
+ if (instr.tmml.UsesMiscMode(TextureMiscMode::NODEP)) {
+ LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete");
+ }
+
+ auto texture_type = instr.tmml.texture_type.Value();
+ const bool is_array = instr.tmml.array != 0;
+ const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);
+
+ std::vector<Node> coords;
+
+ // TODO: Add coordinates for different samplers once other texture types are implemented.
+ switch (texture_type) {
+ case TextureType::Texture1D:
+ coords.push_back(GetRegister(instr.gpr8));
+ break;
+ case TextureType::Texture2D:
+ coords.push_back(GetRegister(instr.gpr8.Value() + 0));
+ coords.push_back(GetRegister(instr.gpr8.Value() + 1));
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type));
+
+ // Fallback to interpreting as a 2D texture for now
+ coords.push_back(GetRegister(instr.gpr8.Value() + 0));
+ coords.push_back(GetRegister(instr.gpr8.Value() + 1));
+ texture_type = TextureType::Texture2D;
+ }
+
+ for (u32 element = 0; element < 2; ++element) {
+ auto params = coords;
+ MetaTexture meta{sampler, {}, {}, {}, {}, {}, element};
+ const Node value = Operation(OperationCode::TextureQueryLod, meta, std::move(params));
+ SetTemporal(bb, element, value);
+ }
+ for (u32 element = 0; element < 2; ++element) {
+ SetRegister(bb, instr.gpr0.Value() + element, GetTemporal(element));
+ }
+
+ break;
+ }
+ case OpCode::Id::TLDS: {
+ const Tegra::Shader::TextureType texture_type{instr.tlds.GetTextureType()};
+ const bool is_array{instr.tlds.IsArrayTexture()};
+
+ UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::AOFFI),
+ "AOFFI is not implemented");
+ UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented");
+
+ if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) {
+ LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete");
+ }
+
+ WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array));
+ break;
+ }
+ default:
+ UNIMPLEMENTED_MSG("Unhandled memory instruction: {}", opcode->get().GetName());
+ }
+
+ return pc;
+}
+
+const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type,
+ bool is_array, bool is_shadow) {
+ const auto offset = static_cast<std::size_t>(sampler.index.Value());
+
+ // If this sampler has already been used, return the existing mapping.
+ const auto itr =
+ std::find_if(used_samplers.begin(), used_samplers.end(),
+ [&](const Sampler& entry) { return entry.GetOffset() == offset; });
+ if (itr != used_samplers.end()) {
+ ASSERT(itr->GetType() == type && itr->IsArray() == is_array &&
+ itr->IsShadow() == is_shadow);
+ return *itr;
+ }
+
+ // Otherwise create a new mapping for this sampler
+ const std::size_t next_index = used_samplers.size();
+ const Sampler entry{offset, next_index, type, is_array, is_shadow};
+ return *used_samplers.emplace(entry).first;
+}
+
+void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
+ u32 dest_elem = 0;
+ for (u32 elem = 0; elem < 4; ++elem) {
+ if (!instr.tex.IsComponentEnabled(elem)) {
+ // Skip disabled components
+ continue;
+ }
+ SetTemporal(bb, dest_elem++, components[elem]);
+ }
+ // After writing values in temporals, move them to the real registers
+ for (u32 i = 0; i < dest_elem; ++i) {
+ SetRegister(bb, instr.gpr0.Value() + i, GetTemporal(i));
+ }
+}
+
+void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr,
+ const Node4& components) {
+ // TEXS has two destination registers and a swizzle. The first two elements in the swizzle
+ // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1
+
+ u32 dest_elem = 0;
+ for (u32 component = 0; component < 4; ++component) {
+ if (!instr.texs.IsComponentEnabled(component))
+ continue;
+ SetTemporal(bb, dest_elem++, components[component]);
+ }
+
+ for (u32 i = 0; i < dest_elem; ++i) {
+ if (i < 2) {
+ // Write the first two swizzle components to gpr0 and gpr0+1
+ SetRegister(bb, instr.gpr0.Value() + i % 2, GetTemporal(i));
+ } else {
+ ASSERT(instr.texs.HasTwoDestinations());
+ // Write the rest of the swizzle components to gpr28 and gpr28+1
+ SetRegister(bb, instr.gpr28.Value() + i % 2, GetTemporal(i));
+ }
+ }
+}
+
+void ShaderIR::WriteTexsInstructionHalfFloat(NodeBlock& bb, Instruction instr,
+ const Node4& components) {
+ // TEXS.F16 destination registers are packed in pairs into two registers (just like any half
+ // float instruction).
+
+ Node4 values;
+ u32 dest_elem = 0;
+ for (u32 component = 0; component < 4; ++component) {
+ if (!instr.texs.IsComponentEnabled(component))
+ continue;
+ values[dest_elem++] = components[component];
+ }
+ if (dest_elem == 0)
+ return;
+
+ std::generate(values.begin() + dest_elem, values.end(), [&]() { return Immediate(0); });
+
+ const Node first_value = Operation(OperationCode::HPack2, values[0], values[1]);
+ if (dest_elem <= 2) {
+ SetRegister(bb, instr.gpr0, first_value);
+ return;
+ }
+
+ SetTemporal(bb, 0, first_value);
+ SetTemporal(bb, 1, Operation(OperationCode::HPack2, values[2], values[3]));
+
+ SetRegister(bb, instr.gpr0, GetTemporal(0));
+ SetRegister(bb, instr.gpr28, GetTemporal(1));
+}
+
+Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
+ TextureProcessMode process_mode, std::vector<Node> coords,
+ Node array, Node depth_compare, u32 bias_offset) {
+ const bool is_array = array;
+ const bool is_shadow = depth_compare;
+
+ UNIMPLEMENTED_IF_MSG((texture_type == TextureType::Texture3D && (is_array || is_shadow)) ||
+ (texture_type == TextureType::TextureCube && is_array && is_shadow),
+ "This method is not supported.");
+
+ const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, is_shadow);
+
+ const bool lod_needed = process_mode == TextureProcessMode::LZ ||
+ process_mode == TextureProcessMode::LL ||
+ process_mode == TextureProcessMode::LLA;
+
+ // LOD selection (either via bias or explicit textureLod) not supported in GL for
+ // sampler2DArrayShadow and samplerCubeArrayShadow.
+ const bool gl_lod_supported =
+ !((texture_type == Tegra::Shader::TextureType::Texture2D && is_array && is_shadow) ||
+ (texture_type == Tegra::Shader::TextureType::TextureCube && is_array && is_shadow));
+
+ const OperationCode read_method =
+ (lod_needed && gl_lod_supported) ? OperationCode::TextureLod : OperationCode::Texture;
+
+ UNIMPLEMENTED_IF(process_mode != TextureProcessMode::None && !gl_lod_supported);
+
+ Node bias = {};
+ Node lod = {};
+ if (process_mode != TextureProcessMode::None && gl_lod_supported) {
+ switch (process_mode) {
+ case TextureProcessMode::LZ:
+ lod = Immediate(0.0f);
+ break;
+ case TextureProcessMode::LB:
+ // If present, lod or bias are always stored in the register indexed by the gpr20
+ // field with an offset depending on the usage of the other registers
+ bias = GetRegister(instr.gpr20.Value() + bias_offset);
+ break;
+ case TextureProcessMode::LL:
+ lod = GetRegister(instr.gpr20.Value() + bias_offset);
+ break;
+ default:
+ UNIMPLEMENTED_MSG("Unimplemented process mode={}", static_cast<u32>(process_mode));
+ break;
+ }
+ }
+
+ Node4 values;
+ for (u32 element = 0; element < values.size(); ++element) {
+ auto copy_coords = coords;
+ MetaTexture meta{sampler, array, depth_compare, bias, lod, {}, element};
+ values[element] = Operation(read_method, meta, std::move(copy_coords));
+ }
+
+ return values;
+}
+
+Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
+ TextureProcessMode process_mode, bool depth_compare, bool is_array) {
+ const bool lod_bias_enabled =
+ (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);
+
+ const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
+ texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5);
+ // If enabled, the array index is always stored in the gpr8 field
+ const u64 array_register = instr.gpr8.Value();
+ // The first coordinate index is gpr8, or gpr8 + 1 when arrays are used
+ const u64 coord_register = array_register + (is_array ? 1 : 0);
+
+ std::vector<Node> coords;
+ for (std::size_t i = 0; i < coord_count; ++i) {
+ coords.push_back(GetRegister(coord_register + i));
+ }
+ // For 1D.DC in OpenGL, the 2nd component is ignored.
+ if (depth_compare && !is_array && texture_type == TextureType::Texture1D) {
+ coords.push_back(Immediate(0.0f));
+ }
+
+ const Node array = is_array ? GetRegister(array_register) : nullptr;
+
+ Node dc{};
+ if (depth_compare) {
+ // Depth is always stored in the register signaled by gpr20 or in the next register if lod
+ // or bias are used
+ const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
+ dc = GetRegister(depth_register);
+ }
+
+ return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, 0);
+}
+
+Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
+ TextureProcessMode process_mode, bool depth_compare, bool is_array) {
+ const bool lod_bias_enabled =
+ (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);
+
+ const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement(
+ texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4);
+ // If enabled, the array index is always stored in the gpr8 field
+ const u64 array_register = instr.gpr8.Value();
+ // The first coordinate index is stored in the gpr8 field, or (gpr8 + 1) when arrays are used
+ const u64 coord_register = array_register + (is_array ? 1 : 0);
+ const u64 last_coord_register =
+ (is_array || !(lod_bias_enabled || depth_compare) || (coord_count > 2))
+ ? static_cast<u64>(instr.gpr20.Value())
+ : coord_register + 1;
+ const u32 bias_offset = coord_count > 2 ? 1 : 0;
+
+ std::vector<Node> coords;
+ for (std::size_t i = 0; i < coord_count; ++i) {
+ const bool last = (i == (coord_count - 1)) && (coord_count > 1);
+ coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
+ }
+
+ const Node array = is_array ? GetRegister(array_register) : nullptr;
+
+ Node dc{};
+ if (depth_compare) {
+ // Depth is always stored in the register signaled by gpr20 or in the next register if lod
+ // or bias are used
+ const u64 depth_register = instr.gpr20.Value() + (lod_bias_enabled ? 1 : 0);
+ dc = GetRegister(depth_register);
+ }
+
+ return GetTextureCode(instr, texture_type, process_mode, coords, array, dc, bias_offset);
+}
+
+Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool depth_compare,
+ bool is_array) {
+ const std::size_t coord_count = GetCoordCount(texture_type);
+ const std::size_t total_coord_count = coord_count + (is_array ? 1 : 0);
+ const std::size_t total_reg_count = total_coord_count + (depth_compare ? 1 : 0);
+
+ // If enabled, the array index is always stored in the gpr8 field
+ const u64 array_register = instr.gpr8.Value();
+ // The first coordinate index is gpr8, or gpr8 + 1 when arrays are used
+ const u64 coord_register = array_register + (is_array ? 1 : 0);
+
+ std::vector<Node> coords;
+ for (size_t i = 0; i < coord_count; ++i)
+ coords.push_back(GetRegister(coord_register + i));
+
+ const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare);
+
+ Node4 values;
+ for (u32 element = 0; element < values.size(); ++element) {
+ auto coords_copy = coords;
+ MetaTexture meta{sampler, GetRegister(array_register), {}, {}, {}, {}, element};
+ values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
+ }
+
+ return values;
+}
+
+Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
+ const std::size_t type_coord_count = GetCoordCount(texture_type);
+ const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;
+
+ // If enabled, the array index is always stored in the gpr8 field
+ const u64 array_register = instr.gpr8.Value();
+ // If this is an array texture, the coordinates are stored in gpr20
+ const u64 coord_register = is_array ? instr.gpr20.Value() : instr.gpr8.Value();
+
+ const u64 last_coord_register =
+ ((type_coord_count > 2) || (type_coord_count == 2 && !lod_enabled)) && !is_array
+ ? static_cast<u64>(instr.gpr20.Value())
+ : coord_register + 1;
+
+ std::vector<Node> coords;
+ for (std::size_t i = 0; i < type_coord_count; ++i) {
+ const bool last = (i == (type_coord_count - 1)) && (type_coord_count > 1);
+ coords.push_back(GetRegister(last ? last_coord_register : coord_register + i));
+ }
+
+ const Node array = is_array ? GetRegister(array_register) : nullptr;
+ // When lod is used, it is always stored in gpr20
+ const Node lod = lod_enabled ? GetRegister(instr.gpr20) : Immediate(0);
+
+ const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false);
+
+ Node4 values;
+ for (u32 element = 0; element < values.size(); ++element) {
+ auto coords_copy = coords;
+ MetaTexture meta{sampler, array, {}, {}, lod, {}, element};
+ values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
+ }
+ return values;
+}
+
+std::tuple<std::size_t, std::size_t> ShaderIR::ValidateAndGetCoordinateElement(
+ TextureType texture_type, bool depth_compare, bool is_array, bool lod_bias_enabled,
+ std::size_t max_coords, std::size_t max_inputs) {
+ const std::size_t coord_count = GetCoordCount(texture_type);
+
+ std::size_t total_coord_count = coord_count + (is_array ? 1 : 0) + (depth_compare ? 1 : 0);
+ const std::size_t total_reg_count = total_coord_count + (lod_bias_enabled ? 1 : 0);
+ if (total_coord_count > max_coords || total_reg_count > max_inputs) {
+ UNIMPLEMENTED_MSG("Unsupported Texture operation");
+ total_coord_count = std::min(total_coord_count, max_coords);
+ }
+ // For 1D.DC, OpenGL uses a vec3, but the 2nd component is ignored later.
+ total_coord_count +=
+ (depth_compare && !is_array && texture_type == TextureType::Texture1D) ? 1 : 0;
+
+ return {coord_count, total_coord_count};
+}
+
+} // namespace VideoCommon::Shader \ No newline at end of file
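
As a worked example of the register layout GetTexsCode derives above (the numbers follow the code; the instructions themselves are illustrative):

    // Non-array Texture2D TEXS with lod bias enabled: coord_count == 2, so
    //   x    <- gpr8          (coord_register)
    //   y    <- gpr8 + 1      (last_coord_register = coord_register + 1)
    //   bias <- gpr20         (bias_offset == 0)
    //
    // Texture3D TEXS with lod bias enabled: coord_count == 3, so
    //   x    <- gpr8,  y <- gpr8 + 1
    //   z    <- gpr20         (the last coordinate moves to gpr20)
    //   bias <- gpr20 + 1     (bias_offset == 1)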
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index 52c7f2c4e..5bc3a3900 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -290,7 +290,9 @@ struct MetaTexture {
const Sampler& sampler;
Node array{};
Node depth_compare{};
- std::vector<Node> extras;
+ Node bias{};
+ Node lod{};
+ Node component{};
u32 element{};
};
@@ -614,6 +616,7 @@ private:
u32 DecodeHfma2(NodeBlock& bb, u32 pc);
u32 DecodeConversion(NodeBlock& bb, u32 pc);
u32 DecodeMemory(NodeBlock& bb, u32 pc);
+ u32 DecodeTexture(NodeBlock& bb, u32 pc);
u32 DecodeFloatSetPredicate(NodeBlock& bb, u32 pc);
u32 DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc);
u32 DecodeHalfSetPredicate(NodeBlock& bb, u32 pc);
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index be4635342..33b071747 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -20,9 +20,9 @@ std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor,
return {node, cursor};
}
if (const auto conditional = std::get_if<ConditionalNode>(node)) {
- const auto& code = conditional->GetCode();
- const auto [found, internal_cursor] =
- FindOperation(code, static_cast<s64>(code.size() - 1), operation_code);
+ const auto& conditional_code = conditional->GetCode();
+ const auto [found, internal_cursor] = FindOperation(
+ conditional_code, static_cast<s64>(conditional_code.size() - 1), operation_code);
if (found)
return {found, cursor};
}
@@ -58,8 +58,8 @@ Node ShaderIR::TrackCbuf(Node tracked, const NodeBlock& code, s64 cursor) {
return nullptr;
}
if (const auto conditional = std::get_if<ConditionalNode>(tracked)) {
- const auto& code = conditional->GetCode();
- return TrackCbuf(tracked, code, static_cast<s64>(code.size()));
+ const auto& conditional_code = conditional->GetCode();
+ return TrackCbuf(tracked, conditional_code, static_cast<s64>(conditional_code.size()));
}
return nullptr;
}
diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp
index 044ba116a..a7ac26d71 100644
--- a/src/video_core/surface.cpp
+++ b/src/video_core/surface.cpp
@@ -89,8 +89,6 @@ PixelFormat PixelFormatFromDepthFormat(Tegra::DepthFormat format) {
PixelFormat PixelFormatFromRenderTargetFormat(Tegra::RenderTargetFormat format) {
switch (format) {
- // TODO (Hexagon12): Converting SRGBA to RGBA is a hack and doesn't completely correct the
- // gamma.
case Tegra::RenderTargetFormat::RGBA8_SRGB:
return PixelFormat::RGBA8_SRGB;
case Tegra::RenderTargetFormat::RGBA8_UNORM:
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index bc50a4876..b508d64e9 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -23,28 +23,12 @@
#include "video_core/textures/astc.h"
-class BitStream {
+class InputBitStream {
public:
- explicit BitStream(unsigned char* ptr, int nBits = 0, int start_offset = 0)
+ explicit InputBitStream(const unsigned char* ptr, int nBits = 0, int start_offset = 0)
: m_NumBits(nBits), m_CurByte(ptr), m_NextBit(start_offset % 8) {}
- ~BitStream() = default;
-
- int GetBitsWritten() const {
- return m_BitsWritten;
- }
-
- void WriteBitsR(unsigned int val, unsigned int nBits) {
- for (unsigned int i = 0; i < nBits; i++) {
- WriteBit((val >> (nBits - i - 1)) & 1);
- }
- }
-
- void WriteBits(unsigned int val, unsigned int nBits) {
- for (unsigned int i = 0; i < nBits; i++) {
- WriteBit((val >> i) & 1);
- }
- }
+ ~InputBitStream() = default;
int GetBitsRead() const {
return m_BitsRead;
@@ -71,6 +55,38 @@ public:
}
private:
+ const int m_NumBits;
+ const unsigned char* m_CurByte;
+ int m_NextBit = 0;
+ int m_BitsRead = 0;
+
+ bool done = false;
+};
+
+class OutputBitStream {
+public:
+ explicit OutputBitStream(unsigned char* ptr, int nBits = 0, int start_offset = 0)
+ : m_NumBits(nBits), m_CurByte(ptr), m_NextBit(start_offset % 8) {}
+
+ ~OutputBitStream() = default;
+
+ int GetBitsWritten() const {
+ return m_BitsWritten;
+ }
+
+ void WriteBitsR(unsigned int val, unsigned int nBits) {
+ for (unsigned int i = 0; i < nBits; i++) {
+ WriteBit((val >> (nBits - i - 1)) & 1);
+ }
+ }
+
+ void WriteBits(unsigned int val, unsigned int nBits) {
+ for (unsigned int i = 0; i < nBits; i++) {
+ WriteBit((val >> i) & 1);
+ }
+ }
+
+private:
void WriteBit(int b) {
if (done)
@@ -238,8 +254,8 @@ public:
// Fills result with the values that are encoded in the given
// bitstream. We must know beforehand what the maximum possible
// value is, and how many values we're decoding.
- static void DecodeIntegerSequence(std::vector<IntegerEncodedValue>& result, BitStream& bits,
- uint32_t maxRange, uint32_t nValues) {
+ static void DecodeIntegerSequence(std::vector<IntegerEncodedValue>& result,
+ InputBitStream& bits, uint32_t maxRange, uint32_t nValues) {
// Determine encoding parameters
IntegerEncodedValue val = IntegerEncodedValue::CreateEncoding(maxRange);
@@ -267,7 +283,7 @@ public:
}
private:
- static void DecodeTritBlock(BitStream& bits, std::vector<IntegerEncodedValue>& result,
+ static void DecodeTritBlock(InputBitStream& bits, std::vector<IntegerEncodedValue>& result,
uint32_t nBitsPerValue) {
// Implement the algorithm in section C.2.12
uint32_t m[5];
@@ -327,7 +343,7 @@ private:
}
}
- static void DecodeQuintBlock(BitStream& bits, std::vector<IntegerEncodedValue>& result,
+ static void DecodeQuintBlock(InputBitStream& bits, std::vector<IntegerEncodedValue>& result,
uint32_t nBitsPerValue) {
// Implement the algorithm in section C.2.12
uint32_t m[3];
@@ -406,7 +422,7 @@ struct TexelWeightParams {
}
};
-static TexelWeightParams DecodeBlockInfo(BitStream& strm) {
+static TexelWeightParams DecodeBlockInfo(InputBitStream& strm) {
TexelWeightParams params;
// Read the entire block mode all at once
@@ -605,7 +621,7 @@ static TexelWeightParams DecodeBlockInfo(BitStream& strm) {
return params;
}
-static void FillVoidExtentLDR(BitStream& strm, uint32_t* const outBuf, uint32_t blockWidth,
+static void FillVoidExtentLDR(InputBitStream& strm, uint32_t* const outBuf, uint32_t blockWidth,
uint32_t blockHeight) {
// Don't actually care about the void extent, just read the bits...
for (int i = 0; i < 4; ++i) {
@@ -821,7 +837,7 @@ static void DecodeColorValues(uint32_t* out, uint8_t* data, const uint32_t* mode
// We now have enough to decode our integer sequence.
std::vector<IntegerEncodedValue> decodedColorValues;
- BitStream colorStream(data);
+ InputBitStream colorStream(data);
IntegerEncodedValue::DecodeIntegerSequence(decodedColorValues, colorStream, range, nValues);
// Once we have the decoded values, we need to dequantize them to the 0-255 range
@@ -1365,9 +1381,9 @@ static void ComputeEndpoints(Pixel& ep1, Pixel& ep2, const uint32_t*& colorValue
#undef READ_INT_VALUES
}
-static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
+static void DecompressBlock(const uint8_t inBuf[16], const uint32_t blockWidth,
const uint32_t blockHeight, uint32_t* outBuf) {
- BitStream strm(inBuf);
+ InputBitStream strm(inBuf);
TexelWeightParams weightParams = DecodeBlockInfo(strm);
// Was there an error?
@@ -1421,7 +1437,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
// Define color data.
uint8_t colorEndpointData[16];
memset(colorEndpointData, 0, sizeof(colorEndpointData));
- BitStream colorEndpointStream(colorEndpointData, 16 * 8, 0);
+ OutputBitStream colorEndpointStream(colorEndpointData, 16 * 8, 0);
// Read extra config data...
uint32_t baseCEM = 0;
@@ -1549,7 +1565,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
memset(texelWeightData + clearByteStart, 0, 16 - clearByteStart);
std::vector<IntegerEncodedValue> texelWeightValues;
- BitStream weightStream(texelWeightData);
+ InputBitStream weightStream(texelWeightData);
IntegerEncodedValue::DecodeIntegerSequence(texelWeightValues, weightStream,
weightParams.m_MaxWeight,
@@ -1597,7 +1613,7 @@ static void DecompressBlock(uint8_t inBuf[16], const uint32_t blockWidth,
namespace Tegra::Texture::ASTC {
-std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint32_t height,
+std::vector<uint8_t> Decompress(const uint8_t* data, uint32_t width, uint32_t height,
uint32_t depth, uint32_t block_width, uint32_t block_height) {
uint32_t blockIdx = 0;
std::vector<uint8_t> outData(height * width * depth * 4);
@@ -1605,7 +1621,7 @@ std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint
for (uint32_t j = 0; j < height; j += block_height) {
for (uint32_t i = 0; i < width; i += block_width) {
- uint8_t* blockPtr = data.data() + blockIdx * 16;
+ const uint8_t* blockPtr = data + blockIdx * 16;
// Blocks can be at most 12x12
uint32_t uncompData[144];
diff --git a/src/video_core/textures/astc.h b/src/video_core/textures/astc.h
index d419dd025..991cdba72 100644
--- a/src/video_core/textures/astc.h
+++ b/src/video_core/textures/astc.h
@@ -9,7 +9,7 @@
namespace Tegra::Texture::ASTC {
-std::vector<uint8_t> Decompress(std::vector<uint8_t>& data, uint32_t width, uint32_t height,
+std::vector<uint8_t> Decompress(const uint8_t* data, uint32_t width, uint32_t height,
uint32_t depth, uint32_t block_width, uint32_t block_height);
} // namespace Tegra::Texture::ASTC
diff --git a/src/video_core/textures/convert.cpp b/src/video_core/textures/convert.cpp
new file mode 100644
index 000000000..5e439f036
--- /dev/null
+++ b/src/video_core/textures/convert.cpp
@@ -0,0 +1,92 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <cstring>
+#include <tuple>
+#include <vector>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/logging/log.h"
+#include "video_core/textures/astc.h"
+#include "video_core/textures/convert.h"
+
+namespace Tegra::Texture {
+
+using VideoCore::Surface::PixelFormat;
+
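+ // Swaps the in-place layout between S8Z24 (stencil in the top byte) and
+ // Z24S8 (stencil in the low byte); e.g. the S8Z24 word 0xAABBCCDD
+ // (s8=0xAA, z24=0xBBCCDD) becomes the Z24S8 word 0xBBCCDDAA.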
+template <bool reverse>
+void SwapS8Z24ToZ24S8(u8* data, u32 width, u32 height) {
+ union S8Z24 {
+ BitField<0, 24, u32> z24;
+ BitField<24, 8, u32> s8;
+ };
+ static_assert(sizeof(S8Z24) == 4, "S8Z24 is incorrect size");
+
+ union Z24S8 {
+ BitField<0, 8, u32> s8;
+ BitField<8, 24, u32> z24;
+ };
+ static_assert(sizeof(Z24S8) == 4, "Z24S8 is incorrect size");
+
+ S8Z24 s8z24_pixel{};
+ Z24S8 z24s8_pixel{};
+ constexpr auto bpp{
+ VideoCore::Surface::GetBytesPerPixel(VideoCore::Surface::PixelFormat::S8Z24)};
+ for (std::size_t y = 0; y < height; ++y) {
+ for (std::size_t x = 0; x < width; ++x) {
+ const std::size_t offset{bpp * (y * width + x)};
+ if constexpr (reverse) {
+ std::memcpy(&z24s8_pixel, &data[offset], sizeof(Z24S8));
+ s8z24_pixel.s8.Assign(z24s8_pixel.s8);
+ s8z24_pixel.z24.Assign(z24s8_pixel.z24);
+ std::memcpy(&data[offset], &s8z24_pixel, sizeof(S8Z24));
+ } else {
+ std::memcpy(&s8z24_pixel, &data[offset], sizeof(S8Z24));
+ z24s8_pixel.s8.Assign(s8z24_pixel.s8);
+ z24s8_pixel.z24.Assign(s8z24_pixel.z24);
+ std::memcpy(&data[offset], &z24s8_pixel, sizeof(Z24S8));
+ }
+ }
+ }
+}
+
+static void ConvertS8Z24ToZ24S8(u8* data, u32 width, u32 height) {
+ SwapS8Z24ToZ24S8<false>(data, width, height);
+}
+
+static void ConvertZ24S8ToS8Z24(u8* data, u32 width, u32 height) {
+ SwapS8Z24ToZ24S8<true>(data, width, height);
+}
+
+void ConvertFromGuestToHost(u8* data, PixelFormat pixel_format, u32 width, u32 height, u32 depth,
+ bool convert_astc, bool convert_s8z24) {
+ if (convert_astc && IsPixelFormatASTC(pixel_format)) {
+ // Convert ASTC pixel formats to RGBA8, as most desktop GPUs do not support ASTC.
+ u32 block_width{};
+ u32 block_height{};
+ std::tie(block_width, block_height) = GetASTCBlockSize(pixel_format);
+ const std::vector<u8> rgba8_data =
+ Tegra::Texture::ASTC::Decompress(data, width, height, depth, block_width, block_height);
+ std::copy(rgba8_data.begin(), rgba8_data.end(), data);
+
+ } else if (convert_s8z24 && pixel_format == PixelFormat::S8Z24) {
+ Tegra::Texture::ConvertS8Z24ToZ24S8(data, width, height);
+ }
+}
+
+void ConvertFromHostToGuest(u8* data, PixelFormat pixel_format, u32 width, u32 height, u32 depth,
+ bool convert_astc, bool convert_s8z24) {
+ if (convert_astc && IsPixelFormatASTC(pixel_format)) {
+ LOG_CRITICAL(HW_GPU, "Conversion of format {} after texture flushing is not implemented",
+ static_cast<u32>(pixel_format));
+ UNREACHABLE();
+
+ } else if (convert_s8z24 && pixel_format == PixelFormat::S8Z24) {
+ Tegra::Texture::ConvertZ24S8ToS8Z24(data, width, height);
+ }
+}
+
+} // namespace Tegra::Texture \ No newline at end of file
diff --git a/src/video_core/textures/convert.h b/src/video_core/textures/convert.h
new file mode 100644
index 000000000..07cd8b5da
--- /dev/null
+++ b/src/video_core/textures/convert.h
@@ -0,0 +1,18 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "video_core/surface.h"
+
+namespace Tegra::Texture {
+
+void ConvertFromGuestToHost(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width,
+ u32 height, u32 depth, bool convert_astc, bool convert_s8z24);
+
+void ConvertFromHostToGuest(u8* data, VideoCore::Surface::PixelFormat pixel_format, u32 width,
+ u32 height, u32 depth, bool convert_astc, bool convert_s8z24);
+
+} // namespace Tegra::Texture \ No newline at end of file
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 5db75de22..cad7340f5 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -103,8 +103,8 @@ void FastProcessBlock(u8* const swizzled_data, u8* const unswizzled_data, const
const u32 swizzle_offset{y_address + table[(xb / fast_swizzle_align) % 4]};
const u32 out_x = xb * out_bytes_per_pixel / bytes_per_pixel;
const u32 pixel_index{out_x + pixel_base};
- data_ptrs[unswizzle] = swizzled_data + swizzle_offset;
- data_ptrs[!unswizzle] = unswizzled_data + pixel_index;
+ data_ptrs[unswizzle ? 1 : 0] = swizzled_data + swizzle_offset;
+ data_ptrs[unswizzle ? 0 : 1] = unswizzled_data + pixel_index;
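+ // data_ptrs[0] is the memcpy destination and data_ptrs[1] the source, so
+ // unswizzling copies out of the swizzled buffer and swizzling copies into it.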
std::memcpy(data_ptrs[0], data_ptrs[1], fast_swizzle_align);
}
pixel_base += stride_x;
@@ -154,7 +154,7 @@ void SwizzledData(u8* const swizzled_data, u8* const unswizzled_data, const bool
for (u32 xb = 0; xb < blocks_on_x; xb++) {
const u32 x_start = xb * block_x_elements;
const u32 x_end = std::min(width, x_start + block_x_elements);
- if (fast) {
+ if constexpr (fast) {
FastProcessBlock(swizzled_data, unswizzled_data, unswizzle, x_start, y_start,
z_start, x_end, y_end, z_end, tile_offset, xy_block_size,
layer_z, stride_x, bytes_per_pixel, out_bytes_per_pixel);
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index 85b7e9f7b..65df86890 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -16,16 +16,13 @@ inline std::size_t GetGOBSize() {
return 512;
}
-/**
- * Unswizzles a swizzled texture without changing its format.
- */
+/// Unswizzles a swizzled texture without changing its format.
void UnswizzleTexture(u8* unswizzled_data, VAddr address, u32 tile_size_x, u32 tile_size_y,
u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height = TICEntry::DefaultBlockHeight,
u32 block_depth = TICEntry::DefaultBlockHeight, u32 width_spacing = 0);
-/**
- * Unswizzles a swizzled texture without changing its format.
- */
+
+/// Unswizzles a swizzled texture without changing its format.
std::vector<u8> UnswizzleTexture(VAddr address, u32 tile_size_x, u32 tile_size_y,
u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height = TICEntry::DefaultBlockHeight,
@@ -37,15 +34,11 @@ void CopySwizzledData(u32 width, u32 height, u32 depth, u32 bytes_per_pixel,
u32 out_bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data,
bool unswizzle, u32 block_height, u32 block_depth, u32 width_spacing);
-/**
- * Decodes an unswizzled texture into a A8R8G8B8 texture.
- */
+/// Decodes an unswizzled texture into an A8R8G8B8 texture.
std::vector<u8> DecodeTexture(const std::vector<u8>& texture_data, TextureFormat format, u32 width,
u32 height);
-/**
- * This function calculates the correct size of a texture depending if it's tiled or not.
- */
+/// Calculates the correct size of a texture, depending on whether it is tiled.
std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
u32 block_height, u32 block_depth);
@@ -53,6 +46,7 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height
void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
u32 block_height);
+
/// Copies a tiled subrectangle into a linear surface.
void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width,
u32 bytes_per_pixel, VAddr swizzled_data, VAddr unswizzled_data,
diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h
index 0fc5530f2..93ecc6e31 100644
--- a/src/video_core/textures/texture.h
+++ b/src/video_core/textures/texture.h
@@ -4,6 +4,7 @@
#pragma once
+#include <array>
#include "common/assert.h"
#include "common/bit_field.h"
#include "common/common_funcs.h"
@@ -282,34 +283,62 @@ enum class TextureMipmapFilter : u32 {
struct TSCEntry {
union {
- BitField<0, 3, WrapMode> wrap_u;
- BitField<3, 3, WrapMode> wrap_v;
- BitField<6, 3, WrapMode> wrap_p;
- BitField<9, 1, u32> depth_compare_enabled;
- BitField<10, 3, DepthCompareFunc> depth_compare_func;
- BitField<13, 1, u32> srgb_conversion;
- BitField<20, 3, u32> max_anisotropy;
+ struct {
+ union {
+ BitField<0, 3, WrapMode> wrap_u;
+ BitField<3, 3, WrapMode> wrap_v;
+ BitField<6, 3, WrapMode> wrap_p;
+ BitField<9, 1, u32> depth_compare_enabled;
+ BitField<10, 3, DepthCompareFunc> depth_compare_func;
+ BitField<13, 1, u32> srgb_conversion;
+ BitField<20, 3, u32> max_anisotropy;
+ };
+ union {
+ BitField<0, 2, TextureFilter> mag_filter;
+ BitField<4, 2, TextureFilter> min_filter;
+ BitField<6, 2, TextureMipmapFilter> mipmap_filter;
+ BitField<9, 1, u32> cubemap_interface_filtering;
+ BitField<12, 13, u32> mip_lod_bias;
+ };
+ union {
+ BitField<0, 12, u32> min_lod_clamp;
+ BitField<12, 12, u32> max_lod_clamp;
+ BitField<24, 8, u32> srgb_border_color_r;
+ };
+ union {
+ BitField<12, 8, u32> srgb_border_color_g;
+ BitField<20, 8, u32> srgb_border_color_b;
+ };
+ std::array<f32, 4> border_color;
+ };
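+ // Raw view of the 0x20-byte descriptor, aliasing the structured fields above.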
+ std::array<u8, 0x20> raw;
};
- union {
- BitField<0, 2, TextureFilter> mag_filter;
- BitField<4, 2, TextureFilter> min_filter;
- BitField<6, 2, TextureMipmapFilter> mip_filter;
- BitField<9, 1, u32> cubemap_interface_filtering;
- BitField<12, 13, u32> mip_lod_bias;
- };
- union {
- BitField<0, 12, u32> min_lod_clamp;
- BitField<12, 12, u32> max_lod_clamp;
- BitField<24, 8, u32> srgb_border_color_r;
- };
- union {
- BitField<12, 8, u32> srgb_border_color_g;
- BitField<20, 8, u32> srgb_border_color_b;
- };
- float border_color_r;
- float border_color_g;
- float border_color_b;
- float border_color_a;
+
+ float GetMaxAnisotropy() const {
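+ // The 3-bit field is log2-encoded; e.g. a raw value of 4 yields 16.0f.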
+ return static_cast<float>(1U << max_anisotropy);
+ }
+
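+ // The LOD clamps are unsigned 4.8 fixed-point values; e.g. a raw
+ // min_lod_clamp of 0x180 yields 1.5f.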
+ float GetMinLod() const {
+ return static_cast<float>(min_lod_clamp) / 256.0f;
+ }
+
+ float GetMaxLod() const {
+ return static_cast<float>(max_lod_clamp) / 256.0f;
+ }
+
+ float GetLodBias() const {
+ // Sign extend the 13-bit value.
+ constexpr u32 mask = 1U << (13 - 1);
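+ // e.g. a raw value of 0x1F00: (0x1F00 ^ 0x1000) - 0x1000 = -256, which is
+ // -1.0f after the 4.8 fixed-point division.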
+ return static_cast<s32>((mip_lod_bias ^ mask) - mask) / 256.0f;
+ }
+
+ std::array<float, 4> GetBorderColor() const {
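+ // There is no sRGB-encoded alpha byte, so the linear alpha channel is
+ // reused even when sRGB conversion is enabled.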
+ if (srgb_conversion) {
+ return {srgb_border_color_r / 255.0f, srgb_border_color_g / 255.0f,
+ srgb_border_color_b / 255.0f, border_color[3]};
+ }
+ return border_color;
+ }
};
static_assert(sizeof(TSCEntry) == 0x20, "TSCEntry has wrong size");