// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
|
2020-07-18 04:24:32 +00:00
|
|
|
#include <condition_variable>
|
|
|
|
#include <mutex>
|
|
|
|
#include <thread>
|
|
|
|
#include <vector>
|
2020-07-10 03:36:38 +00:00
|
|
|
#include "video_core/engines/maxwell_3d.h"
|
|
|
|
#include "video_core/renderer_base.h"
|
|
|
|
#include "video_core/renderer_opengl/gl_shader_cache.h"
|
|
|
|
#include "video_core/shader/async_shaders.h"
|
|
|
|
|
|
|
|
namespace VideoCommon::Shader {
|
2020-07-18 04:24:32 +00:00
|
|
|
|
2020-07-10 03:36:38 +00:00
|
|
|
// Constructs the async shader manager. Worker threads are not spawned here;
// they are created lazily through AllocateWorkers().
AsyncShaders::AsyncShaders(Core::Frontend::EmuWindow& emu_window_) : emu_window(emu_window_) {}
|
2020-07-18 04:24:32 +00:00
|
|
|
|
2020-07-10 03:36:38 +00:00
|
|
|
// Tears down the manager. KillWorkers() detaches the worker threads rather
// than joining them, so workers may still be winding down after this returns.
AsyncShaders::~AsyncShaders() {
    KillWorkers();
}
|
|
|
|
|
2020-08-05 16:53:26 +00:00
|
|
|
void AsyncShaders::AllocateWorkers() {
|
|
|
|
// Max worker threads we should allow
|
|
|
|
constexpr u32 MAX_THREADS = 4;
|
|
|
|
// Deduce how many threads we can use
|
|
|
|
const u32 threads_used = std::thread::hardware_concurrency() / 4;
|
|
|
|
// Always allow at least 1 thread regardless of our settings
|
|
|
|
const auto max_worker_count = std::max(1U, threads_used);
|
|
|
|
// Don't use more than MAX_THREADS
|
|
|
|
const auto num_workers = std::min(max_worker_count, MAX_THREADS);
|
|
|
|
|
2020-08-05 20:41:22 +00:00
|
|
|
// If we already have workers queued, ignore
|
2020-08-05 16:53:26 +00:00
|
|
|
if (num_workers == worker_threads.size()) {
|
2020-07-10 03:36:38 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If workers already exist, clear them
|
|
|
|
if (!worker_threads.empty()) {
|
|
|
|
FreeWorkers();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create workers
|
|
|
|
for (std::size_t i = 0; i < num_workers; i++) {
|
|
|
|
context_list.push_back(emu_window.CreateSharedContext());
|
2020-08-14 12:16:03 +00:00
|
|
|
worker_threads.push_back(
|
|
|
|
std::thread(&AsyncShaders::ShaderCompilerThread, this, context_list[i].get()));
|
2020-07-10 03:36:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void AsyncShaders::FreeWorkers() {
|
|
|
|
// Mark all threads to quit
|
|
|
|
is_thread_exiting.store(true);
|
2020-07-16 08:51:32 +00:00
|
|
|
cv.notify_all();
|
2020-07-10 03:36:38 +00:00
|
|
|
for (auto& thread : worker_threads) {
|
|
|
|
thread.join();
|
|
|
|
}
|
|
|
|
// Clear our shared contexts
|
|
|
|
context_list.clear();
|
|
|
|
|
|
|
|
// Clear our worker threads
|
|
|
|
worker_threads.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
void AsyncShaders::KillWorkers() {
|
|
|
|
is_thread_exiting.store(true);
|
|
|
|
for (auto& thread : worker_threads) {
|
|
|
|
thread.detach();
|
|
|
|
}
|
|
|
|
// Clear our shared contexts
|
|
|
|
context_list.clear();
|
|
|
|
|
|
|
|
// Clear our worker threads
|
|
|
|
worker_threads.clear();
|
|
|
|
}
|
|
|
|
|
2020-08-24 05:15:48 +00:00
|
|
|
// Returns true while there is still work in the pending queue.
//
// NOTE(review): pending_queue is read here without taking queue_mutex. That is
// required by the cv.wait() predicate in ShaderCompilerThread, which already
// holds the lock when it calls this; taking the lock here would self-deadlock.
// Any caller outside that path should hold queue_mutex itself to avoid a data
// race — confirm external callers do so.
bool AsyncShaders::HasWorkQueued() const {
    return !pending_queue.empty();
}
|
|
|
|
|
2020-08-24 05:15:48 +00:00
|
|
|
// Returns true if any compiled results are waiting to be collected with
// GetCompletedWork(). Uses a shared (reader) lock so multiple threads may
// poll concurrently.
bool AsyncShaders::HasCompletedWork() const {
    std::shared_lock lock{completed_mutex};
    return !finished_work.empty();
}
|
|
|
|
|
|
|
|
/// Heuristic deciding whether the shader for the current draw state may be
/// compiled asynchronously instead of stalling the render thread.
bool AsyncShaders::IsShaderAsync(const Tegra::GPU& gpu) const {
    const auto& regs = gpu.Maxwell3D().regs;

    // Depth usage implies the game is rendering ordinary geometry rather than
    // something drawn only once, so async compilation is acceptable.
    if (regs.zeta_enable) {
        return true;
    }

    // Very small index/vertex counts are almost certainly full screen quads,
    // which are usually drawn a single time to build a texture — those shaders
    // must be available immediately and cannot be built asynchronously.
    const bool looks_like_fullscreen_quad =
        regs.index_array.count <= 6 || regs.vertex_buffer.count <= 6;
    return !looks_like_fullscreen_quad;
}
|
|
|
|
|
|
|
|
std::vector<AsyncShaders::Result> AsyncShaders::GetCompletedWork() {
|
2020-08-24 05:15:48 +00:00
|
|
|
std::vector<Result> results;
|
2020-07-10 03:36:38 +00:00
|
|
|
{
|
2020-07-18 04:24:32 +00:00
|
|
|
std::unique_lock lock{completed_mutex};
|
2020-07-10 03:36:38 +00:00
|
|
|
results.assign(std::make_move_iterator(finished_work.begin()),
|
|
|
|
std::make_move_iterator(finished_work.end()));
|
|
|
|
finished_work.clear();
|
|
|
|
}
|
|
|
|
return results;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Enqueues an OpenGL (or GLASM, when assembly shaders are enabled) shader
/// build request for the worker threads to pick up.
void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device,
                                     Tegra::Engines::ShaderType shader_type, u64 uid,
                                     std::vector<u64> code, std::vector<u64> code_b,
                                     u32 main_offset,
                                     VideoCommon::Shader::CompilerSettings compiler_settings,
                                     const VideoCommon::Shader::Registry& registry,
                                     VAddr cpu_addr) {
    // Package everything a worker needs to build this shader.
    WorkerParams work{
        .backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL,
        .device = &device,
        .shader_type = shader_type,
        .uid = uid,
        .code = std::move(code),
        .code_b = std::move(code_b),
        .main_offset = main_offset,
        .compiler_settings = compiler_settings,
        .registry = registry,
        .cpu_address = cpu_addr,
    };
    // Publish the request and wake a single idle worker.
    std::unique_lock lock(queue_mutex);
    pending_queue.push(std::move(work));
    cv.notify_one();
}
|
|
|
|
|
2020-08-02 17:05:41 +00:00
|
|
|
/// Enqueues a Vulkan graphics pipeline build request for the worker threads.
///
/// @param pp_cache  Pipeline cache that receives the finished pipeline via
///                  EmplacePipeline() once a worker has built it.
/// @param bindings  Descriptor set layout bindings (sink parameter).
/// @param program   SPIR-V program stages (sink parameter).
/// @param key       Cache key identifying the pipeline being built.
void AsyncShaders::QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache,
                                     const Vulkan::VKDevice& device, Vulkan::VKScheduler& scheduler,
                                     Vulkan::VKDescriptorPool& descriptor_pool,
                                     Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue,
                                     Vulkan::VKRenderPassCache& renderpass_cache,
                                     std::vector<VkDescriptorSetLayoutBinding> bindings,
                                     Vulkan::SPIRVProgram program,
                                     Vulkan::GraphicsPipelineCacheKey key) {
    WorkerParams params{
        .backend = Backend::Vulkan,
        .pp_cache = pp_cache,
        .vk_device = &device,
        .scheduler = &scheduler,
        .descriptor_pool = &descriptor_pool,
        .update_descriptor_queue = &update_descriptor_queue,
        .renderpass_cache = &renderpass_cache,
        // These are by-value sink parameters; move instead of copying (the
        // SPIRV program and binding list can be sizable).
        .bindings = std::move(bindings),
        .program = std::move(program),
        .key = key,
    };

    // Publish the request and wake a single idle worker.
    std::unique_lock lock(queue_mutex);
    pending_queue.push(std::move(params));
    cv.notify_one();
}
|
|
|
|
|
|
|
|
// Worker-thread entry point: repeatedly pulls a WorkerParams item off
// pending_queue and builds the requested shader/pipeline, until
// is_thread_exiting is set. Each worker owns a shared graphics context so
// OpenGL builds can run off the main thread.
void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context) {
    while (!is_thread_exiting.load(std::memory_order_relaxed)) {
        // Sleep until there is work or shutdown is requested. The predicate
        // runs with queue_mutex held, which is why HasWorkQueued() itself takes
        // no lock.
        std::unique_lock lock{queue_mutex};
        cv.wait(lock, [this] { return HasWorkQueued() || is_thread_exiting; });
        if (is_thread_exiting) {
            return;
        }

        // Partial lock to allow all threads to read at the same time
        if (!HasWorkQueued()) {
            continue;
        }
        // Another thread beat us, just unlock and wait for the next load
        if (pending_queue.empty()) {
            continue;
        }

        // Pull work from queue
        WorkerParams work = std::move(pending_queue.front());
        pending_queue.pop();
        // Release the queue lock before compiling so other workers can keep
        // pulling work while this (slow) build runs.
        lock.unlock();

        if (work.backend == Backend::OpenGL || work.backend == Backend::GLASM) {
            const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, *work.registry);
            // Bind this worker's shared context for the duration of the build.
            const auto scope = context->Acquire();
            auto program =
                OpenGL::BuildShader(*work.device, work.shader_type, work.uid, ir, *work.registry);
            Result result{};
            result.backend = work.backend;
            result.cpu_address = work.cpu_address;
            result.uid = work.uid;
            // Safe to move work.code here: the ShaderIR/BuildShader uses above
            // are already complete.
            result.code = std::move(work.code);
            result.code_b = std::move(work.code_b);
            result.shader_type = work.shader_type;

            // Only one of the union-like program members is populated,
            // depending on the backend that built it.
            if (work.backend == Backend::OpenGL) {
                result.program.opengl = std::move(program->source_program);
            } else if (work.backend == Backend::GLASM) {
                result.program.glasm = std::move(program->assembly_program);
            }

            // Hand the result over to the consumer side (GetCompletedWork).
            {
                std::unique_lock complete_lock(completed_mutex);
                finished_work.push_back(std::move(result));
            }
        } else if (work.backend == Backend::Vulkan) {
            auto pipeline = std::make_unique<Vulkan::VKGraphicsPipeline>(
                *work.vk_device, *work.scheduler, *work.descriptor_pool,
                *work.update_descriptor_queue, *work.renderpass_cache, work.key, work.bindings,
                work.program);

            // Vulkan results bypass finished_work: the pipeline cache is
            // notified directly.
            work.pp_cache->EmplacePipeline(std::move(pipeline));
        }
    }
}
|
|
|
|
|
|
|
|
} // namespace VideoCommon::Shader
|