remove old files

This commit is contained in:
mgthepro
2022-01-24 11:43:50 +01:00
parent 87def5b55b
commit ae416cfe9f
8874 changed files with 0 additions and 2090184 deletions

@@ -1,220 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#ifdef HAS_NSIGHT_AFTERMATH
#include <mutex>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <fmt/format.h>
#define VK_NO_PROTOTYPES
#include <vulkan/vulkan.h>
#include <GFSDK_Aftermath.h>
#include <GFSDK_Aftermath_Defines.h>
#include <GFSDK_Aftermath_GpuCrashDump.h>
#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h>
#include "common/common_paths.h"
#include "common/common_types.h"
#include "common/file_util.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h"
namespace Vulkan {
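// Nsight Aftermath is loaded dynamically at runtime; this is the Windows x64 library name.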
static constexpr char AFTERMATH_LIB_NAME[] = "GFSDK_Aftermath_Lib.x64.dll";
NsightAftermathTracker::NsightAftermathTracker() = default;
NsightAftermathTracker::~NsightAftermathTracker() {
if (initialized) {
(void)GFSDK_Aftermath_DisableGpuCrashDumps();
}
}
bool NsightAftermathTracker::Initialize() {
if (!dl.Open(AFTERMATH_LIB_NAME)) {
LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath DLL");
return false;
}
if (!dl.GetSymbol("GFSDK_Aftermath_DisableGpuCrashDumps",
&GFSDK_Aftermath_DisableGpuCrashDumps) ||
!dl.GetSymbol("GFSDK_Aftermath_EnableGpuCrashDumps",
&GFSDK_Aftermath_EnableGpuCrashDumps) ||
!dl.GetSymbol("GFSDK_Aftermath_GetShaderDebugInfoIdentifier",
&GFSDK_Aftermath_GetShaderDebugInfoIdentifier) ||
!dl.GetSymbol("GFSDK_Aftermath_GetShaderHashSpirv", &GFSDK_Aftermath_GetShaderHashSpirv) ||
!dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_CreateDecoder",
&GFSDK_Aftermath_GpuCrashDump_CreateDecoder) ||
!dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_DestroyDecoder",
&GFSDK_Aftermath_GpuCrashDump_DestroyDecoder) ||
!dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GenerateJSON",
&GFSDK_Aftermath_GpuCrashDump_GenerateJSON) ||
!dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GetJSON",
&GFSDK_Aftermath_GpuCrashDump_GetJSON)) {
LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath function pointers");
return false;
}
dump_dir = Common::FS::GetUserPath(Common::FS::UserPath::LogDir) + "gpucrash";
(void)Common::FS::DeleteDirRecursively(dump_dir);
if (!Common::FS::CreateDir(dump_dir)) {
LOG_ERROR(Render_Vulkan, "Failed to create Nsight Aftermath dump directory");
return false;
}
if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_EnableGpuCrashDumps(
GFSDK_Aftermath_Version_API, GFSDK_Aftermath_GpuCrashDumpWatchedApiFlags_Vulkan,
GFSDK_Aftermath_GpuCrashDumpFeatureFlags_Default, GpuCrashDumpCallback,
ShaderDebugInfoCallback, CrashDumpDescriptionCallback, this))) {
LOG_ERROR(Render_Vulkan, "GFSDK_Aftermath_EnableGpuCrashDumps failed");
return false;
}
LOG_INFO(Render_Vulkan, "Nsight Aftermath dump directory is \"{}\"", dump_dir);
initialized = true;
return true;
}
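// Dumps a SPIR-V module to the crash dump directory, keyed by its Aftermath shader hash, so
// GPU crash dumps can be matched back to shader sources.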
void NsightAftermathTracker::SaveShader(const std::vector<u32>& spirv) const {
if (!initialized) {
return;
}
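// Hash from a mutable copy: the Aftermath SpirvCode structure takes a non-const data pointer.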
std::vector<u32> spirv_copy = spirv;
GFSDK_Aftermath_SpirvCode shader;
shader.pData = spirv_copy.data();
shader.size = static_cast<u32>(spirv_copy.size() * 4);
std::scoped_lock lock{mutex};
GFSDK_Aftermath_ShaderHash hash;
if (!GFSDK_Aftermath_SUCCEED(
GFSDK_Aftermath_GetShaderHashSpirv(GFSDK_Aftermath_Version_API, &shader, &hash))) {
LOG_ERROR(Render_Vulkan, "Failed to hash SPIR-V module");
return;
}
Common::FS::IOFile file(fmt::format("{}/source_{:016x}.spv", dump_dir, hash.hash), "wb");
if (!file.IsOpen()) {
LOG_ERROR(Render_Vulkan, "Failed to dump SPIR-V module with hash={:016x}", hash.hash);
return;
}
if (file.WriteArray(spirv.data(), spirv.size()) != spirv.size()) {
LOG_ERROR(Render_Vulkan, "Failed to write SPIR-V module with hash={:016x}", hash.hash);
return;
}
}
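// Decodes the raw GPU crash dump, generates a JSON representation, and writes both files to
// the dump directory.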
void NsightAftermathTracker::OnGpuCrashDumpCallback(const void* gpu_crash_dump,
u32 gpu_crash_dump_size) {
std::scoped_lock lock{mutex};
LOG_CRITICAL(Render_Vulkan, "called");
GFSDK_Aftermath_GpuCrashDump_Decoder decoder;
if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_CreateDecoder(
GFSDK_Aftermath_Version_API, gpu_crash_dump, gpu_crash_dump_size, &decoder))) {
LOG_ERROR(Render_Vulkan, "Failed to create decoder");
return;
}
SCOPE_EXIT({ GFSDK_Aftermath_GpuCrashDump_DestroyDecoder(decoder); });
u32 json_size = 0;
if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_GenerateJSON(
decoder, GFSDK_Aftermath_GpuCrashDumpDecoderFlags_ALL_INFO,
GFSDK_Aftermath_GpuCrashDumpFormatterFlags_NONE, nullptr, nullptr, nullptr, nullptr,
this, &json_size))) {
LOG_ERROR(Render_Vulkan, "Failed to generate JSON");
return;
}
std::vector<char> json(json_size);
if (!GFSDK_Aftermath_SUCCEED(
GFSDK_Aftermath_GpuCrashDump_GetJSON(decoder, json_size, json.data()))) {
LOG_ERROR(Render_Vulkan, "Failed to query JSON");
return;
}
const std::string base_name = [this] {
const int id = dump_id++;
if (id == 0) {
return fmt::format("{}/crash.nv-gpudmp", dump_dir);
} else {
return fmt::format("{}/crash_{}.nv-gpudmp", dump_dir, id);
}
}();
std::string_view dump_view(static_cast<const char*>(gpu_crash_dump), gpu_crash_dump_size);
if (Common::FS::WriteStringToFile(false, base_name, dump_view) != gpu_crash_dump_size) {
LOG_ERROR(Render_Vulkan, "Failed to write dump file");
return;
}
const std::string_view json_view(json.data(), json.size());
if (Common::FS::WriteStringToFile(true, base_name + ".json", json_view) != json.size()) {
LOG_ERROR(Render_Vulkan, "Failed to write JSON");
return;
}
}
void NsightAftermathTracker::OnShaderDebugInfoCallback(const void* shader_debug_info,
u32 shader_debug_info_size) {
std::scoped_lock lock{mutex};
GFSDK_Aftermath_ShaderDebugInfoIdentifier identifier;
if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GetShaderDebugInfoIdentifier(
GFSDK_Aftermath_Version_API, shader_debug_info, shader_debug_info_size, &identifier))) {
LOG_ERROR(Render_Vulkan, "GFSDK_Aftermath_GetShaderDebugInfoIdentifier failed");
return;
}
const std::string path =
fmt::format("{}/shader_{:016x}{:016x}.nvdbg", dump_dir, identifier.id[0], identifier.id[1]);
Common::FS::IOFile file(path, "wb");
if (!file.IsOpen()) {
LOG_ERROR(Render_Vulkan, "Failed to create file {}", path);
return;
}
if (file.WriteBytes(static_cast<const u8*>(shader_debug_info), shader_debug_info_size) !=
shader_debug_info_size) {
LOG_ERROR(Render_Vulkan, "Failed to write file {}", path);
return;
}
}
void NsightAftermathTracker::OnCrashDumpDescriptionCallback(
PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description) {
add_description(GFSDK_Aftermath_GpuCrashDumpDescriptionKey_ApplicationName, "yuzu");
}
void NsightAftermathTracker::GpuCrashDumpCallback(const void* gpu_crash_dump,
u32 gpu_crash_dump_size, void* user_data) {
static_cast<NsightAftermathTracker*>(user_data)->OnGpuCrashDumpCallback(gpu_crash_dump,
gpu_crash_dump_size);
}
void NsightAftermathTracker::ShaderDebugInfoCallback(const void* shader_debug_info,
u32 shader_debug_info_size, void* user_data) {
static_cast<NsightAftermathTracker*>(user_data)->OnShaderDebugInfoCallback(
shader_debug_info, shader_debug_info_size);
}
void NsightAftermathTracker::CrashDumpDescriptionCallback(
PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data) {
static_cast<NsightAftermathTracker*>(user_data)->OnCrashDumpDescriptionCallback(
add_description);
}
} // namespace Vulkan
#endif // HAS_NSIGHT_AFTERMATH

@@ -1,87 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <mutex>
#include <string>
#include <vector>
#define VK_NO_PROTOTYPES
#include <vulkan/vulkan.h>
#ifdef HAS_NSIGHT_AFTERMATH
#include <GFSDK_Aftermath_Defines.h>
#include <GFSDK_Aftermath_GpuCrashDump.h>
#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h>
#endif
#include "common/common_types.h"
#include "common/dynamic_library.h"
namespace Vulkan {
class NsightAftermathTracker {
public:
NsightAftermathTracker();
~NsightAftermathTracker();
NsightAftermathTracker(const NsightAftermathTracker&) = delete;
NsightAftermathTracker& operator=(const NsightAftermathTracker&) = delete;
// Delete move semantics because Aftermath initialization uses a pointer to this.
NsightAftermathTracker(NsightAftermathTracker&&) = delete;
NsightAftermathTracker& operator=(NsightAftermathTracker&&) = delete;
bool Initialize();
void SaveShader(const std::vector<u32>& spirv) const;
private:
#ifdef HAS_NSIGHT_AFTERMATH
static void GpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size,
void* user_data);
static void ShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size,
void* user_data);
static void CrashDumpDescriptionCallback(
PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data);
void OnGpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size);
void OnShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size);
void OnCrashDumpDescriptionCallback(
PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description);
mutable std::mutex mutex;
std::string dump_dir;
int dump_id = 0;
bool initialized = false;
Common::DynamicLibrary dl;
PFN_GFSDK_Aftermath_DisableGpuCrashDumps GFSDK_Aftermath_DisableGpuCrashDumps;
PFN_GFSDK_Aftermath_EnableGpuCrashDumps GFSDK_Aftermath_EnableGpuCrashDumps;
PFN_GFSDK_Aftermath_GetShaderDebugInfoIdentifier GFSDK_Aftermath_GetShaderDebugInfoIdentifier;
PFN_GFSDK_Aftermath_GetShaderHashSpirv GFSDK_Aftermath_GetShaderHashSpirv;
PFN_GFSDK_Aftermath_GpuCrashDump_CreateDecoder GFSDK_Aftermath_GpuCrashDump_CreateDecoder;
PFN_GFSDK_Aftermath_GpuCrashDump_DestroyDecoder GFSDK_Aftermath_GpuCrashDump_DestroyDecoder;
PFN_GFSDK_Aftermath_GpuCrashDump_GenerateJSON GFSDK_Aftermath_GpuCrashDump_GenerateJSON;
PFN_GFSDK_Aftermath_GpuCrashDump_GetJSON GFSDK_Aftermath_GpuCrashDump_GetJSON;
#endif
};
#ifndef HAS_NSIGHT_AFTERMATH
inline NsightAftermathTracker::NsightAftermathTracker() = default;
inline NsightAftermathTracker::~NsightAftermathTracker() = default;
inline bool NsightAftermathTracker::Initialize() {
return false;
}
inline void NsightAftermathTracker::SaveShader(const std::vector<u32>&) const {}
#endif
} // namespace Vulkan

@@ -1,883 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <array>
#include <bitset>
#include <chrono>
#include <optional>
#include <string_view>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/settings.h"
#include "video_core/renderer_vulkan/vk_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
namespace {
namespace Alternatives {
constexpr std::array Depth24UnormS8_UINT{
VK_FORMAT_D32_SFLOAT_S8_UINT,
VK_FORMAT_D16_UNORM_S8_UINT,
VkFormat{},
};
constexpr std::array Depth16UnormS8_UINT{
VK_FORMAT_D24_UNORM_S8_UINT,
VK_FORMAT_D32_SFLOAT_S8_UINT,
VkFormat{},
};
} // namespace Alternatives
constexpr std::array REQUIRED_EXTENSIONS{
VK_KHR_SWAPCHAIN_EXTENSION_NAME,
VK_KHR_MAINTENANCE1_EXTENSION_NAME,
VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME,
VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
VK_KHR_16BIT_STORAGE_EXTENSION_NAME,
VK_KHR_8BIT_STORAGE_EXTENSION_NAME,
VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME,
VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME,
VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME,
};
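// Appends a structure to the pNext chain under construction and advances the chain's tail
// pointer.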
template <typename T>
void SetNext(void**& next, T& data) {
*next = &data;
next = &data.pNext;
}
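// Returns a VK_FORMAT_UNDEFINED-terminated array of alternatives for the given format, or
// nullptr when no fallback is defined.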
constexpr const VkFormat* GetFormatAlternatives(VkFormat format) {
switch (format) {
case VK_FORMAT_D24_UNORM_S8_UINT:
return Alternatives::Depth24UnormS8_UINT.data();
case VK_FORMAT_D16_UNORM_S8_UINT:
return Alternatives::Depth16UnormS8_UINT.data();
default:
return nullptr;
}
}
VkFormatFeatureFlags GetFormatFeatures(VkFormatProperties properties, FormatType format_type) {
switch (format_type) {
case FormatType::Linear:
return properties.linearTilingFeatures;
case FormatType::Optimal:
return properties.optimalTilingFeatures;
case FormatType::Buffer:
return properties.bufferFeatures;
default:
return {};
}
}
[[nodiscard]] bool IsRDNA(std::string_view device_name, VkDriverIdKHR driver_id) {
static constexpr std::array RDNA_DEVICES{
"5700",
"5600",
"5500",
"5300",
};
if (driver_id != VK_DRIVER_ID_AMD_PROPRIETARY_KHR) {
return false;
}
return std::any_of(RDNA_DEVICES.begin(), RDNA_DEVICES.end(), [device_name](const char* name) {
return device_name.find(name) != std::string_view::npos;
});
}
std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(
vk::PhysicalDevice physical, const vk::InstanceDispatch& dld) {
static constexpr std::array formats{
VK_FORMAT_A8B8G8R8_UNORM_PACK32,
VK_FORMAT_A8B8G8R8_UINT_PACK32,
VK_FORMAT_A8B8G8R8_SNORM_PACK32,
VK_FORMAT_A8B8G8R8_SINT_PACK32,
VK_FORMAT_A8B8G8R8_SRGB_PACK32,
VK_FORMAT_B5G6R5_UNORM_PACK16,
VK_FORMAT_A2B10G10R10_UNORM_PACK32,
VK_FORMAT_A2B10G10R10_UINT_PACK32,
VK_FORMAT_A1R5G5B5_UNORM_PACK16,
VK_FORMAT_R32G32B32A32_SFLOAT,
VK_FORMAT_R32G32B32A32_SINT,
VK_FORMAT_R32G32B32A32_UINT,
VK_FORMAT_R32G32_SFLOAT,
VK_FORMAT_R32G32_SINT,
VK_FORMAT_R32G32_UINT,
VK_FORMAT_R16G16B16A16_SINT,
VK_FORMAT_R16G16B16A16_UINT,
VK_FORMAT_R16G16B16A16_SNORM,
VK_FORMAT_R16G16B16A16_UNORM,
VK_FORMAT_R16G16_UNORM,
VK_FORMAT_R16G16_SNORM,
VK_FORMAT_R16G16_SFLOAT,
VK_FORMAT_R16G16_SINT,
VK_FORMAT_R16_UNORM,
VK_FORMAT_R16_UINT,
VK_FORMAT_R8G8B8A8_SRGB,
VK_FORMAT_R8G8_UNORM,
VK_FORMAT_R8G8_SNORM,
VK_FORMAT_R8G8_SINT,
VK_FORMAT_R8G8_UINT,
VK_FORMAT_R8_UNORM,
VK_FORMAT_R8_SNORM,
VK_FORMAT_R8_SINT,
VK_FORMAT_R8_UINT,
VK_FORMAT_B10G11R11_UFLOAT_PACK32,
VK_FORMAT_R32_SFLOAT,
VK_FORMAT_R32_UINT,
VK_FORMAT_R32_SINT,
VK_FORMAT_R16_SFLOAT,
VK_FORMAT_R16G16B16A16_SFLOAT,
VK_FORMAT_B8G8R8A8_UNORM,
VK_FORMAT_B8G8R8A8_SRGB,
VK_FORMAT_R4G4B4A4_UNORM_PACK16,
VK_FORMAT_D32_SFLOAT,
VK_FORMAT_D16_UNORM,
VK_FORMAT_D16_UNORM_S8_UINT,
VK_FORMAT_D24_UNORM_S8_UINT,
VK_FORMAT_D32_SFLOAT_S8_UINT,
VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
VK_FORMAT_BC2_UNORM_BLOCK,
VK_FORMAT_BC3_UNORM_BLOCK,
VK_FORMAT_BC4_UNORM_BLOCK,
VK_FORMAT_BC4_SNORM_BLOCK,
VK_FORMAT_BC5_UNORM_BLOCK,
VK_FORMAT_BC5_SNORM_BLOCK,
VK_FORMAT_BC7_UNORM_BLOCK,
VK_FORMAT_BC6H_UFLOAT_BLOCK,
VK_FORMAT_BC6H_SFLOAT_BLOCK,
VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
VK_FORMAT_BC2_SRGB_BLOCK,
VK_FORMAT_BC3_SRGB_BLOCK,
VK_FORMAT_BC7_SRGB_BLOCK,
VK_FORMAT_ASTC_4x4_UNORM_BLOCK,
VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
VK_FORMAT_ASTC_5x4_UNORM_BLOCK,
VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
VK_FORMAT_ASTC_8x5_UNORM_BLOCK,
VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
VK_FORMAT_ASTC_8x8_UNORM_BLOCK,
VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
VK_FORMAT_ASTC_10x5_UNORM_BLOCK,
VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
VK_FORMAT_ASTC_10x6_UNORM_BLOCK,
VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
VK_FORMAT_ASTC_12x10_UNORM_BLOCK,
VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
};
std::unordered_map<VkFormat, VkFormatProperties> format_properties;
for (const auto format : formats) {
format_properties.emplace(format, physical.GetFormatProperties(format));
}
return format_properties;
}
} // Anonymous namespace
Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR surface,
const vk::InstanceDispatch& dld_)
: instance{instance_}, dld{dld_}, physical{physical_}, properties{physical.GetProperties()},
format_properties{GetFormatProperties(physical, dld)} {
CheckSuitability();
SetupFamilies(surface);
SetupFeatures();
const auto queue_cis = GetDeviceQueueCreateInfos();
const std::vector extensions = LoadExtensions();
VkPhysicalDeviceFeatures2 features2{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
.pNext = nullptr,
};
const void* first_next = &features2;
void** next = &features2.pNext;
features2.features = {
.robustBufferAccess = false,
.fullDrawIndexUint32 = false,
.imageCubeArray = true,
.independentBlend = true,
.geometryShader = true,
.tessellationShader = true,
.sampleRateShading = false,
.dualSrcBlend = false,
.logicOp = false,
.multiDrawIndirect = false,
.drawIndirectFirstInstance = false,
.depthClamp = true,
.depthBiasClamp = true,
.fillModeNonSolid = false,
.depthBounds = false,
.wideLines = false,
.largePoints = true,
.alphaToOne = false,
.multiViewport = true,
.samplerAnisotropy = true,
.textureCompressionETC2 = false,
.textureCompressionASTC_LDR = is_optimal_astc_supported,
.textureCompressionBC = false,
.occlusionQueryPrecise = true,
.pipelineStatisticsQuery = false,
.vertexPipelineStoresAndAtomics = true,
.fragmentStoresAndAtomics = true,
.shaderTessellationAndGeometryPointSize = false,
.shaderImageGatherExtended = true,
.shaderStorageImageExtendedFormats = false,
.shaderStorageImageMultisample = true,
.shaderStorageImageReadWithoutFormat = is_formatless_image_load_supported,
.shaderStorageImageWriteWithoutFormat = true,
.shaderUniformBufferArrayDynamicIndexing = false,
.shaderSampledImageArrayDynamicIndexing = false,
.shaderStorageBufferArrayDynamicIndexing = false,
.shaderStorageImageArrayDynamicIndexing = false,
.shaderClipDistance = false,
.shaderCullDistance = false,
.shaderFloat64 = false,
.shaderInt64 = false,
.shaderInt16 = false,
.shaderResourceResidency = false,
.shaderResourceMinLod = false,
.sparseBinding = false,
.sparseResidencyBuffer = false,
.sparseResidencyImage2D = false,
.sparseResidencyImage3D = false,
.sparseResidency2Samples = false,
.sparseResidency4Samples = false,
.sparseResidency8Samples = false,
.sparseResidency16Samples = false,
.sparseResidencyAliased = false,
.variableMultisampleRate = false,
.inheritedQueries = false,
};
VkPhysicalDeviceTimelineSemaphoreFeaturesKHR timeline_semaphore{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR,
.pNext = nullptr,
.timelineSemaphore = true,
};
SetNext(next, timeline_semaphore);
VkPhysicalDevice16BitStorageFeaturesKHR bit16_storage{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR,
.pNext = nullptr,
.storageBuffer16BitAccess = false,
.uniformAndStorageBuffer16BitAccess = true,
.storagePushConstant16 = false,
.storageInputOutput16 = false,
};
SetNext(next, bit16_storage);
VkPhysicalDevice8BitStorageFeaturesKHR bit8_storage{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR,
.pNext = nullptr,
.storageBuffer8BitAccess = false,
.uniformAndStorageBuffer8BitAccess = true,
.storagePushConstant8 = false,
};
SetNext(next, bit8_storage);
VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT,
.hostQueryReset = true,
};
SetNext(next, host_query_reset);
VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
if (is_float16_supported) {
float16_int8 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR,
.pNext = nullptr,
.shaderFloat16 = true,
.shaderInt8 = false,
};
SetNext(next, float16_int8);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support float16 natively");
}
if (!nv_viewport_swizzle) {
LOG_INFO(Render_Vulkan, "Device doesn't support viewport swizzles");
}
VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout;
if (khr_uniform_buffer_standard_layout) {
std430_layout = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR,
.pNext = nullptr,
.uniformBufferStandardLayout = true,
};
SetNext(next, std430_layout);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support packed UBOs");
}
VkPhysicalDeviceIndexTypeUint8FeaturesEXT index_type_uint8;
if (ext_index_type_uint8) {
index_type_uint8 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT,
.pNext = nullptr,
.indexTypeUint8 = true,
};
SetNext(next, index_type_uint8);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes");
}
VkPhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback;
if (ext_transform_feedback) {
transform_feedback = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT,
.pNext = nullptr,
.transformFeedback = true,
.geometryStreams = true,
};
SetNext(next, transform_feedback);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support transform feedbacks");
}
VkPhysicalDeviceCustomBorderColorFeaturesEXT custom_border;
if (ext_custom_border_color) {
custom_border = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT,
.pNext = nullptr,
.customBorderColors = VK_TRUE,
.customBorderColorWithoutFormat = VK_TRUE,
};
SetNext(next, custom_border);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support custom border colors");
}
VkPhysicalDeviceExtendedDynamicStateFeaturesEXT dynamic_state;
if (ext_extended_dynamic_state) {
dynamic_state = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT,
.pNext = nullptr,
.extendedDynamicState = VK_TRUE,
};
SetNext(next, dynamic_state);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support extended dynamic state");
}
VkPhysicalDeviceRobustness2FeaturesEXT robustness2;
if (ext_robustness2) {
robustness2 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT,
.pNext = nullptr,
.robustBufferAccess2 = false,
.robustImageAccess2 = true,
.nullDescriptor = true,
};
SetNext(next, robustness2);
} else {
LOG_INFO(Render_Vulkan, "Device doesn't support robustness2");
}
if (!ext_depth_range_unrestricted) {
LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted");
}
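// Device diagnostics have to lead the creation chain, so the config struct takes over
// first_next and links back to features2.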
VkDeviceDiagnosticsConfigCreateInfoNV diagnostics_nv;
if (nv_device_diagnostics_config) {
nsight_aftermath_tracker.Initialize();
diagnostics_nv = {
.sType = VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV,
.pNext = &features2,
.flags = VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV |
VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV |
VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV,
};
first_next = &diagnostics_nv;
}
logical = vk::Device::Create(physical, queue_cis, extensions, first_next, dld);
CollectTelemetryParameters();
CollectToolingInfo();
if (ext_extended_dynamic_state && driver_id == VK_DRIVER_ID_MESA_RADV) {
LOG_WARNING(
Render_Vulkan,
"Blacklisting RADV for VK_EXT_extended_dynamic state, likely due to a bug in yuzu");
ext_extended_dynamic_state = false;
}
if (ext_extended_dynamic_state && IsRDNA(properties.deviceName, driver_id)) {
// AMD's proprietary driver supports VK_EXT_extended_dynamic_state but on RDNA devices it
// seems to cause stability issues
LOG_WARNING(
Render_Vulkan,
"Blacklisting AMD proprietary on RDNA devices from VK_EXT_extended_dynamic_state");
ext_extended_dynamic_state = false;
}
graphics_queue = logical.GetQueue(graphics_family);
present_queue = logical.GetQueue(present_family);
use_asynchronous_shaders = Settings::values.use_asynchronous_shaders.GetValue();
}
Device::~Device() = default;
VkFormat Device::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
FormatType format_type) const {
if (IsFormatSupported(wanted_format, wanted_usage, format_type)) {
return wanted_format;
}
// The wanted format is not supported by hardware, search for alternatives
const VkFormat* alternatives = GetFormatAlternatives(wanted_format);
if (alternatives == nullptr) {
UNREACHABLE_MSG("Format={} with usage={} and type={} has no defined alternatives and host "
"hardware does not support it",
wanted_format, wanted_usage, format_type);
return wanted_format;
}
std::size_t i = 0;
for (VkFormat alternative = *alternatives; alternative; alternative = alternatives[++i]) {
if (!IsFormatSupported(alternative, wanted_usage, format_type)) {
continue;
}
LOG_WARNING(Render_Vulkan,
"Emulating format={} with alternative format={} with usage={} and type={}",
wanted_format, alternative, wanted_usage, format_type);
return alternative;
}
// No alternatives found, panic
UNREACHABLE_MSG("Format={} with usage={} and type={} is not supported by the host hardware and "
"doesn't support any of the alternatives",
wanted_format, wanted_usage, format_type);
return wanted_format;
}
void Device::ReportLoss() const {
LOG_CRITICAL(Render_Vulkan, "Device loss occured!");
// Wait for the log to flush and for Nsight Aftermath to dump the results
std::this_thread::sleep_for(std::chrono::seconds{15});
}
void Device::SaveShader(const std::vector<u32>& spirv) const {
nsight_aftermath_tracker.SaveShader(spirv);
}
bool Device::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const {
// Disable for now to avoid converting ASTC twice.
static constexpr std::array astc_formats = {
VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
VK_FORMAT_ASTC_5x4_UNORM_BLOCK, VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
VK_FORMAT_ASTC_5x5_UNORM_BLOCK, VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
VK_FORMAT_ASTC_6x5_UNORM_BLOCK, VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
VK_FORMAT_ASTC_6x6_UNORM_BLOCK, VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
VK_FORMAT_ASTC_8x5_UNORM_BLOCK, VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
VK_FORMAT_ASTC_8x6_UNORM_BLOCK, VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
VK_FORMAT_ASTC_8x8_UNORM_BLOCK, VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
VK_FORMAT_ASTC_10x5_UNORM_BLOCK, VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
VK_FORMAT_ASTC_10x6_UNORM_BLOCK, VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
VK_FORMAT_ASTC_10x8_UNORM_BLOCK, VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
VK_FORMAT_ASTC_10x10_UNORM_BLOCK, VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
VK_FORMAT_ASTC_12x10_UNORM_BLOCK, VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
VK_FORMAT_ASTC_12x12_UNORM_BLOCK, VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
};
if (!features.textureCompressionASTC_LDR) {
return false;
}
const auto format_feature_usage{
VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT |
VK_FORMAT_FEATURE_BLIT_DST_BIT | VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
VK_FORMAT_FEATURE_TRANSFER_DST_BIT};
for (const auto format : astc_formats) {
const auto physical_format_properties{physical.GetFormatProperties(format)};
if ((physical_format_properties.optimalTilingFeatures & format_feature_usage) == 0) {
return false;
}
}
return true;
}
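// Both D32_SFLOAT_S8_UINT and D24_UNORM_S8_UINT must support optimal-tiling blits for
// depth-stencil blits to be reported as supported.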
bool Device::TestDepthStencilBlits() const {
static constexpr VkFormatFeatureFlags required_features =
VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
const auto test_features = [](VkFormatProperties props) {
return (props.optimalTilingFeatures & required_features) == required_features;
};
return test_features(format_properties.at(VK_FORMAT_D32_SFLOAT_S8_UINT)) &&
test_features(format_properties.at(VK_FORMAT_D24_UNORM_S8_UINT));
}
bool Device::IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
FormatType format_type) const {
const auto it = format_properties.find(wanted_format);
if (it == format_properties.end()) {
UNIMPLEMENTED_MSG("Unimplemented format query={}", wanted_format);
return true;
}
const auto supported_usage = GetFormatFeatures(it->second, format_type);
return (supported_usage & wanted_usage) == wanted_usage;
}
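// Verifies required extensions, limits, and features, throwing vk::Exception when the device
// is unsuitable.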
void Device::CheckSuitability() const {
std::bitset<REQUIRED_EXTENSIONS.size()> available_extensions;
for (const VkExtensionProperties& property : physical.EnumerateDeviceExtensionProperties()) {
for (std::size_t i = 0; i < REQUIRED_EXTENSIONS.size(); ++i) {
if (available_extensions[i]) {
continue;
}
const std::string_view name{property.extensionName};
available_extensions[i] = name == REQUIRED_EXTENSIONS[i];
}
}
for (size_t i = 0; i < REQUIRED_EXTENSIONS.size(); ++i) {
if (available_extensions[i]) {
continue;
}
LOG_ERROR(Render_Vulkan, "Missing required extension: {}", REQUIRED_EXTENSIONS[i]);
throw vk::Exception(VK_ERROR_EXTENSION_NOT_PRESENT);
}
struct LimitTuple {
u32 minimum;
u32 value;
const char* name;
};
const VkPhysicalDeviceLimits& limits{properties.limits};
const std::array limits_report{
LimitTuple{65536, limits.maxUniformBufferRange, "maxUniformBufferRange"},
LimitTuple{16, limits.maxViewports, "maxViewports"},
LimitTuple{8, limits.maxColorAttachments, "maxColorAttachments"},
LimitTuple{8, limits.maxClipDistances, "maxClipDistances"},
};
for (const auto& tuple : limits_report) {
if (tuple.value < tuple.minimum) {
LOG_ERROR(Render_Vulkan, "{} has to be {} or greater but it is {}", tuple.name,
tuple.minimum, tuple.value);
throw vk::Exception(VK_ERROR_FEATURE_NOT_PRESENT);
}
}
const VkPhysicalDeviceFeatures features{physical.GetFeatures()};
const std::array feature_report{
std::make_pair(features.vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics"),
std::make_pair(features.imageCubeArray, "imageCubeArray"),
std::make_pair(features.independentBlend, "independentBlend"),
std::make_pair(features.depthClamp, "depthClamp"),
std::make_pair(features.samplerAnisotropy, "samplerAnisotropy"),
std::make_pair(features.largePoints, "largePoints"),
std::make_pair(features.multiViewport, "multiViewport"),
std::make_pair(features.depthBiasClamp, "depthBiasClamp"),
std::make_pair(features.geometryShader, "geometryShader"),
std::make_pair(features.tessellationShader, "tessellationShader"),
std::make_pair(features.occlusionQueryPrecise, "occlusionQueryPrecise"),
std::make_pair(features.fragmentStoresAndAtomics, "fragmentStoresAndAtomics"),
std::make_pair(features.shaderImageGatherExtended, "shaderImageGatherExtended"),
std::make_pair(features.shaderStorageImageMultisample, "shaderStorageImageMultisample"),
std::make_pair(features.shaderStorageImageWriteWithoutFormat,
"shaderStorageImageWriteWithoutFormat"),
};
for (const auto& [is_supported, name] : feature_report) {
if (is_supported) {
continue;
}
LOG_ERROR(Render_Vulkan, "Missing required feature: {}", name);
throw vk::Exception(VK_ERROR_FEATURE_NOT_PRESENT);
}
}
std::vector<const char*> Device::LoadExtensions() {
std::vector<const char*> extensions;
extensions.reserve(7 + REQUIRED_EXTENSIONS.size());
extensions.insert(extensions.begin(), REQUIRED_EXTENSIONS.begin(), REQUIRED_EXTENSIONS.end());
bool has_khr_shader_float16_int8{};
bool has_ext_subgroup_size_control{};
bool has_ext_transform_feedback{};
bool has_ext_custom_border_color{};
bool has_ext_extended_dynamic_state{};
bool has_ext_robustness2{};
for (const VkExtensionProperties& extension : physical.EnumerateDeviceExtensionProperties()) {
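// Tests whether the enumerated extension matches "name"; records availability in "status"
// and, when "push" is set, adds the extension to the enabled list.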
const auto test = [&](std::optional<std::reference_wrapper<bool>> status, const char* name,
bool push) {
if (extension.extensionName != std::string_view(name)) {
return;
}
if (push) {
extensions.push_back(name);
}
if (status) {
status->get() = true;
}
};
test(nv_viewport_swizzle, VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME, true);
test(khr_uniform_buffer_standard_layout,
VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true);
test(has_khr_shader_float16_int8, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME, false);
test(ext_depth_range_unrestricted, VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME, true);
test(ext_index_type_uint8, VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME, true);
test(ext_sampler_filter_minmax, VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME, true);
test(ext_shader_viewport_index_layer, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME,
true);
test(ext_tooling_info, VK_EXT_TOOLING_INFO_EXTENSION_NAME, true);
test(ext_shader_stencil_export, VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME, true);
test(has_ext_transform_feedback, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME, false);
test(has_ext_custom_border_color, VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME, false);
test(has_ext_extended_dynamic_state, VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME, false);
test(has_ext_robustness2, VK_EXT_ROBUSTNESS_2_EXTENSION_NAME, false);
test(has_ext_subgroup_size_control, VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME, false);
if (Settings::values.renderer_debug) {
test(nv_device_diagnostics_config, VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME,
true);
}
}
VkPhysicalDeviceFeatures2KHR features;
features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;
VkPhysicalDeviceProperties2KHR physical_properties;
physical_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;
if (has_khr_shader_float16_int8) {
VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8_features;
float16_int8_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR;
float16_int8_features.pNext = nullptr;
features.pNext = &float16_int8_features;
physical.GetFeatures2KHR(features);
is_float16_supported = float16_int8_features.shaderFloat16;
extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME);
}
if (has_ext_subgroup_size_control) {
VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroup_features;
subgroup_features.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
subgroup_features.pNext = nullptr;
features.pNext = &subgroup_features;
physical.GetFeatures2KHR(features);
VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_properties;
subgroup_properties.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
subgroup_properties.pNext = nullptr;
physical_properties.pNext = &subgroup_properties;
physical.GetProperties2KHR(physical_properties);
is_warp_potentially_bigger = subgroup_properties.maxSubgroupSize > GuestWarpSize;
if (subgroup_features.subgroupSizeControl &&
subgroup_properties.minSubgroupSize <= GuestWarpSize &&
subgroup_properties.maxSubgroupSize >= GuestWarpSize) {
extensions.push_back(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME);
guest_warp_stages = subgroup_properties.requiredSubgroupSizeStages;
}
} else {
is_warp_potentially_bigger = true;
}
if (has_ext_transform_feedback) {
VkPhysicalDeviceTransformFeedbackFeaturesEXT tfb_features;
tfb_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT;
tfb_features.pNext = nullptr;
features.pNext = &tfb_features;
physical.GetFeatures2KHR(features);
VkPhysicalDeviceTransformFeedbackPropertiesEXT tfb_properties;
tfb_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT;
tfb_properties.pNext = nullptr;
physical_properties.pNext = &tfb_properties;
physical.GetProperties2KHR(physical_properties);
if (tfb_features.transformFeedback && tfb_features.geometryStreams &&
tfb_properties.maxTransformFeedbackStreams >= 4 &&
tfb_properties.maxTransformFeedbackBuffers && tfb_properties.transformFeedbackQueries &&
tfb_properties.transformFeedbackDraw) {
extensions.push_back(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME);
ext_transform_feedback = true;
}
}
if (has_ext_custom_border_color) {
VkPhysicalDeviceCustomBorderColorFeaturesEXT border_features;
border_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT;
border_features.pNext = nullptr;
features.pNext = &border_features;
physical.GetFeatures2KHR(features);
if (border_features.customBorderColors && border_features.customBorderColorWithoutFormat) {
extensions.push_back(VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME);
ext_custom_border_color = true;
}
}
if (has_ext_extended_dynamic_state) {
VkPhysicalDeviceExtendedDynamicStateFeaturesEXT dynamic_state;
dynamic_state.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT;
dynamic_state.pNext = nullptr;
features.pNext = &dynamic_state;
physical.GetFeatures2KHR(features);
if (dynamic_state.extendedDynamicState) {
extensions.push_back(VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME);
ext_extended_dynamic_state = true;
}
}
if (has_ext_robustness2) {
VkPhysicalDeviceRobustness2FeaturesEXT robustness2;
robustness2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT;
robustness2.pNext = nullptr;
features.pNext = &robustness2;
physical.GetFeatures2KHR(features);
if (robustness2.nullDescriptor && robustness2.robustImageAccess2) {
extensions.push_back(VK_EXT_ROBUSTNESS_2_EXTENSION_NAME);
ext_robustness2 = true;
}
}
return extensions;
}
void Device::SetupFamilies(VkSurfaceKHR surface) {
const std::vector queue_family_properties = physical.GetQueueFamilyProperties();
std::optional<u32> graphics;
std::optional<u32> present;
for (u32 index = 0; index < static_cast<u32>(queue_family_properties.size()); ++index) {
if (graphics && present) {
break;
}
const VkQueueFamilyProperties& queue_family = queue_family_properties[index];
if (queue_family.queueCount == 0) {
continue;
}
if (queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
graphics = index;
}
if (physical.GetSurfaceSupportKHR(index, surface)) {
present = index;
}
}
if (!graphics) {
LOG_ERROR(Render_Vulkan, "Device lacks a graphics queue");
throw vk::Exception(VK_ERROR_FEATURE_NOT_PRESENT);
}
if (!present) {
LOG_ERROR(Render_Vulkan, "Device lacks a present queue");
throw vk::Exception(VK_ERROR_FEATURE_NOT_PRESENT);
}
graphics_family = *graphics;
present_family = *present;
}
void Device::SetupFeatures() {
const auto supported_features{physical.GetFeatures()};
is_formatless_image_load_supported = supported_features.shaderStorageImageReadWithoutFormat;
is_blit_depth_stencil_supported = TestDepthStencilBlits();
is_optimal_astc_supported = IsOptimalAstcSupported(supported_features);
}
void Device::CollectTelemetryParameters() {
VkPhysicalDeviceDriverPropertiesKHR driver{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR,
.pNext = nullptr,
.driverID = {},
.driverName = {},
.driverInfo = {},
.conformanceVersion = {},
};
VkPhysicalDeviceProperties2KHR device_properties{
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR,
.pNext = &driver,
.properties = {},
};
physical.GetProperties2KHR(device_properties);
driver_id = driver.driverID;
vendor_name = driver.driverName;
const std::vector extensions = physical.EnumerateDeviceExtensionProperties();
reported_extensions.reserve(std::size(extensions));
for (const auto& extension : extensions) {
reported_extensions.emplace_back(extension.extensionName);
}
}
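// Uses VK_EXT_tooling_info to detect attached debugging tools such as RenderDoc or Nsight
// Graphics.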
void Device::CollectToolingInfo() {
if (!ext_tooling_info) {
return;
}
const auto vkGetPhysicalDeviceToolPropertiesEXT =
reinterpret_cast<PFN_vkGetPhysicalDeviceToolPropertiesEXT>(
dld.vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceToolPropertiesEXT"));
if (!vkGetPhysicalDeviceToolPropertiesEXT) {
return;
}
u32 tool_count = 0;
if (vkGetPhysicalDeviceToolPropertiesEXT(physical, &tool_count, nullptr) != VK_SUCCESS) {
return;
}
std::vector<VkPhysicalDeviceToolPropertiesEXT> tools(tool_count);
if (vkGetPhysicalDeviceToolPropertiesEXT(physical, &tool_count, tools.data()) != VK_SUCCESS) {
return;
}
for (const VkPhysicalDeviceToolPropertiesEXT& tool : tools) {
const std::string_view name = tool.name;
LOG_INFO(Render_Vulkan, "{}", name);
has_renderdoc = has_renderdoc || name == "RenderDoc";
has_nsight_graphics = has_nsight_graphics || name == "NVIDIA Nsight Graphics";
}
}
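// Builds one create info per unique queue family. QUEUE_PRIORITY is static so the pointer
// stored in pQueuePriorities remains valid after this function returns.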
std::vector<VkDeviceQueueCreateInfo> Device::GetDeviceQueueCreateInfos() const {
static constexpr float QUEUE_PRIORITY = 1.0f;
std::unordered_set<u32> unique_queue_families{graphics_family, present_family};
std::vector<VkDeviceQueueCreateInfo> queue_cis;
queue_cis.reserve(unique_queue_families.size());
for (const u32 queue_family : unique_queue_families) {
auto& ci = queue_cis.emplace_back(VkDeviceQueueCreateInfo{
.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.queueFamilyIndex = queue_family,
.queueCount = 1,
.pQueuePriorities = nullptr,
});
ci.pQueuePriorities = &QUEUE_PRIORITY;
}
return queue_cis;
}
} // namespace Vulkan

@@ -1,306 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <string_view>
#include <unordered_map>
#include <vector>
#include "common/common_types.h"
#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
/// Format usage descriptor.
enum class FormatType { Linear, Optimal, Buffer };
/// Subgroup size of the emulated guest hardware (the guest's Nvidia GPU uses 32 threads per subgroup).
constexpr u32 GuestWarpSize = 32;
/// Handles data specific to a physical device.
class Device final {
public:
explicit Device(VkInstance instance, vk::PhysicalDevice physical, VkSurfaceKHR surface,
const vk::InstanceDispatch& dld);
~Device();
/**
* Returns a format supported by the device for the passed requirements.
* @param wanted_format The ideal format to be returned. The returned format may differ from it.
* @param wanted_usage The usage that the returned format must fulfill.
* @param format_type Format type usage.
* @returns A format supported by the device.
*/
VkFormat GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
FormatType format_type) const;
/// Reports a device loss.
void ReportLoss() const;
/// Reports a shader to Nsight Aftermath.
void SaveShader(const std::vector<u32>& spirv) const;
/// Returns the dispatch loader with direct function pointers of the device.
const vk::DeviceDispatch& GetDispatchLoader() const {
return dld;
}
/// Returns the logical device.
const vk::Device& GetLogical() const {
return logical;
}
/// Returns the physical device.
vk::PhysicalDevice GetPhysical() const {
return physical;
}
/// Returns the main graphics queue.
vk::Queue GetGraphicsQueue() const {
return graphics_queue;
}
/// Returns the main present queue.
vk::Queue GetPresentQueue() const {
return present_queue;
}
/// Returns main graphics queue family index.
u32 GetGraphicsFamily() const {
return graphics_family;
}
/// Returns main present queue family index.
u32 GetPresentFamily() const {
return present_family;
}
/// Returns the current Vulkan API version provided in Vulkan-formatted version numbers.
u32 ApiVersion() const {
return properties.apiVersion;
}
/// Returns the current driver version provided in Vulkan-formatted version numbers.
u32 GetDriverVersion() const {
return properties.driverVersion;
}
/// Returns the device name.
std::string_view GetModelName() const {
return properties.deviceName;
}
/// Returns the driver ID.
VkDriverIdKHR GetDriverID() const {
return driver_id;
}
/// Returns the uniform buffer alignment requirement.
VkDeviceSize GetUniformBufferAlignment() const {
return properties.limits.minUniformBufferOffsetAlignment;
}
/// Returns the storage buffer alignment requirement.
VkDeviceSize GetStorageBufferAlignment() const {
return properties.limits.minStorageBufferOffsetAlignment;
}
/// Returns the maximum range for storage buffers.
VkDeviceSize GetMaxStorageBufferRange() const {
return properties.limits.maxStorageBufferRange;
}
/// Returns the maximum size for push constants.
VkDeviceSize GetMaxPushConstantsSize() const {
return properties.limits.maxPushConstantsSize;
}
/// Returns the maximum size for shared memory.
u32 GetMaxComputeSharedMemorySize() const {
return properties.limits.maxComputeSharedMemorySize;
}
/// Returns true if ASTC is natively supported.
bool IsOptimalAstcSupported() const {
return is_optimal_astc_supported;
}
/// Returns true if the device supports float16 natively
bool IsFloat16Supported() const {
return is_float16_supported;
}
/// Returns true if the device warp size can potentially be bigger than guest's warp size.
bool IsWarpSizePotentiallyBiggerThanGuest() const {
return is_warp_potentially_bigger;
}
/// Returns true if the device can be forced to use the guest warp size.
bool IsGuestWarpSizeSupported(VkShaderStageFlagBits stage) const {
return guest_warp_stages & stage;
}
/// Returns true if formatless image load is supported.
bool IsFormatlessImageLoadSupported() const {
return is_formatless_image_load_supported;
}
/// Returns true when blitting from and to depth stencil images is supported.
bool IsBlitDepthStencilSupported() const {
return is_blit_depth_stencil_supported;
}
/// Returns true if the device supports VK_NV_viewport_swizzle.
bool IsNvViewportSwizzleSupported() const {
return nv_viewport_swizzle;
}
/// Returns true if the device supports VK_KHR_uniform_buffer_standard_layout.
bool IsKhrUniformBufferStandardLayoutSupported() const {
return khr_uniform_buffer_standard_layout;
}
/// Returns true if the device supports VK_EXT_index_type_uint8.
bool IsExtIndexTypeUint8Supported() const {
return ext_index_type_uint8;
}
/// Returns true if the device supports VK_EXT_sampler_filter_minmax.
bool IsExtSamplerFilterMinmaxSupported() const {
return ext_sampler_filter_minmax;
}
/// Returns true if the device supports VK_EXT_depth_range_unrestricted.
bool IsExtDepthRangeUnrestrictedSupported() const {
return ext_depth_range_unrestricted;
}
/// Returns true if the device supports VK_EXT_shader_viewport_index_layer.
bool IsExtShaderViewportIndexLayerSupported() const {
return ext_shader_viewport_index_layer;
}
/// Returns true if the device supports VK_EXT_transform_feedback.
bool IsExtTransformFeedbackSupported() const {
return ext_transform_feedback;
}
/// Returns true if the device supports VK_EXT_custom_border_color.
bool IsExtCustomBorderColorSupported() const {
return ext_custom_border_color;
}
/// Returns true if the device supports VK_EXT_extended_dynamic_state.
bool IsExtExtendedDynamicStateSupported() const {
return ext_extended_dynamic_state;
}
/// Returns true if the device supports VK_EXT_shader_stencil_export.
bool IsExtShaderStencilExportSupported() const {
return ext_shader_stencil_export;
}
/// Returns true when a known debugging tool is attached.
bool HasDebuggingToolAttached() const {
return has_renderdoc || has_nsight_graphics;
}
/// Returns the vendor name reported from Vulkan.
std::string_view GetVendorName() const {
return vendor_name;
}
/// Returns the list of available extensions.
const std::vector<std::string>& GetAvailableExtensions() const {
return reported_extensions;
}
/// Returns true if the setting for async shader compilation is enabled.
bool UseAsynchronousShaders() const {
return use_asynchronous_shaders;
}
private:
/// Checks if the physical device is suitable.
void CheckSuitability() const;
/// Loads extensions into a vector and stores available ones in this object.
std::vector<const char*> LoadExtensions();
/// Sets up queue families.
void SetupFamilies(VkSurfaceKHR surface);
/// Sets up device features.
void SetupFeatures();
/// Collects telemetry information from the device.
void CollectTelemetryParameters();
/// Collects information about attached tools.
void CollectToolingInfo();
/// Returns a list of queue initialization descriptors.
std::vector<VkDeviceQueueCreateInfo> GetDeviceQueueCreateInfos() const;
/// Returns true if ASTC textures are natively supported.
bool IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const;
/// Returns true if the device natively supports blitting depth stencil images.
bool TestDepthStencilBlits() const;
/// Returns true if a format is supported.
bool IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
FormatType format_type) const;
VkInstance instance; ///< Vulkan instance.
vk::DeviceDispatch dld; ///< Device function pointers.
vk::PhysicalDevice physical; ///< Physical device.
VkPhysicalDeviceProperties properties; ///< Device properties.
vk::Device logical; ///< Logical device.
vk::Queue graphics_queue; ///< Main graphics queue.
vk::Queue present_queue; ///< Main present queue.
u32 instance_version{}; ///< Vulkan instance version.
u32 graphics_family{}; ///< Main graphics queue family index.
u32 present_family{}; ///< Main present queue family index.
VkDriverIdKHR driver_id{}; ///< Driver ID.
VkShaderStageFlags guest_warp_stages{}; ///< Stages where the guest warp size can be forced.
bool is_optimal_astc_supported{}; ///< Support for native ASTC.
bool is_float16_supported{}; ///< Support for float16 arithmetics.
bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest.
bool is_formatless_image_load_supported{}; ///< Support for shader image read without format.
bool is_blit_depth_stencil_supported{}; ///< Support for blitting from and to depth stencil.
bool nv_viewport_swizzle{}; ///< Support for VK_NV_viewport_swizzle.
bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs.
bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8.
bool ext_sampler_filter_minmax{}; ///< Support for VK_EXT_sampler_filter_minmax.
bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted.
bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer.
bool ext_tooling_info{}; ///< Support for VK_EXT_tooling_info.
bool ext_transform_feedback{}; ///< Support for VK_EXT_transform_feedback.
bool ext_custom_border_color{}; ///< Support for VK_EXT_custom_border_color.
bool ext_extended_dynamic_state{}; ///< Support for VK_EXT_extended_dynamic_state.
bool ext_robustness2{}; ///< Support for VK_EXT_robustness2.
bool ext_shader_stencil_export{}; ///< Support for VK_EXT_shader_stencil_export.
bool nv_device_diagnostics_config{}; ///< Support for VK_NV_device_diagnostics_config.
bool has_renderdoc{}; ///< Has RenderDoc attached.
bool has_nsight_graphics{}; ///< Has Nsight Graphics attached.
// Asynchronous Graphics Pipeline setting
bool use_asynchronous_shaders{}; ///< Setting to use asynchronous shaders/graphics pipeline
// Telemetry parameters
std::string vendor_name; ///< Device's driver name.
std::vector<std::string> reported_extensions; ///< Reported Vulkan extensions.
/// Format properties dictionary.
std::unordered_map<VkFormat, VkFormatProperties> format_properties;
/// Nsight Aftermath GPU crash tracker
NsightAftermathTracker nsight_aftermath_tracker;
};
} // namespace Vulkan

@@ -1,230 +0,0 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <optional>
#include <tuple>
#include <vector>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "video_core/renderer_vulkan/vk_memory_manager.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
namespace {
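// Returns the smallest predefined chunk size that fits the required size, or the required
// size aligned up to 256 MiB blocks when it exceeds all of them.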
u64 GetAllocationChunkSize(u64 required_size) {
static constexpr u64 sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20, 128ULL << 20};
auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size);
return it != std::end(sizes) ? *it : Common::AlignUp(required_size, 256ULL << 20);
}
} // Anonymous namespace
class VKMemoryAllocation final {
public:
explicit VKMemoryAllocation(const Device& device_, vk::DeviceMemory memory_,
VkMemoryPropertyFlags properties_, u64 allocation_size_, u32 type_)
: device{device_}, memory{std::move(memory_)}, properties{properties_},
allocation_size{allocation_size_}, shifted_type{ShiftType(type_)} {}
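// Sub-allocates a region from this allocation. Searches from the free-space hint forward
// first, then wraps around to the beginning; returns nullptr when no space is left.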
VKMemoryCommit Commit(VkDeviceSize commit_size, VkDeviceSize alignment) {
auto found = TryFindFreeSection(free_iterator, allocation_size,
static_cast<u64>(commit_size), static_cast<u64>(alignment));
if (!found) {
found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
static_cast<u64>(alignment));
if (!found) {
// Signal out of memory; the manager will allocate more memory and retry.
return nullptr;
}
}
auto commit = std::make_unique<VKMemoryCommitImpl>(device, this, memory, *found,
*found + commit_size);
commits.push_back(commit.get());
// The region right after the last commit is the most likely to be free.
free_iterator = *found + commit_size;
return commit;
}
void Free(const VKMemoryCommitImpl* commit) {
ASSERT(commit);
const auto it = std::find(std::begin(commits), std::end(commits), commit);
if (it == commits.end()) {
UNREACHABLE_MSG("Freeing unallocated commit!");
return;
}
commits.erase(it);
}
/// Returns whether this allocation is compatible with the arguments.
bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
return (wanted_properties & properties) && (type_mask & shifted_type) != 0;
}
private:
static constexpr u32 ShiftType(u32 type) {
return 1U << type;
}
/// Searches for a free region between "start" and "end" that satisfies the requested size and
/// alignment. Returns the region's offset on success, or std::nullopt when none is found.
std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
u64 iterator = Common::AlignUp(start, alignment);
while (iterator + size <= end) {
const u64 try_left = iterator;
const u64 try_right = try_left + size;
bool overlap = false;
for (const auto& commit : commits) {
const auto [commit_left, commit_right] = commit->interval;
if (try_left < commit_right && commit_left < try_right) {
// There's an overlap, continue the search where the overlapping commit ends.
iterator = Common::AlignUp(commit_right, alignment);
overlap = true;
break;
}
}
if (!overlap) {
// A free address has been found.
return try_left;
}
}
// No free regions were found, return an empty optional.
return std::nullopt;
}
const Device& device; ///< Vulkan device.
const vk::DeviceMemory memory; ///< Vulkan memory allocation handle.
const VkMemoryPropertyFlags properties; ///< Vulkan properties.
const u64 allocation_size; ///< Size of this allocation.
const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted.
/// Hints where the next free region is likely going to be.
u64 free_iterator{};
/// Stores all commits done from this allocation.
std::vector<const VKMemoryCommitImpl*> commits;
};
VKMemoryManager::VKMemoryManager(const Device& device_)
: device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {}
VKMemoryManager::~VKMemoryManager() = default;
VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
bool host_visible) {
const u64 chunk_size = GetAllocationChunkSize(requirements.size);
// When a host-visible commit is requested, search for host-visible and coherent memory;
// otherwise search for a fast device-local type.
const VkMemoryPropertyFlags wanted_properties =
host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
: VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
return commit;
}
// Commit has failed, allocate more memory.
if (!AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size)) {
// TODO(Rodrigo): Handle these situations in some way like flushing to guest memory.
// Allocation has failed, panic.
UNREACHABLE_MSG("Ran out of VRAM!");
return {};
}
// Commit again, this time it won't fail since there's a fresh allocation above. If it does,
// there's a bug.
auto commit = TryAllocCommit(requirements, wanted_properties);
ASSERT(commit);
return commit;
}
VKMemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), host_visible);
buffer.BindMemory(commit->GetMemory(), commit->GetOffset());
return commit;
}
VKMemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), host_visible);
image.BindMemory(commit->GetMemory(), commit->GetOffset());
return commit;
}
bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
u64 size) {
const u32 type = [&] {
for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
const auto flags = properties.memoryTypes[type_index].propertyFlags;
if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) {
// This memory type is allowed by the type mask and has the wanted properties.
return type_index;
}
}
UNREACHABLE_MSG("Couldn't find a compatible memory type!");
return 0U;
}();
// Try to allocate found type.
vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory({
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.pNext = nullptr,
.allocationSize = size,
.memoryTypeIndex = type,
});
if (!memory) {
LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
return false;
}
allocations.push_back(std::make_unique<VKMemoryAllocation>(device, std::move(memory),
wanted_properties, size, type));
return true;
}
VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requirements,
VkMemoryPropertyFlags wanted_properties) {
for (auto& allocation : allocations) {
if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
continue;
}
if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) {
return commit;
}
}
return {};
}
VKMemoryCommitImpl::VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
const vk::DeviceMemory& memory_, u64 begin_, u64 end_)
: device{device_}, memory{memory_}, interval{begin_, end_}, allocation{allocation_} {}
VKMemoryCommitImpl::~VKMemoryCommitImpl() {
allocation->Free(this);
}
MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
return MemoryMap(this, std::span<u8>(memory.Map(interval.first + offset_, size), size));
}
void VKMemoryCommitImpl::Unmap() const {
memory.Unmap();
}
MemoryMap VKMemoryCommitImpl::Map() const {
return Map(interval.second - interval.first);
}
} // namespace Vulkan

View File

@@ -1,132 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <span>
#include <utility>
#include <vector>
#include "common/common_types.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
class Device;
class MemoryMap;
class VKMemoryAllocation;
class VKMemoryCommitImpl;
using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>;
class VKMemoryManager final {
public:
explicit VKMemoryManager(const Device& device_);
VKMemoryManager(const VKMemoryManager&) = delete;
~VKMemoryManager();
/**
* Commits memory with the specified requirements.
* @param requirements Requirements returned from a Vulkan call.
* @param host_visible Signals the allocator that it *must* use host visible and coherent
* memory. When passing false, it will try to allocate device local memory.
* @returns A memory commit.
*/
VKMemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible);
/// Commits memory required by the buffer and binds it.
VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
/// Commits memory required by the image and binds it.
VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
private:
/// Allocates a chunk of memory.
bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
/// Tries to allocate a memory commit.
VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
VkMemoryPropertyFlags wanted_properties);
const Device& device; ///< Device handler.
const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
};
class VKMemoryCommitImpl final {
friend VKMemoryAllocation;
friend MemoryMap;
public:
explicit VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
const vk::DeviceMemory& memory_, u64 begin_, u64 end_);
~VKMemoryCommitImpl();
/// Maps a memory region and returns a pointer to it.
/// It's illegal to have more than one memory map at the same time.
MemoryMap Map(u64 size, u64 offset = 0) const;
/// Maps the whole commit and returns a pointer to it.
/// It's illegal to have more than one memory map at the same time.
MemoryMap Map() const;
/// Returns the Vulkan memory handler.
VkDeviceMemory GetMemory() const {
return *memory;
}
/// Returns the start position of the commit relative to the allocation.
VkDeviceSize GetOffset() const {
return static_cast<VkDeviceSize>(interval.first);
}
private:
/// Unmaps memory.
void Unmap() const;
const Device& device; ///< Vulkan device.
const vk::DeviceMemory& memory; ///< Vulkan device memory handler.
std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
};
/// Holds ownership of a memory map.
class MemoryMap final {
public:
explicit MemoryMap(const VKMemoryCommitImpl* commit_, std::span<u8> span_)
: commit{commit_}, span{span_} {}
~MemoryMap() {
if (commit) {
commit->Unmap();
}
}
/// Prematurely releases the memory map.
void Release() {
commit->Unmap();
commit = nullptr;
}
/// Returns a span to the memory map.
[[nodiscard]] std::span<u8> Span() const noexcept {
return span;
}
/// Returns the address of the memory map.
[[nodiscard]] u8* Address() const noexcept {
return span.data();
}
/// Returns the address of the memory map.
[[nodiscard]] operator u8*() const noexcept {
return span.data();
}
private:
const VKMemoryCommitImpl* commit{}; ///< Mapped memory commit.
std::span<u8> span; ///< Address to the mapped memory.
};
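// Hedged usage note, not part of the original header: Release() is for when the mapping
// must end before the MemoryMap goes out of scope, e.g.:
//
//     MemoryMap map = commit->Map(size);
//     std::memcpy(map.Address(), data, size);
//     map.Release(); // Unmap now; the destructor then does nothing.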
} // namespace Vulkan

File diff suppressed because it is too large

View File

@@ -1,99 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <bitset>
#include <optional>
#include <set>
#include <vector>
#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/shader_type.h"
#include "video_core/shader/registry.h"
#include "video_core/shader/shader_ir.h"
namespace Vulkan {
class Device;
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
using UniformTexelEntry = VideoCommon::Shader::SamplerEntry;
using SamplerEntry = VideoCommon::Shader::SamplerEntry;
using StorageTexelEntry = VideoCommon::Shader::ImageEntry;
using ImageEntry = VideoCommon::Shader::ImageEntry;
constexpr u32 DESCRIPTOR_SET = 0;
class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer {
public:
explicit constexpr ConstBufferEntry(const ConstBuffer& entry_, u32 index_)
: ConstBuffer{entry_}, index{index_} {}
constexpr u32 GetIndex() const {
return index;
}
private:
u32 index{};
};
struct GlobalBufferEntry {
u32 cbuf_index{};
u32 cbuf_offset{};
bool is_written{};
};
struct ShaderEntries {
u32 NumBindings() const {
return static_cast<u32>(const_buffers.size() + global_buffers.size() +
uniform_texels.size() + samplers.size() + storage_texels.size() +
images.size());
}
std::vector<ConstBufferEntry> const_buffers;
std::vector<GlobalBufferEntry> global_buffers;
std::vector<UniformTexelEntry> uniform_texels;
std::vector<SamplerEntry> samplers;
std::vector<StorageTexelEntry> storage_texels;
std::vector<ImageEntry> images;
std::set<u32> attributes;
std::array<bool, Maxwell::NumClipDistances> clip_distances{};
std::size_t shader_length{};
u32 enabled_uniform_buffers{};
bool uses_warps{};
};
struct Specialization final {
u32 base_binding{};
// Compute specific
std::array<u32, 3> workgroup_size{};
u32 shared_memory_size{};
// Graphics specific
std::optional<float> point_size;
std::bitset<Maxwell::NumVertexAttributes> enabled_attributes;
std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{};
bool ndc_minus_one_to_one{};
bool early_fragment_tests{};
float alpha_test_ref{};
Maxwell::ComparisonOp alpha_test_func{};
};
// Old gcc versions don't consider this trivially copyable.
// static_assert(std::is_trivially_copyable_v<Specialization>);
struct SPIRVShader {
std::vector<u32> code;
ShaderEntries entries;
};
ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir);
std::vector<u32> Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
Tegra::Engines::ShaderType stage,
const VideoCommon::Shader::Registry& registry,
const Specialization& specialization);
} // namespace Vulkan

View File

@@ -1,168 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/literals.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_stream_buffer.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
namespace {
using namespace Common::Literals;
constexpr VkBufferUsageFlags BUFFER_USAGE =
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000;
constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;
constexpr u64 PREFERRED_STREAM_BUFFER_SIZE = 256_MiB;
/// Find a memory type with the passed requirements
std::optional<u32> FindMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
VkMemoryPropertyFlags wanted,
u32 filter = std::numeric_limits<u32>::max()) {
for (u32 i = 0; i < properties.memoryTypeCount; ++i) {
const auto flags = properties.memoryTypes[i].propertyFlags;
if ((flags & wanted) == wanted && (filter & (1U << i)) != 0) {
return i;
}
}
return std::nullopt;
}
/// Get the preferred host visible memory type.
u32 GetMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
u32 filter = std::numeric_limits<u32>::max()) {
// Prefer device local host visible allocations. Both AMD and Nvidia now provide one.
// Otherwise search for a host visible allocation.
static constexpr auto HOST_MEMORY =
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
static constexpr auto DYNAMIC_MEMORY = HOST_MEMORY | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
std::optional preferred_type = FindMemoryType(properties, DYNAMIC_MEMORY);
if (!preferred_type) {
preferred_type = FindMemoryType(properties, HOST_MEMORY);
ASSERT_MSG(preferred_type, "No host visible and coherent memory type found");
}
return preferred_type.value_or(0);
}
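// Hedged illustration, not part of the original file: on GPUs that expose a
// DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT type (common on recent AMD and Nvidia
// drivers), DYNAMIC_MEMORY matches and that type index is returned; otherwise the search
// falls back to plain host visible and coherent memory.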
} // Anonymous namespace
VKStreamBuffer::VKStreamBuffer(const Device& device_, VKScheduler& scheduler_)
: device{device_}, scheduler{scheduler_} {
CreateBuffers();
ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE);
ReserveWatches(previous_watches, WATCHES_INITIAL_RESERVE);
}
VKStreamBuffer::~VKStreamBuffer() = default;
std::pair<u8*, u64> VKStreamBuffer::Map(u64 size, u64 alignment) {
ASSERT(size <= stream_buffer_size);
mapped_size = size;
if (alignment > 0) {
offset = Common::AlignUp(offset, alignment);
}
WaitPendingOperations(offset);
if (offset + size > stream_buffer_size) {
// The buffer would overflow, save the amount of used watches and reset the state.
invalidation_mark = current_watch_cursor;
current_watch_cursor = 0;
offset = 0;
// Swap watches and reset waiting cursors.
std::swap(previous_watches, current_watches);
wait_cursor = 0;
wait_bound = 0;
// Ensure that we don't wait for uncommitted fences.
scheduler.Flush();
}
return std::make_pair(memory.Map(offset, size), offset);
}
void VKStreamBuffer::Unmap(u64 size) {
ASSERT_MSG(size <= mapped_size, "Reserved size is too small");
memory.Unmap();
offset += size;
if (current_watch_cursor + 1 >= current_watches.size()) {
// Ensure that there are enough watches.
ReserveWatches(current_watches, WATCHES_RESERVE_CHUNK);
}
auto& watch = current_watches[current_watch_cursor++];
watch.upper_bound = offset;
watch.tick = scheduler.CurrentTick();
}
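// Hedged usage sketch, not part of the original file: a typical Map/Unmap round trip.
// `data` and `size` are hypothetical and <cstring> is assumed to be available.
[[maybe_unused]] static void ExampleStreamUpload(VKStreamBuffer& stream_buffer, const u8* data,
                                                 u64 size) {
    // Reserve a 4-byte aligned region; `offset` is relative to stream_buffer.Handle().
    const auto [pointer, offset] = stream_buffer.Map(size, 4);
    std::memcpy(pointer, data, size);
    // Publish the written bytes; a watch is recorded so the region is not reused until the
    // GPU is done with it.
    stream_buffer.Unmap(size);
    // `offset` would then be passed to a Vulkan command, e.g. vkCmdBindVertexBuffers.
    (void)offset;
}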
void VKStreamBuffer::CreateBuffers() {
const auto memory_properties = device.GetPhysical().GetMemoryProperties();
const u32 preferred_type = GetMemoryType(memory_properties);
const u32 preferred_heap = memory_properties.memoryTypes[preferred_type].heapIndex;
const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
// Leave part of the preferred heap unused to avoid running out of memory. As in DXVK,
// only up to half of the heap is used for the stream buffer.
const VkDeviceSize allocable_size = heap_size / 2;
buffer = device.GetLogical().CreateBuffer({
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable_size),
.usage = BUFFER_USAGE,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer);
const u32 required_flags = requirements.memoryTypeBits;
stream_buffer_size = static_cast<u64>(requirements.size);
memory = device.GetLogical().AllocateMemory({
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.pNext = nullptr,
.allocationSize = requirements.size,
.memoryTypeIndex = GetMemoryType(memory_properties, required_flags),
});
buffer.BindMemory(*memory, 0);
}
void VKStreamBuffer::ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size) {
watches.resize(watches.size() + grow_size);
}
void VKStreamBuffer::WaitPendingOperations(u64 requested_upper_bound) {
if (!invalidation_mark) {
return;
}
while (requested_upper_bound < wait_bound && wait_cursor < *invalidation_mark) {
auto& watch = previous_watches[wait_cursor];
wait_bound = watch.upper_bound;
scheduler.Wait(watch.tick);
++wait_cursor;
}
}
} // namespace Vulkan

View File

@@ -1,76 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <optional>
#include <utility>
#include <vector>
#include "common/common_types.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
namespace Vulkan {
class Device;
class VKFenceWatch;
class VKScheduler;
class VKStreamBuffer final {
public:
explicit VKStreamBuffer(const Device& device, VKScheduler& scheduler);
~VKStreamBuffer();
/**
* Reserves a region of memory from the stream buffer.
* @param size Size to reserve.
* @param alignment Alignment of the returned offset.
* @returns A pair of a raw memory pointer (with the offset already applied) and the buffer offset.
*/
std::pair<u8*, u64> Map(u64 size, u64 alignment);
/// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy.
void Unmap(u64 size);
VkBuffer Handle() const noexcept {
return *buffer;
}
u64 Address() const noexcept {
return 0;
}
private:
struct Watch {
u64 tick{};
u64 upper_bound{};
};
/// Creates the Vulkan buffer handle, committing the required memory.
void CreateBuffers();
/// Increases the number of available watches.
void ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size);
void WaitPendingOperations(u64 requested_upper_bound);
const Device& device; ///< Vulkan device manager.
VKScheduler& scheduler; ///< Command scheduler.
vk::Buffer buffer; ///< Mapped buffer.
vk::DeviceMemory memory; ///< Memory allocation.
u64 stream_buffer_size{}; ///< Stream buffer size.
u64 offset{}; ///< Buffer iterator.
u64 mapped_size{}; ///< Size reserved for the current copy.
std::vector<Watch> current_watches; ///< Watches recorded in the current iteration.
std::size_t current_watch_cursor{}; ///< Count of watches, reset on invalidation.
std::optional<std::size_t> invalidation_mark; ///< Number of watches used in the previous cycle.
std::vector<Watch> previous_watches; ///< Watches used in the previous iteration.
std::size_t wait_cursor{}; ///< Last watch being waited for completion.
u64 wait_bound{}; ///< Highest offset being watched for completion.
};
} // namespace Vulkan

View File

@@ -1,928 +0,0 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <exception>
#include <memory>
#include <optional>
#include <string_view>
#include <utility>
#include <vector>
#include "common/common_types.h"
#include "common/logging/log.h"
#include "video_core/renderer_vulkan/wrapper.h"
namespace Vulkan::vk {
namespace {
template <typename Func>
void SortPhysicalDevices(std::vector<VkPhysicalDevice>& devices, const InstanceDispatch& dld,
Func&& func) {
// Calling GetProperties invokes Vulkan more often than strictly necessary, but these
// calls are expected to be cheap.
std::stable_sort(devices.begin(), devices.end(),
[&dld, &func](VkPhysicalDevice lhs, VkPhysicalDevice rhs) {
return func(vk::PhysicalDevice(lhs, dld).GetProperties(),
vk::PhysicalDevice(rhs, dld).GetProperties());
});
}
void SortPhysicalDevicesPerVendor(std::vector<VkPhysicalDevice>& devices,
const InstanceDispatch& dld,
std::initializer_list<u32> vendor_ids) {
for (auto it = vendor_ids.end(); it != vendor_ids.begin();) {
--it;
SortPhysicalDevices(devices, dld, [id = *it](const auto& lhs, const auto& rhs) {
return lhs.vendorID == id && rhs.vendorID != id;
});
}
}
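// Hedged illustration, not part of the original file: iterating the vendor ids in reverse
// with a stable sort means the first id in the list is sorted last and therefore ends up
// with the highest priority. For {0x10DE, 0x1002, 0x8086} the final order is Nvidia,
// then AMD, then Intel, then everything else.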
void SortPhysicalDevices(std::vector<VkPhysicalDevice>& devices, const InstanceDispatch& dld) {
// Sort by name; this sets a base order and makes GPUs with higher model numbers appear
// first (e.g. a GTX 1650 is intentionally listed before a GTX 1080).
SortPhysicalDevices(devices, dld, [](const auto& lhs, const auto& rhs) {
return std::string_view{lhs.deviceName} > std::string_view{rhs.deviceName};
});
// Prefer discrete over non-discrete
SortPhysicalDevices(devices, dld, [](const auto& lhs, const auto& rhs) {
return lhs.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU &&
rhs.deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
});
// Prefer Nvidia over AMD, AMD over Intel, Intel over the rest.
SortPhysicalDevicesPerVendor(devices, dld, {0x10DE, 0x1002, 0x8086});
}
template <typename T>
bool Proc(T& result, const InstanceDispatch& dld, const char* proc_name,
VkInstance instance = nullptr) noexcept {
result = reinterpret_cast<T>(dld.vkGetInstanceProcAddr(instance, proc_name));
return result != nullptr;
}
template <typename T>
void Proc(T& result, const DeviceDispatch& dld, const char* proc_name, VkDevice device) noexcept {
result = reinterpret_cast<T>(dld.vkGetDeviceProcAddr(device, proc_name));
}
void Load(VkDevice device, DeviceDispatch& dld) noexcept {
#define X(name) Proc(dld.name, dld, #name, device)
X(vkAcquireNextImageKHR);
X(vkAllocateCommandBuffers);
X(vkAllocateDescriptorSets);
X(vkAllocateMemory);
X(vkBeginCommandBuffer);
X(vkBindBufferMemory);
X(vkBindImageMemory);
X(vkCmdBeginQuery);
X(vkCmdBeginRenderPass);
X(vkCmdBeginTransformFeedbackEXT);
X(vkCmdBeginDebugUtilsLabelEXT);
X(vkCmdBindDescriptorSets);
X(vkCmdBindIndexBuffer);
X(vkCmdBindPipeline);
X(vkCmdBindTransformFeedbackBuffersEXT);
X(vkCmdBindVertexBuffers);
X(vkCmdBlitImage);
X(vkCmdClearAttachments);
X(vkCmdCopyBuffer);
X(vkCmdCopyBufferToImage);
X(vkCmdCopyImage);
X(vkCmdCopyImageToBuffer);
X(vkCmdDispatch);
X(vkCmdDraw);
X(vkCmdDrawIndexed);
X(vkCmdEndQuery);
X(vkCmdEndRenderPass);
X(vkCmdEndTransformFeedbackEXT);
X(vkCmdEndDebugUtilsLabelEXT);
X(vkCmdFillBuffer);
X(vkCmdPipelineBarrier);
X(vkCmdPushConstants);
X(vkCmdSetBlendConstants);
X(vkCmdSetDepthBias);
X(vkCmdSetDepthBounds);
X(vkCmdSetEvent);
X(vkCmdSetScissor);
X(vkCmdSetStencilCompareMask);
X(vkCmdSetStencilReference);
X(vkCmdSetStencilWriteMask);
X(vkCmdSetViewport);
X(vkCmdWaitEvents);
X(vkCmdBindVertexBuffers2EXT);
X(vkCmdSetCullModeEXT);
X(vkCmdSetDepthBoundsTestEnableEXT);
X(vkCmdSetDepthCompareOpEXT);
X(vkCmdSetDepthTestEnableEXT);
X(vkCmdSetDepthWriteEnableEXT);
X(vkCmdSetFrontFaceEXT);
X(vkCmdSetPrimitiveTopologyEXT);
X(vkCmdSetStencilOpEXT);
X(vkCmdSetStencilTestEnableEXT);
X(vkCmdResolveImage);
X(vkCreateBuffer);
X(vkCreateBufferView);
X(vkCreateCommandPool);
X(vkCreateComputePipelines);
X(vkCreateDescriptorPool);
X(vkCreateDescriptorSetLayout);
X(vkCreateDescriptorUpdateTemplateKHR);
X(vkCreateEvent);
X(vkCreateFence);
X(vkCreateFramebuffer);
X(vkCreateGraphicsPipelines);
X(vkCreateImage);
X(vkCreateImageView);
X(vkCreatePipelineLayout);
X(vkCreateQueryPool);
X(vkCreateRenderPass);
X(vkCreateSampler);
X(vkCreateSemaphore);
X(vkCreateShaderModule);
X(vkCreateSwapchainKHR);
X(vkDestroyBuffer);
X(vkDestroyBufferView);
X(vkDestroyCommandPool);
X(vkDestroyDescriptorPool);
X(vkDestroyDescriptorSetLayout);
X(vkDestroyDescriptorUpdateTemplateKHR);
X(vkDestroyEvent);
X(vkDestroyFence);
X(vkDestroyFramebuffer);
X(vkDestroyImage);
X(vkDestroyImageView);
X(vkDestroyPipeline);
X(vkDestroyPipelineLayout);
X(vkDestroyQueryPool);
X(vkDestroyRenderPass);
X(vkDestroySampler);
X(vkDestroySemaphore);
X(vkDestroyShaderModule);
X(vkDestroySwapchainKHR);
X(vkDeviceWaitIdle);
X(vkEndCommandBuffer);
X(vkFreeCommandBuffers);
X(vkFreeDescriptorSets);
X(vkFreeMemory);
X(vkGetBufferMemoryRequirements);
X(vkGetDeviceQueue);
X(vkGetEventStatus);
X(vkGetFenceStatus);
X(vkGetImageMemoryRequirements);
X(vkGetQueryPoolResults);
X(vkGetSemaphoreCounterValueKHR);
X(vkMapMemory);
X(vkQueueSubmit);
X(vkResetFences);
X(vkResetQueryPoolEXT);
X(vkSetDebugUtilsObjectNameEXT);
X(vkSetDebugUtilsObjectTagEXT);
X(vkUnmapMemory);
X(vkUpdateDescriptorSetWithTemplateKHR);
X(vkUpdateDescriptorSets);
X(vkWaitForFences);
X(vkWaitSemaphoresKHR);
#undef X
}
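// Hedged note, not part of the original file: unlike the instance loader below, missing
// device-level entry points are left as null pointers here; callers are expected to check
// the corresponding extension or feature before invoking them.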
template <typename T>
void SetObjectName(const DeviceDispatch* dld, VkDevice device, T handle, VkObjectType type,
const char* name) {
const VkDebugUtilsObjectNameInfoEXT name_info{
.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT,
.pNext = nullptr,
.objectType = type,
.objectHandle = reinterpret_cast<u64>(handle),
.pObjectName = name,
};
Check(dld->vkSetDebugUtilsObjectNameEXT(device, &name_info));
}
} // Anonymous namespace
bool Load(InstanceDispatch& dld) noexcept {
#define X(name) Proc(dld.name, dld, #name)
return X(vkCreateInstance) && X(vkEnumerateInstanceExtensionProperties) &&
X(vkEnumerateInstanceLayerProperties);
#undef X
}
bool Load(VkInstance instance, InstanceDispatch& dld) noexcept {
#define X(name) Proc(dld.name, dld, #name, instance)
// These functions may fail to load depending on the enabled extensions.
// Don't return a failure on these.
X(vkCreateDebugUtilsMessengerEXT);
X(vkDestroyDebugUtilsMessengerEXT);
X(vkDestroySurfaceKHR);
X(vkGetPhysicalDeviceFeatures2KHR);
X(vkGetPhysicalDeviceProperties2KHR);
X(vkGetPhysicalDeviceSurfaceCapabilitiesKHR);
X(vkGetPhysicalDeviceSurfaceFormatsKHR);
X(vkGetPhysicalDeviceSurfacePresentModesKHR);
X(vkGetPhysicalDeviceSurfaceSupportKHR);
X(vkGetSwapchainImagesKHR);
X(vkQueuePresentKHR);
return X(vkCreateDevice) && X(vkDestroyDevice) &&
X(vkEnumerateDeviceExtensionProperties) && X(vkEnumeratePhysicalDevices) &&
X(vkGetDeviceProcAddr) && X(vkGetPhysicalDeviceFormatProperties) &&
X(vkGetPhysicalDeviceMemoryProperties) && X(vkGetPhysicalDeviceProperties) &&
X(vkGetPhysicalDeviceQueueFamilyProperties);
#undef X
}
const char* Exception::what() const noexcept {
return ToString(result);
}
const char* ToString(VkResult result) noexcept {
switch (result) {
case VkResult::VK_SUCCESS:
return "VK_SUCCESS";
case VkResult::VK_NOT_READY:
return "VK_NOT_READY";
case VkResult::VK_TIMEOUT:
return "VK_TIMEOUT";
case VkResult::VK_EVENT_SET:
return "VK_EVENT_SET";
case VkResult::VK_EVENT_RESET:
return "VK_EVENT_RESET";
case VkResult::VK_INCOMPLETE:
return "VK_INCOMPLETE";
case VkResult::VK_ERROR_OUT_OF_HOST_MEMORY:
return "VK_ERROR_OUT_OF_HOST_MEMORY";
case VkResult::VK_ERROR_OUT_OF_DEVICE_MEMORY:
return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
case VkResult::VK_ERROR_INITIALIZATION_FAILED:
return "VK_ERROR_INITIALIZATION_FAILED";
case VkResult::VK_ERROR_DEVICE_LOST:
return "VK_ERROR_DEVICE_LOST";
case VkResult::VK_ERROR_MEMORY_MAP_FAILED:
return "VK_ERROR_MEMORY_MAP_FAILED";
case VkResult::VK_ERROR_LAYER_NOT_PRESENT:
return "VK_ERROR_LAYER_NOT_PRESENT";
case VkResult::VK_ERROR_EXTENSION_NOT_PRESENT:
return "VK_ERROR_EXTENSION_NOT_PRESENT";
case VkResult::VK_ERROR_FEATURE_NOT_PRESENT:
return "VK_ERROR_FEATURE_NOT_PRESENT";
case VkResult::VK_ERROR_INCOMPATIBLE_DRIVER:
return "VK_ERROR_INCOMPATIBLE_DRIVER";
case VkResult::VK_ERROR_TOO_MANY_OBJECTS:
return "VK_ERROR_TOO_MANY_OBJECTS";
case VkResult::VK_ERROR_FORMAT_NOT_SUPPORTED:
return "VK_ERROR_FORMAT_NOT_SUPPORTED";
case VkResult::VK_ERROR_FRAGMENTED_POOL:
return "VK_ERROR_FRAGMENTED_POOL";
case VkResult::VK_ERROR_OUT_OF_POOL_MEMORY:
return "VK_ERROR_OUT_OF_POOL_MEMORY";
case VkResult::VK_ERROR_INVALID_EXTERNAL_HANDLE:
return "VK_ERROR_INVALID_EXTERNAL_HANDLE";
case VkResult::VK_ERROR_SURFACE_LOST_KHR:
return "VK_ERROR_SURFACE_LOST_KHR";
case VkResult::VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
case VkResult::VK_SUBOPTIMAL_KHR:
return "VK_SUBOPTIMAL_KHR";
case VkResult::VK_ERROR_OUT_OF_DATE_KHR:
return "VK_ERROR_OUT_OF_DATE_KHR";
case VkResult::VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
case VkResult::VK_ERROR_VALIDATION_FAILED_EXT:
return "VK_ERROR_VALIDATION_FAILED_EXT";
case VkResult::VK_ERROR_INVALID_SHADER_NV:
return "VK_ERROR_INVALID_SHADER_NV";
case VkResult::VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT:
return "VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT";
case VkResult::VK_ERROR_FRAGMENTATION_EXT:
return "VK_ERROR_FRAGMENTATION_EXT";
case VkResult::VK_ERROR_NOT_PERMITTED_EXT:
return "VK_ERROR_NOT_PERMITTED_EXT";
case VkResult::VK_ERROR_INVALID_DEVICE_ADDRESS_EXT:
return "VK_ERROR_INVALID_DEVICE_ADDRESS_EXT";
case VkResult::VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT:
return "VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT";
case VkResult::VK_ERROR_UNKNOWN:
return "VK_ERROR_UNKNOWN";
case VkResult::VK_ERROR_INCOMPATIBLE_VERSION_KHR:
return "VK_ERROR_INCOMPATIBLE_VERSION_KHR";
case VkResult::VK_THREAD_IDLE_KHR:
return "VK_THREAD_IDLE_KHR";
case VkResult::VK_THREAD_DONE_KHR:
return "VK_THREAD_DONE_KHR";
case VkResult::VK_OPERATION_DEFERRED_KHR:
return "VK_OPERATION_DEFERRED_KHR";
case VkResult::VK_OPERATION_NOT_DEFERRED_KHR:
return "VK_OPERATION_NOT_DEFERRED_KHR";
case VkResult::VK_PIPELINE_COMPILE_REQUIRED_EXT:
return "VK_PIPELINE_COMPILE_REQUIRED_EXT";
case VkResult::VK_RESULT_MAX_ENUM:
return "VK_RESULT_MAX_ENUM";
}
return "Unknown";
}
void Destroy(VkInstance instance, const InstanceDispatch& dld) noexcept {
dld.vkDestroyInstance(instance, nullptr);
}
void Destroy(VkDevice device, const InstanceDispatch& dld) noexcept {
dld.vkDestroyDevice(device, nullptr);
}
void Destroy(VkDevice device, VkBuffer handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyBuffer(device, handle, nullptr);
}
void Destroy(VkDevice device, VkBufferView handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyBufferView(device, handle, nullptr);
}
void Destroy(VkDevice device, VkCommandPool handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyCommandPool(device, handle, nullptr);
}
void Destroy(VkDevice device, VkDescriptorPool handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyDescriptorPool(device, handle, nullptr);
}
void Destroy(VkDevice device, VkDescriptorSetLayout handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyDescriptorSetLayout(device, handle, nullptr);
}
void Destroy(VkDevice device, VkDescriptorUpdateTemplateKHR handle,
const DeviceDispatch& dld) noexcept {
dld.vkDestroyDescriptorUpdateTemplateKHR(device, handle, nullptr);
}
void Destroy(VkDevice device, VkDeviceMemory handle, const DeviceDispatch& dld) noexcept {
dld.vkFreeMemory(device, handle, nullptr);
}
void Destroy(VkDevice device, VkEvent handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyEvent(device, handle, nullptr);
}
void Destroy(VkDevice device, VkFence handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyFence(device, handle, nullptr);
}
void Destroy(VkDevice device, VkFramebuffer handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyFramebuffer(device, handle, nullptr);
}
void Destroy(VkDevice device, VkImage handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyImage(device, handle, nullptr);
}
void Destroy(VkDevice device, VkImageView handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyImageView(device, handle, nullptr);
}
void Destroy(VkDevice device, VkPipeline handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyPipeline(device, handle, nullptr);
}
void Destroy(VkDevice device, VkPipelineLayout handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyPipelineLayout(device, handle, nullptr);
}
void Destroy(VkDevice device, VkQueryPool handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyQueryPool(device, handle, nullptr);
}
void Destroy(VkDevice device, VkRenderPass handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyRenderPass(device, handle, nullptr);
}
void Destroy(VkDevice device, VkSampler handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroySampler(device, handle, nullptr);
}
void Destroy(VkDevice device, VkSwapchainKHR handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroySwapchainKHR(device, handle, nullptr);
}
void Destroy(VkDevice device, VkSemaphore handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroySemaphore(device, handle, nullptr);
}
void Destroy(VkDevice device, VkShaderModule handle, const DeviceDispatch& dld) noexcept {
dld.vkDestroyShaderModule(device, handle, nullptr);
}
void Destroy(VkInstance instance, VkDebugUtilsMessengerEXT handle,
const InstanceDispatch& dld) noexcept {
dld.vkDestroyDebugUtilsMessengerEXT(instance, handle, nullptr);
}
void Destroy(VkInstance instance, VkSurfaceKHR handle, const InstanceDispatch& dld) noexcept {
dld.vkDestroySurfaceKHR(instance, handle, nullptr);
}
VkResult Free(VkDevice device, VkDescriptorPool handle, Span<VkDescriptorSet> sets,
const DeviceDispatch& dld) noexcept {
return dld.vkFreeDescriptorSets(device, handle, sets.size(), sets.data());
}
VkResult Free(VkDevice device, VkCommandPool handle, Span<VkCommandBuffer> buffers,
const DeviceDispatch& dld) noexcept {
dld.vkFreeCommandBuffers(device, handle, buffers.size(), buffers.data());
return VK_SUCCESS;
}
Instance Instance::Create(u32 version, Span<const char*> layers, Span<const char*> extensions,
InstanceDispatch& dispatch) noexcept {
const VkApplicationInfo application_info{
.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
.pNext = nullptr,
.pApplicationName = "yuzu Emulator",
.applicationVersion = VK_MAKE_VERSION(0, 1, 0),
.pEngineName = "yuzu Emulator",
.engineVersion = VK_MAKE_VERSION(0, 1, 0),
.apiVersion = version,
};
const VkInstanceCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.pApplicationInfo = &application_info,
.enabledLayerCount = layers.size(),
.ppEnabledLayerNames = layers.data(),
.enabledExtensionCount = extensions.size(),
.ppEnabledExtensionNames = extensions.data(),
};
VkInstance instance;
if (dispatch.vkCreateInstance(&ci, nullptr, &instance) != VK_SUCCESS) {
// Failed to create the instance.
return {};
}
if (!Proc(dispatch.vkDestroyInstance, dispatch, "vkDestroyInstance", instance)) {
// We successfully created an instance but the destroy function couldn't be loaded.
// This is a good moment to panic.
return {};
}
return Instance(instance, dispatch);
}
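// Hedged usage sketch, not part of the original file: the expected creation flow, with
// empty layer and extension lists for brevity (dereferencing the wrapper handle to obtain
// the raw VkInstance is assumed to work as it does for the other handles in this file):
//
//     InstanceDispatch dld;
//     if (!Load(dld)) {
//         // The Vulkan loader entry points are missing.
//     }
//     Instance instance = Instance::Create(VK_API_VERSION_1_1, {}, {}, dld);
//     if (!Load(*instance, dld)) {
//         // Instance-level function pointers failed to load.
//     }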
std::optional<std::vector<VkPhysicalDevice>> Instance::EnumeratePhysicalDevices() {
u32 num;
if (dld->vkEnumeratePhysicalDevices(handle, &num, nullptr) != VK_SUCCESS) {
return std::nullopt;
}
std::vector<VkPhysicalDevice> physical_devices(num);
if (dld->vkEnumeratePhysicalDevices(handle, &num, physical_devices.data()) != VK_SUCCESS) {
return std::nullopt;
}
SortPhysicalDevices(physical_devices, *dld);
return std::make_optional(std::move(physical_devices));
}
DebugCallback Instance::TryCreateDebugCallback(
PFN_vkDebugUtilsMessengerCallbackEXT callback) noexcept {
const VkDebugUtilsMessengerCreateInfoEXT ci{
.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
.pNext = nullptr,
.flags = 0,
.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT,
.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT,
.pfnUserCallback = callback,
.pUserData = nullptr,
};
VkDebugUtilsMessengerEXT messenger;
if (dld->vkCreateDebugUtilsMessengerEXT(handle, &ci, nullptr, &messenger) != VK_SUCCESS) {
return {};
}
return DebugCallback(messenger, handle, *dld);
}
void Buffer::BindMemory(VkDeviceMemory memory, VkDeviceSize offset) const {
Check(dld->vkBindBufferMemory(owner, handle, memory, offset));
}
void Buffer::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_BUFFER, name);
}
void BufferView::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_BUFFER_VIEW, name);
}
void Image::BindMemory(VkDeviceMemory memory, VkDeviceSize offset) const {
Check(dld->vkBindImageMemory(owner, handle, memory, offset));
}
void Image::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_IMAGE, name);
}
void ImageView::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_IMAGE_VIEW, name);
}
void DeviceMemory::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_DEVICE_MEMORY, name);
}
void Fence::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_FENCE, name);
}
void Framebuffer::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_FRAMEBUFFER, name);
}
DescriptorSets DescriptorPool::Allocate(const VkDescriptorSetAllocateInfo& ai) const {
const std::size_t num = ai.descriptorSetCount;
std::unique_ptr sets = std::make_unique<VkDescriptorSet[]>(num);
switch (const VkResult result = dld->vkAllocateDescriptorSets(owner, &ai, sets.get())) {
case VK_SUCCESS:
return DescriptorSets(std::move(sets), num, owner, handle, *dld);
case VK_ERROR_OUT_OF_POOL_MEMORY:
return {};
default:
throw Exception(result);
}
}
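// Hedged usage note, not part of the original file: an empty return value signals
// VK_ERROR_OUT_OF_POOL_MEMORY rather than a fatal error, so callers are expected to test
// the result and fall back to allocating from a fresh pool; any other failure throws.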
void DescriptorPool::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_DESCRIPTOR_POOL, name);
}
CommandBuffers CommandPool::Allocate(std::size_t num_buffers, VkCommandBufferLevel level) const {
const VkCommandBufferAllocateInfo ai{
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.pNext = nullptr,
.commandPool = handle,
.level = level,
.commandBufferCount = static_cast<u32>(num_buffers),
};
std::unique_ptr buffers = std::make_unique<VkCommandBuffer[]>(num_buffers);
switch (const VkResult result = dld->vkAllocateCommandBuffers(owner, &ai, buffers.get())) {
case VK_SUCCESS:
return CommandBuffers(std::move(buffers), num_buffers, owner, handle, *dld);
case VK_ERROR_OUT_OF_POOL_MEMORY:
return {};
default:
throw Exception(result);
}
}
void CommandPool::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_COMMAND_POOL, name);
}
std::vector<VkImage> SwapchainKHR::GetImages() const {
u32 num;
Check(dld->vkGetSwapchainImagesKHR(owner, handle, &num, nullptr));
std::vector<VkImage> images(num);
Check(dld->vkGetSwapchainImagesKHR(owner, handle, &num, images.data()));
return images;
}
void Event::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_EVENT, name);
}
void ShaderModule::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_SHADER_MODULE, name);
}
void Semaphore::SetObjectNameEXT(const char* name) const {
SetObjectName(dld, owner, handle, VK_OBJECT_TYPE_SEMAPHORE, name);
}
Device Device::Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
Span<const char*> enabled_extensions, const void* next,
DeviceDispatch& dispatch) noexcept {
const VkDeviceCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
.pNext = next,
.flags = 0,
.queueCreateInfoCount = queues_ci.size(),
.pQueueCreateInfos = queues_ci.data(),
.enabledLayerCount = 0,
.ppEnabledLayerNames = nullptr,
.enabledExtensionCount = enabled_extensions.size(),
.ppEnabledExtensionNames = enabled_extensions.data(),
.pEnabledFeatures = nullptr,
};
VkDevice device;
if (dispatch.vkCreateDevice(physical_device, &ci, nullptr, &device) != VK_SUCCESS) {
return {};
}
Load(device, dispatch);
return Device(device, dispatch);
}
Queue Device::GetQueue(u32 family_index) const noexcept {
VkQueue queue;
dld->vkGetDeviceQueue(handle, family_index, 0, &queue);
return Queue(queue, *dld);
}
Buffer Device::CreateBuffer(const VkBufferCreateInfo& ci) const {
VkBuffer object;
Check(dld->vkCreateBuffer(handle, &ci, nullptr, &object));
return Buffer(object, handle, *dld);
}
BufferView Device::CreateBufferView(const VkBufferViewCreateInfo& ci) const {
VkBufferView object;
Check(dld->vkCreateBufferView(handle, &ci, nullptr, &object));
return BufferView(object, handle, *dld);
}
Image Device::CreateImage(const VkImageCreateInfo& ci) const {
VkImage object;
Check(dld->vkCreateImage(handle, &ci, nullptr, &object));
return Image(object, handle, *dld);
}
ImageView Device::CreateImageView(const VkImageViewCreateInfo& ci) const {
VkImageView object;
Check(dld->vkCreateImageView(handle, &ci, nullptr, &object));
return ImageView(object, handle, *dld);
}
Semaphore Device::CreateSemaphore() const {
static constexpr VkSemaphoreCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
};
return CreateSemaphore(ci);
}
Semaphore Device::CreateSemaphore(const VkSemaphoreCreateInfo& ci) const {
VkSemaphore object;
Check(dld->vkCreateSemaphore(handle, &ci, nullptr, &object));
return Semaphore(object, handle, *dld);
}
Fence Device::CreateFence(const VkFenceCreateInfo& ci) const {
VkFence object;
Check(dld->vkCreateFence(handle, &ci, nullptr, &object));
return Fence(object, handle, *dld);
}
DescriptorPool Device::CreateDescriptorPool(const VkDescriptorPoolCreateInfo& ci) const {
VkDescriptorPool object;
Check(dld->vkCreateDescriptorPool(handle, &ci, nullptr, &object));
return DescriptorPool(object, handle, *dld);
}
RenderPass Device::CreateRenderPass(const VkRenderPassCreateInfo& ci) const {
VkRenderPass object;
Check(dld->vkCreateRenderPass(handle, &ci, nullptr, &object));
return RenderPass(object, handle, *dld);
}
DescriptorSetLayout Device::CreateDescriptorSetLayout(
const VkDescriptorSetLayoutCreateInfo& ci) const {
VkDescriptorSetLayout object;
Check(dld->vkCreateDescriptorSetLayout(handle, &ci, nullptr, &object));
return DescriptorSetLayout(object, handle, *dld);
}
PipelineLayout Device::CreatePipelineLayout(const VkPipelineLayoutCreateInfo& ci) const {
VkPipelineLayout object;
Check(dld->vkCreatePipelineLayout(handle, &ci, nullptr, &object));
return PipelineLayout(object, handle, *dld);
}
Pipeline Device::CreateGraphicsPipeline(const VkGraphicsPipelineCreateInfo& ci) const {
VkPipeline object;
Check(dld->vkCreateGraphicsPipelines(handle, nullptr, 1, &ci, nullptr, &object));
return Pipeline(object, handle, *dld);
}
Pipeline Device::CreateComputePipeline(const VkComputePipelineCreateInfo& ci) const {
VkPipeline object;
Check(dld->vkCreateComputePipelines(handle, nullptr, 1, &ci, nullptr, &object));
return Pipeline(object, handle, *dld);
}
Sampler Device::CreateSampler(const VkSamplerCreateInfo& ci) const {
VkSampler object;
Check(dld->vkCreateSampler(handle, &ci, nullptr, &object));
return Sampler(object, handle, *dld);
}
Framebuffer Device::CreateFramebuffer(const VkFramebufferCreateInfo& ci) const {
VkFramebuffer object;
Check(dld->vkCreateFramebuffer(handle, &ci, nullptr, &object));
return Framebuffer(object, handle, *dld);
}
CommandPool Device::CreateCommandPool(const VkCommandPoolCreateInfo& ci) const {
VkCommandPool object;
Check(dld->vkCreateCommandPool(handle, &ci, nullptr, &object));
return CommandPool(object, handle, *dld);
}
DescriptorUpdateTemplateKHR Device::CreateDescriptorUpdateTemplateKHR(
const VkDescriptorUpdateTemplateCreateInfoKHR& ci) const {
VkDescriptorUpdateTemplateKHR object;
Check(dld->vkCreateDescriptorUpdateTemplateKHR(handle, &ci, nullptr, &object));
return DescriptorUpdateTemplateKHR(object, handle, *dld);
}
QueryPool Device::CreateQueryPool(const VkQueryPoolCreateInfo& ci) const {
VkQueryPool object;
Check(dld->vkCreateQueryPool(handle, &ci, nullptr, &object));
return QueryPool(object, handle, *dld);
}
ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) const {
VkShaderModule object;
Check(dld->vkCreateShaderModule(handle, &ci, nullptr, &object));
return ShaderModule(object, handle, *dld);
}
Event Device::CreateEvent() const {
static constexpr VkEventCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
};
VkEvent object;
Check(dld->vkCreateEvent(handle, &ci, nullptr, &object));
return Event(object, handle, *dld);
}
SwapchainKHR Device::CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const {
VkSwapchainKHR object;
Check(dld->vkCreateSwapchainKHR(handle, &ci, nullptr, &object));
return SwapchainKHR(object, handle, *dld);
}
DeviceMemory Device::TryAllocateMemory(const VkMemoryAllocateInfo& ai) const noexcept {
VkDeviceMemory memory;
if (dld->vkAllocateMemory(handle, &ai, nullptr, &memory) != VK_SUCCESS) {
return {};
}
return DeviceMemory(memory, handle, *dld);
}
DeviceMemory Device::AllocateMemory(const VkMemoryAllocateInfo& ai) const {
VkDeviceMemory memory;
Check(dld->vkAllocateMemory(handle, &ai, nullptr, &memory));
return DeviceMemory(memory, handle, *dld);
}
VkMemoryRequirements Device::GetBufferMemoryRequirements(VkBuffer buffer) const noexcept {
VkMemoryRequirements requirements;
dld->vkGetBufferMemoryRequirements(handle, buffer, &requirements);
return requirements;
}
VkMemoryRequirements Device::GetImageMemoryRequirements(VkImage image) const noexcept {
VkMemoryRequirements requirements;
dld->vkGetImageMemoryRequirements(handle, image, &requirements);
return requirements;
}
void Device::UpdateDescriptorSets(Span<VkWriteDescriptorSet> writes,
Span<VkCopyDescriptorSet> copies) const noexcept {
dld->vkUpdateDescriptorSets(handle, writes.size(), writes.data(), copies.size(), copies.data());
}
VkPhysicalDeviceProperties PhysicalDevice::GetProperties() const noexcept {
VkPhysicalDeviceProperties properties;
dld->vkGetPhysicalDeviceProperties(physical_device, &properties);
return properties;
}
void PhysicalDevice::GetProperties2KHR(VkPhysicalDeviceProperties2KHR& properties) const noexcept {
dld->vkGetPhysicalDeviceProperties2KHR(physical_device, &properties);
}
VkPhysicalDeviceFeatures PhysicalDevice::GetFeatures() const noexcept {
VkPhysicalDeviceFeatures2KHR features2;
features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;
features2.pNext = nullptr;
dld->vkGetPhysicalDeviceFeatures2KHR(physical_device, &features2);
return features2.features;
}
void PhysicalDevice::GetFeatures2KHR(VkPhysicalDeviceFeatures2KHR& features) const noexcept {
dld->vkGetPhysicalDeviceFeatures2KHR(physical_device, &features);
}
VkFormatProperties PhysicalDevice::GetFormatProperties(VkFormat format) const noexcept {
VkFormatProperties properties;
dld->vkGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
return properties;
}
std::vector<VkExtensionProperties> PhysicalDevice::EnumerateDeviceExtensionProperties() const {
u32 num;
dld->vkEnumerateDeviceExtensionProperties(physical_device, nullptr, &num, nullptr);
std::vector<VkExtensionProperties> properties(num);
dld->vkEnumerateDeviceExtensionProperties(physical_device, nullptr, &num, properties.data());
return properties;
}
std::vector<VkQueueFamilyProperties> PhysicalDevice::GetQueueFamilyProperties() const {
u32 num;
dld->vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &num, nullptr);
std::vector<VkQueueFamilyProperties> properties(num);
dld->vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &num, properties.data());
return properties;
}
bool PhysicalDevice::GetSurfaceSupportKHR(u32 queue_family_index, VkSurfaceKHR surface) const {
VkBool32 supported;
Check(dld->vkGetPhysicalDeviceSurfaceSupportKHR(physical_device, queue_family_index, surface,
&supported));
return supported == VK_TRUE;
}
VkSurfaceCapabilitiesKHR PhysicalDevice::GetSurfaceCapabilitiesKHR(VkSurfaceKHR surface) const {
VkSurfaceCapabilitiesKHR capabilities;
Check(dld->vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, surface, &capabilities));
return capabilities;
}
std::vector<VkSurfaceFormatKHR> PhysicalDevice::GetSurfaceFormatsKHR(VkSurfaceKHR surface) const {
u32 num;
Check(dld->vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &num, nullptr));
std::vector<VkSurfaceFormatKHR> formats(num);
Check(
dld->vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &num, formats.data()));
return formats;
}
std::vector<VkPresentModeKHR> PhysicalDevice::GetSurfacePresentModesKHR(
VkSurfaceKHR surface) const {
u32 num;
Check(dld->vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &num, nullptr));
std::vector<VkPresentModeKHR> modes(num);
Check(dld->vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &num,
modes.data()));
return modes;
}
VkPhysicalDeviceMemoryProperties PhysicalDevice::GetMemoryProperties() const noexcept {
VkPhysicalDeviceMemoryProperties properties;
dld->vkGetPhysicalDeviceMemoryProperties(physical_device, &properties);
return properties;
}
u32 AvailableVersion(const InstanceDispatch& dld) noexcept {
PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion;
if (!Proc(vkEnumerateInstanceVersion, dld, "vkEnumerateInstanceVersion")) {
// If the procedure is not found, Vulkan 1.0 is assumed
return VK_API_VERSION_1_0;
}
u32 version;
if (const VkResult result = vkEnumerateInstanceVersion(&version); result != VK_SUCCESS) {
LOG_ERROR(Render_Vulkan, "vkEnumerateInstanceVersion returned {}, assuming Vulkan 1.1",
ToString(result));
return VK_API_VERSION_1_1;
}
return version;
}
std::optional<std::vector<VkExtensionProperties>> EnumerateInstanceExtensionProperties(
const InstanceDispatch& dld) {
u32 num;
if (dld.vkEnumerateInstanceExtensionProperties(nullptr, &num, nullptr) != VK_SUCCESS) {
return std::nullopt;
}
std::vector<VkExtensionProperties> properties(num);
if (dld.vkEnumerateInstanceExtensionProperties(nullptr, &num, properties.data()) !=
VK_SUCCESS) {
return std::nullopt;
}
return properties;
}
std::optional<std::vector<VkLayerProperties>> EnumerateInstanceLayerProperties(
const InstanceDispatch& dld) {
u32 num;
if (dld.vkEnumerateInstanceLayerProperties(&num, nullptr) != VK_SUCCESS) {
return std::nullopt;
}
std::vector<VkLayerProperties> properties(num);
if (dld.vkEnumerateInstanceLayerProperties(&num, properties.data()) != VK_SUCCESS) {
return std::nullopt;
}
return properties;
}
} // namespace Vulkan::vk

File diff suppressed because it is too large