early-access version 1866
@@ -49,6 +49,16 @@ constexpr VkDescriptorSetLayoutCreateInfo ONE_TEXTURE_DESCRIPTOR_SET_LAYOUT_CREA
.bindingCount = 1,
.pBindings = &TEXTURE_DESCRIPTOR_SET_LAYOUT_BINDING<0>,
};
template <u32 num_textures>
inline constexpr DescriptorBankInfo TEXTURE_DESCRIPTOR_BANK_INFO{
.uniform_buffers = 0,
.storage_buffers = 0,
.texture_buffers = 0,
.image_buffers = 0,
.textures = num_textures,
.images = 0,
.score = 2,
};
constexpr VkDescriptorSetLayoutCreateInfo TWO_TEXTURES_DESCRIPTOR_SET_LAYOUT_CREATE_INFO{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
@@ -323,18 +333,19 @@ void BindBlitState(vk::CommandBuffer cmdbuf, VkPipelineLayout layout, const Regi
cmdbuf.SetScissor(0, scissor);
cmdbuf.PushConstants(layout, VK_SHADER_STAGE_VERTEX_BIT, push_constants);
}

} // Anonymous namespace

BlitImageHelper::BlitImageHelper(const Device& device_, VKScheduler& scheduler_,
StateTracker& state_tracker_, VKDescriptorPool& descriptor_pool)
StateTracker& state_tracker_, DescriptorPool& descriptor_pool)
: device{device_}, scheduler{scheduler_}, state_tracker{state_tracker_},
one_texture_set_layout(device.GetLogical().CreateDescriptorSetLayout(
ONE_TEXTURE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO)),
two_textures_set_layout(device.GetLogical().CreateDescriptorSetLayout(
TWO_TEXTURES_DESCRIPTOR_SET_LAYOUT_CREATE_INFO)),
one_texture_descriptor_allocator(descriptor_pool, *one_texture_set_layout),
two_textures_descriptor_allocator(descriptor_pool, *two_textures_set_layout),
one_texture_descriptor_allocator{
descriptor_pool.Allocator(*one_texture_set_layout, TEXTURE_DESCRIPTOR_BANK_INFO<1>)},
two_textures_descriptor_allocator{
descriptor_pool.Allocator(*two_textures_set_layout, TEXTURE_DESCRIPTOR_BANK_INFO<2>)},
one_texture_pipeline_layout(device.GetLogical().CreatePipelineLayout(
PipelineLayoutCreateInfo(one_texture_set_layout.address()))),
two_textures_pipeline_layout(device.GetLogical().CreatePipelineLayout(
@@ -362,7 +373,7 @@ void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, const ImageV
.operation = operation,
};
const VkPipelineLayout layout = *one_texture_pipeline_layout;
const VkImageView src_view = src_image_view.Handle(ImageViewType::e2D);
const VkImageView src_view = src_image_view.Handle(Shader::TextureType::Color2D);
const VkSampler sampler = is_linear ? *linear_sampler : *nearest_sampler;
const VkPipeline pipeline = FindOrEmplacePipeline(key);
const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
@@ -416,7 +427,6 @@ void BlitImageHelper::ConvertD32ToR32(const Framebuffer* dst_framebuffer,

void BlitImageHelper::ConvertR32ToD32(const Framebuffer* dst_framebuffer,
const ImageView& src_image_view) {

ConvertColorToDepthPipeline(convert_r32_to_d32_pipeline, dst_framebuffer->RenderPass());
Convert(*convert_r32_to_d32_pipeline, dst_framebuffer, src_image_view);
}
@@ -436,7 +446,7 @@ void BlitImageHelper::ConvertR16ToD16(const Framebuffer* dst_framebuffer,
void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_framebuffer,
const ImageView& src_image_view) {
const VkPipelineLayout layout = *one_texture_pipeline_layout;
const VkImageView src_view = src_image_view.Handle(ImageViewType::e2D);
const VkImageView src_view = src_image_view.Handle(Shader::TextureType::Color2D);
const VkSampler sampler = *nearest_sampler;
const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
const VkExtent2D extent{
@@ -31,7 +31,7 @@ struct BlitImagePipelineKey {
class BlitImageHelper {
public:
explicit BlitImageHelper(const Device& device, VKScheduler& scheduler,
StateTracker& state_tracker, VKDescriptorPool& descriptor_pool);
StateTracker& state_tracker, DescriptorPool& descriptor_pool);
~BlitImageHelper();

void BlitColor(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,
@@ -15,9 +15,7 @@
#include "video_core/renderer_vulkan/vk_state_tracker.h"

namespace Vulkan {

namespace {

constexpr size_t POINT = 0;
constexpr size_t LINE = 1;
constexpr size_t POLYGON = 2;
@@ -39,10 +37,20 @@ constexpr std::array POLYGON_OFFSET_ENABLE_LUT = {
POLYGON, // Patches
};

void RefreshXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs) {
std::ranges::transform(regs.tfb_layouts, state.layouts.begin(), [](const auto& layout) {
return VideoCommon::TransformFeedbackState::Layout{
.stream = layout.stream,
.varying_count = layout.varying_count,
.stride = layout.stride,
};
});
state.varyings = regs.tfb_varying_locs;
}
} // Anonymous namespace

void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
bool has_extended_dynamic_state) {
bool has_extended_dynamic_state, bool has_dynamic_vertex_input) {
const Maxwell& regs = maxwell3d.regs;
const std::array enabled_lut{
regs.polygon_offset_point_enable,
@@ -52,6 +60,9 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());

raw1 = 0;
extended_dynamic_state.Assign(has_extended_dynamic_state ? 1 : 0);
dynamic_vertex_input.Assign(has_dynamic_vertex_input ? 1 : 0);
xfb_enabled.Assign(regs.tfb_enabled != 0);
primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
@@ -63,37 +74,66 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
tessellation_clockwise.Assign(regs.tess_mode.cw.Value());
logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
logic_op.Assign(PackLogicOp(regs.logic_op.operation));
rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0);
topology.Assign(regs.draw.topology);
msaa_mode.Assign(regs.multisample_mode);

raw2 = 0;
rasterize_enable.Assign(regs.rasterize_enable != 0 ? 1 : 0);
const auto test_func =
regs.alpha_test_enabled != 0 ? regs.alpha_test_func : Maxwell::ComparisonOp::Always;
alpha_test_func.Assign(PackComparisonOp(test_func));
early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
depth_enabled.Assign(regs.zeta_enable != 0 ? 1 : 0);
depth_format.Assign(static_cast<u32>(regs.zeta.format));
y_negate.Assign(regs.screen_y_control.y_negate != 0 ? 1 : 0);
provoking_vertex_last.Assign(regs.provoking_vertex_last != 0 ? 1 : 0);
conservative_raster_enable.Assign(regs.conservative_raster_enable != 0 ? 1 : 0);
smooth_lines.Assign(regs.line_smooth_enable != 0 ? 1 : 0);

for (size_t i = 0; i < regs.rt.size(); ++i) {
color_formats[i] = static_cast<u8>(regs.rt[i].format);
}
alpha_test_ref = Common::BitCast<u32>(regs.alpha_test_ref);
point_size = Common::BitCast<u32>(regs.point_size);

if (maxwell3d.dirty.flags[Dirty::InstanceDivisors]) {
maxwell3d.dirty.flags[Dirty::InstanceDivisors] = false;
for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
const bool is_enabled = regs.instanced_arrays.IsInstancingEnabled(index);
binding_divisors[index] = is_enabled ? regs.vertex_array[index].divisor : 0;
}
}
if (maxwell3d.dirty.flags[Dirty::VertexAttributes]) {
maxwell3d.dirty.flags[Dirty::VertexAttributes] = false;
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
const auto& input = regs.vertex_attrib_format[index];
auto& attribute = attributes[index];
attribute.raw = 0;
attribute.enabled.Assign(input.IsConstant() ? 0 : 1);
attribute.buffer.Assign(input.buffer);
attribute.offset.Assign(input.offset);
attribute.type.Assign(static_cast<u32>(input.type.Value()));
attribute.size.Assign(static_cast<u32>(input.size.Value()));
if (maxwell3d.dirty.flags[Dirty::VertexInput]) {
if (has_dynamic_vertex_input) {
// Dirty flag will be reset by the command buffer update
static constexpr std::array LUT{
0u, // Invalid
1u, // SignedNorm
1u, // UnsignedNorm
2u, // SignedInt
3u, // UnsignedInt
1u, // UnsignedScaled
1u, // SignedScaled
1u, // Float
};
const auto& attrs = regs.vertex_attrib_format;
attribute_types = 0;
for (size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
const u32 mask = attrs[i].constant != 0 ? 0 : 3;
const u32 type = LUT[static_cast<size_t>(attrs[i].type.Value())];
attribute_types |= static_cast<u64>(type & mask) << (i * 2);
}
} else {
maxwell3d.dirty.flags[Dirty::VertexInput] = false;
enabled_divisors = 0;
for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
const bool is_enabled = regs.instanced_arrays.IsInstancingEnabled(index);
binding_divisors[index] = is_enabled ? regs.vertex_array[index].divisor : 0;
enabled_divisors |= (is_enabled ? u64{1} : 0) << index;
}
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
const auto& input = regs.vertex_attrib_format[index];
auto& attribute = attributes[index];
attribute.raw = 0;
attribute.enabled.Assign(input.constant ? 0 : 1);
attribute.buffer.Assign(input.buffer);
attribute.offset.Assign(input.offset);
attribute.type.Assign(static_cast<u32>(input.type.Value()));
attribute.size.Assign(static_cast<u32>(input.size.Value()));
}
}
}
if (maxwell3d.dirty.flags[Dirty::Blending]) {
@@ -109,10 +149,12 @@ void FixedPipelineState::Refresh(Tegra::Engines::Maxwell3D& maxwell3d,
return static_cast<u16>(viewport.swizzle.raw);
});
}
if (!has_extended_dynamic_state) {
no_extended_dynamic_state.Assign(1);
if (!extended_dynamic_state) {
dynamic_state.Refresh(regs);
}
if (xfb_enabled) {
RefreshXfbState(xfb_state, regs);
}
}

void FixedPipelineState::BlendingAttachment::Refresh(const Maxwell& regs, size_t index) {
@@ -12,6 +12,7 @@

#include "video_core/engines/maxwell_3d.h"
#include "video_core/surface.h"
#include "video_core/transform_feedback.h"

namespace Vulkan {

@@ -60,7 +61,7 @@ struct FixedPipelineState {

void Refresh(const Maxwell& regs, size_t index);

constexpr std::array<bool, 4> Mask() const noexcept {
std::array<bool, 4> Mask() const noexcept {
return {mask_r != 0, mask_g != 0, mask_b != 0, mask_a != 0};
}

@@ -97,11 +98,11 @@ struct FixedPipelineState {
BitField<20, 3, u32> type;
BitField<23, 6, u32> size;

constexpr Maxwell::VertexAttribute::Type Type() const noexcept {
Maxwell::VertexAttribute::Type Type() const noexcept {
return static_cast<Maxwell::VertexAttribute::Type>(type.Value());
}

constexpr Maxwell::VertexAttribute::Size Size() const noexcept {
Maxwell::VertexAttribute::Size Size() const noexcept {
return static_cast<Maxwell::VertexAttribute::Size>(size.Value());
}
};
@@ -167,37 +168,53 @@ struct FixedPipelineState {
union {
u32 raw1;
BitField<0, 1, u32> no_extended_dynamic_state;
BitField<2, 1, u32> primitive_restart_enable;
BitField<3, 1, u32> depth_bias_enable;
BitField<4, 1, u32> depth_clamp_disabled;
BitField<5, 1, u32> ndc_minus_one_to_one;
BitField<6, 2, u32> polygon_mode;
BitField<8, 5, u32> patch_control_points_minus_one;
BitField<13, 2, u32> tessellation_primitive;
BitField<15, 2, u32> tessellation_spacing;
BitField<17, 1, u32> tessellation_clockwise;
BitField<18, 1, u32> logic_op_enable;
BitField<19, 4, u32> logic_op;
BitField<23, 1, u32> rasterize_enable;
BitField<0, 1, u32> extended_dynamic_state;
BitField<1, 1, u32> dynamic_vertex_input;
BitField<2, 1, u32> xfb_enabled;
BitField<3, 1, u32> primitive_restart_enable;
BitField<4, 1, u32> depth_bias_enable;
BitField<5, 1, u32> depth_clamp_disabled;
BitField<6, 1, u32> ndc_minus_one_to_one;
BitField<7, 2, u32> polygon_mode;
BitField<9, 5, u32> patch_control_points_minus_one;
BitField<14, 2, u32> tessellation_primitive;
BitField<16, 2, u32> tessellation_spacing;
BitField<18, 1, u32> tessellation_clockwise;
BitField<19, 1, u32> logic_op_enable;
BitField<20, 4, u32> logic_op;
BitField<24, 4, Maxwell::PrimitiveTopology> topology;
BitField<28, 4, Tegra::Texture::MsaaMode> msaa_mode;
};
union {
u32 raw2;
BitField<0, 3, u32> alpha_test_func;
BitField<3, 1, u32> early_z;
BitField<0, 1, u32> rasterize_enable;
BitField<1, 3, u32> alpha_test_func;
BitField<4, 1, u32> early_z;
BitField<5, 1, u32> depth_enabled;
BitField<6, 5, u32> depth_format;
BitField<11, 1, u32> y_negate;
BitField<12, 1, u32> provoking_vertex_last;
BitField<13, 1, u32> conservative_raster_enable;
BitField<14, 1, u32> smooth_lines;
};
std::array<u8, Maxwell::NumRenderTargets> color_formats;

u32 alpha_test_ref;
u32 point_size;
std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments;
std::array<u16, Maxwell::NumViewports> viewport_swizzles;
DynamicState dynamic_state;
union {
u64 attribute_types; // Used with VK_EXT_vertex_input_dynamic_state
u64 enabled_divisors;
};
std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
std::array<u32, Maxwell::NumVertexArrays> binding_divisors;

void Refresh(Tegra::Engines::Maxwell3D& maxwell3d, bool has_extended_dynamic_state);
DynamicState dynamic_state;
VideoCommon::TransformFeedbackState xfb_state;

void Refresh(Tegra::Engines::Maxwell3D& maxwell3d, bool has_extended_dynamic_state,
bool has_dynamic_vertex_input);

size_t Hash() const noexcept;

@@ -208,8 +225,24 @@ struct FixedPipelineState {
}

size_t Size() const noexcept {
const size_t total_size = sizeof *this;
return total_size - (no_extended_dynamic_state != 0 ? 0 : sizeof(DynamicState));
if (xfb_enabled) {
// When transform feedback is enabled, use the whole struct
return sizeof(*this);
}
if (dynamic_vertex_input) {
// Exclude dynamic state and attributes
return offsetof(FixedPipelineState, attributes);
}
if (extended_dynamic_state) {
// Exclude dynamic state
return offsetof(FixedPipelineState, dynamic_state);
}
// Default
return offsetof(FixedPipelineState, xfb_state);
}

u32 DynamicAttributeType(size_t index) const noexcept {
return (attribute_types >> (index * 2)) & 0b11;
}
};
static_assert(std::has_unique_object_representations_v<FixedPipelineState>);
@@ -157,7 +157,7 @@ struct FormatTuple {
{VK_FORMAT_R32_SFLOAT, Attachable | Storage}, // R32_FLOAT
{VK_FORMAT_R16_SFLOAT, Attachable | Storage}, // R16_FLOAT
{VK_FORMAT_R16_UNORM, Attachable | Storage}, // R16_UNORM
{VK_FORMAT_UNDEFINED}, // R16_SNORM
{VK_FORMAT_R16_SNORM, Attachable | Storage}, // R16_SNORM
{VK_FORMAT_R16_UINT, Attachable | Storage}, // R16_UINT
{VK_FORMAT_UNDEFINED}, // R16_SINT
{VK_FORMAT_R16G16_UNORM, Attachable | Storage}, // R16G16_UNORM
@@ -266,19 +266,20 @@ FormatInfo SurfaceFormat(const Device& device, FormatType format_type, bool with
return {device.GetSupportedFormat(tuple.format, usage, format_type), attachable, storage};
}

VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
VkShaderStageFlagBits ShaderStage(Shader::Stage stage) {
switch (stage) {
case Tegra::Engines::ShaderType::Vertex:
case Shader::Stage::VertexA:
case Shader::Stage::VertexB:
return VK_SHADER_STAGE_VERTEX_BIT;
case Tegra::Engines::ShaderType::TesselationControl:
case Shader::Stage::TessellationControl:
return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
case Tegra::Engines::ShaderType::TesselationEval:
case Shader::Stage::TessellationEval:
return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
case Tegra::Engines::ShaderType::Geometry:
case Shader::Stage::Geometry:
return VK_SHADER_STAGE_GEOMETRY_BIT;
case Tegra::Engines::ShaderType::Fragment:
case Shader::Stage::Fragment:
return VK_SHADER_STAGE_FRAGMENT_BIT;
case Tegra::Engines::ShaderType::Compute:
case Shader::Stage::Compute:
return VK_SHADER_STAGE_COMPUTE_BIT;
}
UNIMPLEMENTED_MSG("Unimplemented shader stage={}", stage);
@@ -685,6 +686,19 @@ VkCullModeFlagBits CullFace(Maxwell::CullFace cull_face) {
return {};
}
VkPolygonMode PolygonMode(Maxwell::PolygonMode polygon_mode) {
switch (polygon_mode) {
case Maxwell::PolygonMode::Point:
return VK_POLYGON_MODE_POINT;
case Maxwell::PolygonMode::Line:
return VK_POLYGON_MODE_LINE;
case Maxwell::PolygonMode::Fill:
return VK_POLYGON_MODE_FILL;
}
UNIMPLEMENTED_MSG("Unimplemented polygon mode={}", polygon_mode);
return {};
}

VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
switch (swizzle) {
case Tegra::Texture::SwizzleSource::Zero:
@@ -741,4 +755,28 @@ VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reducti
return VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT;
}

VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode) {
switch (msaa_mode) {
case Tegra::Texture::MsaaMode::Msaa1x1:
return VK_SAMPLE_COUNT_1_BIT;
case Tegra::Texture::MsaaMode::Msaa2x1:
case Tegra::Texture::MsaaMode::Msaa2x1_D3D:
return VK_SAMPLE_COUNT_2_BIT;
case Tegra::Texture::MsaaMode::Msaa2x2:
case Tegra::Texture::MsaaMode::Msaa2x2_VC4:
case Tegra::Texture::MsaaMode::Msaa2x2_VC12:
return VK_SAMPLE_COUNT_4_BIT;
case Tegra::Texture::MsaaMode::Msaa4x2:
case Tegra::Texture::MsaaMode::Msaa4x2_D3D:
case Tegra::Texture::MsaaMode::Msaa4x2_VC8:
case Tegra::Texture::MsaaMode::Msaa4x2_VC24:
return VK_SAMPLE_COUNT_8_BIT;
case Tegra::Texture::MsaaMode::Msaa4x4:
return VK_SAMPLE_COUNT_16_BIT;
default:
UNREACHABLE_MSG("Invalid msaa_mode={}", static_cast<int>(msaa_mode));
return VK_SAMPLE_COUNT_1_BIT;
}
}

} // namespace Vulkan::MaxwellToVK
@@ -5,6 +5,7 @@
#pragma once

#include "common/common_types.h"
#include "shader_recompiler/stage.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/surface.h"
#include "video_core/textures/texture.h"
@@ -45,7 +46,7 @@ struct FormatInfo {
[[nodiscard]] FormatInfo SurfaceFormat(const Device& device, FormatType format_type, bool with_srgb,
PixelFormat pixel_format);

VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage);
VkShaderStageFlagBits ShaderStage(Shader::Stage stage);

VkPrimitiveTopology PrimitiveTopology(const Device& device, Maxwell::PrimitiveTopology topology);

@@ -65,10 +66,14 @@ VkFrontFace FrontFace(Maxwell::FrontFace front_face);

VkCullModeFlagBits CullFace(Maxwell::CullFace cull_face);

VkPolygonMode PolygonMode(Maxwell::PolygonMode polygon_mode);

VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);

VkViewportCoordinateSwizzleNV ViewportSwizzle(Maxwell::ViewportSwizzle swizzle);

VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reduction);

VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode);

} // namespace Vulkan::MaxwellToVK
src/video_core/renderer_vulkan/pipeline_helper.h (new executable file, 154 lines)
@@ -0,0 +1,154 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <cstddef>

#include <boost/container/small_vector.hpp>

#include "common/assert.h"
#include "common/common_types.h"
#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/texture_cache/texture_cache.h"
#include "video_core/texture_cache/types.h"
#include "video_core/textures/texture.h"
#include "video_core/vulkan_common/vulkan_device.h"

namespace Vulkan {

class DescriptorLayoutBuilder {
public:
DescriptorLayoutBuilder(const Device& device_) : device{&device_} {}

bool CanUsePushDescriptor() const noexcept {
return device->IsKhrPushDescriptorSupported() &&
num_descriptors <= device->MaxPushDescriptors();
}

vk::DescriptorSetLayout CreateDescriptorSetLayout(bool use_push_descriptor) const {
if (bindings.empty()) {
return nullptr;
}
const VkDescriptorSetLayoutCreateFlags flags =
use_push_descriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
return device->GetLogical().CreateDescriptorSetLayout({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = flags,
.bindingCount = static_cast<u32>(bindings.size()),
.pBindings = bindings.data(),
});
}

vk::DescriptorUpdateTemplateKHR CreateTemplate(VkDescriptorSetLayout descriptor_set_layout,
VkPipelineLayout pipeline_layout,
bool use_push_descriptor) const {
if (entries.empty()) {
return nullptr;
}
const VkDescriptorUpdateTemplateType type =
use_push_descriptor ? VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR
: VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
return device->GetLogical().CreateDescriptorUpdateTemplateKHR({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
.pNext = nullptr,
.flags = 0,
.descriptorUpdateEntryCount = static_cast<u32>(entries.size()),
.pDescriptorUpdateEntries = entries.data(),
.templateType = type,
.descriptorSetLayout = descriptor_set_layout,
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
.pipelineLayout = pipeline_layout,
.set = 0,
});
}
vk::PipelineLayout CreatePipelineLayout(VkDescriptorSetLayout descriptor_set_layout) const {
return device->GetLogical().CreatePipelineLayout({
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.setLayoutCount = descriptor_set_layout ? 1U : 0U,
.pSetLayouts = bindings.empty() ? nullptr : &descriptor_set_layout,
.pushConstantRangeCount = 0,
.pPushConstantRanges = nullptr,
});
}

void Add(const Shader::Info& info, VkShaderStageFlags stage) {
Add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, stage, info.constant_buffer_descriptors);
Add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, stage, info.storage_buffers_descriptors);
Add(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, stage, info.texture_buffer_descriptors);
Add(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, stage, info.image_buffer_descriptors);
Add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, stage, info.texture_descriptors);
Add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, stage, info.image_descriptors);
}

private:
template <typename Descriptors>
void Add(VkDescriptorType type, VkShaderStageFlags stage, const Descriptors& descriptors) {
const size_t num{descriptors.size()};
for (size_t i = 0; i < num; ++i) {
bindings.push_back({
.binding = binding,
.descriptorType = type,
.descriptorCount = descriptors[i].count,
.stageFlags = stage,
.pImmutableSamplers = nullptr,
});
entries.push_back({
.dstBinding = binding,
.dstArrayElement = 0,
.descriptorCount = descriptors[i].count,
.descriptorType = type,
.offset = offset,
.stride = sizeof(DescriptorUpdateEntry),
});
++binding;
num_descriptors += descriptors[i].count;
offset += sizeof(DescriptorUpdateEntry);
}
}

const Device* device{};
boost::container::small_vector<VkDescriptorSetLayoutBinding, 32> bindings;
boost::container::small_vector<VkDescriptorUpdateTemplateEntryKHR, 32> entries;
u32 binding{};
u32 num_descriptors{};
size_t offset{};
};

inline void PushImageDescriptors(const Shader::Info& info, const VkSampler*& samplers,
const ImageId*& image_view_ids, TextureCache& texture_cache,
VKUpdateDescriptorQueue& update_descriptor_queue) {
for (const auto& desc : info.texture_buffer_descriptors) {
image_view_ids += desc.count;
}
for (const auto& desc : info.image_buffer_descriptors) {
image_view_ids += desc.count;
}
for (const auto& desc : info.texture_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
const VkSampler sampler{*(samplers++)};
ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
const VkImageView vk_image_view{image_view.Handle(desc.type)};
update_descriptor_queue.AddSampledImage(vk_image_view, sampler);
}
}
for (const auto& desc : info.image_descriptors) {
for (u32 index = 0; index < desc.count; ++index) {
ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
if (desc.is_written) {
texture_cache.MarkModification(image_view.image_id);
}
const VkImageView vk_image_view{image_view.StorageView(desc.type, desc.format)};
update_descriptor_queue.AddImage(vk_image_view);
}
}
}

} // namespace Vulkan
@@ -97,19 +97,14 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
Core::Frontend::EmuWindow& emu_window,
Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
std::unique_ptr<Core::Frontend::GraphicsContext> context_) try
: RendererBase(emu_window, std::move(context_)),
telemetry_session(telemetry_session_),
cpu_memory(cpu_memory_),
gpu(gpu_),
library(OpenLibrary()),
: RendererBase(emu_window, std::move(context_)), telemetry_session(telemetry_session_),
cpu_memory(cpu_memory_), gpu(gpu_), library(OpenLibrary()),
instance(CreateInstance(library, dld, VK_API_VERSION_1_1, render_window.GetWindowInfo().type,
true, Settings::values.renderer_debug.GetValue())),
debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr),
surface(CreateSurface(instance, render_window)),
device(CreateDevice(instance, dld, *surface)),
memory_allocator(device, false),
state_tracker(gpu),
scheduler(device, state_tracker),
device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false),
state_tracker(gpu), scheduler(device, state_tracker),
swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width,
render_window.GetFramebufferLayout().height, false),
blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler,
@@ -130,35 +125,45 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
if (!framebuffer) {
return;
}
const auto& layout = render_window.GetFramebufferLayout();
if (layout.width > 0 && layout.height > 0 && render_window.IsShown()) {
const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
const bool use_accelerated =
rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
const bool is_srgb = use_accelerated && screen_info.is_srgb;
if (swapchain.HasFramebufferChanged(layout) || swapchain.GetSrgbState() != is_srgb) {
swapchain.Create(layout.width, layout.height, is_srgb);
blit_screen.Recreate();
}
scheduler.WaitWorker();

while (!swapchain.AcquireNextImage()) {
swapchain.Create(layout.width, layout.height, is_srgb);
blit_screen.Recreate();
}
const VkSemaphore render_semaphore = blit_screen.Draw(*framebuffer, use_accelerated);

scheduler.Flush(render_semaphore);

if (swapchain.Present(render_semaphore)) {
blit_screen.Recreate();
}
gpu.RendererFrameEndNotify();
rasterizer.TickFrame();
SCOPE_EXIT({ render_window.OnFrameDisplayed(); });
if (!render_window.IsShown()) {
return;
}
const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
const bool use_accelerated =
rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
const bool is_srgb = use_accelerated && screen_info.is_srgb;

render_window.OnFrameDisplayed();
bool has_been_recreated = false;
const auto recreate_swapchain = [&] {
if (!has_been_recreated) {
has_been_recreated = true;
scheduler.WaitWorker();
}
const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout();
swapchain.Create(layout.width, layout.height, is_srgb);
};
if (swapchain.IsSubOptimal() || swapchain.HasColorSpaceChanged(is_srgb)) {
recreate_swapchain();
}
bool is_outdated;
do {
swapchain.AcquireNextImage();
is_outdated = swapchain.IsOutDated();
if (is_outdated) {
recreate_swapchain();
}
} while (is_outdated);
if (has_been_recreated) {
blit_screen.Recreate();
}
const VkSemaphore render_semaphore = blit_screen.Draw(*framebuffer, use_accelerated);
scheduler.Flush(render_semaphore);
scheduler.WaitWorker();
swapchain.Present(render_semaphore);

gpu.RendererFrameEndNotify();
rasterizer.TickFrame();
}

void RendererVulkan::Report() const {
@@ -184,55 +184,51 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
.depth = 1,
},
};
scheduler.Record(
[buffer = *buffer, image = *raw_images[image_index], copy](vk::CommandBuffer cmdbuf) {
const VkImageMemoryBarrier base_barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = 0,
.dstAccessMask = 0,
.oldLayout = VK_IMAGE_LAYOUT_GENERAL,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = image,
.subresourceRange =
{
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
VkImageMemoryBarrier read_barrier = base_barrier;
read_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
read_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
read_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
scheduler.Record([this, copy, image_index](vk::CommandBuffer cmdbuf) {
const VkImage image = *raw_images[image_index];
const VkImageMemoryBarrier base_barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = 0,
.dstAccessMask = 0,
.oldLayout = VK_IMAGE_LAYOUT_GENERAL,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = image,
.subresourceRange{
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
VkImageMemoryBarrier read_barrier = base_barrier;
read_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
read_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
read_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageMemoryBarrier write_barrier = base_barrier;
write_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
write_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
VkImageMemoryBarrier write_barrier = base_barrier;
write_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
write_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;

cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
0, read_barrier);
cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_GENERAL, copy);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
});
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
read_barrier);
cmdbuf.CopyBufferToImage(*buffer, image, VK_IMAGE_LAYOUT_GENERAL, copy);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
});
}
scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
descriptor_set = descriptor_sets[image_index], buffer = *buffer,
size = swapchain.GetSize(), pipeline = *pipeline,
layout = *pipeline_layout](vk::CommandBuffer cmdbuf) {
scheduler.Record([this, image_index, size = swapchain.GetSize()](vk::CommandBuffer cmdbuf) {
const VkClearValue clear_color{
.color = {.float32 = {0.0f, 0.0f, 0.0f, 0.0f}},
};
const VkRenderPassBeginInfo renderpass_bi{
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
.pNext = nullptr,
.renderPass = renderpass,
.framebuffer = framebuffer,
.renderPass = *renderpass,
.framebuffer = *framebuffers[image_index],
.renderArea =
{
.offset = {0, 0},
@@ -254,12 +250,13 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
.extent = size,
};
cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
cmdbuf.SetViewport(0, viewport);
cmdbuf.SetScissor(0, scissor);

cmdbuf.BindVertexBuffer(0, buffer, offsetof(BufferData, vertices));
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set, {});
cmdbuf.BindVertexBuffer(0, *buffer, offsetof(BufferData, vertices));
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline_layout, 0,
descriptor_sets[image_index], {});
cmdbuf.Draw(4, 1, 0, 0);
cmdbuf.EndRenderPass();
});
@@ -301,8 +298,7 @@ void VKBlitScreen::CreateShaders() {

void VKBlitScreen::CreateSemaphores() {
semaphores.resize(image_count);
std::generate(semaphores.begin(), semaphores.end(),
[this] { return device.GetLogical().CreateSemaphore(); });
std::ranges::generate(semaphores, [this] { return device.GetLogical().CreateSemaphore(); });
}

void VKBlitScreen::CreateDescriptorPool() {
@@ -630,8 +626,8 @@ void VKBlitScreen::CreateFramebuffers() {
}

void VKBlitScreen::ReleaseRawImages() {
for (std::size_t i = 0; i < raw_images.size(); ++i) {
scheduler.Wait(resource_ticks.at(i));
for (const u64 tick : resource_ticks) {
scheduler.Wait(tick);
}
raw_images.clear();
raw_buffer_commits.clear();
@@ -60,6 +60,27 @@ std::array<T, 6> MakeQuadIndices(u32 quad, u32 first) {
}
return indices;
}

vk::Buffer CreateBuffer(const Device& device, u64 size) {
VkBufferUsageFlags flags =
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
if (device.IsExtTransformFeedbackSupported()) {
flags |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
}
return device.GetLogical().CreateBuffer({
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = flags,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
}
} // Anonymous namespace

Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)
@@ -67,31 +88,46 @@ Buffer::Buffer(BufferCacheRuntime&, VideoCommon::NullBufferParams null_params)

Buffer::Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
VAddr cpu_addr_, u64 size_bytes_)
: VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_) {
buffer = runtime.device.GetLogical().CreateBuffer(VkBufferCreateInfo{
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = SizeBytes(),
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
});
: VideoCommon::BufferBase<VideoCore::RasterizerInterface>(rasterizer_, cpu_addr_, size_bytes_),
device{&runtime.device}, buffer{CreateBuffer(*device, SizeBytes())},
commit{runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal)} {
if (runtime.device.HasDebuggingToolAttached()) {
buffer.SetObjectNameEXT(fmt::format("Buffer 0x{:x}", CpuAddr()).c_str());
}
commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
}
VkBufferView Buffer::View(u32 offset, u32 size, VideoCore::Surface::PixelFormat format) {
if (!device) {
// Null buffer, return a null descriptor
return VK_NULL_HANDLE;
}
const auto it{std::ranges::find_if(views, [offset, size, format](const BufferView& view) {
return offset == view.offset && size == view.size && format == view.format;
})};
if (it != views.end()) {
return *it->handle;
}
views.push_back({
.offset = offset,
.size = size,
.format = format,
.handle = device->GetLogical().CreateBufferView({
.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.buffer = *buffer,
.format = MaxwellToVK::SurfaceFormat(*device, FormatType::Buffer, false, format).format,
.offset = offset,
.range = size,
}),
});
return *views.back().handle;
}

BufferCacheRuntime::BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_allocator_,
VKScheduler& scheduler_, StagingBufferPool& staging_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
VKDescriptorPool& descriptor_pool)
DescriptorPool& descriptor_pool)
: device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
staging_pool{staging_pool_}, update_descriptor_queue{update_descriptor_queue_},
uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
@@ -9,13 +9,14 @@
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/surface.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {

class Device;
class VKDescriptorPool;
class DescriptorPool;
class VKScheduler;

class BufferCacheRuntime;
@@ -26,6 +27,8 @@ public:
explicit Buffer(BufferCacheRuntime& runtime, VideoCore::RasterizerInterface& rasterizer_,
VAddr cpu_addr_, u64 size_bytes_);

[[nodiscard]] VkBufferView View(u32 offset, u32 size, VideoCore::Surface::PixelFormat format);

[[nodiscard]] VkBuffer Handle() const noexcept {
return *buffer;
}
@@ -35,8 +38,17 @@ public:
}

private:
struct BufferView {
u32 offset;
u32 size;
VideoCore::Surface::PixelFormat format;
vk::BufferView handle;
};

const Device* device{};
vk::Buffer buffer;
MemoryCommit commit;
std::vector<BufferView> views;
};

class BufferCacheRuntime {
@@ -49,7 +61,7 @@ public:
explicit BufferCacheRuntime(const Device& device_, MemoryAllocator& memory_manager_,
VKScheduler& scheduler_, StagingBufferPool& staging_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
VKDescriptorPool& descriptor_pool);
DescriptorPool& descriptor_pool);

void Finish();

@@ -85,6 +97,11 @@ public:
BindBuffer(buffer, offset, size);
}

void BindTextureBuffer(Buffer& buffer, u32 offset, u32 size,
VideoCore::Surface::PixelFormat format) {
update_descriptor_queue.AddTexelBuffer(buffer.View(offset, size, format));
}

private:
void BindBuffer(VkBuffer buffer, u32 offset, u32 size) {
update_descriptor_queue.AddBuffer(buffer, offset, size);
@@ -122,6 +139,7 @@ struct BufferCacheParams {
static constexpr bool NEEDS_BIND_UNIFORM_INDEX = false;
static constexpr bool NEEDS_BIND_STORAGE_INDEX = false;
static constexpr bool USE_MEMORY_MAPS = true;
static constexpr bool SEPARATE_IMAGE_BUFFER_BINDINGS = false;
};

using BufferCache = VideoCommon::BufferCache<BufferCacheParams>;
@@ -41,80 +41,92 @@ constexpr u32 ASTC_BINDING_SWIZZLE_BUFFER = 2;
constexpr u32 ASTC_BINDING_OUTPUT_IMAGE = 3;
constexpr size_t ASTC_NUM_BINDINGS = 4;

VkPushConstantRange BuildComputePushConstantRange(std::size_t size) {
return {
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.offset = 0,
.size = static_cast<u32>(size),
};
}
template <size_t size>
inline constexpr VkPushConstantRange COMPUTE_PUSH_CONSTANT_RANGE{
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.offset = 0,
.size = static_cast<u32>(size),
};

std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBindings() {
return {{
{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
}};
}
std::array<VkDescriptorSetLayoutBinding, ASTC_NUM_BINDINGS> BuildASTCDescriptorSetBindings() {
return {{
{
.binding = ASTC_BINDING_INPUT_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_ENC_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_SWIZZLE_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_OUTPUT_IMAGE,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
}};
}

VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {
return {
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 2,
constexpr std::array<VkDescriptorSetLayoutBinding, 2> INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS{{
{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.offset = 0,
.stride = sizeof(DescriptorUpdateEntry),
};
}
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
}};

std::array<VkDescriptorUpdateTemplateEntryKHR, ASTC_NUM_BINDINGS>
BuildASTCPassDescriptorUpdateTemplateEntry() {
return {{
constexpr DescriptorBankInfo INPUT_OUTPUT_BANK_INFO{
.uniform_buffers = 0,
.storage_buffers = 2,
.texture_buffers = 0,
.image_buffers = 0,
.textures = 0,
.images = 0,
.score = 2,
};
constexpr std::array<VkDescriptorSetLayoutBinding, 4> ASTC_DESCRIPTOR_SET_BINDINGS{{
{
.binding = ASTC_BINDING_INPUT_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_ENC_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_SWIZZLE_BUFFER,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
{
.binding = ASTC_BINDING_OUTPUT_IMAGE,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
},
}};

constexpr DescriptorBankInfo ASTC_BANK_INFO{
.uniform_buffers = 0,
.storage_buffers = 3,
.texture_buffers = 0,
.image_buffers = 0,
.textures = 0,
.images = 1,
.score = 4,
};

constexpr VkDescriptorUpdateTemplateEntryKHR INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE{
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 2,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.offset = 0,
.stride = sizeof(DescriptorUpdateEntry),
};

constexpr std::array<VkDescriptorUpdateTemplateEntryKHR, ASTC_NUM_BINDINGS>
ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY{{
{
.dstBinding = ASTC_BINDING_INPUT_BUFFER,
.dstArrayElement = 0,
@@ -148,7 +160,6 @@ BuildASTCPassDescriptorUpdateTemplateEntry() {
.stride = sizeof(DescriptorUpdateEntry),
},
}};
}

struct AstcPushConstants {
std::array<u32, 2> blocks_dims;
@@ -159,14 +170,14 @@ struct AstcPushConstants {
u32 block_height;
u32 block_height_mask;
};

} // Anonymous namespace
VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
vk::Span<VkDescriptorSetLayoutBinding> bindings,
vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
vk::Span<VkPushConstantRange> push_constants,
std::span<const u32> code) {
ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
vk::Span<VkDescriptorSetLayoutBinding> bindings,
vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
const DescriptorBankInfo& bank_info,
vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code)
: device{device_} {
descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout({
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
@@ -196,8 +207,7 @@ VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_
.pipelineLayout = *layout,
.set = 0,
});

descriptor_allocator.emplace(descriptor_pool, *descriptor_set_layout);
descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, bank_info);
}
module = device.GetLogical().CreateShaderModule({
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
@@ -206,43 +216,34 @@ VKComputePass::VKComputePass(const Device& device, VKDescriptorPool& descriptor_
.codeSize = static_cast<u32>(code.size_bytes()),
.pCode = code.data(),
});
device.SaveShader(code);
pipeline = device.GetLogical().CreateComputePipeline({
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage =
{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
.module = *module,
.pName = "main",
.pSpecializationInfo = nullptr,
},
.stage{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
.module = *module,
.pName = "main",
.pSpecializationInfo = nullptr,
},
.layout = *layout,
.basePipelineHandle = nullptr,
.basePipelineIndex = 0,
});
}

VKComputePass::~VKComputePass() = default;
ComputePass::~ComputePass() = default;

VkDescriptorSet VKComputePass::CommitDescriptorSet(
VKUpdateDescriptorQueue& update_descriptor_queue) {
if (!descriptor_template) {
return nullptr;
}
const VkDescriptorSet set = descriptor_allocator->Commit();
update_descriptor_queue.Send(*descriptor_template, set);
return set;
}
Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
Uint8Pass::Uint8Pass(const Device& device_, VKScheduler& scheduler_,
DescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_)
: VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(),
BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV),
: ComputePass(device_, descriptor_pool, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO, {},
VULKAN_UINT8_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
update_descriptor_queue{update_descriptor_queue_} {}

@@ -256,11 +257,11 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
const void* const descriptor_data{update_descriptor_queue.UpdateData()};
const VkBuffer buffer{staging.buffer};

scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging.buffer, set,
num_vertices](vk::CommandBuffer cmdbuf) {
scheduler.Record([this, buffer, descriptor_data, num_vertices](vk::CommandBuffer cmdbuf) {
static constexpr u32 DISPATCH_SIZE = 1024;
static constexpr VkMemoryBarrier WRITE_BARRIER{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@@ -268,8 +269,10 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
};
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
const VkDescriptorSet set = descriptor_allocator.Commit();
device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
cmdbuf.Dispatch(Common::DivCeil(num_vertices, DISPATCH_SIZE), 1, 1);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, WRITE_BARRIER);
@@ -278,12 +281,12 @@ std::pair<VkBuffer, VkDeviceSize> Uint8Pass::Assemble(u32 num_vertices, VkBuffer
}

QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_)
: VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(),
BuildInputOutputDescriptorUpdateTemplate(),
BuildComputePushConstantRange(sizeof(u32) * 2), VULKAN_QUAD_INDEXED_COMP_SPV),
: ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO,
COMPUTE_PUSH_CONSTANT_RANGE<sizeof(u32) * 2>, VULKAN_QUAD_INDEXED_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
update_descriptor_queue{update_descriptor_queue_} {}

@@ -313,11 +316,11 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
update_descriptor_queue.Acquire();
update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
update_descriptor_queue.AddBuffer(staging.buffer, staging.offset, staging_size);
const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
const void* const descriptor_data{update_descriptor_queue.UpdateData()};

scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging.buffer, set,
num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) {
scheduler.Record([this, buffer = staging.buffer, descriptor_data, num_tri_vertices, base_vertex,
index_shift](vk::CommandBuffer cmdbuf) {
static constexpr u32 DISPATCH_SIZE = 1024;
static constexpr VkMemoryBarrier WRITE_BARRIER{
.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
@@ -325,10 +328,12 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
};
const std::array push_constants = {base_vertex, index_shift};
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
cmdbuf.PushConstants(layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
const std::array push_constants{base_vertex, index_shift};
const VkDescriptorSet set = descriptor_allocator.Commit();
device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(push_constants),
&push_constants);
cmdbuf.Dispatch(Common::DivCeil(num_tri_vertices, DISPATCH_SIZE), 1, 1);
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
@@ -338,15 +343,14 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
}

ASTCDecoderPass::ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
VKDescriptorPool& descriptor_pool_,
DescriptorPool& descriptor_pool_,
StagingBufferPool& staging_buffer_pool_,
VKUpdateDescriptorQueue& update_descriptor_queue_,
MemoryAllocator& memory_allocator_)
: VKComputePass(device_, descriptor_pool_, BuildASTCDescriptorSetBindings(),
BuildASTCPassDescriptorUpdateTemplateEntry(),
BuildComputePushConstantRange(sizeof(AstcPushConstants)),
ASTC_DECODER_COMP_SPV),
device{device_}, scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
: ComputePass(device_, descriptor_pool_, ASTC_DESCRIPTOR_SET_BINDINGS,
ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY, ASTC_BANK_INFO,
COMPUTE_PUSH_CONSTANT_RANGE<sizeof(AstcPushConstants)>, ASTC_DECODER_COMP_SPV),
scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_},
update_descriptor_queue{update_descriptor_queue_}, memory_allocator{memory_allocator_} {}
|
||||
|
||||
ASTCDecoderPass::~ASTCDecoderPass() = default;
|
||||
@@ -443,16 +447,14 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
|
||||
update_descriptor_queue.AddBuffer(*data_buffer, sizeof(ASTC_ENCODINGS_VALUES),
|
||||
sizeof(SWIZZLE_TABLE));
|
||||
update_descriptor_queue.AddImage(image.StorageImageView(swizzle.level));
|
||||
|
||||
const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
|
||||
const VkPipelineLayout vk_layout = *layout;
|
||||
const void* const descriptor_data{update_descriptor_queue.UpdateData()};
|
||||
|
||||
// To unswizzle the ASTC data
|
||||
const auto params = MakeBlockLinearSwizzle2DParams(swizzle, image.info);
|
||||
ASSERT(params.origin == (std::array<u32, 3>{0, 0, 0}));
|
||||
ASSERT(params.destination == (std::array<s32, 3>{0, 0, 0}));
|
||||
scheduler.Record([vk_layout, num_dispatches_x, num_dispatches_y, num_dispatches_z,
|
||||
block_dims, params, set](vk::CommandBuffer cmdbuf) {
|
||||
scheduler.Record([this, num_dispatches_x, num_dispatches_y, num_dispatches_z, block_dims,
|
||||
params, descriptor_data](vk::CommandBuffer cmdbuf) {
|
||||
const AstcPushConstants uniforms{
|
||||
.blocks_dims = block_dims,
|
||||
.bytes_per_block_log2 = params.bytes_per_block_log2,
|
||||
@@ -462,8 +464,10 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map,
|
||||
.block_height = params.block_height,
|
||||
.block_height_mask = params.block_height_mask,
|
||||
};
|
||||
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, vk_layout, 0, set, {});
|
||||
cmdbuf.PushConstants(vk_layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
|
||||
const VkDescriptorSet set = descriptor_allocator.Commit();
|
||||
device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
|
||||
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
|
||||
cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
|
||||
cmdbuf.Dispatch(num_dispatches_x, num_dispatches_y, num_dispatches_z);
|
||||
});
|
||||
}
|
||||
|
@@ -4,7 +4,6 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <optional>
|
||||
#include <span>
|
||||
#include <utility>
|
||||
|
||||
@@ -27,31 +26,31 @@ class VKUpdateDescriptorQueue;
|
||||
class Image;
|
||||
struct StagingBufferRef;
|
||||
|
||||
class VKComputePass {
|
||||
class ComputePass {
|
||||
public:
|
||||
explicit VKComputePass(const Device& device, VKDescriptorPool& descriptor_pool,
|
||||
vk::Span<VkDescriptorSetLayoutBinding> bindings,
|
||||
vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
|
||||
vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code);
|
||||
~VKComputePass();
|
||||
explicit ComputePass(const Device& device, DescriptorPool& descriptor_pool,
|
||||
vk::Span<VkDescriptorSetLayoutBinding> bindings,
|
||||
vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
|
||||
const DescriptorBankInfo& bank_info,
|
||||
vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code);
|
||||
~ComputePass();
|
||||
|
||||
protected:
|
||||
VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue);
|
||||
|
||||
const Device& device;
|
||||
vk::DescriptorUpdateTemplateKHR descriptor_template;
|
||||
vk::PipelineLayout layout;
|
||||
vk::Pipeline pipeline;
|
||||
vk::DescriptorSetLayout descriptor_set_layout;
|
||||
DescriptorAllocator descriptor_allocator;
|
||||
|
||||
private:
|
||||
vk::DescriptorSetLayout descriptor_set_layout;
|
||||
std::optional<DescriptorAllocator> descriptor_allocator;
|
||||
vk::ShaderModule module;
|
||||
};
|
||||
|
||||
class Uint8Pass final : public VKComputePass {
|
||||
class Uint8Pass final : public ComputePass {
|
||||
public:
|
||||
explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_,
|
||||
VKDescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_,
|
||||
DescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue_);
|
||||
~Uint8Pass();
|
||||
|
||||
@@ -66,10 +65,10 @@ private:
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue;
|
||||
};
|
||||
|
||||
class QuadIndexedPass final : public VKComputePass {
|
||||
class QuadIndexedPass final : public ComputePass {
|
||||
public:
|
||||
explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
|
||||
VKDescriptorPool& descriptor_pool_,
|
||||
DescriptorPool& descriptor_pool_,
|
||||
StagingBufferPool& staging_buffer_pool_,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue_);
|
||||
~QuadIndexedPass();
|
||||
@@ -84,10 +83,10 @@ private:
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue;
|
||||
};
|
||||
|
||||
class ASTCDecoderPass final : public VKComputePass {
|
||||
class ASTCDecoderPass final : public ComputePass {
|
||||
public:
|
||||
explicit ASTCDecoderPass(const Device& device_, VKScheduler& scheduler_,
|
||||
VKDescriptorPool& descriptor_pool_,
|
||||
DescriptorPool& descriptor_pool_,
|
||||
StagingBufferPool& staging_buffer_pool_,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue_,
|
||||
MemoryAllocator& memory_allocator_);
|
||||
@@ -99,7 +98,6 @@ public:
|
||||
private:
|
||||
void MakeDataBuffer();
|
||||
|
||||
const Device& device;
|
||||
VKScheduler& scheduler;
|
||||
StagingBufferPool& staging_buffer_pool;
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue;
|
||||
|
@@ -2,152 +2,198 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
|
||||
#include <boost/container/small_vector.hpp>
|
||||
|
||||
#include "video_core/renderer_vulkan/pipeline_helper.h"
|
||||
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
|
||||
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
|
||||
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_scheduler.h"
|
||||
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
|
||||
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
|
||||
#include "video_core/shader_notify.h"
|
||||
#include "video_core/vulkan_common/vulkan_device.h"
|
||||
#include "video_core/vulkan_common/vulkan_wrapper.h"
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
VKComputePipeline::VKComputePipeline(const Device& device_, VKScheduler& scheduler_,
|
||||
VKDescriptorPool& descriptor_pool_,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue_,
|
||||
const SPIRVShader& shader_)
|
||||
: device{device_}, scheduler{scheduler_}, entries{shader_.entries},
|
||||
descriptor_set_layout{CreateDescriptorSetLayout()},
|
||||
descriptor_allocator{descriptor_pool_, *descriptor_set_layout},
|
||||
update_descriptor_queue{update_descriptor_queue_}, layout{CreatePipelineLayout()},
|
||||
descriptor_template{CreateDescriptorUpdateTemplate()},
|
||||
shader_module{CreateShaderModule(shader_.code)}, pipeline{CreatePipeline()} {}
|
||||
using Shader::ImageBufferDescriptor;
|
||||
using Tegra::Texture::TexturePair;
|
||||
|
||||
VKComputePipeline::~VKComputePipeline() = default;
|
||||
|
||||
VkDescriptorSet VKComputePipeline::CommitDescriptorSet() {
|
||||
if (!descriptor_template) {
|
||||
return {};
|
||||
ComputePipeline::ComputePipeline(const Device& device_, DescriptorPool& descriptor_pool,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue_,
|
||||
Common::ThreadWorker* thread_worker,
|
||||
VideoCore::ShaderNotify* shader_notify, const Shader::Info& info_,
|
||||
vk::ShaderModule spv_module_)
|
||||
: device{device_}, update_descriptor_queue{update_descriptor_queue_}, info{info_},
|
||||
spv_module(std::move(spv_module_)) {
|
||||
if (shader_notify) {
|
||||
shader_notify->MarkShaderBuilding();
|
||||
}
|
||||
const VkDescriptorSet set = descriptor_allocator.Commit();
|
||||
update_descriptor_queue.Send(*descriptor_template, set);
|
||||
return set;
|
||||
}
|
||||
std::copy_n(info.constant_buffer_used_sizes.begin(), uniform_buffer_sizes.size(),
|
||||
uniform_buffer_sizes.begin());
|
||||
|
||||
vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
|
||||
std::vector<VkDescriptorSetLayoutBinding> bindings;
|
||||
u32 binding = 0;
|
||||
const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
|
||||
// TODO(Rodrigo): Maybe make individual bindings here?
|
||||
for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
|
||||
bindings.push_back({
|
||||
.binding = binding++,
|
||||
.descriptorType = descriptor_type,
|
||||
.descriptorCount = 1,
|
||||
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
|
||||
.pImmutableSamplers = nullptr,
|
||||
});
|
||||
}
|
||||
};
|
||||
add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
|
||||
add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
|
||||
add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.uniform_texels.size());
|
||||
add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
|
||||
add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
|
||||
add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
|
||||
auto func{[this, &descriptor_pool, shader_notify] {
|
||||
DescriptorLayoutBuilder builder{device};
|
||||
builder.Add(info, VK_SHADER_STAGE_COMPUTE_BIT);
|
||||
|
||||
return device.GetLogical().CreateDescriptorSetLayout({
|
||||
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
|
||||
.pNext = nullptr,
|
||||
.flags = 0,
|
||||
.bindingCount = static_cast<u32>(bindings.size()),
|
||||
.pBindings = bindings.data(),
|
||||
});
|
||||
}
|
||||
|
||||
vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
|
||||
return device.GetLogical().CreatePipelineLayout({
|
||||
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
|
||||
.pNext = nullptr,
|
||||
.flags = 0,
|
||||
.setLayoutCount = 1,
|
||||
.pSetLayouts = descriptor_set_layout.address(),
|
||||
.pushConstantRangeCount = 0,
|
||||
.pPushConstantRanges = nullptr,
|
||||
});
|
||||
}
|
||||
|
||||
vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
|
||||
std::vector<VkDescriptorUpdateTemplateEntryKHR> template_entries;
|
||||
u32 binding = 0;
|
||||
u32 offset = 0;
|
||||
FillDescriptorUpdateTemplateEntries(entries, binding, offset, template_entries);
|
||||
if (template_entries.empty()) {
|
||||
// If the shader doesn't use descriptor sets, skip template creation.
|
||||
return {};
|
||||
}
|
||||
|
||||
return device.GetLogical().CreateDescriptorUpdateTemplateKHR({
|
||||
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
|
||||
.pNext = nullptr,
|
||||
.flags = 0,
|
||||
.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size()),
|
||||
.pDescriptorUpdateEntries = template_entries.data(),
|
||||
.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
|
||||
.descriptorSetLayout = *descriptor_set_layout,
|
||||
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
|
||||
.pipelineLayout = *layout,
|
||||
.set = DESCRIPTOR_SET,
|
||||
});
|
||||
}
|
||||
|
||||
vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
|
||||
device.SaveShader(code);
|
||||
|
||||
return device.GetLogical().CreateShaderModule({
|
||||
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
|
||||
.pNext = nullptr,
|
||||
.flags = 0,
|
||||
.codeSize = code.size() * sizeof(u32),
|
||||
.pCode = code.data(),
|
||||
});
|
||||
}
|
||||
|
||||
vk::Pipeline VKComputePipeline::CreatePipeline() const {
|
||||
|
||||
VkComputePipelineCreateInfo ci{
|
||||
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
|
||||
.pNext = nullptr,
|
||||
.flags = 0,
|
||||
.stage =
|
||||
{
|
||||
descriptor_set_layout = builder.CreateDescriptorSetLayout(false);
|
||||
pipeline_layout = builder.CreatePipelineLayout(*descriptor_set_layout);
|
||||
descriptor_update_template =
|
||||
builder.CreateTemplate(*descriptor_set_layout, *pipeline_layout, false);
|
||||
descriptor_allocator = descriptor_pool.Allocator(*descriptor_set_layout, info);
|
||||
const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
|
||||
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
|
||||
.pNext = nullptr,
|
||||
.requiredSubgroupSize = GuestWarpSize,
|
||||
};
|
||||
pipeline = device.GetLogical().CreateComputePipeline({
|
||||
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
|
||||
.pNext = nullptr,
|
||||
.flags = 0,
|
||||
.stage{
|
||||
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
|
||||
.pNext = nullptr,
|
||||
.pNext = device.IsExtSubgroupSizeControlSupported() ? &subgroup_size_ci : nullptr,
|
||||
.flags = 0,
|
||||
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
|
||||
.module = *shader_module,
|
||||
.module = *spv_module,
|
||||
.pName = "main",
|
||||
.pSpecializationInfo = nullptr,
|
||||
},
|
||||
.layout = *layout,
|
||||
.basePipelineHandle = nullptr,
|
||||
.basePipelineIndex = 0,
|
||||
};
|
||||
.layout = *pipeline_layout,
|
||||
.basePipelineHandle = 0,
|
||||
.basePipelineIndex = 0,
|
||||
});
|
||||
std::lock_guard lock{build_mutex};
|
||||
is_built = true;
|
||||
build_condvar.notify_one();
|
||||
if (shader_notify) {
|
||||
shader_notify->MarkShaderComplete();
|
||||
}
|
||||
}};
|
||||
if (thread_worker) {
|
||||
thread_worker->QueueWork(std::move(func));
|
||||
} else {
|
||||
func();
|
||||
}
|
||||
}
|
||||
|
||||
const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
|
||||
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
|
||||
.pNext = nullptr,
|
||||
.requiredSubgroupSize = GuestWarpSize,
|
||||
};
|
||||
void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
|
||||
Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
|
||||
BufferCache& buffer_cache, TextureCache& texture_cache) {
|
||||
update_descriptor_queue.Acquire();
|
||||
|
||||
if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
|
||||
ci.stage.pNext = &subgroup_size_ci;
|
||||
buffer_cache.SetComputeUniformBufferState(info.constant_buffer_mask, &uniform_buffer_sizes);
|
||||
buffer_cache.UnbindComputeStorageBuffers();
|
||||
size_t ssbo_index{};
|
||||
for (const auto& desc : info.storage_buffers_descriptors) {
|
||||
ASSERT(desc.count == 1);
|
||||
buffer_cache.BindComputeStorageBuffer(ssbo_index, desc.cbuf_index, desc.cbuf_offset,
|
||||
desc.is_written);
|
||||
++ssbo_index;
|
||||
}
|
||||
|
||||
return device.GetLogical().CreateComputePipeline(ci);
|
||||
texture_cache.SynchronizeComputeDescriptors();
|
||||
|
||||
static constexpr size_t max_elements = 64;
|
||||
std::array<ImageId, max_elements> image_view_ids;
|
||||
boost::container::static_vector<u32, max_elements> image_view_indices;
|
||||
boost::container::static_vector<VkSampler, max_elements> samplers;
|
||||
|
||||
const auto& qmd{kepler_compute.launch_description};
|
||||
const auto& cbufs{qmd.const_buffer_config};
|
||||
const bool via_header_index{qmd.linked_tsc != 0};
|
||||
const auto read_handle{[&](const auto& desc, u32 index) {
|
||||
ASSERT(((qmd.const_buffer_enable_mask >> desc.cbuf_index) & 1) != 0);
|
||||
const u32 index_offset{index << desc.size_shift};
|
||||
const u32 offset{desc.cbuf_offset + index_offset};
|
||||
const GPUVAddr addr{cbufs[desc.cbuf_index].Address() + offset};
|
||||
if constexpr (std::is_same_v<decltype(desc), const Shader::TextureDescriptor&> ||
|
||||
std::is_same_v<decltype(desc), const Shader::TextureBufferDescriptor&>) {
|
||||
if (desc.has_secondary) {
|
||||
ASSERT(((qmd.const_buffer_enable_mask >> desc.secondary_cbuf_index) & 1) != 0);
|
||||
const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
|
||||
const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
|
||||
secondary_offset};
|
||||
const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
|
||||
const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
|
||||
return TexturePair(lhs_raw | rhs_raw, via_header_index);
|
||||
}
|
||||
}
|
||||
return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
|
||||
}};
|
||||
const auto add_image{[&](const auto& desc) {
|
||||
for (u32 index = 0; index < desc.count; ++index) {
|
||||
const auto handle{read_handle(desc, index)};
|
||||
image_view_indices.push_back(handle.first);
|
||||
}
|
||||
}};
|
||||
std::ranges::for_each(info.texture_buffer_descriptors, add_image);
|
||||
std::ranges::for_each(info.image_buffer_descriptors, add_image);
|
||||
for (const auto& desc : info.texture_descriptors) {
|
||||
for (u32 index = 0; index < desc.count; ++index) {
|
||||
const auto handle{read_handle(desc, index)};
|
||||
image_view_indices.push_back(handle.first);
|
||||
|
||||
Sampler* const sampler = texture_cache.GetComputeSampler(handle.second);
|
||||
samplers.push_back(sampler->Handle());
|
||||
}
|
||||
}
|
||||
std::ranges::for_each(info.image_descriptors, add_image);
|
||||
|
||||
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
|
||||
texture_cache.FillComputeImageViews(indices_span, image_view_ids);
|
||||
|
||||
buffer_cache.UnbindComputeTextureBuffers();
|
||||
ImageId* texture_buffer_ids{image_view_ids.data()};
|
||||
size_t index{};
|
||||
const auto add_buffer{[&](const auto& desc) {
|
||||
constexpr bool is_image = std::is_same_v<decltype(desc), const ImageBufferDescriptor&>;
|
||||
for (u32 i = 0; i < desc.count; ++i) {
|
||||
bool is_written{false};
|
||||
if constexpr (is_image) {
|
||||
is_written = desc.is_written;
|
||||
}
|
||||
ImageView& image_view = texture_cache.GetImageView(*texture_buffer_ids);
|
||||
buffer_cache.BindComputeTextureBuffer(index, image_view.GpuAddr(),
|
||||
image_view.BufferSize(), image_view.format,
|
||||
is_written, is_image);
|
||||
++texture_buffer_ids;
|
||||
++index;
|
||||
}
|
||||
}};
|
||||
std::ranges::for_each(info.texture_buffer_descriptors, add_buffer);
|
||||
std::ranges::for_each(info.image_buffer_descriptors, add_buffer);
|
||||
|
||||
buffer_cache.UpdateComputeBuffers();
|
||||
buffer_cache.BindHostComputeBuffers();
|
||||
|
||||
const VkSampler* samplers_it{samplers.data()};
|
||||
const ImageId* views_it{image_view_ids.data()};
|
||||
PushImageDescriptors(info, samplers_it, views_it, texture_cache, update_descriptor_queue);
|
||||
|
||||
if (!is_built.load(std::memory_order::relaxed)) {
|
||||
// Wait for the pipeline to be built
|
||||
scheduler.Record([this](vk::CommandBuffer) {
|
||||
std::unique_lock lock{build_mutex};
|
||||
build_condvar.wait(lock, [this] { return is_built.load(std::memory_order::relaxed); });
|
||||
});
|
||||
}
|
||||
const void* const descriptor_data{update_descriptor_queue.UpdateData()};
|
||||
scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) {
|
||||
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
|
||||
if (!descriptor_set_layout) {
|
||||
return;
|
||||
}
|
||||
const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
|
||||
const vk::Device& dev{device.GetLogical()};
|
||||
dev.UpdateDescriptorSet(descriptor_set, *descriptor_update_template, descriptor_data);
|
||||
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
|
||||
descriptor_set, nullptr);
|
||||
});
|
||||
}
|
||||
|
||||
} // namespace Vulkan
|
||||
|
@@ -4,61 +4,63 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/thread_worker.h"
|
||||
#include "shader_recompiler/shader_info.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
|
||||
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
|
||||
#include "video_core/renderer_vulkan/vk_texture_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
|
||||
#include "video_core/vulkan_common/vulkan_wrapper.h"
|
||||
|
||||
namespace VideoCore {
|
||||
class ShaderNotify;
|
||||
}
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
class Device;
|
||||
class VKScheduler;
|
||||
class VKUpdateDescriptorQueue;
|
||||
|
||||
class VKComputePipeline final {
|
||||
class ComputePipeline {
|
||||
public:
|
||||
explicit VKComputePipeline(const Device& device_, VKScheduler& scheduler_,
|
||||
VKDescriptorPool& descriptor_pool_,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue_,
|
||||
const SPIRVShader& shader_);
|
||||
~VKComputePipeline();
|
||||
explicit ComputePipeline(const Device& device, DescriptorPool& descriptor_pool,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue,
|
||||
Common::ThreadWorker* thread_worker,
|
||||
VideoCore::ShaderNotify* shader_notify, const Shader::Info& info,
|
||||
vk::ShaderModule spv_module);
|
||||
|
||||
VkDescriptorSet CommitDescriptorSet();
|
||||
ComputePipeline& operator=(ComputePipeline&&) noexcept = delete;
|
||||
ComputePipeline(ComputePipeline&&) noexcept = delete;
|
||||
|
||||
VkPipeline GetHandle() const {
|
||||
return *pipeline;
|
||||
}
|
||||
ComputePipeline& operator=(const ComputePipeline&) = delete;
|
||||
ComputePipeline(const ComputePipeline&) = delete;
|
||||
|
||||
VkPipelineLayout GetLayout() const {
|
||||
return *layout;
|
||||
}
|
||||
|
||||
const ShaderEntries& GetEntries() const {
|
||||
return entries;
|
||||
}
|
||||
void Configure(Tegra::Engines::KeplerCompute& kepler_compute, Tegra::MemoryManager& gpu_memory,
|
||||
VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache);
|
||||
|
||||
private:
|
||||
vk::DescriptorSetLayout CreateDescriptorSetLayout() const;
|
||||
|
||||
vk::PipelineLayout CreatePipelineLayout() const;
|
||||
|
||||
vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate() const;
|
||||
|
||||
vk::ShaderModule CreateShaderModule(const std::vector<u32>& code) const;
|
||||
|
||||
vk::Pipeline CreatePipeline() const;
|
||||
|
||||
const Device& device;
|
||||
VKScheduler& scheduler;
|
||||
ShaderEntries entries;
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue;
|
||||
Shader::Info info;
|
||||
|
||||
VideoCommon::ComputeUniformBufferSizes uniform_buffer_sizes{};
|
||||
|
||||
vk::ShaderModule spv_module;
|
||||
vk::DescriptorSetLayout descriptor_set_layout;
|
||||
DescriptorAllocator descriptor_allocator;
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue;
|
||||
vk::PipelineLayout layout;
|
||||
vk::DescriptorUpdateTemplateKHR descriptor_template;
|
||||
vk::ShaderModule shader_module;
|
||||
vk::PipelineLayout pipeline_layout;
|
||||
vk::DescriptorUpdateTemplateKHR descriptor_update_template;
|
||||
vk::Pipeline pipeline;
|
||||
|
||||
std::condition_variable build_condvar;
|
||||
std::mutex build_mutex;
|
||||
std::atomic_bool is_built{false};
|
||||
};
|
||||
|
||||
} // namespace Vulkan
|
||||
|
@@ -2,6 +2,8 @@
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <mutex>
|
||||
#include <span>
|
||||
#include <vector>
|
||||
|
||||
#include "common/common_types.h"
|
||||
@@ -13,79 +15,149 @@
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
// Prefer small grow rates to avoid saturating the descriptor pool with barely used pipelines.
|
||||
constexpr std::size_t SETS_GROW_RATE = 0x20;
|
||||
// Prefer small grow rates to avoid saturating the descriptor pool with barely used pipelines
|
||||
constexpr size_t SETS_GROW_RATE = 16;
|
||||
constexpr s32 SCORE_THRESHOLD = 3;
|
||||
constexpr u32 SETS_PER_POOL = 64;
|
||||
|
||||
DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool_,
|
||||
VkDescriptorSetLayout layout_)
|
||||
: ResourcePool(descriptor_pool_.master_semaphore, SETS_GROW_RATE),
|
||||
descriptor_pool{descriptor_pool_}, layout{layout_} {}
|
||||
struct DescriptorBank {
|
||||
DescriptorBankInfo info;
|
||||
std::vector<vk::DescriptorPool> pools;
|
||||
};
|
||||
|
||||
DescriptorAllocator::~DescriptorAllocator() = default;
|
||||
|
||||
VkDescriptorSet DescriptorAllocator::Commit() {
|
||||
const std::size_t index = CommitResource();
|
||||
return descriptors_allocations[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
|
||||
bool DescriptorBankInfo::IsSuperset(const DescriptorBankInfo& subset) const noexcept {
|
||||
return uniform_buffers >= subset.uniform_buffers && storage_buffers >= subset.storage_buffers &&
|
||||
texture_buffers >= subset.texture_buffers && image_buffers >= subset.image_buffers &&
|
||||
textures >= subset.textures && images >= subset.image_buffers;
|
||||
}
|
||||
|
||||
void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) {
|
||||
descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin));
|
||||
template <typename Descriptors>
|
||||
static u32 Accumulate(const Descriptors& descriptors) {
|
||||
u32 count = 0;
|
||||
for (const auto& descriptor : descriptors) {
|
||||
count += descriptor.count;
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
VKDescriptorPool::VKDescriptorPool(const Device& device_, VKScheduler& scheduler)
|
||||
: device{device_}, master_semaphore{scheduler.GetMasterSemaphore()}, active_pool{
|
||||
AllocateNewPool()} {}
|
||||
static DescriptorBankInfo MakeBankInfo(std::span<const Shader::Info> infos) {
|
||||
DescriptorBankInfo bank;
|
||||
for (const Shader::Info& info : infos) {
|
||||
bank.uniform_buffers += Accumulate(info.constant_buffer_descriptors);
|
||||
bank.storage_buffers += Accumulate(info.storage_buffers_descriptors);
|
||||
bank.texture_buffers += Accumulate(info.texture_buffer_descriptors);
|
||||
bank.image_buffers += Accumulate(info.image_buffer_descriptors);
|
||||
bank.textures += Accumulate(info.texture_descriptors);
|
||||
bank.images += Accumulate(info.image_descriptors);
|
||||
}
|
||||
bank.score = bank.uniform_buffers + bank.storage_buffers + bank.texture_buffers +
|
||||
bank.image_buffers + bank.textures + bank.images;
|
||||
return bank;
|
||||
}
|
||||
|
||||
VKDescriptorPool::~VKDescriptorPool() = default;
|
||||
|
||||
vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
|
||||
static constexpr u32 num_sets = 0x20000;
|
||||
static constexpr VkDescriptorPoolSize pool_sizes[] = {
|
||||
{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, num_sets * 90},
|
||||
{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
|
||||
{VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
|
||||
{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
|
||||
{VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
|
||||
{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40},
|
||||
static void AllocatePool(const Device& device, DescriptorBank& bank) {
|
||||
std::array<VkDescriptorPoolSize, 6> pool_sizes;
|
||||
size_t pool_cursor{};
|
||||
const auto add = [&](VkDescriptorType type, u32 count) {
|
||||
if (count > 0) {
|
||||
pool_sizes[pool_cursor++] = {
|
||||
.type = type,
|
||||
.descriptorCount = count * SETS_PER_POOL,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
const VkDescriptorPoolCreateInfo ci{
|
||||
const auto& info{bank.info};
|
||||
add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, info.uniform_buffers);
|
||||
add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, info.storage_buffers);
|
||||
add(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, info.texture_buffers);
|
||||
add(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, info.image_buffers);
|
||||
add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, info.textures);
|
||||
add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, info.images);
|
||||
bank.pools.push_back(device.GetLogical().CreateDescriptorPool({
|
||||
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
|
||||
.pNext = nullptr,
|
||||
.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
|
||||
.maxSets = num_sets,
|
||||
.poolSizeCount = static_cast<u32>(std::size(pool_sizes)),
|
||||
.maxSets = SETS_PER_POOL,
|
||||
.poolSizeCount = static_cast<u32>(pool_cursor),
|
||||
.pPoolSizes = std::data(pool_sizes),
|
||||
};
|
||||
return &pools.emplace_back(device.GetLogical().CreateDescriptorPool(ci));
|
||||
}));
|
||||
}
|
||||
|
||||
vk::DescriptorSets VKDescriptorPool::AllocateDescriptors(VkDescriptorSetLayout layout,
|
||||
std::size_t count) {
|
||||
const std::vector layout_copies(count, layout);
|
||||
VkDescriptorSetAllocateInfo ai{
|
||||
DescriptorAllocator::DescriptorAllocator(const Device& device_, MasterSemaphore& master_semaphore_,
|
||||
DescriptorBank& bank_, VkDescriptorSetLayout layout_)
|
||||
: ResourcePool(master_semaphore_, SETS_GROW_RATE), device{&device_}, bank{&bank_},
|
||||
layout{layout_} {}
|
||||
|
||||
VkDescriptorSet DescriptorAllocator::Commit() {
|
||||
const size_t index = CommitResource();
|
||||
return sets[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
|
||||
}
|
||||
|
||||
void DescriptorAllocator::Allocate(size_t begin, size_t end) {
|
||||
sets.push_back(AllocateDescriptors(end - begin));
|
||||
}
|
||||
|
||||
vk::DescriptorSets DescriptorAllocator::AllocateDescriptors(size_t count) {
|
||||
const std::vector<VkDescriptorSetLayout> layouts(count, layout);
|
||||
VkDescriptorSetAllocateInfo allocate_info{
|
||||
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
|
||||
.pNext = nullptr,
|
||||
.descriptorPool = **active_pool,
|
||||
.descriptorPool = *bank->pools.back(),
|
||||
.descriptorSetCount = static_cast<u32>(count),
|
||||
.pSetLayouts = layout_copies.data(),
|
||||
.pSetLayouts = layouts.data(),
|
||||
};
|
||||
|
||||
vk::DescriptorSets sets = active_pool->Allocate(ai);
|
||||
if (!sets.IsOutOfPoolMemory()) {
|
||||
return sets;
|
||||
vk::DescriptorSets new_sets = bank->pools.back().Allocate(allocate_info);
|
||||
if (!new_sets.IsOutOfPoolMemory()) {
|
||||
return new_sets;
|
||||
}
|
||||
|
||||
// Our current pool is out of memory. Allocate a new one and retry
|
||||
active_pool = AllocateNewPool();
|
||||
ai.descriptorPool = **active_pool;
|
||||
sets = active_pool->Allocate(ai);
|
||||
if (!sets.IsOutOfPoolMemory()) {
|
||||
return sets;
|
||||
AllocatePool(*device, *bank);
|
||||
allocate_info.descriptorPool = *bank->pools.back();
|
||||
new_sets = bank->pools.back().Allocate(allocate_info);
|
||||
if (!new_sets.IsOutOfPoolMemory()) {
|
||||
return new_sets;
|
||||
}
|
||||
|
||||
// After allocating a new pool, we are out of memory again. We can't handle this from here.
|
||||
throw vk::Exception(VK_ERROR_OUT_OF_POOL_MEMORY);
|
||||
}
|
||||
|
||||
DescriptorPool::DescriptorPool(const Device& device_, VKScheduler& scheduler)
|
||||
: device{device_}, master_semaphore{scheduler.GetMasterSemaphore()} {}
|
||||
|
||||
DescriptorPool::~DescriptorPool() = default;
|
||||
|
||||
DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
|
||||
std::span<const Shader::Info> infos) {
|
||||
return Allocator(layout, MakeBankInfo(infos));
|
||||
}
|
||||
|
||||
DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
|
||||
const Shader::Info& info) {
|
||||
return Allocator(layout, MakeBankInfo(std::array{info}));
|
||||
}
|
||||
|
||||
DescriptorAllocator DescriptorPool::Allocator(VkDescriptorSetLayout layout,
|
||||
const DescriptorBankInfo& info) {
|
||||
return DescriptorAllocator(device, master_semaphore, Bank(info), layout);
|
||||
}
|
||||
|
||||
DescriptorBank& DescriptorPool::Bank(const DescriptorBankInfo& reqs) {
|
||||
std::shared_lock read_lock{banks_mutex};
|
||||
const auto it = std::ranges::find_if(bank_infos, [&reqs](const DescriptorBankInfo& bank) {
|
||||
return std::abs(bank.score - reqs.score) < SCORE_THRESHOLD && bank.IsSuperset(reqs);
|
||||
});
|
||||
if (it != bank_infos.end()) {
|
||||
return *banks[std::distance(bank_infos.begin(), it)].get();
|
||||
}
|
||||
read_lock.unlock();
|
||||
|
||||
std::unique_lock write_lock{banks_mutex};
|
||||
bank_infos.push_back(reqs);
|
||||
|
||||
auto& bank = *banks.emplace_back(std::make_unique<DescriptorBank>());
|
||||
bank.info = reqs;
|
||||
AllocatePool(device, bank);
|
||||
return bank;
|
||||
}
|
||||
|
||||
} // namespace Vulkan
|
||||
|
@@ -4,57 +4,85 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <shared_mutex>
|
||||
#include <span>
|
||||
#include <vector>
|
||||
|
||||
#include "shader_recompiler/shader_info.h"
|
||||
#include "video_core/renderer_vulkan/vk_resource_pool.h"
|
||||
#include "video_core/vulkan_common/vulkan_wrapper.h"
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
class Device;
|
||||
class VKDescriptorPool;
|
||||
class VKScheduler;
|
||||
|
||||
struct DescriptorBank;
|
||||
|
||||
struct DescriptorBankInfo {
|
||||
[[nodiscard]] bool IsSuperset(const DescriptorBankInfo& subset) const noexcept;
|
||||
|
||||
u32 uniform_buffers{}; ///< Number of uniform buffer descriptors
|
||||
u32 storage_buffers{}; ///< Number of storage buffer descriptors
|
||||
u32 texture_buffers{}; ///< Number of texture buffer descriptors
|
||||
u32 image_buffers{}; ///< Number of image buffer descriptors
|
||||
u32 textures{}; ///< Number of texture descriptors
|
||||
u32 images{}; ///< Number of image descriptors
|
||||
s32 score{}; ///< Number of descriptors in total
|
||||
};
|
||||
|
||||
class DescriptorAllocator final : public ResourcePool {
|
||||
friend class DescriptorPool;
|
||||
|
||||
public:
|
||||
explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, VkDescriptorSetLayout layout);
|
||||
~DescriptorAllocator() override;
|
||||
explicit DescriptorAllocator() = default;
|
||||
~DescriptorAllocator() override = default;
|
||||
|
||||
DescriptorAllocator& operator=(DescriptorAllocator&&) noexcept = default;
|
||||
DescriptorAllocator(DescriptorAllocator&&) noexcept = default;
|
||||
|
||||
DescriptorAllocator& operator=(const DescriptorAllocator&) = delete;
|
||||
DescriptorAllocator(const DescriptorAllocator&) = delete;
|
||||
|
||||
VkDescriptorSet Commit();
|
||||
|
||||
protected:
|
||||
void Allocate(std::size_t begin, std::size_t end) override;
|
||||
|
||||
private:
|
||||
VKDescriptorPool& descriptor_pool;
|
||||
const VkDescriptorSetLayout layout;
|
||||
explicit DescriptorAllocator(const Device& device_, MasterSemaphore& master_semaphore_,
|
||||
DescriptorBank& bank_, VkDescriptorSetLayout layout_);
|
||||
|
||||
std::vector<vk::DescriptorSets> descriptors_allocations;
|
||||
void Allocate(size_t begin, size_t end) override;
|
||||
|
||||
vk::DescriptorSets AllocateDescriptors(size_t count);
|
||||
|
||||
const Device* device{};
|
||||
DescriptorBank* bank{};
|
||||
VkDescriptorSetLayout layout{};
|
||||
|
||||
std::vector<vk::DescriptorSets> sets;
|
||||
};
|
||||
|
||||
class VKDescriptorPool final {
|
||||
friend DescriptorAllocator;
|
||||
|
||||
class DescriptorPool {
|
||||
public:
|
||||
explicit VKDescriptorPool(const Device& device, VKScheduler& scheduler);
|
||||
~VKDescriptorPool();
|
||||
explicit DescriptorPool(const Device& device, VKScheduler& scheduler);
|
||||
~DescriptorPool();
|
||||
|
||||
VKDescriptorPool(const VKDescriptorPool&) = delete;
|
||||
VKDescriptorPool& operator=(const VKDescriptorPool&) = delete;
|
||||
DescriptorPool& operator=(const DescriptorPool&) = delete;
|
||||
DescriptorPool(const DescriptorPool&) = delete;
|
||||
|
||||
DescriptorAllocator Allocator(VkDescriptorSetLayout layout,
|
||||
std::span<const Shader::Info> infos);
|
||||
DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const Shader::Info& info);
|
||||
DescriptorAllocator Allocator(VkDescriptorSetLayout layout, const DescriptorBankInfo& info);
|
||||
|
||||
private:
|
||||
vk::DescriptorPool* AllocateNewPool();
|
||||
|
||||
vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count);
|
||||
DescriptorBank& Bank(const DescriptorBankInfo& reqs);
|
||||
|
||||
const Device& device;
|
||||
MasterSemaphore& master_semaphore;
|
||||
|
||||
std::vector<vk::DescriptorPool> pools;
|
||||
vk::DescriptorPool* active_pool;
|
||||
std::shared_mutex banks_mutex;
|
||||
std::vector<DescriptorBankInfo> bank_infos;
|
||||
std::vector<std::unique_ptr<DescriptorBank>> banks;
|
||||
};
|
||||
|
||||
} // namespace Vulkan
|
File diff suppressed because it is too large
Load Diff
@@ -1,30 +1,36 @@
|
||||
// Copyright 2019 yuzu Emulator Project
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <optional>
|
||||
#include <vector>
|
||||
#include <atomic>
|
||||
#include <condition_variable>
|
||||
#include <mutex>
|
||||
#include <type_traits>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "common/thread_worker.h"
|
||||
#include "shader_recompiler/shader_info.h"
|
||||
#include "video_core/engines/maxwell_3d.h"
|
||||
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
|
||||
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
|
||||
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
|
||||
#include "video_core/renderer_vulkan/vk_texture_cache.h"
|
||||
#include "video_core/vulkan_common/vulkan_wrapper.h"
|
||||
|
||||
namespace VideoCore {
|
||||
class ShaderNotify;
|
||||
}
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
|
||||
|
||||
struct GraphicsPipelineCacheKey {
|
||||
VkRenderPass renderpass;
|
||||
std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
|
||||
FixedPipelineState fixed_state;
|
||||
std::array<u64, 6> unique_hashes;
|
||||
FixedPipelineState state;
|
||||
|
||||
std::size_t Hash() const noexcept;
|
||||
size_t Hash() const noexcept;
|
||||
|
||||
bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept;
|
||||
|
||||
@@ -32,72 +38,117 @@ struct GraphicsPipelineCacheKey {
|
||||
return !operator==(rhs);
|
||||
}
|
||||
|
||||
std::size_t Size() const noexcept {
|
||||
return sizeof(renderpass) + sizeof(shaders) + fixed_state.Size();
|
||||
size_t Size() const noexcept {
|
||||
return sizeof(unique_hashes) + state.Size();
|
||||
}
|
||||
};
|
||||
static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>);
|
||||
static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>);
|
||||
static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>);
|
||||
|
||||
} // namespace Vulkan
|
||||
|
||||
namespace std {
|
||||
template <>
|
||||
struct hash<Vulkan::GraphicsPipelineCacheKey> {
|
||||
size_t operator()(const Vulkan::GraphicsPipelineCacheKey& k) const noexcept {
|
||||
return k.Hash();
|
||||
}
|
||||
};
|
||||
} // namespace std
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
class Device;
|
||||
class VKDescriptorPool;
|
||||
class RenderPassCache;
|
||||
class VKScheduler;
|
||||
class VKUpdateDescriptorQueue;
|
||||
|
||||
using SPIRVProgram = std::array<std::optional<SPIRVShader>, Maxwell::MaxShaderStage>;
|
||||
class GraphicsPipeline {
|
||||
static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
|
||||
|
||||
class VKGraphicsPipeline final {
|
||||
public:
|
||||
explicit VKGraphicsPipeline(const Device& device_, VKScheduler& scheduler_,
|
||||
VKDescriptorPool& descriptor_pool,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue_,
|
||||
const GraphicsPipelineCacheKey& key,
|
||||
vk::Span<VkDescriptorSetLayoutBinding> bindings,
|
||||
const SPIRVProgram& program, u32 num_color_buffers);
|
||||
~VKGraphicsPipeline();
|
||||
explicit GraphicsPipeline(
|
||||
Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
|
||||
VKScheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
|
||||
VideoCore::ShaderNotify* shader_notify, const Device& device,
|
||||
DescriptorPool& descriptor_pool, VKUpdateDescriptorQueue& update_descriptor_queue,
|
||||
Common::ThreadWorker* worker_thread, RenderPassCache& render_pass_cache,
|
||||
const GraphicsPipelineCacheKey& key, std::array<vk::ShaderModule, NUM_STAGES> stages,
|
||||
const std::array<const Shader::Info*, NUM_STAGES>& infos);
|
||||
|
||||
VkDescriptorSet CommitDescriptorSet();
|
||||
GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
|
||||
GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;
|
||||
|
||||
VkPipeline GetHandle() const {
|
||||
return *pipeline;
|
||||
GraphicsPipeline& operator=(const GraphicsPipeline&) = delete;
|
||||
GraphicsPipeline(const GraphicsPipeline&) = delete;
|
||||
|
||||
void AddTransition(GraphicsPipeline* transition);
|
||||
|
||||
void Configure(bool is_indexed) {
|
||||
configure_func(this, is_indexed);
|
||||
}
|
||||
|
||||
VkPipelineLayout GetLayout() const {
|
||||
return *layout;
|
||||
[[nodiscard]] GraphicsPipeline* Next(const GraphicsPipelineCacheKey& current_key) noexcept {
|
||||
if (key == current_key) {
|
||||
return this;
|
||||
}
|
||||
const auto it{std::find(transition_keys.begin(), transition_keys.end(), current_key)};
|
||||
return it != transition_keys.end() ? transitions[std::distance(transition_keys.begin(), it)]
|
||||
: nullptr;
|
||||
}
|
||||
|
||||
GraphicsPipelineCacheKey GetCacheKey() const {
|
||||
return cache_key;
|
||||
[[nodiscard]] bool IsBuilt() const noexcept {
|
||||
return is_built.load(std::memory_order::relaxed);
|
||||
}
|
||||
|
||||
template <typename Spec>
|
||||
static auto MakeConfigureSpecFunc() {
|
||||
return [](GraphicsPipeline* pipeline, bool is_indexed) {
|
||||
pipeline->ConfigureImpl<Spec>(is_indexed);
|
||||
};
|
||||
}
|
||||
|
||||
private:
|
||||
vk::DescriptorSetLayout CreateDescriptorSetLayout(
|
||||
vk::Span<VkDescriptorSetLayoutBinding> bindings) const;
|
||||
template <typename Spec>
|
||||
void ConfigureImpl(bool is_indexed);
|
||||
|
||||
vk::PipelineLayout CreatePipelineLayout() const;
|
||||
void ConfigureDraw();
|
||||
|
||||
vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate(
|
||||
const SPIRVProgram& program) const;
|
||||
void MakePipeline(VkRenderPass render_pass);
|
||||
|
||||
std::vector<vk::ShaderModule> CreateShaderModules(const SPIRVProgram& program) const;
|
||||
|
||||
vk::Pipeline CreatePipeline(const SPIRVProgram& program, VkRenderPass renderpass,
|
||||
u32 num_color_buffers) const;
|
||||
void Validate();
|
||||
|
||||
const GraphicsPipelineCacheKey key;
|
||||
Tegra::Engines::Maxwell3D& maxwell3d;
|
||||
Tegra::MemoryManager& gpu_memory;
|
||||
const Device& device;
|
||||
TextureCache& texture_cache;
|
||||
BufferCache& buffer_cache;
|
||||
VKScheduler& scheduler;
|
||||
const GraphicsPipelineCacheKey cache_key;
|
||||
const u64 hash;
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue;
|
||||
|
||||
void (*configure_func)(GraphicsPipeline*, bool){};
|
||||
|
||||
std::vector<GraphicsPipelineCacheKey> transition_keys;
|
||||
std::vector<GraphicsPipeline*> transitions;
|
||||
|
||||
std::array<vk::ShaderModule, NUM_STAGES> spv_modules;
|
||||
|
||||
std::array<Shader::Info, NUM_STAGES> stage_infos;
|
||||
std::array<u32, 5> enabled_uniform_buffer_masks{};
|
||||
VideoCommon::UniformBufferSizes uniform_buffer_sizes{};
|
||||
|
||||
vk::DescriptorSetLayout descriptor_set_layout;
|
||||
DescriptorAllocator descriptor_allocator;
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue;
|
||||
vk::PipelineLayout layout;
|
||||
vk::DescriptorUpdateTemplateKHR descriptor_template;
|
||||
std::vector<vk::ShaderModule> modules;
|
||||
|
||||
vk::PipelineLayout pipeline_layout;
|
||||
vk::DescriptorUpdateTemplateKHR descriptor_update_template;
|
||||
vk::Pipeline pipeline;
|
||||
|
||||
std::condition_variable build_condvar;
|
||||
std::mutex build_mutex;
|
||||
std::atomic_bool is_built{false};
|
||||
bool uses_push_descriptor{false};
|
||||
};
|
||||
|
||||
} // namespace Vulkan
|
||||
|
@@ -39,9 +39,9 @@ public:
|
||||
return KnownGpuTick() >= tick;
|
||||
}
|
||||
|
||||
/// Advance to the logical tick.
|
||||
void NextTick() noexcept {
|
||||
++current_tick;
|
||||
/// Advance to the logical tick and return the old one
|
||||
[[nodiscard]] u64 NextTick() noexcept {
|
||||
return current_tick.fetch_add(1, std::memory_order::relaxed);
|
||||
}
|
||||
|
||||
/// Refresh the known GPU tick
|
||||
|
@@ -4,444 +4,610 @@
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstddef>
|
||||
#include <fstream>
|
||||
#include <memory>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
#include "common/bit_cast.h"
|
||||
#include "common/cityhash.h"
|
||||
#include "common/fs/fs.h"
|
||||
#include "common/fs/path_util.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/thread_worker.h"
|
||||
#include "core/core.h"
|
||||
#include "core/memory.h"
|
||||
#include "shader_recompiler/backend/spirv/emit_spirv.h"
|
||||
#include "shader_recompiler/environment.h"
|
||||
#include "shader_recompiler/frontend/maxwell/control_flow.h"
|
||||
#include "shader_recompiler/frontend/maxwell/translate_program.h"
|
||||
#include "shader_recompiler/program_header.h"
|
||||
#include "video_core/dirty_flags.h"
|
||||
#include "video_core/engines/kepler_compute.h"
|
||||
#include "video_core/engines/maxwell_3d.h"
|
||||
#include "video_core/memory_manager.h"
|
||||
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
|
||||
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
|
||||
#include "video_core/renderer_vulkan/pipeline_helper.h"
|
||||
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
|
||||
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
|
||||
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
|
||||
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_rasterizer.h"
|
||||
#include "video_core/renderer_vulkan/vk_scheduler.h"
|
||||
#include "video_core/renderer_vulkan/vk_shader_util.h"
|
||||
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
|
||||
#include "video_core/shader/compiler_settings.h"
|
||||
#include "video_core/shader/memory_util.h"
|
||||
#include "video_core/shader_cache.h"
|
||||
#include "video_core/shader_environment.h"
|
||||
#include "video_core/shader_notify.h"
|
||||
#include "video_core/vulkan_common/vulkan_device.h"
|
||||
#include "video_core/vulkan_common/vulkan_wrapper.h"
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
MICROPROFILE_DECLARE(Vulkan_PipelineCache);
|
||||
|
||||
using Tegra::Engines::ShaderType;
|
||||
using VideoCommon::Shader::GetShaderAddress;
|
||||
using VideoCommon::Shader::GetShaderCode;
|
||||
using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
|
||||
using VideoCommon::Shader::ProgramCode;
|
||||
using VideoCommon::Shader::STAGE_MAIN_OFFSET;
|
||||
|
||||
namespace {
|
||||
using Shader::Backend::SPIRV::EmitSPIRV;
|
||||
using Shader::Maxwell::MergeDualVertexPrograms;
|
||||
using Shader::Maxwell::TranslateProgram;
|
||||
using VideoCommon::ComputeEnvironment;
|
||||
using VideoCommon::FileEnvironment;
|
||||
using VideoCommon::GenericEnvironment;
|
||||
using VideoCommon::GraphicsEnvironment;
|
||||
|
||||
constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
|
||||
constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
|
||||
constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
|
||||
constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
|
||||
constexpr VkDescriptorType STORAGE_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
|
||||
constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
|
||||
|
||||
constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
|
||||
.depth = VideoCommon::Shader::CompileDepth::FullDecompile,
|
||||
.disable_else_derivation = true,
|
||||
};
|
||||
|
||||
constexpr std::size_t GetStageFromProgram(std::size_t program) {
|
||||
return program == 0 ? 0 : program - 1;
|
||||
template <typename Container>
|
||||
auto MakeSpan(Container& container) {
|
||||
return std::span(container.data(), container.size());
|
||||
}
|
||||
|
||||
constexpr ShaderType GetStageFromProgram(Maxwell::ShaderProgram program) {
|
||||
return static_cast<ShaderType>(GetStageFromProgram(static_cast<std::size_t>(program)));
|
||||
}
|
||||
|
||||
ShaderType GetShaderType(Maxwell::ShaderProgram program) {
|
||||
switch (program) {
|
||||
case Maxwell::ShaderProgram::VertexB:
|
||||
return ShaderType::Vertex;
|
||||
case Maxwell::ShaderProgram::TesselationControl:
|
||||
return ShaderType::TesselationControl;
|
||||
case Maxwell::ShaderProgram::TesselationEval:
|
||||
return ShaderType::TesselationEval;
|
||||
case Maxwell::ShaderProgram::Geometry:
|
||||
return ShaderType::Geometry;
|
||||
case Maxwell::ShaderProgram::Fragment:
|
||||
return ShaderType::Fragment;
|
||||
default:
|
||||
UNIMPLEMENTED_MSG("program={}", program);
|
||||
return ShaderType::Vertex;
|
||||
Shader::CompareFunction MaxwellToCompareFunction(Maxwell::ComparisonOp comparison) {
|
||||
switch (comparison) {
|
||||
case Maxwell::ComparisonOp::Never:
|
||||
case Maxwell::ComparisonOp::NeverOld:
|
||||
return Shader::CompareFunction::Never;
|
||||
case Maxwell::ComparisonOp::Less:
|
||||
case Maxwell::ComparisonOp::LessOld:
|
||||
return Shader::CompareFunction::Less;
|
||||
case Maxwell::ComparisonOp::Equal:
|
||||
case Maxwell::ComparisonOp::EqualOld:
|
||||
return Shader::CompareFunction::Equal;
|
||||
case Maxwell::ComparisonOp::LessEqual:
|
||||
case Maxwell::ComparisonOp::LessEqualOld:
|
||||
return Shader::CompareFunction::LessThanEqual;
|
||||
case Maxwell::ComparisonOp::Greater:
|
||||
case Maxwell::ComparisonOp::GreaterOld:
|
||||
return Shader::CompareFunction::Greater;
|
||||
case Maxwell::ComparisonOp::NotEqual:
|
||||
case Maxwell::ComparisonOp::NotEqualOld:
|
||||
return Shader::CompareFunction::NotEqual;
|
||||
case Maxwell::ComparisonOp::GreaterEqual:
|
||||
case Maxwell::ComparisonOp::GreaterEqualOld:
|
||||
return Shader::CompareFunction::GreaterThanEqual;
|
||||
case Maxwell::ComparisonOp::Always:
|
||||
case Maxwell::ComparisonOp::AlwaysOld:
|
||||
return Shader::CompareFunction::Always;
|
||||
}
|
||||
UNIMPLEMENTED_MSG("Unimplemented comparison op={}", comparison);
|
||||
return {};
|
||||
}
|
||||
|
||||
template <VkDescriptorType descriptor_type, class Container>
|
||||
void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& binding,
|
||||
VkShaderStageFlags stage_flags, const Container& container) {
|
||||
const u32 num_entries = static_cast<u32>(std::size(container));
|
||||
for (std::size_t i = 0; i < num_entries; ++i) {
|
||||
u32 count = 1;
|
||||
if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
|
||||
// Combined image samplers can be arrayed.
|
||||
count = container[i].size;
|
||||
Shader::AttributeType CastAttributeType(const FixedPipelineState::VertexAttribute& attr) {
|
||||
if (attr.enabled == 0) {
|
||||
return Shader::AttributeType::Disabled;
|
||||
}
|
||||
switch (attr.Type()) {
|
||||
case Maxwell::VertexAttribute::Type::SignedNorm:
|
||||
case Maxwell::VertexAttribute::Type::UnsignedNorm:
|
||||
case Maxwell::VertexAttribute::Type::UnsignedScaled:
|
||||
case Maxwell::VertexAttribute::Type::SignedScaled:
|
||||
case Maxwell::VertexAttribute::Type::Float:
|
||||
return Shader::AttributeType::Float;
|
||||
case Maxwell::VertexAttribute::Type::SignedInt:
|
||||
return Shader::AttributeType::SignedInt;
|
||||
case Maxwell::VertexAttribute::Type::UnsignedInt:
|
||||
return Shader::AttributeType::UnsignedInt;
|
||||
}
|
||||
return Shader::AttributeType::Float;
|
||||
}
|
||||
|
||||
Shader::AttributeType AttributeType(const FixedPipelineState& state, size_t index) {
|
||||
switch (state.DynamicAttributeType(index)) {
|
||||
case 0:
|
||||
return Shader::AttributeType::Disabled;
|
||||
case 1:
|
||||
return Shader::AttributeType::Float;
|
||||
case 2:
|
||||
return Shader::AttributeType::SignedInt;
|
||||
case 3:
|
||||
return Shader::AttributeType::UnsignedInt;
|
||||
}
|
||||
return Shader::AttributeType::Disabled;
|
||||
}
|
||||
|
||||
Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> programs,
|
||||
const GraphicsPipelineCacheKey& key,
|
||||
const Shader::IR::Program& program,
|
||||
const Shader::IR::Program* previous_program) {
|
||||
Shader::RuntimeInfo info;
|
||||
if (previous_program) {
|
||||
info.previous_stage_stores = previous_program->info.stores;
|
||||
if (previous_program->is_geometry_passthrough) {
|
||||
info.previous_stage_stores.mask |= previous_program->info.passthrough.mask;
|
||||
}
|
||||
bindings.push_back({
|
||||
.binding = binding++,
|
||||
.descriptorType = descriptor_type,
|
||||
.descriptorCount = count,
|
||||
.stageFlags = stage_flags,
|
||||
.pImmutableSamplers = nullptr,
|
||||
});
|
||||
} else {
|
||||
info.previous_stage_stores.mask.set();
|
||||
}
|
||||
const Shader::Stage stage{program.stage};
|
||||
const bool has_geometry{key.unique_hashes[4] != 0 && !programs[4].is_geometry_passthrough};
|
||||
const bool gl_ndc{key.state.ndc_minus_one_to_one != 0};
|
||||
const float point_size{Common::BitCast<float>(key.state.point_size)};
|
||||
switch (stage) {
|
||||
case Shader::Stage::VertexB:
|
||||
if (!has_geometry) {
|
||||
if (key.state.topology == Maxwell::PrimitiveTopology::Points) {
|
||||
info.fixed_state_point_size = point_size;
|
||||
}
|
||||
if (key.state.xfb_enabled) {
|
||||
info.xfb_varyings = VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
|
||||
}
|
||||
info.convert_depth_mode = gl_ndc;
|
||||
}
|
||||
if (key.state.dynamic_vertex_input) {
|
||||
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
|
||||
info.generic_input_types[index] = AttributeType(key.state, index);
|
||||
}
|
||||
} else {
|
||||
std::ranges::transform(key.state.attributes, info.generic_input_types.begin(),
|
||||
&CastAttributeType);
|
||||
}
|
||||
break;
|
||||
case Shader::Stage::TessellationEval:
|
||||
// We have to flip tessellation clockwise for some reason...
|
||||
info.tess_clockwise = key.state.tessellation_clockwise == 0;
|
||||
info.tess_primitive = [&key] {
|
||||
const u32 raw{key.state.tessellation_primitive.Value()};
|
||||
switch (static_cast<Maxwell::TessellationPrimitive>(raw)) {
|
||||
case Maxwell::TessellationPrimitive::Isolines:
|
||||
return Shader::TessPrimitive::Isolines;
|
||||
case Maxwell::TessellationPrimitive::Triangles:
|
||||
return Shader::TessPrimitive::Triangles;
|
||||
case Maxwell::TessellationPrimitive::Quads:
|
||||
return Shader::TessPrimitive::Quads;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return Shader::TessPrimitive::Triangles;
|
||||
}();
|
||||
info.tess_spacing = [&] {
|
||||
const u32 raw{key.state.tessellation_spacing};
|
||||
switch (static_cast<Maxwell::TessellationSpacing>(raw)) {
|
||||
case Maxwell::TessellationSpacing::Equal:
|
||||
return Shader::TessSpacing::Equal;
|
||||
case Maxwell::TessellationSpacing::FractionalOdd:
|
||||
return Shader::TessSpacing::FractionalOdd;
|
||||
case Maxwell::TessellationSpacing::FractionalEven:
|
||||
return Shader::TessSpacing::FractionalEven;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return Shader::TessSpacing::Equal;
|
||||
}();
|
||||
break;
|
||||
case Shader::Stage::Geometry:
|
||||
if (program.output_topology == Shader::OutputTopology::PointList) {
|
||||
info.fixed_state_point_size = point_size;
|
||||
}
|
||||
if (key.state.xfb_enabled != 0) {
|
||||
info.xfb_varyings = VideoCommon::MakeTransformFeedbackVaryings(key.state.xfb_state);
|
||||
}
|
||||
info.convert_depth_mode = gl_ndc;
|
||||
break;
|
||||
case Shader::Stage::Fragment:
|
||||
info.alpha_test_func = MaxwellToCompareFunction(
|
||||
key.state.UnpackComparisonOp(key.state.alpha_test_func.Value()));
|
||||
info.alpha_test_reference = Common::BitCast<float>(key.state.alpha_test_ref);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
switch (key.state.topology) {
|
||||
case Maxwell::PrimitiveTopology::Points:
|
||||
info.input_topology = Shader::InputTopology::Points;
|
||||
break;
|
||||
case Maxwell::PrimitiveTopology::Lines:
|
||||
case Maxwell::PrimitiveTopology::LineLoop:
|
||||
case Maxwell::PrimitiveTopology::LineStrip:
|
||||
info.input_topology = Shader::InputTopology::Lines;
|
||||
break;
|
||||
case Maxwell::PrimitiveTopology::Triangles:
|
||||
case Maxwell::PrimitiveTopology::TriangleStrip:
|
||||
case Maxwell::PrimitiveTopology::TriangleFan:
|
||||
case Maxwell::PrimitiveTopology::Quads:
|
||||
case Maxwell::PrimitiveTopology::QuadStrip:
|
||||
case Maxwell::PrimitiveTopology::Polygon:
|
||||
case Maxwell::PrimitiveTopology::Patches:
|
||||
info.input_topology = Shader::InputTopology::Triangles;
|
||||
break;
|
||||
case Maxwell::PrimitiveTopology::LinesAdjacency:
|
||||
case Maxwell::PrimitiveTopology::LineStripAdjacency:
|
||||
info.input_topology = Shader::InputTopology::LinesAdjacency;
|
||||
break;
|
||||
case Maxwell::PrimitiveTopology::TrianglesAdjacency:
|
||||
case Maxwell::PrimitiveTopology::TriangleStripAdjacency:
|
||||
info.input_topology = Shader::InputTopology::TrianglesAdjacency;
|
||||
break;
|
||||
}
|
||||
info.force_early_z = key.state.early_z != 0;
|
||||
info.y_negate = key.state.y_negate != 0;
|
||||
return info;
|
||||
}
|
||||
|
||||
u32 FillDescriptorLayout(const ShaderEntries& entries,
|
||||
std::vector<VkDescriptorSetLayoutBinding>& bindings,
|
||||
Maxwell::ShaderProgram program_type, u32 base_binding) {
|
||||
const ShaderType stage = GetStageFromProgram(program_type);
|
||||
const VkShaderStageFlags flags = MaxwellToVK::ShaderStage(stage);
|
||||
|
||||
u32 binding = base_binding;
|
||||
AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
|
||||
AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
|
||||
AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.uniform_texels);
|
||||
AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
|
||||
AddBindings<STORAGE_TEXEL_BUFFER>(bindings, binding, flags, entries.storage_texels);
|
||||
AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
|
||||
return binding;
|
||||
}
|
||||
|
||||
} // Anonymous namespace

std::size_t GraphicsPipelineCacheKey::Hash() const noexcept {
    const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), Size());
    return static_cast<std::size_t>(hash);
}

bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
    return std::memcmp(&rhs, this, Size()) == 0;
}

std::size_t ComputePipelineCacheKey::Hash() const noexcept {
size_t ComputePipelineCacheKey::Hash() const noexcept {
    const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
    return static_cast<std::size_t>(hash);
    return static_cast<size_t>(hash);
}

bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) const noexcept {
    return std::memcmp(&rhs, this, sizeof *this) == 0;
}
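Byte-wise hashing and comparison of the cache keys is only sound if the key types have no padding bytes and no indirection. A minimal sketch of the invariants this relies on (assumed here; the header hunk below only shows a trivially-constructible assert):

    static_assert(std::is_trivially_copyable_v<ComputePipelineCacheKey>);
    static_assert(std::has_unique_object_representations_v<ComputePipelineCacheKey>);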
|
||||
|
||||
Shader::Shader(Tegra::Engines::ConstBufferEngineInterface& engine_, ShaderType stage_,
|
||||
GPUVAddr gpu_addr_, VAddr cpu_addr_, ProgramCode program_code_, u32 main_offset_)
|
||||
: gpu_addr(gpu_addr_), program_code(std::move(program_code_)), registry(stage_, engine_),
|
||||
shader_ir(program_code, main_offset_, compiler_settings, registry),
|
||||
entries(GenerateShaderEntries(shader_ir)) {}
|
||||
|
||||
Shader::~Shader() = default;
|
||||
|
||||
VKPipelineCache::VKPipelineCache(RasterizerVulkan& rasterizer_, Tegra::GPU& gpu_,
|
||||
Tegra::Engines::Maxwell3D& maxwell3d_,
|
||||
Tegra::Engines::KeplerCompute& kepler_compute_,
|
||||
Tegra::MemoryManager& gpu_memory_, const Device& device_,
|
||||
VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue_)
|
||||
: VideoCommon::ShaderCache<Shader>{rasterizer_}, gpu{gpu_}, maxwell3d{maxwell3d_},
|
||||
kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, device{device_},
|
||||
scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, update_descriptor_queue{
|
||||
update_descriptor_queue_} {}
|
||||
|
||||
VKPipelineCache::~VKPipelineCache() = default;
|
||||
|
||||
std::array<Shader*, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
|
||||
std::array<Shader*, Maxwell::MaxShaderProgram> shaders{};
|
||||
|
||||
for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
|
||||
const auto program{static_cast<Maxwell::ShaderProgram>(index)};
|
||||
|
||||
// Skip stages that are not enabled
|
||||
if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const GPUVAddr gpu_addr{GetShaderAddress(maxwell3d, program)};
|
||||
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
|
||||
ASSERT(cpu_addr);
|
||||
|
||||
Shader* result = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
|
||||
if (!result) {
|
||||
const u8* const host_ptr{gpu_memory.GetPointer(gpu_addr)};
|
||||
|
||||
// No shader found - create a new one
|
||||
static constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
|
||||
const auto stage = static_cast<ShaderType>(index == 0 ? 0 : index - 1);
|
||||
ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, false);
|
||||
const std::size_t size_in_bytes = code.size() * sizeof(u64);
|
||||
|
||||
auto shader = std::make_unique<Shader>(maxwell3d, stage, gpu_addr, *cpu_addr,
|
||||
std::move(code), stage_offset);
|
||||
result = shader.get();
|
||||
|
||||
if (cpu_addr) {
|
||||
Register(std::move(shader), *cpu_addr, size_in_bytes);
|
||||
} else {
|
||||
null_shader = std::move(shader);
|
||||
}
|
||||
}
|
||||
shaders[index] = result;
|
||||
}
|
||||
return last_shaders = shaders;
|
||||
size_t GraphicsPipelineCacheKey::Hash() const noexcept {
|
||||
const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), Size());
|
||||
return static_cast<size_t>(hash);
|
||||
}
|
||||
|
||||
VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline(
|
||||
const GraphicsPipelineCacheKey& key, u32 num_color_buffers,
|
||||
VideoCommon::Shader::AsyncShaders& async_shaders) {
|
||||
MICROPROFILE_SCOPE(Vulkan_PipelineCache);
|
||||
|
||||
if (last_graphics_pipeline && last_graphics_key == key) {
|
||||
return last_graphics_pipeline;
|
||||
}
|
||||
last_graphics_key = key;
|
||||
|
||||
if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(gpu)) {
|
||||
std::unique_lock lock{pipeline_cache};
|
||||
const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
|
||||
if (is_cache_miss) {
|
||||
gpu.ShaderNotify().MarkSharderBuilding();
|
||||
LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
|
||||
const auto [program, bindings] = DecompileShaders(key.fixed_state);
|
||||
async_shaders.QueueVulkanShader(this, device, scheduler, descriptor_pool,
|
||||
update_descriptor_queue, bindings, program, key,
|
||||
num_color_buffers);
|
||||
}
|
||||
last_graphics_pipeline = pair->second.get();
|
||||
return last_graphics_pipeline;
|
||||
}
|
||||
|
||||
const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key);
|
||||
auto& entry = pair->second;
|
||||
if (is_cache_miss) {
|
||||
gpu.ShaderNotify().MarkSharderBuilding();
|
||||
LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
|
||||
const auto [program, bindings] = DecompileShaders(key.fixed_state);
|
||||
entry = std::make_unique<VKGraphicsPipeline>(device, scheduler, descriptor_pool,
|
||||
update_descriptor_queue, key, bindings,
|
||||
program, num_color_buffers);
|
||||
gpu.ShaderNotify().MarkShaderComplete();
|
||||
}
|
||||
last_graphics_pipeline = entry.get();
|
||||
return last_graphics_pipeline;
|
||||
bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
|
||||
return std::memcmp(&rhs, this, Size()) == 0;
|
||||
}
|
||||
|
||||
VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCacheKey& key) {
|
||||
PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
|
||||
Tegra::Engines::KeplerCompute& kepler_compute_,
|
||||
Tegra::MemoryManager& gpu_memory_, const Device& device_,
|
||||
VKScheduler& scheduler_, DescriptorPool& descriptor_pool_,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue_,
|
||||
RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
|
||||
TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
|
||||
: VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
|
||||
device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
|
||||
update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_},
|
||||
buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_},
|
||||
use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()},
|
||||
workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "yuzu:PipelineBuilder"),
|
||||
serialization_thread(1, "yuzu:PipelineSerialization") {
|
||||
const auto& float_control{device.FloatControlProperties()};
|
||||
const VkDriverIdKHR driver_id{device.GetDriverID()};
|
||||
profile = Shader::Profile{
|
||||
.supported_spirv = device.IsKhrSpirv1_4Supported() ? 0x00010400U : 0x00010000U,
|
||||
.unified_descriptor_binding = true,
|
||||
.support_descriptor_aliasing = true,
|
||||
.support_int8 = true,
|
||||
.support_int16 = device.IsShaderInt16Supported(),
|
||||
.support_int64 = device.IsShaderInt64Supported(),
|
||||
.support_vertex_instance_id = false,
|
||||
.support_float_controls = true,
|
||||
.support_separate_denorm_behavior = float_control.denormBehaviorIndependence ==
|
||||
VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
|
||||
.support_separate_rounding_mode =
|
||||
float_control.roundingModeIndependence == VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR,
|
||||
.support_fp16_denorm_preserve = float_control.shaderDenormPreserveFloat16 != VK_FALSE,
|
||||
.support_fp32_denorm_preserve = float_control.shaderDenormPreserveFloat32 != VK_FALSE,
|
||||
.support_fp16_denorm_flush = float_control.shaderDenormFlushToZeroFloat16 != VK_FALSE,
|
||||
.support_fp32_denorm_flush = float_control.shaderDenormFlushToZeroFloat32 != VK_FALSE,
|
||||
.support_fp16_signed_zero_nan_preserve =
|
||||
float_control.shaderSignedZeroInfNanPreserveFloat16 != VK_FALSE,
|
||||
.support_fp32_signed_zero_nan_preserve =
|
||||
float_control.shaderSignedZeroInfNanPreserveFloat32 != VK_FALSE,
|
||||
.support_fp64_signed_zero_nan_preserve =
|
||||
float_control.shaderSignedZeroInfNanPreserveFloat64 != VK_FALSE,
|
||||
.support_explicit_workgroup_layout = device.IsKhrWorkgroupMemoryExplicitLayoutSupported(),
|
||||
.support_vote = true,
|
||||
.support_viewport_index_layer_non_geometry =
|
||||
device.IsExtShaderViewportIndexLayerSupported(),
|
||||
.support_viewport_mask = device.IsNvViewportArray2Supported(),
|
||||
.support_typeless_image_loads = device.IsFormatlessImageLoadSupported(),
|
||||
.support_demote_to_helper_invocation = true,
|
||||
.support_int64_atomics = device.IsExtShaderAtomicInt64Supported(),
|
||||
.support_derivative_control = true,
|
||||
.support_geometry_shader_passthrough = device.IsNvGeometryShaderPassthroughSupported(),
|
||||
|
||||
.warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyBiggerThanGuest(),
|
||||
|
||||
.lower_left_origin_mode = false,
|
||||
.need_declared_frag_colors = false,
|
||||
|
||||
.has_broken_spirv_clamp = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR,
|
||||
.has_broken_unsigned_image_offsets = false,
|
||||
.has_broken_signed_operations = false,
|
||||
.has_broken_fp16_float_controls = driver_id == VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR,
|
||||
.ignore_nan_fp_comparisons = false,
|
||||
};
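Note: supported_spirv holds a SPIR-V version word (major << 16 | minor << 8), so 0x00010400 advertises SPIR-V 1.4 when VK_KHR_spirv_1_4 is available and 0x00010000 falls back to SPIR-V 1.0.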
|
||||
host_info = Shader::HostTranslateInfo{
|
||||
.support_float16 = device.IsFloat16Supported(),
|
||||
.support_int64 = device.IsShaderInt64Supported(),
|
||||
};
|
||||
}
|
||||
|
||||
PipelineCache::~PipelineCache() = default;
|
||||
|
||||
GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() {
|
||||
MICROPROFILE_SCOPE(Vulkan_PipelineCache);
|
||||
|
||||
const auto [pair, is_cache_miss] = compute_cache.try_emplace(key);
|
||||
auto& entry = pair->second;
|
||||
if (!is_cache_miss) {
|
||||
return *entry;
|
||||
if (!RefreshStages(graphics_key.unique_hashes)) {
|
||||
current_pipeline = nullptr;
|
||||
return nullptr;
|
||||
}
|
||||
LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash());
|
||||
graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(),
|
||||
device.IsExtVertexInputDynamicStateSupported());
|
||||
|
||||
const GPUVAddr gpu_addr = key.shader;
|
||||
if (current_pipeline) {
|
||||
GraphicsPipeline* const next{current_pipeline->Next(graphics_key)};
|
||||
if (next) {
|
||||
current_pipeline = next;
|
||||
return BuiltPipeline(current_pipeline);
|
||||
}
|
||||
}
|
||||
return CurrentGraphicsPipelineSlowPath();
|
||||
}
|
||||
|
||||
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
|
||||
ASSERT(cpu_addr);
|
||||
ComputePipeline* PipelineCache::CurrentComputePipeline() {
|
||||
MICROPROFILE_SCOPE(Vulkan_PipelineCache);
|
||||
|
||||
Shader* shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel.get();
|
||||
const ShaderInfo* const shader{ComputeShader()};
|
||||
if (!shader) {
|
||||
// No shader found - create a new one
|
||||
const auto host_ptr = gpu_memory.GetPointer(gpu_addr);
|
||||
|
||||
ProgramCode code = GetShaderCode(gpu_memory, gpu_addr, host_ptr, true);
|
||||
const std::size_t size_in_bytes = code.size() * sizeof(u64);
|
||||
|
||||
auto shader_info = std::make_unique<Shader>(kepler_compute, ShaderType::Compute, gpu_addr,
|
||||
*cpu_addr, std::move(code), KERNEL_MAIN_OFFSET);
|
||||
shader = shader_info.get();
|
||||
|
||||
if (cpu_addr) {
|
||||
Register(std::move(shader_info), *cpu_addr, size_in_bytes);
|
||||
} else {
|
||||
null_kernel = std::move(shader_info);
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const Specialization specialization{
|
||||
.base_binding = 0,
|
||||
.workgroup_size = key.workgroup_size,
|
||||
.shared_memory_size = key.shared_memory_size,
|
||||
.point_size = std::nullopt,
|
||||
.enabled_attributes = {},
|
||||
.attribute_types = {},
|
||||
.ndc_minus_one_to_one = false,
|
||||
const auto& qmd{kepler_compute.launch_description};
|
||||
const ComputePipelineCacheKey key{
|
||||
.unique_hash = shader->unique_hash,
|
||||
.shared_memory_size = qmd.shared_alloc,
|
||||
.workgroup_size{qmd.block_dim_x, qmd.block_dim_y, qmd.block_dim_z},
|
||||
};
|
||||
const SPIRVShader spirv_shader{Decompile(device, shader->GetIR(), ShaderType::Compute,
|
||||
shader->GetRegistry(), specialization),
|
||||
shader->GetEntries()};
|
||||
entry = std::make_unique<VKComputePipeline>(device, scheduler, descriptor_pool,
|
||||
update_descriptor_queue, spirv_shader);
|
||||
return *entry;
|
||||
const auto [pair, is_new]{compute_cache.try_emplace(key)};
|
||||
auto& pipeline{pair->second};
|
||||
if (!is_new) {
|
||||
return pipeline.get();
|
||||
}
|
||||
pipeline = CreateComputePipeline(key, shader);
|
||||
return pipeline.get();
|
||||
}
|
||||
|
||||
void VKPipelineCache::EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline) {
|
||||
gpu.ShaderNotify().MarkShaderComplete();
|
||||
std::unique_lock lock{pipeline_cache};
|
||||
graphics_cache.at(pipeline->GetCacheKey()) = std::move(pipeline);
|
||||
}
|
||||
|
||||
void VKPipelineCache::OnShaderRemoval(Shader* shader) {
|
||||
bool finished = false;
|
||||
const auto Finish = [&] {
|
||||
// TODO(Rodrigo): Instead of finishing here, wait for the fences that use this pipeline and
|
||||
// flush.
|
||||
if (finished) {
|
||||
return;
|
||||
}
|
||||
finished = true;
|
||||
scheduler.Finish();
|
||||
};
|
||||
|
||||
const GPUVAddr invalidated_addr = shader->GetGpuAddr();
|
||||
for (auto it = graphics_cache.begin(); it != graphics_cache.end();) {
|
||||
auto& entry = it->first;
|
||||
if (std::find(entry.shaders.begin(), entry.shaders.end(), invalidated_addr) ==
|
||||
entry.shaders.end()) {
|
||||
++it;
|
||||
continue;
|
||||
}
|
||||
Finish();
|
||||
it = graphics_cache.erase(it);
|
||||
}
|
||||
for (auto it = compute_cache.begin(); it != compute_cache.end();) {
|
||||
auto& entry = it->first;
|
||||
if (entry.shader != invalidated_addr) {
|
||||
++it;
|
||||
continue;
|
||||
}
|
||||
Finish();
|
||||
it = compute_cache.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>>
|
||||
VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) {
|
||||
Specialization specialization;
|
||||
if (fixed_state.topology == Maxwell::PrimitiveTopology::Points) {
|
||||
float point_size;
|
||||
std::memcpy(&point_size, &fixed_state.point_size, sizeof(float));
|
||||
specialization.point_size = point_size;
|
||||
ASSERT(point_size != 0.0f);
|
||||
}
|
||||
for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
|
||||
const auto& attribute = fixed_state.attributes[i];
|
||||
specialization.enabled_attributes[i] = attribute.enabled.Value() != 0;
|
||||
specialization.attribute_types[i] = attribute.Type();
|
||||
}
|
||||
specialization.ndc_minus_one_to_one = fixed_state.ndc_minus_one_to_one;
|
||||
specialization.early_fragment_tests = fixed_state.early_z;
|
||||
|
||||
// Alpha test
|
||||
specialization.alpha_test_func =
|
||||
FixedPipelineState::UnpackComparisonOp(fixed_state.alpha_test_func.Value());
|
||||
specialization.alpha_test_ref = Common::BitCast<float>(fixed_state.alpha_test_ref);
|
||||
|
||||
SPIRVProgram program;
|
||||
std::vector<VkDescriptorSetLayoutBinding> bindings;
|
||||
|
||||
for (std::size_t index = 1; index < Maxwell::MaxShaderProgram; ++index) {
|
||||
const auto program_enum = static_cast<Maxwell::ShaderProgram>(index);
|
||||
// Skip stages that are not enabled
|
||||
if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
|
||||
continue;
|
||||
}
|
||||
const GPUVAddr gpu_addr = GetShaderAddress(maxwell3d, program_enum);
|
||||
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
|
||||
Shader* const shader = cpu_addr ? TryGet(*cpu_addr) : null_shader.get();
|
||||
|
||||
const std::size_t stage = index == 0 ? 0 : index - 1; // Stage indices are 0 - 5
|
||||
const ShaderType program_type = GetShaderType(program_enum);
|
||||
const auto& entries = shader->GetEntries();
|
||||
program[stage] = {
|
||||
Decompile(device, shader->GetIR(), program_type, shader->GetRegistry(), specialization),
|
||||
entries,
|
||||
};
|
||||
|
||||
const u32 old_binding = specialization.base_binding;
|
||||
specialization.base_binding =
|
||||
FillDescriptorLayout(entries, bindings, program_enum, specialization.base_binding);
|
||||
ASSERT(old_binding + entries.NumBindings() == specialization.base_binding);
|
||||
}
|
||||
return {std::move(program), std::move(bindings)};
|
||||
}
|
||||
|
||||
template <VkDescriptorType descriptor_type, class Container>
|
||||
void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u32& binding,
|
||||
u32& offset, const Container& container) {
|
||||
static constexpr u32 entry_size = static_cast<u32>(sizeof(DescriptorUpdateEntry));
|
||||
const u32 count = static_cast<u32>(std::size(container));
|
||||
|
||||
if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
|
||||
for (u32 i = 0; i < count; ++i) {
|
||||
const u32 num_samplers = container[i].size;
|
||||
template_entries.push_back({
|
||||
.dstBinding = binding,
|
||||
.dstArrayElement = 0,
|
||||
.descriptorCount = num_samplers,
|
||||
.descriptorType = descriptor_type,
|
||||
.offset = offset,
|
||||
.stride = entry_size,
|
||||
});
|
||||
|
||||
++binding;
|
||||
offset += num_samplers * entry_size;
|
||||
}
|
||||
void PipelineCache::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
|
||||
const VideoCore::DiskResourceLoadCallback& callback) {
|
||||
if (title_id == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER ||
|
||||
descriptor_type == STORAGE_TEXEL_BUFFER) {
|
||||
// Nvidia has a bug where updating multiple texels at once causes the driver to crash.
|
||||
// Note: Fixed in driver Windows 443.24, Linux 440.66.15
|
||||
for (u32 i = 0; i < count; ++i) {
|
||||
template_entries.push_back({
|
||||
.dstBinding = binding + i,
|
||||
.dstArrayElement = 0,
|
||||
.descriptorCount = 1,
|
||||
.descriptorType = descriptor_type,
|
||||
.offset = static_cast<std::size_t>(offset + i * entry_size),
|
||||
.stride = entry_size,
|
||||
});
|
||||
}
|
||||
} else if (count > 0) {
|
||||
template_entries.push_back({
|
||||
.dstBinding = binding,
|
||||
.dstArrayElement = 0,
|
||||
.descriptorCount = count,
|
||||
.descriptorType = descriptor_type,
|
||||
.offset = offset,
|
||||
.stride = entry_size,
|
||||
});
|
||||
const auto shader_dir{Common::FS::GetYuzuPath(Common::FS::YuzuPath::ShaderDir)};
|
||||
const auto base_dir{shader_dir / fmt::format("{:016x}", title_id)};
|
||||
if (!Common::FS::CreateDir(shader_dir) || !Common::FS::CreateDir(base_dir)) {
|
||||
LOG_ERROR(Common_Filesystem, "Failed to create pipeline cache directories");
|
||||
return;
|
||||
}
|
||||
offset += count * entry_size;
|
||||
binding += count;
|
||||
pipeline_cache_filename = base_dir / "vulkan.bin";
|
||||
|
||||
struct {
|
||||
std::mutex mutex;
|
||||
size_t total{};
|
||||
size_t built{};
|
||||
bool has_loaded{};
|
||||
} state;
|
||||
|
||||
    const auto load_compute{[&](std::ifstream& file, FileEnvironment env) {
        ComputePipelineCacheKey key;
        file.read(reinterpret_cast<char*>(&key), sizeof(key));

        workers.QueueWork([this, key, env = std::move(env), &state, &callback]() mutable {
            ShaderPools pools;
            auto pipeline{CreateComputePipeline(pools, key, env, false)};
            std::lock_guard lock{state.mutex};
            if (pipeline) {
                compute_cache.emplace(key, std::move(pipeline));
            }
            ++state.built;
            if (state.has_loaded) {
                callback(VideoCore::LoadCallbackStage::Build, state.built, state.total);
            }
        });
        ++state.total;
    }};
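Note: the trailing `false` disables CreateComputePipeline's own parallel build; these jobs already run on the `workers` pool, presumably so building the VkPipeline inline on the worker avoids queueing a second job per cached pipeline.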
|
||||
const bool extended_dynamic_state = device.IsExtExtendedDynamicStateSupported();
|
||||
const bool dynamic_vertex_input = device.IsExtVertexInputDynamicStateSupported();
|
||||
const auto load_graphics{[&](std::ifstream& file, std::vector<FileEnvironment> envs) {
|
||||
GraphicsPipelineCacheKey key;
|
||||
file.read(reinterpret_cast<char*>(&key), sizeof(key));
|
||||
|
||||
if ((key.state.extended_dynamic_state != 0) != extended_dynamic_state ||
|
||||
(key.state.dynamic_vertex_input != 0) != dynamic_vertex_input) {
|
||||
return;
|
||||
}
|
||||
workers.QueueWork([this, key, envs = std::move(envs), &state, &callback]() mutable {
|
||||
ShaderPools pools;
|
||||
boost::container::static_vector<Shader::Environment*, 5> env_ptrs;
|
||||
for (auto& env : envs) {
|
||||
env_ptrs.push_back(&env);
|
||||
}
|
||||
auto pipeline{CreateGraphicsPipeline(pools, key, MakeSpan(env_ptrs), false)};
|
||||
|
||||
std::lock_guard lock{state.mutex};
|
||||
graphics_cache.emplace(key, std::move(pipeline));
|
||||
++state.built;
|
||||
if (state.has_loaded) {
|
||||
callback(VideoCore::LoadCallbackStage::Build, state.built, state.total);
|
||||
}
|
||||
});
|
||||
++state.total;
|
||||
}};
|
||||
    VideoCommon::LoadPipelines(stop_loading, pipeline_cache_filename, load_compute, load_graphics);

    std::unique_lock lock{state.mutex};
    callback(VideoCore::LoadCallbackStage::Build, 0, state.total);
    state.has_loaded = true;
    lock.unlock();

    workers.WaitForRequests();
}
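The disk loader fans out one job per serialized pipeline and only reports progress once state.has_loaded is set, so the UI callback is never invoked before the total job count is known. A minimal sketch of that pattern, assuming only the Common::ThreadWorker interface used in this file (constructor taking a thread count and a name, QueueWork, WaitForRequests):

    Common::ThreadWorker pool(2, "example:Workers"); // hypothetical pool name
    std::atomic<size_t> done{0};
    for (int i = 0; i < 8; ++i) {
        pool.QueueWork([&done] { ++done; }); // each queued job reports completion
    }
    pool.WaitForRequests(); // block until every queued job has run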
|
||||
|
||||
void FillDescriptorUpdateTemplateEntries(
|
||||
const ShaderEntries& entries, u32& binding, u32& offset,
|
||||
std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
|
||||
AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
|
||||
AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
|
||||
AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.uniform_texels);
|
||||
AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
|
||||
AddEntry<STORAGE_TEXEL_BUFFER>(template_entries, offset, binding, entries.storage_texels);
|
||||
AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
|
||||
GraphicsPipeline* PipelineCache::CurrentGraphicsPipelineSlowPath() {
|
||||
const auto [pair, is_new]{graphics_cache.try_emplace(graphics_key)};
|
||||
auto& pipeline{pair->second};
|
||||
if (is_new) {
|
||||
pipeline = CreateGraphicsPipeline();
|
||||
}
|
||||
if (!pipeline) {
|
||||
return nullptr;
|
||||
}
|
||||
if (current_pipeline) {
|
||||
current_pipeline->AddTransition(pipeline.get());
|
||||
}
|
||||
current_pipeline = pipeline.get();
|
||||
return BuiltPipeline(current_pipeline);
|
||||
}
|
||||
|
||||
GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const noexcept {
    if (pipeline->IsBuilt()) {
        return pipeline;
    }
    if (!use_asynchronous_shaders) {
        return pipeline;
    }
    // If something is using depth, we can assume that games are not rendering anything which
    // will be used one time.
    if (maxwell3d.regs.zeta_enable) {
        return nullptr;
    }
    // If games are using a small index count, we can assume these are full screen quads.
    // Usually these shaders are only used once for building textures so we can assume they
    // can't be built async
    if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) {
        return pipeline;
    }
    return nullptr;
}
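Note: returning nullptr here is the asynchronous-shader path; the caller (Draw) returns early and skips the draw for this frame while the pipeline finishes building on a worker thread, whereas returning the still-unbuilt pipeline keeps the draw at the cost of waiting for the build.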
|
||||
|
||||
std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
|
||||
ShaderPools& pools, const GraphicsPipelineCacheKey& key,
|
||||
std::span<Shader::Environment* const> envs, bool build_in_parallel) try {
|
||||
LOG_INFO(Render_Vulkan, "0x{:016x}", key.Hash());
|
||||
size_t env_index{0};
|
||||
std::array<Shader::IR::Program, Maxwell::MaxShaderProgram> programs;
|
||||
const bool uses_vertex_a{key.unique_hashes[0] != 0};
|
||||
const bool uses_vertex_b{key.unique_hashes[1] != 0};
|
||||
for (size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
|
||||
if (key.unique_hashes[index] == 0) {
|
||||
continue;
|
||||
}
|
||||
Shader::Environment& env{*envs[env_index]};
|
||||
++env_index;
|
||||
|
||||
const u32 cfg_offset{static_cast<u32>(env.StartAddress() + sizeof(Shader::ProgramHeader))};
|
||||
Shader::Maxwell::Flow::CFG cfg(env, pools.flow_block, cfg_offset, index == 0);
|
||||
if (!uses_vertex_a || index != 1) {
|
||||
// Normal path
|
||||
programs[index] = TranslateProgram(pools.inst, pools.block, env, cfg, host_info);
|
||||
} else {
|
||||
// VertexB path when VertexA is present.
|
||||
auto& program_va{programs[0]};
|
||||
auto program_vb{TranslateProgram(pools.inst, pools.block, env, cfg, host_info)};
|
||||
programs[index] = MergeDualVertexPrograms(program_va, program_vb, env);
|
||||
}
|
||||
}
|
||||
std::array<const Shader::Info*, Maxwell::MaxShaderStage> infos{};
|
||||
std::array<vk::ShaderModule, Maxwell::MaxShaderStage> modules;
|
||||
|
||||
const Shader::IR::Program* previous_stage{};
|
||||
Shader::Backend::Bindings binding;
|
||||
for (size_t index = uses_vertex_a && uses_vertex_b ? 1 : 0; index < Maxwell::MaxShaderProgram;
|
||||
++index) {
|
||||
if (key.unique_hashes[index] == 0) {
|
||||
continue;
|
||||
}
|
||||
UNIMPLEMENTED_IF(index == 0);
|
||||
|
||||
Shader::IR::Program& program{programs[index]};
|
||||
const size_t stage_index{index - 1};
|
||||
infos[stage_index] = &program.info;
|
||||
|
||||
const auto runtime_info{MakeRuntimeInfo(programs, key, program, previous_stage)};
|
||||
const std::vector<u32> code{EmitSPIRV(profile, runtime_info, program, binding)};
|
||||
device.SaveShader(code);
|
||||
modules[stage_index] = BuildShader(device, code);
|
||||
if (device.HasDebuggingToolAttached()) {
|
||||
const std::string name{fmt::format("Shader {:016x}", key.unique_hashes[index])};
|
||||
modules[stage_index].SetObjectNameEXT(name.c_str());
|
||||
}
|
||||
previous_stage = &program;
|
||||
}
|
||||
Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
|
||||
return std::make_unique<GraphicsPipeline>(
|
||||
maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device,
|
||||
descriptor_pool, update_descriptor_queue, thread_worker, render_pass_cache, key,
|
||||
std::move(modules), infos);
|
||||
|
||||
} catch (const Shader::Exception& exception) {
|
||||
LOG_ERROR(Render_Vulkan, "{}", exception.what());
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
|
||||
GraphicsEnvironments environments;
|
||||
GetGraphicsEnvironments(environments, graphics_key.unique_hashes);
|
||||
|
||||
main_pools.ReleaseContents();
|
||||
auto pipeline{CreateGraphicsPipeline(main_pools, graphics_key, environments.Span(), true)};
|
||||
if (!pipeline || pipeline_cache_filename.empty()) {
|
||||
return pipeline;
|
||||
}
|
||||
serialization_thread.QueueWork([this, key = graphics_key, envs = std::move(environments.envs)] {
|
||||
boost::container::static_vector<const GenericEnvironment*, Maxwell::MaxShaderProgram>
|
||||
env_ptrs;
|
||||
for (size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
|
||||
if (key.unique_hashes[index] != 0) {
|
||||
env_ptrs.push_back(&envs[index]);
|
||||
}
|
||||
}
|
||||
SerializePipeline(key, env_ptrs, pipeline_cache_filename);
|
||||
});
|
||||
return pipeline;
|
||||
}
|
||||
|
||||
std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
|
||||
const ComputePipelineCacheKey& key, const ShaderInfo* shader) {
|
||||
const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
|
||||
const auto& qmd{kepler_compute.launch_description};
|
||||
ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
|
||||
env.SetCachedSize(shader->size_bytes);
|
||||
|
||||
main_pools.ReleaseContents();
|
||||
auto pipeline{CreateComputePipeline(main_pools, key, env, true)};
|
||||
if (!pipeline || pipeline_cache_filename.empty()) {
|
||||
return pipeline;
|
||||
}
|
||||
serialization_thread.QueueWork([this, key, env = std::move(env)] {
|
||||
SerializePipeline(key, std::array<const GenericEnvironment*, 1>{&env},
|
||||
pipeline_cache_filename);
|
||||
});
|
||||
return pipeline;
|
||||
}
|
||||
|
||||
std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
    ShaderPools& pools, const ComputePipelineCacheKey& key, Shader::Environment& env,
    bool build_in_parallel) try {
    LOG_INFO(Render_Vulkan, "0x{:016x}", key.Hash());

    Shader::Maxwell::Flow::CFG cfg{env, pools.flow_block, env.StartAddress()};
    auto program{TranslateProgram(pools.inst, pools.block, env, cfg, host_info)};
    const std::vector<u32> code{EmitSPIRV(profile, program)};
    device.SaveShader(code);
    vk::ShaderModule spv_module{BuildShader(device, code)};
    if (device.HasDebuggingToolAttached()) {
        const auto name{fmt::format("Shader {:016x}", key.unique_hash)};
        spv_module.SetObjectNameEXT(name.c_str());
    }
    Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
    return std::make_unique<ComputePipeline>(device, descriptor_pool, update_descriptor_queue,
                                             thread_worker, &shader_notify, program.info,
                                             std::move(spv_module));

} catch (const Shader::Exception& exception) {
    LOG_ERROR(Render_Vulkan, "{}", exception.what());
    return nullptr;
}

} // namespace Vulkan
|
||||
|
@@ -6,24 +6,28 @@
|
||||
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
#include <filesystem>
|
||||
#include <iosfwd>
|
||||
#include <memory>
|
||||
#include <type_traits>
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include <boost/functional/hash.hpp>
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "video_core/engines/const_buffer_engine_interface.h"
|
||||
#include "common/thread_worker.h"
|
||||
#include "shader_recompiler/frontend/ir/basic_block.h"
|
||||
#include "shader_recompiler/frontend/ir/value.h"
|
||||
#include "shader_recompiler/frontend/maxwell/control_flow.h"
|
||||
#include "shader_recompiler/host_translate_info.h"
|
||||
#include "shader_recompiler/object_pool.h"
|
||||
#include "shader_recompiler/profile.h"
|
||||
#include "video_core/engines/maxwell_3d.h"
|
||||
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
|
||||
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
|
||||
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
|
||||
#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
|
||||
#include "video_core/shader/async_shaders.h"
|
||||
#include "video_core/shader/memory_util.h"
|
||||
#include "video_core/shader/registry.h"
|
||||
#include "video_core/shader/shader_ir.h"
|
||||
#include "video_core/renderer_vulkan/vk_texture_cache.h"
|
||||
#include "video_core/shader_cache.h"
|
||||
#include "video_core/vulkan_common/vulkan_wrapper.h"
|
||||
|
||||
@@ -31,23 +35,24 @@ namespace Core {
|
||||
class System;
|
||||
}
|
||||
|
||||
namespace Vulkan {
|
||||
namespace Shader::IR {
|
||||
struct Program;
|
||||
}
|
||||
|
||||
class Device;
|
||||
class RasterizerVulkan;
|
||||
class VKComputePipeline;
|
||||
class VKDescriptorPool;
|
||||
class VKScheduler;
|
||||
class VKUpdateDescriptorQueue;
|
||||
namespace VideoCore {
|
||||
class ShaderNotify;
|
||||
}
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
using Maxwell = Tegra::Engines::Maxwell3D::Regs;
|
||||
|
||||
struct ComputePipelineCacheKey {
|
||||
GPUVAddr shader;
|
||||
u64 unique_hash;
|
||||
u32 shared_memory_size;
|
||||
std::array<u32, 3> workgroup_size;
|
||||
|
||||
std::size_t Hash() const noexcept;
|
||||
size_t Hash() const noexcept;
|
||||
|
||||
bool operator==(const ComputePipelineCacheKey& rhs) const noexcept;
|
||||
|
||||
@@ -63,16 +68,9 @@ static_assert(std::is_trivially_constructible_v<ComputePipelineCacheKey>);
|
||||
|
||||
namespace std {
|
||||
|
||||
template <>
|
||||
struct hash<Vulkan::GraphicsPipelineCacheKey> {
|
||||
std::size_t operator()(const Vulkan::GraphicsPipelineCacheKey& k) const noexcept {
|
||||
return k.Hash();
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
struct hash<Vulkan::ComputePipelineCacheKey> {
|
||||
std::size_t operator()(const Vulkan::ComputePipelineCacheKey& k) const noexcept {
|
||||
size_t operator()(const Vulkan::ComputePipelineCacheKey& k) const noexcept {
|
||||
return k.Hash();
|
||||
}
|
||||
};
|
||||
@@ -81,94 +79,90 @@ struct hash<Vulkan::ComputePipelineCacheKey> {
|
||||
|
||||
namespace Vulkan {
|
||||
|
||||
class Shader {
|
||||
public:
|
||||
explicit Shader(Tegra::Engines::ConstBufferEngineInterface& engine_,
|
||||
Tegra::Engines::ShaderType stage_, GPUVAddr gpu_addr, VAddr cpu_addr_,
|
||||
VideoCommon::Shader::ProgramCode program_code, u32 main_offset_);
|
||||
~Shader();
|
||||
class ComputePipeline;
|
||||
class Device;
|
||||
class DescriptorPool;
|
||||
class RasterizerVulkan;
|
||||
class RenderPassCache;
|
||||
class VKScheduler;
|
||||
class VKUpdateDescriptorQueue;
|
||||
|
||||
GPUVAddr GetGpuAddr() const {
|
||||
return gpu_addr;
|
||||
using VideoCommon::ShaderInfo;
|
||||
|
||||
struct ShaderPools {
|
||||
void ReleaseContents() {
|
||||
flow_block.ReleaseContents();
|
||||
block.ReleaseContents();
|
||||
inst.ReleaseContents();
|
||||
}
|
||||
|
||||
VideoCommon::Shader::ShaderIR& GetIR() {
|
||||
return shader_ir;
|
||||
}
|
||||
|
||||
const VideoCommon::Shader::ShaderIR& GetIR() const {
|
||||
return shader_ir;
|
||||
}
|
||||
|
||||
const VideoCommon::Shader::Registry& GetRegistry() const {
|
||||
return registry;
|
||||
}
|
||||
|
||||
const ShaderEntries& GetEntries() const {
|
||||
return entries;
|
||||
}
|
||||
|
||||
private:
|
||||
GPUVAddr gpu_addr{};
|
||||
VideoCommon::Shader::ProgramCode program_code;
|
||||
VideoCommon::Shader::Registry registry;
|
||||
VideoCommon::Shader::ShaderIR shader_ir;
|
||||
ShaderEntries entries;
|
||||
Shader::ObjectPool<Shader::IR::Inst> inst;
|
||||
Shader::ObjectPool<Shader::IR::Block> block;
|
||||
Shader::ObjectPool<Shader::Maxwell::Flow::Block> flow_block;
|
||||
};
|
||||
|
||||
class VKPipelineCache final : public VideoCommon::ShaderCache<Shader> {
|
||||
class PipelineCache : public VideoCommon::ShaderCache {
|
||||
public:
|
||||
explicit VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu,
|
||||
Tegra::Engines::Maxwell3D& maxwell3d,
|
||||
Tegra::Engines::KeplerCompute& kepler_compute,
|
||||
Tegra::MemoryManager& gpu_memory, const Device& device,
|
||||
VKScheduler& scheduler, VKDescriptorPool& descriptor_pool,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue);
|
||||
~VKPipelineCache() override;
|
||||
explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d,
|
||||
Tegra::Engines::KeplerCompute& kepler_compute,
|
||||
Tegra::MemoryManager& gpu_memory, const Device& device,
|
||||
VKScheduler& scheduler, DescriptorPool& descriptor_pool,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue,
|
||||
RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
|
||||
TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
|
||||
~PipelineCache();
|
||||
|
||||
std::array<Shader*, Maxwell::MaxShaderProgram> GetShaders();
|
||||
[[nodiscard]] GraphicsPipeline* CurrentGraphicsPipeline();
|
||||
|
||||
VKGraphicsPipeline* GetGraphicsPipeline(const GraphicsPipelineCacheKey& key,
|
||||
u32 num_color_buffers,
|
||||
VideoCommon::Shader::AsyncShaders& async_shaders);
|
||||
[[nodiscard]] ComputePipeline* CurrentComputePipeline();
|
||||
|
||||
VKComputePipeline& GetComputePipeline(const ComputePipelineCacheKey& key);
|
||||
|
||||
void EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline);
|
||||
|
||||
protected:
|
||||
void OnShaderRemoval(Shader* shader) final;
|
||||
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
|
||||
const VideoCore::DiskResourceLoadCallback& callback);
|
||||
|
||||
private:
|
||||
std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders(
|
||||
const FixedPipelineState& fixed_state);
|
||||
[[nodiscard]] GraphicsPipeline* CurrentGraphicsPipelineSlowPath();
|
||||
|
||||
Tegra::GPU& gpu;
|
||||
Tegra::Engines::Maxwell3D& maxwell3d;
|
||||
Tegra::Engines::KeplerCompute& kepler_compute;
|
||||
Tegra::MemoryManager& gpu_memory;
|
||||
[[nodiscard]] GraphicsPipeline* BuiltPipeline(GraphicsPipeline* pipeline) const noexcept;
|
||||
|
||||
std::unique_ptr<GraphicsPipeline> CreateGraphicsPipeline();
|
||||
|
||||
std::unique_ptr<GraphicsPipeline> CreateGraphicsPipeline(
|
||||
ShaderPools& pools, const GraphicsPipelineCacheKey& key,
|
||||
std::span<Shader::Environment* const> envs, bool build_in_parallel);
|
||||
|
||||
std::unique_ptr<ComputePipeline> CreateComputePipeline(const ComputePipelineCacheKey& key,
|
||||
const ShaderInfo* shader);
|
||||
|
||||
std::unique_ptr<ComputePipeline> CreateComputePipeline(ShaderPools& pools,
|
||||
const ComputePipelineCacheKey& key,
|
||||
Shader::Environment& env,
|
||||
bool build_in_parallel);
|
||||
|
||||
const Device& device;
|
||||
VKScheduler& scheduler;
|
||||
VKDescriptorPool& descriptor_pool;
|
||||
DescriptorPool& descriptor_pool;
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue;
|
||||
RenderPassCache& render_pass_cache;
|
||||
BufferCache& buffer_cache;
|
||||
TextureCache& texture_cache;
|
||||
VideoCore::ShaderNotify& shader_notify;
|
||||
bool use_asynchronous_shaders{};
|
||||
|
||||
std::unique_ptr<Shader> null_shader;
|
||||
std::unique_ptr<Shader> null_kernel;
|
||||
GraphicsPipelineCacheKey graphics_key{};
|
||||
GraphicsPipeline* current_pipeline{};
|
||||
|
||||
std::array<Shader*, Maxwell::MaxShaderProgram> last_shaders{};
|
||||
std::unordered_map<ComputePipelineCacheKey, std::unique_ptr<ComputePipeline>> compute_cache;
|
||||
std::unordered_map<GraphicsPipelineCacheKey, std::unique_ptr<GraphicsPipeline>> graphics_cache;
|
||||
|
||||
GraphicsPipelineCacheKey last_graphics_key;
|
||||
VKGraphicsPipeline* last_graphics_pipeline = nullptr;
|
||||
ShaderPools main_pools;
|
||||
|
||||
std::mutex pipeline_cache;
|
||||
std::unordered_map<GraphicsPipelineCacheKey, std::unique_ptr<VKGraphicsPipeline>>
|
||||
graphics_cache;
|
||||
std::unordered_map<ComputePipelineCacheKey, std::unique_ptr<VKComputePipeline>> compute_cache;
|
||||
Shader::Profile profile;
|
||||
Shader::HostTranslateInfo host_info;
|
||||
|
||||
std::filesystem::path pipeline_cache_filename;
|
||||
|
||||
Common::ThreadWorker workers;
|
||||
Common::ThreadWorker serialization_thread;
|
||||
};
|
||||
|
||||
void FillDescriptorUpdateTemplateEntries(
|
||||
const ShaderEntries& entries, u32& binding, u32& offset,
|
||||
std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries);
|
||||
|
||||
} // namespace Vulkan
|
||||
|
@@ -114,14 +114,10 @@ void HostCounter::EndQuery() {
}

u64 HostCounter::BlockingQuery() const {
    if (tick >= cache.GetScheduler().CurrentTick()) {
        cache.GetScheduler().Flush();
    }

    cache.GetScheduler().Wait(tick);
    u64 data;
    const VkResult query_result = cache.GetDevice().GetLogical().GetQueryResults(
        query.first, query.second, 1, sizeof(data), &data, sizeof(data),
        VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
        query.first, query.second, 1, sizeof(data), &data, sizeof(data), VK_QUERY_RESULT_64_BIT);

    switch (query_result) {
    case VK_SUCCESS:
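Note: because the scheduler is made to wait on the query's tick before reading, the GPU work containing the query has already completed and the results are host-visible, which is why the VK_QUERY_RESULT_WAIT_BIT variant of the vkGetQueryPoolResults call is dropped in this hunk.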
|
||||
|
@@ -24,7 +24,6 @@
|
||||
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
|
||||
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
|
||||
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
|
||||
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_rasterizer.h"
|
||||
#include "video_core/renderer_vulkan/vk_scheduler.h"
|
||||
@@ -55,11 +54,10 @@ struct DrawParams {
|
||||
u32 num_instances;
|
||||
u32 base_vertex;
|
||||
u32 num_vertices;
|
||||
u32 first_index;
|
||||
bool is_indexed;
|
||||
};
|
||||
|
||||
constexpr auto COMPUTE_SHADER_INDEX = static_cast<size_t>(Tegra::Engines::ShaderType::Compute);
|
||||
|
||||
VkViewport GetViewportState(const Device& device, const Maxwell& regs, size_t index) {
|
||||
const auto& src = regs.viewport_transform[index];
|
||||
const float width = src.scale_x * 2.0f;
|
||||
@@ -97,118 +95,6 @@ VkRect2D GetScissorState(const Maxwell& regs, size_t index) {
|
||||
return scissor;
|
||||
}
|
||||
|
||||
std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
|
||||
const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders) {
|
||||
std::array<GPUVAddr, Maxwell::MaxShaderProgram> addresses;
|
||||
for (size_t i = 0; i < std::size(addresses); ++i) {
|
||||
addresses[i] = shaders[i] ? shaders[i]->GetGpuAddr() : 0;
|
||||
}
|
||||
return addresses;
|
||||
}
|
||||
|
||||
struct TextureHandle {
|
||||
constexpr TextureHandle(u32 data, bool via_header_index) {
|
||||
const Tegra::Texture::TextureHandle handle{data};
|
||||
image = handle.tic_id;
|
||||
sampler = via_header_index ? image : handle.tsc_id.Value();
|
||||
}
|
||||
|
||||
u32 image;
|
||||
u32 sampler;
|
||||
};
|
||||
|
||||
template <typename Engine, typename Entry>
|
||||
TextureHandle GetTextureInfo(const Engine& engine, bool via_header_index, const Entry& entry,
|
||||
size_t stage, size_t index = 0) {
|
||||
const auto shader_type = static_cast<Tegra::Engines::ShaderType>(stage);
|
||||
if constexpr (std::is_same_v<Entry, SamplerEntry>) {
|
||||
if (entry.is_separated) {
|
||||
const u32 buffer_1 = entry.buffer;
|
||||
const u32 buffer_2 = entry.secondary_buffer;
|
||||
const u32 offset_1 = entry.offset;
|
||||
const u32 offset_2 = entry.secondary_offset;
|
||||
const u32 handle_1 = engine.AccessConstBuffer32(shader_type, buffer_1, offset_1);
|
||||
const u32 handle_2 = engine.AccessConstBuffer32(shader_type, buffer_2, offset_2);
|
||||
return TextureHandle(handle_1 | handle_2, via_header_index);
|
||||
}
|
||||
}
|
||||
if (entry.is_bindless) {
|
||||
const u32 raw = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
|
||||
return TextureHandle(raw, via_header_index);
|
||||
}
|
||||
const u32 buffer = engine.GetBoundBuffer();
|
||||
const u64 offset = (entry.offset + index) * sizeof(u32);
|
||||
return TextureHandle(engine.AccessConstBuffer32(shader_type, buffer, offset), via_header_index);
|
||||
}
|
||||
|
||||
ImageViewType ImageViewTypeFromEntry(const SamplerEntry& entry) {
|
||||
if (entry.is_buffer) {
|
||||
return ImageViewType::e2D;
|
||||
}
|
||||
switch (entry.type) {
|
||||
case Tegra::Shader::TextureType::Texture1D:
|
||||
return entry.is_array ? ImageViewType::e1DArray : ImageViewType::e1D;
|
||||
case Tegra::Shader::TextureType::Texture2D:
|
||||
return entry.is_array ? ImageViewType::e2DArray : ImageViewType::e2D;
|
||||
case Tegra::Shader::TextureType::Texture3D:
|
||||
return ImageViewType::e3D;
|
||||
case Tegra::Shader::TextureType::TextureCube:
|
||||
return entry.is_array ? ImageViewType::CubeArray : ImageViewType::Cube;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return ImageViewType::e2D;
|
||||
}
|
||||
|
||||
ImageViewType ImageViewTypeFromEntry(const ImageEntry& entry) {
|
||||
switch (entry.type) {
|
||||
case Tegra::Shader::ImageType::Texture1D:
|
||||
return ImageViewType::e1D;
|
||||
case Tegra::Shader::ImageType::Texture1DArray:
|
||||
return ImageViewType::e1DArray;
|
||||
case Tegra::Shader::ImageType::Texture2D:
|
||||
return ImageViewType::e2D;
|
||||
case Tegra::Shader::ImageType::Texture2DArray:
|
||||
return ImageViewType::e2DArray;
|
||||
case Tegra::Shader::ImageType::Texture3D:
|
||||
return ImageViewType::e3D;
|
||||
case Tegra::Shader::ImageType::TextureBuffer:
|
||||
return ImageViewType::Buffer;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return ImageViewType::e2D;
|
||||
}
|
||||
|
||||
void PushImageDescriptors(const ShaderEntries& entries, TextureCache& texture_cache,
|
||||
VKUpdateDescriptorQueue& update_descriptor_queue,
|
||||
ImageViewId*& image_view_id_ptr, VkSampler*& sampler_ptr) {
|
||||
for ([[maybe_unused]] const auto& entry : entries.uniform_texels) {
|
||||
const ImageViewId image_view_id = *image_view_id_ptr++;
|
||||
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
|
||||
update_descriptor_queue.AddTexelBuffer(image_view.BufferView());
|
||||
}
|
||||
for (const auto& entry : entries.samplers) {
|
||||
for (size_t i = 0; i < entry.size; ++i) {
|
||||
const VkSampler sampler = *sampler_ptr++;
|
||||
const ImageViewId image_view_id = *image_view_id_ptr++;
|
||||
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
|
||||
const VkImageView handle = image_view.Handle(ImageViewTypeFromEntry(entry));
|
||||
update_descriptor_queue.AddSampledImage(handle, sampler);
|
||||
}
|
||||
}
|
||||
for ([[maybe_unused]] const auto& entry : entries.storage_texels) {
|
||||
const ImageViewId image_view_id = *image_view_id_ptr++;
|
||||
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
|
||||
update_descriptor_queue.AddTexelBuffer(image_view.BufferView());
|
||||
}
|
||||
for (const auto& entry : entries.images) {
|
||||
// TODO: Mark as modified
|
||||
const ImageViewId image_view_id = *image_view_id_ptr++;
|
||||
const ImageView& image_view = texture_cache.GetImageView(image_view_id);
|
||||
const VkImageView handle = image_view.Handle(ImageViewTypeFromEntry(entry));
|
||||
update_descriptor_queue.AddImage(handle);
|
||||
}
|
||||
}
|
||||
|
||||
DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instanced,
|
||||
bool is_indexed) {
|
||||
DrawParams params{
|
||||
@@ -216,6 +102,7 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan
|
||||
.num_instances = is_instanced ? num_instances : 1,
|
||||
.base_vertex = is_indexed ? regs.vb_element_base : regs.vertex_buffer.first,
|
||||
.num_vertices = is_indexed ? regs.index_array.count : regs.vertex_buffer.count,
|
||||
.first_index = is_indexed ? regs.index_array.first : 0,
|
||||
.is_indexed = is_indexed,
|
||||
};
|
||||
if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
|
||||
@@ -243,21 +130,21 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
|
||||
blit_image(device, scheduler, state_tracker, descriptor_pool),
|
||||
astc_decoder_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue,
|
||||
memory_allocator),
|
||||
texture_cache_runtime{device, scheduler, memory_allocator,
|
||||
staging_pool, blit_image, astc_decoder_pass},
|
||||
render_pass_cache(device), texture_cache_runtime{device, scheduler,
|
||||
memory_allocator, staging_pool,
|
||||
blit_image, astc_decoder_pass,
|
||||
render_pass_cache},
|
||||
texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
|
||||
buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
|
||||
update_descriptor_queue, descriptor_pool),
|
||||
buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime),
|
||||
pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
|
||||
descriptor_pool, update_descriptor_queue),
|
||||
pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
|
||||
descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache,
|
||||
texture_cache, gpu.ShaderNotify()),
|
||||
query_cache{*this, maxwell3d, gpu_memory, device, scheduler},
|
||||
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
|
||||
wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) {
|
||||
wfi_event(device.GetLogical().CreateEvent()) {
|
||||
scheduler.SetQueryCache(query_cache);
|
||||
if (device.UseAsynchronousShaders()) {
|
||||
async_shaders.AllocateWorkers();
|
||||
}
|
||||
}
|
||||
|
||||
RasterizerVulkan::~RasterizerVulkan() = default;
|
||||
@@ -270,53 +157,30 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
|
||||
|
||||
query_cache.UpdateCounters();
|
||||
|
||||
graphics_key.fixed_state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported());
|
||||
|
||||
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
|
||||
|
||||
texture_cache.SynchronizeGraphicsDescriptors();
|
||||
texture_cache.UpdateRenderTargets(false);
|
||||
|
||||
const auto shaders = pipeline_cache.GetShaders();
|
||||
graphics_key.shaders = GetShaderAddresses(shaders);
|
||||
|
||||
SetupShaderDescriptors(shaders, is_indexed);
|
||||
|
||||
const Framebuffer* const framebuffer = texture_cache.GetFramebuffer();
|
||||
graphics_key.renderpass = framebuffer->RenderPass();
|
||||
|
||||
VKGraphicsPipeline* const pipeline = pipeline_cache.GetGraphicsPipeline(
|
||||
graphics_key, framebuffer->NumColorBuffers(), async_shaders);
|
||||
if (pipeline == nullptr || pipeline->GetHandle() == VK_NULL_HANDLE) {
|
||||
// Async graphics pipeline was not ready.
|
||||
GraphicsPipeline* const pipeline{pipeline_cache.CurrentGraphicsPipeline()};
|
||||
if (!pipeline) {
|
||||
return;
|
||||
}
|
||||
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
|
||||
pipeline->Configure(is_indexed);
|
||||
|
||||
BeginTransformFeedback();
|
||||
|
||||
scheduler.RequestRenderpass(framebuffer);
|
||||
scheduler.BindGraphicsPipeline(pipeline->GetHandle());
|
||||
UpdateDynamicStates();
|
||||
|
||||
const auto& regs = maxwell3d.regs;
|
||||
const u32 num_instances = maxwell3d.mme_draw.instance_count;
|
||||
const DrawParams draw_params = MakeDrawParams(regs, num_instances, is_instanced, is_indexed);
|
||||
const VkPipelineLayout pipeline_layout = pipeline->GetLayout();
|
||||
const VkDescriptorSet descriptor_set = pipeline->CommitDescriptorSet();
|
||||
scheduler.Record([pipeline_layout, descriptor_set, draw_params](vk::CommandBuffer cmdbuf) {
|
||||
if (descriptor_set) {
|
||||
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
|
||||
DESCRIPTOR_SET, descriptor_set, nullptr);
|
||||
}
|
||||
const auto& regs{maxwell3d.regs};
|
||||
const u32 num_instances{maxwell3d.mme_draw.instance_count};
|
||||
const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)};
|
||||
scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
|
||||
if (draw_params.is_indexed) {
|
||||
cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances, 0,
|
||||
draw_params.base_vertex, draw_params.base_instance);
|
||||
cmdbuf.DrawIndexed(draw_params.num_vertices, draw_params.num_instances,
|
||||
draw_params.first_index, draw_params.base_vertex,
|
||||
draw_params.base_instance);
|
||||
} else {
|
||||
cmdbuf.Draw(draw_params.num_vertices, draw_params.num_instances,
|
||||
draw_params.base_vertex, draw_params.base_instance);
|
||||
}
|
||||
});
|
||||
|
||||
EndTransformFeedback();
|
||||
}
|
||||
|
||||
@@ -326,6 +190,7 @@ void RasterizerVulkan::Clear() {
|
||||
if (!maxwell3d.ShouldExecute()) {
|
||||
return;
|
||||
}
|
||||
FlushWork();
|
||||
|
||||
query_cache.UpdateCounters();
|
||||
|
||||
@@ -393,73 +258,20 @@ void RasterizerVulkan::Clear() {
|
||||
});
|
||||
}
|
||||
|
||||
void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
|
||||
MICROPROFILE_SCOPE(Vulkan_Compute);
|
||||
void RasterizerVulkan::DispatchCompute() {
|
||||
FlushWork();
|
||||
|
||||
query_cache.UpdateCounters();
|
||||
|
||||
const auto& launch_desc = kepler_compute.launch_description;
|
||||
auto& pipeline = pipeline_cache.GetComputePipeline({
|
||||
.shader = code_addr,
|
||||
.shared_memory_size = launch_desc.shared_alloc,
|
||||
.workgroup_size{
|
||||
launch_desc.block_dim_x,
|
||||
launch_desc.block_dim_y,
|
||||
launch_desc.block_dim_z,
|
||||
},
|
||||
});
|
||||
|
||||
// Compute dispatches can't be executed inside a renderpass
|
||||
scheduler.RequestOutsideRenderPassOperationContext();
|
||||
|
||||
image_view_indices.clear();
|
||||
sampler_handles.clear();
|
||||
|
||||
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
|
||||
|
||||
const auto& entries = pipeline.GetEntries();
|
||||
buffer_cache.SetEnabledComputeUniformBuffers(entries.enabled_uniform_buffers);
|
||||
buffer_cache.UnbindComputeStorageBuffers();
|
||||
u32 ssbo_index = 0;
|
||||
for (const auto& buffer : entries.global_buffers) {
|
||||
buffer_cache.BindComputeStorageBuffer(ssbo_index, buffer.cbuf_index, buffer.cbuf_offset,
|
||||
buffer.is_written);
|
||||
++ssbo_index;
|
||||
ComputePipeline* const pipeline{pipeline_cache.CurrentComputePipeline()};
|
||||
if (!pipeline) {
|
||||
return;
|
||||
}
|
||||
buffer_cache.UpdateComputeBuffers();
|
||||
std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex};
|
||||
pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache);
|
||||
|
||||
texture_cache.SynchronizeComputeDescriptors();
|
||||
|
||||
SetupComputeUniformTexels(entries);
|
||||
SetupComputeTextures(entries);
|
||||
SetupComputeStorageTexels(entries);
|
||||
SetupComputeImages(entries);
|
||||
|
||||
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
|
||||
texture_cache.FillComputeImageViews(indices_span, image_view_ids);
|
||||
|
||||
update_descriptor_queue.Acquire();
|
||||
|
||||
buffer_cache.BindHostComputeBuffers();
|
||||
|
||||
ImageViewId* image_view_id_ptr = image_view_ids.data();
|
||||
VkSampler* sampler_ptr = sampler_handles.data();
|
||||
PushImageDescriptors(entries, texture_cache, update_descriptor_queue, image_view_id_ptr,
|
||||
sampler_ptr);
|
||||
|
||||
const VkPipeline pipeline_handle = pipeline.GetHandle();
|
||||
const VkPipelineLayout pipeline_layout = pipeline.GetLayout();
|
||||
const VkDescriptorSet descriptor_set = pipeline.CommitDescriptorSet();
|
||||
scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y,
|
||||
grid_z = launch_desc.grid_dim_z, pipeline_handle, pipeline_layout,
|
||||
descriptor_set](vk::CommandBuffer cmdbuf) {
|
||||
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_handle);
|
||||
if (descriptor_set) {
|
||||
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_layout,
|
||||
DESCRIPTOR_SET, descriptor_set, nullptr);
|
||||
}
|
||||
cmdbuf.Dispatch(grid_x, grid_y, grid_z);
|
||||
});
|
||||
const auto& qmd{kepler_compute.launch_description};
|
||||
const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z};
|
||||
scheduler.RequestOutsideRenderPassOperationContext();
|
||||
scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); });
|
||||
}
|
||||
|
||||
void RasterizerVulkan::ResetCounter(VideoCore::QueryType type) {
|
||||
@@ -644,6 +456,7 @@ void RasterizerVulkan::WaitForIdle() {
|
||||
|
||||
void RasterizerVulkan::FragmentBarrier() {
|
||||
// We already put barriers when a render pass finishes
|
||||
scheduler.RequestOutsideRenderPassOperationContext();
|
||||
}
|
||||
|
||||
void RasterizerVulkan::TiledCacheBarrier() {
|
||||
@@ -651,10 +464,11 @@ void RasterizerVulkan::TiledCacheBarrier() {
|
||||
}
|
||||
|
||||
void RasterizerVulkan::FlushCommands() {
|
||||
if (draw_counter > 0) {
|
||||
draw_counter = 0;
|
||||
scheduler.Flush();
|
||||
if (draw_counter == 0) {
|
||||
return;
|
||||
}
|
||||
draw_counter = 0;
|
||||
scheduler.Flush();
|
||||
}
|
||||
|
||||
void RasterizerVulkan::TickFrame() {
|
||||
@@ -690,13 +504,18 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
|
||||
if (!image_view) {
|
||||
return false;
|
||||
}
|
||||
screen_info.image_view = image_view->Handle(VideoCommon::ImageViewType::e2D);
|
||||
screen_info.image_view = image_view->Handle(Shader::TextureType::Color2D);
|
||||
screen_info.width = image_view->size.width;
|
||||
screen_info.height = image_view->size.height;
|
||||
screen_info.is_srgb = VideoCore::Surface::IsPixelFormatSRGB(image_view->format);
|
||||
return true;
|
||||
}
|
||||
|
||||
void RasterizerVulkan::LoadDiskResources(u64 title_id, std::stop_token stop_loading,
|
||||
const VideoCore::DiskResourceLoadCallback& callback) {
|
||||
pipeline_cache.LoadDiskResources(title_id, stop_loading, callback);
|
||||
}
|
||||
|
||||
void RasterizerVulkan::FlushWork() {
|
||||
static constexpr u32 DRAWS_TO_DISPATCH = 4096;
|
||||
|
||||
@@ -705,65 +524,17 @@ void RasterizerVulkan::FlushWork() {
|
||||
if ((++draw_counter & 7) != 7) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (draw_counter < DRAWS_TO_DISPATCH) {
|
||||
// Send recorded tasks to the worker thread
|
||||
scheduler.DispatchWork();
|
||||
return;
|
||||
}
|
||||
|
||||
// Otherwise (every certain number of draws) flush execution.
|
||||
// This submits commands to the Vulkan driver.
|
||||
scheduler.Flush();
|
||||
draw_counter = 0;
|
||||
}
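For reference, the cadence these constants produce (an illustration derived from the code above, not text from the commit):
// With DRAWS_TO_DISPATCH = 4096:
//   draw_counter = 7, 15, 23, ..., 4095 -> scheduler.DispatchWork()  (every 8th draw)
//   draw_counter = 4103                 -> scheduler.Flush() and the counter resets to 0
// Recorded work is handed to the worker thread frequently, but a full submit to the
// Vulkan driver only happens after roughly four thousand draws or at an explicit flush.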
|
||||
|
||||
void RasterizerVulkan::SetupShaderDescriptors(
|
||||
const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders, bool is_indexed) {
|
||||
image_view_indices.clear();
|
||||
sampler_handles.clear();
|
||||
for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
|
||||
Shader* const shader = shaders[stage + 1];
|
||||
if (!shader) {
|
||||
continue;
|
||||
}
|
||||
const ShaderEntries& entries = shader->GetEntries();
|
||||
SetupGraphicsUniformTexels(entries, stage);
|
||||
SetupGraphicsTextures(entries, stage);
|
||||
SetupGraphicsStorageTexels(entries, stage);
|
||||
SetupGraphicsImages(entries, stage);
|
||||
|
||||
buffer_cache.SetEnabledUniformBuffers(stage, entries.enabled_uniform_buffers);
|
||||
buffer_cache.UnbindGraphicsStorageBuffers(stage);
|
||||
u32 ssbo_index = 0;
|
||||
for (const auto& buffer : entries.global_buffers) {
|
||||
buffer_cache.BindGraphicsStorageBuffer(stage, ssbo_index, buffer.cbuf_index,
|
||||
buffer.cbuf_offset, buffer.is_written);
|
||||
++ssbo_index;
|
||||
}
|
||||
}
|
||||
const std::span indices_span(image_view_indices.data(), image_view_indices.size());
|
||||
buffer_cache.UpdateGraphicsBuffers(is_indexed);
|
||||
texture_cache.FillGraphicsImageViews(indices_span, image_view_ids);
|
||||
|
||||
buffer_cache.BindHostGeometryBuffers(is_indexed);
|
||||
|
||||
update_descriptor_queue.Acquire();
|
||||
|
||||
ImageViewId* image_view_id_ptr = image_view_ids.data();
|
||||
VkSampler* sampler_ptr = sampler_handles.data();
|
||||
for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
|
||||
// Skip VertexA stage
|
||||
Shader* const shader = shaders[stage + 1];
|
||||
if (!shader) {
|
||||
continue;
|
||||
}
|
||||
buffer_cache.BindHostStageBuffers(stage);
|
||||
PushImageDescriptors(shader->GetEntries(), texture_cache, update_descriptor_queue,
|
||||
image_view_id_ptr, sampler_ptr);
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::UpdateDynamicStates() {
|
||||
auto& regs = maxwell3d.regs;
|
||||
UpdateViewportsState(regs);
|
||||
@@ -772,6 +543,7 @@ void RasterizerVulkan::UpdateDynamicStates() {
|
||||
UpdateBlendConstants(regs);
|
||||
UpdateDepthBounds(regs);
|
||||
UpdateStencilFaces(regs);
|
||||
UpdateLineWidth(regs);
|
||||
if (device.IsExtExtendedDynamicStateSupported()) {
|
||||
UpdateCullMode(regs);
|
||||
UpdateDepthBoundsTestEnable(regs);
|
||||
@@ -781,6 +553,9 @@ void RasterizerVulkan::UpdateDynamicStates() {
|
||||
UpdateFrontFace(regs);
|
||||
UpdateStencilOp(regs);
|
||||
UpdateStencilTestEnable(regs);
|
||||
if (device.IsExtVertexInputDynamicStateSupported()) {
|
||||
UpdateVertexInput(regs);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -812,89 +587,6 @@ void RasterizerVulkan::EndTransformFeedback() {
|
||||
[](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
|
||||
}
|
||||
|
||||
void RasterizerVulkan::SetupGraphicsUniformTexels(const ShaderEntries& entries, size_t stage) {
|
||||
const auto& regs = maxwell3d.regs;
|
||||
const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
|
||||
for (const auto& entry : entries.uniform_texels) {
|
||||
const TextureHandle handle = GetTextureInfo(maxwell3d, via_header_index, entry, stage);
|
||||
image_view_indices.push_back(handle.image);
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, size_t stage) {
|
||||
const auto& regs = maxwell3d.regs;
|
||||
const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
|
||||
for (const auto& entry : entries.samplers) {
|
||||
for (size_t index = 0; index < entry.size; ++index) {
|
||||
const TextureHandle handle =
|
||||
GetTextureInfo(maxwell3d, via_header_index, entry, stage, index);
|
||||
image_view_indices.push_back(handle.image);
|
||||
|
||||
Sampler* const sampler = texture_cache.GetGraphicsSampler(handle.sampler);
|
||||
sampler_handles.push_back(sampler->Handle());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, size_t stage) {
|
||||
const auto& regs = maxwell3d.regs;
|
||||
const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
|
||||
for (const auto& entry : entries.storage_texels) {
|
||||
const TextureHandle handle = GetTextureInfo(maxwell3d, via_header_index, entry, stage);
|
||||
image_view_indices.push_back(handle.image);
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, size_t stage) {
|
||||
const auto& regs = maxwell3d.regs;
|
||||
const bool via_header_index = regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex;
|
||||
for (const auto& entry : entries.images) {
|
||||
const TextureHandle handle = GetTextureInfo(maxwell3d, via_header_index, entry, stage);
|
||||
image_view_indices.push_back(handle.image);
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
|
||||
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
|
||||
for (const auto& entry : entries.uniform_texels) {
|
||||
const TextureHandle handle =
|
||||
GetTextureInfo(kepler_compute, via_header_index, entry, COMPUTE_SHADER_INDEX);
|
||||
image_view_indices.push_back(handle.image);
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
|
||||
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
|
||||
for (const auto& entry : entries.samplers) {
|
||||
for (size_t index = 0; index < entry.size; ++index) {
|
||||
const TextureHandle handle = GetTextureInfo(kepler_compute, via_header_index, entry,
|
||||
COMPUTE_SHADER_INDEX, index);
|
||||
image_view_indices.push_back(handle.image);
|
||||
|
||||
Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
|
||||
sampler_handles.push_back(sampler->Handle());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
|
||||
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
|
||||
for (const auto& entry : entries.storage_texels) {
|
||||
const TextureHandle handle =
|
||||
GetTextureInfo(kepler_compute, via_header_index, entry, COMPUTE_SHADER_INDEX);
|
||||
image_view_indices.push_back(handle.image);
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
|
||||
const bool via_header_index = kepler_compute.launch_description.linked_tsc;
|
||||
for (const auto& entry : entries.images) {
|
||||
const TextureHandle handle =
|
||||
GetTextureInfo(kepler_compute, via_header_index, entry, COMPUTE_SHADER_INDEX);
|
||||
image_view_indices.push_back(handle.image);
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs) {
|
||||
if (!state_tracker.TouchViewports()) {
|
||||
return;
|
||||
@@ -987,6 +679,14 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
|
||||
}
|
||||
}
|
||||
|
||||
void RasterizerVulkan::UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs) {
|
||||
if (!state_tracker.TouchLineWidth()) {
|
||||
return;
|
||||
}
|
||||
const float width = regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased;
|
||||
scheduler.Record([width](vk::CommandBuffer cmdbuf) { cmdbuf.SetLineWidth(width); });
|
||||
}
|
||||
|
||||
void RasterizerVulkan::UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs) {
|
||||
if (!state_tracker.TouchCullMode()) {
|
||||
return;
|
||||
@@ -1001,6 +701,11 @@ void RasterizerVulkan::UpdateDepthBoundsTestEnable(Tegra::Engines::Maxwell3D::Re
|
||||
if (!state_tracker.TouchDepthBoundsTestEnable()) {
|
||||
return;
|
||||
}
|
||||
bool enabled = regs.depth_bounds_enable;
|
||||
if (enabled && !device.IsDepthBoundsSupported()) {
|
||||
LOG_WARNING(Render_Vulkan, "Depth bounds is enabled but not supported");
|
||||
enabled = false;
|
||||
}
|
||||
scheduler.Record([enable = enabled](vk::CommandBuffer cmdbuf) {
|
||||
cmdbuf.SetDepthBoundsTestEnableEXT(enable);
|
||||
});
|
||||
@@ -1088,4 +793,62 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs&
|
||||
});
|
||||
}
|
||||
|
||||
void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) {
|
||||
auto& dirty{maxwell3d.dirty.flags};
|
||||
if (!dirty[Dirty::VertexInput]) {
|
||||
return;
|
||||
}
|
||||
dirty[Dirty::VertexInput] = false;
|
||||
|
||||
boost::container::static_vector<VkVertexInputBindingDescription2EXT, 32> bindings;
|
||||
boost::container::static_vector<VkVertexInputAttributeDescription2EXT, 32> attributes;
|
||||
|
||||
// There seems to be a bug on Nvidia's driver where updating only higher attributes ends up
|
||||
// generating dirty state. Track the highest dirty attribute and update all attributes until
|
||||
// that one.
|
||||
size_t highest_dirty_attr{};
|
||||
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
|
||||
if (dirty[Dirty::VertexAttribute0 + index]) {
|
||||
highest_dirty_attr = index;
|
||||
}
|
||||
}
|
||||
for (size_t index = 0; index <= highest_dirty_attr; ++index) {
|
||||
const Maxwell::VertexAttribute attribute{regs.vertex_attrib_format[index]};
|
||||
const u32 binding{attribute.buffer};
|
||||
dirty[Dirty::VertexAttribute0 + index] = false;
|
||||
dirty[Dirty::VertexBinding0 + static_cast<size_t>(binding)] = true;
|
||||
if (!attribute.constant) {
|
||||
attributes.push_back({
|
||||
.sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT,
|
||||
.pNext = nullptr,
|
||||
.location = static_cast<u32>(index),
|
||||
.binding = binding,
|
||||
.format = MaxwellToVK::VertexFormat(attribute.type, attribute.size),
|
||||
.offset = attribute.offset,
|
||||
});
|
||||
}
|
||||
}
|
||||
for (size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
|
||||
if (!dirty[Dirty::VertexBinding0 + index]) {
|
||||
continue;
|
||||
}
|
||||
dirty[Dirty::VertexBinding0 + index] = false;
|
||||
|
||||
const u32 binding{static_cast<u32>(index)};
|
||||
const auto& input_binding{regs.vertex_array[binding]};
|
||||
const bool is_instanced{regs.instanced_arrays.IsInstancingEnabled(binding)};
|
||||
bindings.push_back({
|
||||
.sType = VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT,
|
||||
.pNext = nullptr,
|
||||
.binding = binding,
|
||||
.stride = input_binding.stride,
|
||||
.inputRate = is_instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX,
|
||||
.divisor = is_instanced ? input_binding.divisor : 1,
|
||||
});
|
||||
}
|
||||
scheduler.Record([bindings, attributes](vk::CommandBuffer cmdbuf) {
|
||||
cmdbuf.SetVertexInputEXT(bindings, attributes);
|
||||
});
|
||||
}
|
||||
|
||||
} // namespace Vulkan
|
||||
|
@@ -20,14 +20,13 @@
|
||||
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
|
||||
#include "video_core/renderer_vulkan/vk_fence_manager.h"
|
||||
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
|
||||
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_query_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_scheduler.h"
|
||||
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
|
||||
#include "video_core/renderer_vulkan/vk_texture_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
|
||||
#include "video_core/shader/async_shaders.h"
|
||||
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
|
||||
#include "video_core/vulkan_common/vulkan_wrapper.h"
|
||||
|
||||
@@ -60,7 +59,7 @@ public:
|
||||
|
||||
void Draw(bool is_indexed, bool is_instanced) override;
|
||||
void Clear() override;
|
||||
void DispatchCompute(GPUVAddr code_addr) override;
|
||||
void DispatchCompute() override;
|
||||
void ResetCounter(VideoCore::QueryType type) override;
|
||||
void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
|
||||
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
|
||||
@@ -90,19 +89,8 @@ public:
|
||||
const Tegra::Engines::Fermi2D::Config& copy_config) override;
|
||||
bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
|
||||
u32 pixel_stride) override;
|
||||
|
||||
VideoCommon::Shader::AsyncShaders& GetAsyncShaders() {
|
||||
return async_shaders;
|
||||
}
|
||||
|
||||
const VideoCommon::Shader::AsyncShaders& GetAsyncShaders() const {
|
||||
return async_shaders;
|
||||
}
|
||||
|
||||
/// Maximum supported size that a constbuffer can have in bytes.
|
||||
static constexpr size_t MaxConstbufferSize = 0x10000;
|
||||
static_assert(MaxConstbufferSize % (4 * sizeof(float)) == 0,
|
||||
"The maximum size of a constbuffer must be a multiple of the size of GLvec4");
|
||||
void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
|
||||
const VideoCore::DiskResourceLoadCallback& callback) override;
|
||||
|
||||
private:
|
||||
static constexpr size_t MAX_TEXTURES = 192;
|
||||
@@ -113,46 +101,19 @@ private:
|
||||
|
||||
void FlushWork();
|
||||
|
||||
/// Setup descriptors in the graphics pipeline.
|
||||
void SetupShaderDescriptors(const std::array<Shader*, Maxwell::MaxShaderProgram>& shaders,
|
||||
bool is_indexed);
|
||||
|
||||
void UpdateDynamicStates();
|
||||
|
||||
void BeginTransformFeedback();
|
||||
|
||||
void EndTransformFeedback();
|
||||
|
||||
/// Setup uniform texels in the graphics pipeline.
|
||||
void SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage);
|
||||
|
||||
/// Setup textures in the graphics pipeline.
|
||||
void SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage);
|
||||
|
||||
/// Setup storage texels in the graphics pipeline.
|
||||
void SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage);
|
||||
|
||||
/// Setup images in the graphics pipeline.
|
||||
void SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage);
|
||||
|
||||
/// Setup texel buffers in the compute pipeline.
|
||||
void SetupComputeUniformTexels(const ShaderEntries& entries);
|
||||
|
||||
/// Setup textures in the compute pipeline.
|
||||
void SetupComputeTextures(const ShaderEntries& entries);
|
||||
|
||||
/// Setup storage texels in the compute pipeline.
|
||||
void SetupComputeStorageTexels(const ShaderEntries& entries);
|
||||
|
||||
/// Setup images in the compute pipeline.
|
||||
void SetupComputeImages(const ShaderEntries& entries);
|
||||
|
||||
void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
void UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
void UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
void UpdateBlendConstants(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
void UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
void UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
void UpdateLineWidth(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
|
||||
void UpdateCullMode(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
void UpdateDepthBoundsTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
@@ -163,6 +124,8 @@ private:
|
||||
void UpdateStencilOp(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
void UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
|
||||
void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
|
||||
|
||||
Tegra::GPU& gpu;
|
||||
Tegra::MemoryManager& gpu_memory;
|
||||
Tegra::Engines::Maxwell3D& maxwell3d;
|
||||
@@ -175,23 +138,21 @@ private:
|
||||
VKScheduler& scheduler;
|
||||
|
||||
StagingBufferPool staging_pool;
|
||||
VKDescriptorPool descriptor_pool;
|
||||
DescriptorPool descriptor_pool;
|
||||
VKUpdateDescriptorQueue update_descriptor_queue;
|
||||
BlitImageHelper blit_image;
|
||||
ASTCDecoderPass astc_decoder_pass;
|
||||
|
||||
GraphicsPipelineCacheKey graphics_key;
|
||||
RenderPassCache render_pass_cache;
|
||||
|
||||
TextureCacheRuntime texture_cache_runtime;
|
||||
TextureCache texture_cache;
|
||||
BufferCacheRuntime buffer_cache_runtime;
|
||||
BufferCache buffer_cache;
|
||||
VKPipelineCache pipeline_cache;
|
||||
PipelineCache pipeline_cache;
|
||||
VKQueryCache query_cache;
|
||||
VKFenceManager fence_manager;
|
||||
|
||||
vk::Event wfi_event;
|
||||
VideoCommon::Shader::AsyncShaders async_shaders;
|
||||
|
||||
boost::container::static_vector<u32, MAX_IMAGE_VIEWS> image_view_indices;
|
||||
std::array<VideoCommon::ImageViewId, MAX_IMAGE_VIEWS> image_view_ids;
|
||||
|
src/video_core/renderer_vulkan/vk_render_pass_cache.cpp (new executable file, 96 lines)
@@ -0,0 +1,96 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <unordered_map>

#include <boost/container/static_vector.hpp>

#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/surface.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {
namespace {
using VideoCore::Surface::PixelFormat;

VkAttachmentDescription AttachmentDescription(const Device& device, PixelFormat format,
                                              VkSampleCountFlagBits samples) {
    using MaxwellToVK::SurfaceFormat;
    return {
        .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
        .format = SurfaceFormat(device, FormatType::Optimal, true, format).format,
        .samples = samples,
        .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
        .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
        .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
        .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
        .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
        .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
    };
}
} // Anonymous namespace

RenderPassCache::RenderPassCache(const Device& device_) : device{&device_} {}

VkRenderPass RenderPassCache::Get(const RenderPassKey& key) {
    std::lock_guard lock{mutex};
    const auto [pair, is_new] = cache.try_emplace(key);
    if (!is_new) {
        return *pair->second;
    }
    boost::container::static_vector<VkAttachmentDescription, 9> descriptions;
    std::array<VkAttachmentReference, 8> references{};
    u32 num_attachments{};
    u32 num_colors{};
    for (size_t index = 0; index < key.color_formats.size(); ++index) {
        const PixelFormat format{key.color_formats[index]};
        const bool is_valid{format != PixelFormat::Invalid};
        references[index] = VkAttachmentReference{
            .attachment = is_valid ? num_colors : VK_ATTACHMENT_UNUSED,
            .layout = VK_IMAGE_LAYOUT_GENERAL,
        };
        if (is_valid) {
            descriptions.push_back(AttachmentDescription(*device, format, key.samples));
            num_attachments = static_cast<u32>(index + 1);
            ++num_colors;
        }
    }
    const bool has_depth{key.depth_format != PixelFormat::Invalid};
    VkAttachmentReference depth_reference{};
    if (key.depth_format != PixelFormat::Invalid) {
        depth_reference = VkAttachmentReference{
            .attachment = num_colors,
            .layout = VK_IMAGE_LAYOUT_GENERAL,
        };
        descriptions.push_back(AttachmentDescription(*device, key.depth_format, key.samples));
    }
    const VkSubpassDescription subpass{
        .flags = 0,
        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
        .inputAttachmentCount = 0,
        .pInputAttachments = nullptr,
        .colorAttachmentCount = num_attachments,
        .pColorAttachments = references.data(),
        .pResolveAttachments = nullptr,
        .pDepthStencilAttachment = has_depth ? &depth_reference : nullptr,
        .preserveAttachmentCount = 0,
        .pPreserveAttachments = nullptr,
    };
    pair->second = device->GetLogical().CreateRenderPass({
        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .attachmentCount = static_cast<u32>(descriptions.size()),
        .pAttachments = descriptions.empty() ? nullptr : descriptions.data(),
        .subpassCount = 1,
        .pSubpasses = &subpass,
        .dependencyCount = 0,
        .pDependencies = nullptr,
    });
    return *pair->second;
}

} // namespace Vulkan
|
src/video_core/renderer_vulkan/vk_render_pass_cache.h (new executable file, 55 lines)
@@ -0,0 +1,55 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <mutex>
#include <unordered_map>

#include "video_core/surface.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {

struct RenderPassKey {
    auto operator<=>(const RenderPassKey&) const noexcept = default;

    std::array<VideoCore::Surface::PixelFormat, 8> color_formats;
    VideoCore::Surface::PixelFormat depth_format;
    VkSampleCountFlagBits samples;
};

} // namespace Vulkan

namespace std {
template <>
struct hash<Vulkan::RenderPassKey> {
    [[nodiscard]] size_t operator()(const Vulkan::RenderPassKey& key) const noexcept {
        size_t value = static_cast<size_t>(key.depth_format) << 48;
        value ^= static_cast<size_t>(key.samples) << 52;
        for (size_t i = 0; i < key.color_formats.size(); ++i) {
            value ^= static_cast<size_t>(key.color_formats[i]) << (i * 6);
        }
        return value;
    }
};
} // namespace std

namespace Vulkan {

class Device;

class RenderPassCache {
public:
    explicit RenderPassCache(const Device& device_);

    VkRenderPass Get(const RenderPassKey& key);

private:
    const Device* device{};
    std::unordered_map<RenderPassKey, vk::RenderPass> cache;
    std::mutex mutex;
};

} // namespace Vulkan
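A minimal usage sketch for the new cache; the call site, chosen formats and variable names are assumptions for illustration and are not code from this commit:
// Hypothetical caller: describe the framebuffer's attachments and ask the cache for a
// compatible VkRenderPass. Unused color slots stay PixelFormat::Invalid.
RenderPassKey key{};
key.color_formats.fill(VideoCore::Surface::PixelFormat::Invalid);
key.color_formats[0] = VideoCore::Surface::PixelFormat::A8B8G8R8_UNORM;
key.depth_format = VideoCore::Surface::PixelFormat::D32_FLOAT;
key.samples = VK_SAMPLE_COUNT_1_BIT;
const VkRenderPass render_pass = render_pass_cache.Get(key); // memoized and thread-safe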
|
@@ -10,18 +10,16 @@
|
||||
namespace Vulkan {
|
||||
|
||||
ResourcePool::ResourcePool(MasterSemaphore& master_semaphore_, size_t grow_step_)
|
||||
: master_semaphore{master_semaphore_}, grow_step{grow_step_} {}
|
||||
|
||||
ResourcePool::~ResourcePool() = default;
|
||||
: master_semaphore{&master_semaphore_}, grow_step{grow_step_} {}
|
||||
|
||||
size_t ResourcePool::CommitResource() {
|
||||
// Refresh semaphore to query updated results
|
||||
master_semaphore.Refresh();
|
||||
const u64 gpu_tick = master_semaphore.KnownGpuTick();
|
||||
master_semaphore->Refresh();
|
||||
const u64 gpu_tick = master_semaphore->KnownGpuTick();
|
||||
const auto search = [this, gpu_tick](size_t begin, size_t end) -> std::optional<size_t> {
|
||||
for (size_t iterator = begin; iterator < end; ++iterator) {
|
||||
if (gpu_tick >= ticks[iterator]) {
|
||||
ticks[iterator] = master_semaphore.CurrentTick();
|
||||
ticks[iterator] = master_semaphore->CurrentTick();
|
||||
return iterator;
|
||||
}
|
||||
}
|
||||
@@ -36,7 +34,7 @@ size_t ResourcePool::CommitResource() {
|
||||
// Both searches failed, the pool is full; handle it.
|
||||
const size_t free_resource = ManageOverflow();
|
||||
|
||||
ticks[free_resource] = master_semaphore.CurrentTick();
|
||||
ticks[free_resource] = master_semaphore->CurrentTick();
|
||||
found = free_resource;
|
||||
}
|
||||
}
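A small illustration of the tick test above (the numbers are invented for the example):
// gpu_tick = 41, ticks = {40, 42, 39}:
//   slot 0 (tick 40) and slot 2 (tick 39) have already been consumed by the GPU and can
//   be reused; slot 1 (tick 42) is still in flight, so the search skips it.
// A reused slot is immediately restamped with master_semaphore->CurrentTick(), so it is
// not handed out again until the new submission has completed.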
|
||||
|
@@ -18,8 +18,16 @@ class MasterSemaphore;
|
||||
*/
|
||||
class ResourcePool {
|
||||
public:
|
||||
explicit ResourcePool() = default;
|
||||
explicit ResourcePool(MasterSemaphore& master_semaphore, size_t grow_step);
|
||||
virtual ~ResourcePool();
|
||||
|
||||
virtual ~ResourcePool() = default;
|
||||
|
||||
ResourcePool& operator=(ResourcePool&&) noexcept = default;
|
||||
ResourcePool(ResourcePool&&) noexcept = default;
|
||||
|
||||
ResourcePool& operator=(const ResourcePool&) = default;
|
||||
ResourcePool(const ResourcePool&) = default;
|
||||
|
||||
protected:
|
||||
size_t CommitResource();
|
||||
@@ -34,7 +42,7 @@ private:
|
||||
/// Allocates a new page of resources.
|
||||
void Grow();
|
||||
|
||||
MasterSemaphore& master_semaphore;
|
||||
MasterSemaphore* master_semaphore{};
|
||||
size_t grow_step = 0; ///< Number of new resources created after an overflow
|
||||
size_t hint_iterator = 0; ///< Hint to where the next free resources is likely to be found
|
||||
std::vector<u64> ticks; ///< Ticks for each resource
|
||||
|
@@ -31,7 +31,7 @@ void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
|
||||
command->~Command();
|
||||
command = next;
|
||||
}
|
||||
|
||||
submit = false;
|
||||
command_offset = 0;
|
||||
first = nullptr;
|
||||
last = nullptr;
|
||||
@@ -42,13 +42,16 @@ VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
|
||||
master_semaphore{std::make_unique<MasterSemaphore>(device)},
|
||||
command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {
|
||||
AcquireNewChunk();
|
||||
AllocateNewContext();
|
||||
AllocateWorkerCommandBuffer();
|
||||
worker_thread = std::thread(&VKScheduler::WorkerThread, this);
|
||||
}
|
||||
|
||||
VKScheduler::~VKScheduler() {
|
||||
quit = true;
|
||||
cv.notify_all();
|
||||
{
|
||||
std::lock_guard lock{work_mutex};
|
||||
quit = true;
|
||||
}
|
||||
work_cv.notify_all();
|
||||
worker_thread.join();
|
||||
}
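// Note on the locking change above (inferred from the shown code, not from commit text):
// the worker thread re-checks `quit` under work_mutex in its wait predicate, so flipping
// the flag while holding that mutex guarantees the final work_cv.notify_all() cannot race
// with the predicate check and leave the worker asleep while worker_thread.join() blocks.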
|
||||
|
||||
@@ -60,6 +63,7 @@ void VKScheduler::Flush(VkSemaphore semaphore) {
|
||||
void VKScheduler::Finish(VkSemaphore semaphore) {
|
||||
const u64 presubmit_tick = CurrentTick();
|
||||
SubmitExecution(semaphore);
|
||||
WaitWorker();
|
||||
Wait(presubmit_tick);
|
||||
AllocateNewContext();
|
||||
}
|
||||
@@ -68,20 +72,19 @@ void VKScheduler::WaitWorker() {
|
||||
MICROPROFILE_SCOPE(Vulkan_WaitForWorker);
|
||||
DispatchWork();
|
||||
|
||||
bool finished = false;
|
||||
do {
|
||||
cv.notify_all();
|
||||
std::unique_lock lock{mutex};
|
||||
finished = chunk_queue.Empty();
|
||||
} while (!finished);
|
||||
std::unique_lock lock{work_mutex};
|
||||
wait_cv.wait(lock, [this] { return work_queue.empty(); });
|
||||
}
|
||||
|
||||
void VKScheduler::DispatchWork() {
|
||||
if (chunk->Empty()) {
|
||||
return;
|
||||
}
|
||||
chunk_queue.Push(std::move(chunk));
|
||||
cv.notify_all();
|
||||
{
|
||||
std::lock_guard lock{work_mutex};
|
||||
work_queue.push(std::move(chunk));
|
||||
}
|
||||
work_cv.notify_one();
|
||||
AcquireNewChunk();
|
||||
}
|
||||
|
||||
@@ -124,85 +127,41 @@ void VKScheduler::RequestOutsideRenderPassOperationContext() {
|
||||
EndRenderPass();
|
||||
}
|
||||
|
||||
void VKScheduler::BindGraphicsPipeline(VkPipeline pipeline) {
|
||||
bool VKScheduler::UpdateGraphicsPipeline(GraphicsPipeline* pipeline) {
|
||||
if (state.graphics_pipeline == pipeline) {
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
state.graphics_pipeline = pipeline;
|
||||
Record([pipeline](vk::CommandBuffer cmdbuf) {
|
||||
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
|
||||
});
|
||||
return true;
|
||||
}
|
||||
|
||||
void VKScheduler::WorkerThread() {
|
||||
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
|
||||
std::unique_lock lock{mutex};
|
||||
Common::SetCurrentThreadName("yuzu:VulkanWorker");
|
||||
do {
|
||||
cv.wait(lock, [this] { return !chunk_queue.Empty() || quit; });
|
||||
if (quit) {
|
||||
continue;
|
||||
if (work_queue.empty()) {
|
||||
wait_cv.notify_all();
|
||||
}
|
||||
auto extracted_chunk = std::move(chunk_queue.Front());
|
||||
chunk_queue.Pop();
|
||||
extracted_chunk->ExecuteAll(current_cmdbuf);
|
||||
chunk_reserve.Push(std::move(extracted_chunk));
|
||||
std::unique_ptr<CommandChunk> work;
|
||||
{
|
||||
std::unique_lock lock{work_mutex};
|
||||
work_cv.wait(lock, [this] { return !work_queue.empty() || quit; });
|
||||
if (quit) {
|
||||
continue;
|
||||
}
|
||||
work = std::move(work_queue.front());
|
||||
work_queue.pop();
|
||||
}
|
||||
const bool has_submit = work->HasSubmit();
|
||||
work->ExecuteAll(current_cmdbuf);
|
||||
if (has_submit) {
|
||||
AllocateWorkerCommandBuffer();
|
||||
}
|
||||
std::lock_guard reserve_lock{reserve_mutex};
|
||||
chunk_reserve.push_back(std::move(work));
|
||||
} while (!quit);
|
||||
}
|
||||
|
||||
void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
|
||||
EndPendingOperations();
|
||||
InvalidateState();
|
||||
WaitWorker();
|
||||
|
||||
std::unique_lock lock{mutex};
|
||||
|
||||
current_cmdbuf.End();
|
||||
|
||||
const VkSemaphore timeline_semaphore = master_semaphore->Handle();
|
||||
const u32 num_signal_semaphores = semaphore ? 2U : 1U;
|
||||
|
||||
const u64 signal_value = master_semaphore->CurrentTick();
|
||||
const u64 wait_value = signal_value - 1;
|
||||
const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
|
||||
|
||||
master_semaphore->NextTick();
|
||||
|
||||
const std::array signal_values{signal_value, u64(0)};
|
||||
const std::array signal_semaphores{timeline_semaphore, semaphore};
|
||||
|
||||
const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
|
||||
.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
|
||||
.pNext = nullptr,
|
||||
.waitSemaphoreValueCount = 1,
|
||||
.pWaitSemaphoreValues = &wait_value,
|
||||
.signalSemaphoreValueCount = num_signal_semaphores,
|
||||
.pSignalSemaphoreValues = signal_values.data(),
|
||||
};
|
||||
const VkSubmitInfo submit_info{
|
||||
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
||||
.pNext = &timeline_si,
|
||||
.waitSemaphoreCount = 1,
|
||||
.pWaitSemaphores = &timeline_semaphore,
|
||||
.pWaitDstStageMask = &wait_stage_mask,
|
||||
.commandBufferCount = 1,
|
||||
.pCommandBuffers = current_cmdbuf.address(),
|
||||
.signalSemaphoreCount = num_signal_semaphores,
|
||||
.pSignalSemaphores = signal_semaphores.data(),
|
||||
};
|
||||
switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
|
||||
case VK_SUCCESS:
|
||||
break;
|
||||
case VK_ERROR_DEVICE_LOST:
|
||||
device.ReportLoss();
|
||||
[[fallthrough]];
|
||||
default:
|
||||
vk::Check(result);
|
||||
}
|
||||
}
|
||||
|
||||
void VKScheduler::AllocateNewContext() {
|
||||
std::unique_lock lock{mutex};
|
||||
|
||||
void VKScheduler::AllocateWorkerCommandBuffer() {
|
||||
current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
|
||||
current_cmdbuf.Begin({
|
||||
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
|
||||
@@ -210,7 +169,59 @@ void VKScheduler::AllocateNewContext() {
|
||||
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
|
||||
.pInheritanceInfo = nullptr,
|
||||
});
|
||||
}
|
||||
|
||||
void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
|
||||
EndPendingOperations();
|
||||
InvalidateState();
|
||||
|
||||
const u64 signal_value = master_semaphore->NextTick();
|
||||
Record([semaphore, signal_value, this](vk::CommandBuffer cmdbuf) {
|
||||
cmdbuf.End();
|
||||
|
||||
const u32 num_signal_semaphores = semaphore ? 2U : 1U;
|
||||
|
||||
const u64 wait_value = signal_value - 1;
|
||||
const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
|
||||
|
||||
const VkSemaphore timeline_semaphore = master_semaphore->Handle();
|
||||
const std::array signal_values{signal_value, u64(0)};
|
||||
const std::array signal_semaphores{timeline_semaphore, semaphore};
|
||||
|
||||
const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
|
||||
.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
|
||||
.pNext = nullptr,
|
||||
.waitSemaphoreValueCount = 1,
|
||||
.pWaitSemaphoreValues = &wait_value,
|
||||
.signalSemaphoreValueCount = num_signal_semaphores,
|
||||
.pSignalSemaphoreValues = signal_values.data(),
|
||||
};
|
||||
const VkSubmitInfo submit_info{
|
||||
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
||||
.pNext = &timeline_si,
|
||||
.waitSemaphoreCount = 1,
|
||||
.pWaitSemaphores = &timeline_semaphore,
|
||||
.pWaitDstStageMask = &wait_stage_mask,
|
||||
.commandBufferCount = 1,
|
||||
.pCommandBuffers = cmdbuf.address(),
|
||||
.signalSemaphoreCount = num_signal_semaphores,
|
||||
.pSignalSemaphores = signal_semaphores.data(),
|
||||
};
|
||||
switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info)) {
|
||||
case VK_SUCCESS:
|
||||
break;
|
||||
case VK_ERROR_DEVICE_LOST:
|
||||
device.ReportLoss();
|
||||
[[fallthrough]];
|
||||
default:
|
||||
vk::Check(result);
|
||||
}
|
||||
});
|
||||
chunk->MarkSubmit();
|
||||
DispatchWork();
|
||||
}
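// Worked example of the signalling above (values invented): if NextTick() hands out 42,
// the submit waits on timeline value 41 (the previous submission) and signals 42. When a
// binary `semaphore` is also passed, it becomes the second signal entry and its paired
// entry in signal_values (0) is ignored, as required for non-timeline semaphores.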
|
||||
|
||||
void VKScheduler::AllocateNewContext() {
|
||||
// Enable counters once again. These are disabled when a command buffer is finished.
|
||||
if (query_cache) {
|
||||
query_cache->UpdateCounters();
|
||||
@@ -265,12 +276,13 @@ void VKScheduler::EndRenderPass() {
|
||||
}
|
||||
|
||||
void VKScheduler::AcquireNewChunk() {
|
||||
if (chunk_reserve.Empty()) {
|
||||
std::lock_guard lock{reserve_mutex};
|
||||
if (chunk_reserve.empty()) {
|
||||
chunk = std::make_unique<CommandChunk>();
|
||||
return;
|
||||
}
|
||||
chunk = std::move(chunk_reserve.Front());
|
||||
chunk_reserve.Pop();
|
||||
chunk = std::move(chunk_reserve.back());
|
||||
chunk_reserve.pop_back();
|
||||
}
|
||||
|
||||
} // namespace Vulkan
|
||||
|
@@ -8,12 +8,12 @@
|
||||
#include <condition_variable>
|
||||
#include <cstddef>
|
||||
#include <memory>
|
||||
#include <stack>
|
||||
#include <thread>
|
||||
#include <utility>
|
||||
#include <queue>
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/threadsafe_queue.h"
|
||||
#include "video_core/renderer_vulkan/vk_master_semaphore.h"
|
||||
#include "video_core/vulkan_common/vulkan_wrapper.h"
|
||||
|
||||
@@ -22,6 +22,7 @@ namespace Vulkan {
|
||||
class CommandPool;
|
||||
class Device;
|
||||
class Framebuffer;
|
||||
class GraphicsPipeline;
|
||||
class StateTracker;
|
||||
class VKQueryCache;
|
||||
|
||||
@@ -52,8 +53,8 @@ public:
|
||||
/// of a renderpass.
|
||||
void RequestOutsideRenderPassOperationContext();
|
||||
|
||||
/// Binds a pipeline to the current execution context.
|
||||
void BindGraphicsPipeline(VkPipeline pipeline);
|
||||
/// Update the pipeline to the current execution context.
|
||||
bool UpdateGraphicsPipeline(GraphicsPipeline* pipeline);
|
||||
|
||||
/// Invalidates current command buffer state except for render passes
|
||||
void InvalidateState();
|
||||
@@ -85,6 +86,10 @@ public:
|
||||
|
||||
/// Waits for the given tick to trigger on the GPU.
|
||||
void Wait(u64 tick) {
|
||||
if (tick >= master_semaphore->CurrentTick()) {
|
||||
// Make sure we are not waiting for the current tick without signalling
|
||||
Flush();
|
||||
}
|
||||
master_semaphore->Wait(tick);
|
||||
}
|
||||
|
||||
@@ -154,15 +159,24 @@ private:
|
||||
return true;
|
||||
}
|
||||
|
||||
void MarkSubmit() {
|
||||
submit = true;
|
||||
}
|
||||
|
||||
bool Empty() const {
|
||||
return command_offset == 0;
|
||||
}
|
||||
|
||||
bool HasSubmit() const {
|
||||
return submit;
|
||||
}
|
||||
|
||||
private:
|
||||
Command* first = nullptr;
|
||||
Command* last = nullptr;
|
||||
|
||||
size_t command_offset = 0;
|
||||
bool submit = false;
|
||||
alignas(std::max_align_t) std::array<u8, 0x8000> data{};
|
||||
};
|
||||
|
||||
@@ -170,11 +184,13 @@ private:
|
||||
VkRenderPass renderpass = nullptr;
|
||||
VkFramebuffer framebuffer = nullptr;
|
||||
VkExtent2D render_area = {0, 0};
|
||||
VkPipeline graphics_pipeline = nullptr;
|
||||
GraphicsPipeline* graphics_pipeline = nullptr;
|
||||
};
|
||||
|
||||
void WorkerThread();
|
||||
|
||||
void AllocateWorkerCommandBuffer();
|
||||
|
||||
void SubmitExecution(VkSemaphore semaphore);
|
||||
|
||||
void AllocateNewContext();
|
||||
@@ -204,11 +220,13 @@ private:
|
||||
std::array<VkImage, 9> renderpass_images{};
|
||||
std::array<VkImageSubresourceRange, 9> renderpass_image_ranges{};
|
||||
|
||||
Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_queue;
|
||||
Common::SPSCQueue<std::unique_ptr<CommandChunk>> chunk_reserve;
|
||||
std::mutex mutex;
|
||||
std::condition_variable cv;
|
||||
bool quit = false;
|
||||
std::queue<std::unique_ptr<CommandChunk>> work_queue;
|
||||
std::vector<std::unique_ptr<CommandChunk>> chunk_reserve;
|
||||
std::mutex reserve_mutex;
|
||||
std::mutex work_mutex;
|
||||
std::condition_variable work_cv;
|
||||
std::condition_variable wait_cv;
|
||||
std::atomic_bool quit{};
|
||||
};
|
||||
|
||||
} // namespace Vulkan
|
||||
|
@@ -91,7 +91,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
|
||||
.flags = 0,
|
||||
.size = STREAM_BUFFER_SIZE,
|
||||
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
|
||||
VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
|
||||
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
|
||||
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
|
||||
.queueFamilyIndexCount = 0,
|
||||
.pQueueFamilyIndices = nullptr,
|
||||
|
@@ -29,9 +29,10 @@ using Flags = Maxwell3D::DirtyState::Flags;
|
||||
|
||||
Flags MakeInvalidationFlags() {
|
||||
static constexpr int INVALIDATION_FLAGS[]{
|
||||
Viewports, Scissors, DepthBias, BlendConstants, DepthBounds,
|
||||
StencilProperties, CullMode, DepthBoundsEnable, DepthTestEnable, DepthWriteEnable,
|
||||
DepthCompareOp, FrontFace, StencilOp, StencilTestEnable, VertexBuffers,
|
||||
Viewports, Scissors, DepthBias, BlendConstants, DepthBounds,
|
||||
StencilProperties, LineWidth, CullMode, DepthBoundsEnable, DepthTestEnable,
|
||||
DepthWriteEnable, DepthCompareOp, FrontFace, StencilOp, StencilTestEnable,
|
||||
VertexBuffers, VertexInput,
|
||||
};
|
||||
Flags flags{};
|
||||
for (const int flag : INVALIDATION_FLAGS) {
|
||||
@@ -40,6 +41,12 @@ Flags MakeInvalidationFlags() {
|
||||
for (int index = VertexBuffer0; index <= VertexBuffer31; ++index) {
|
||||
flags[index] = true;
|
||||
}
|
||||
for (int index = VertexAttribute0; index <= VertexAttribute31; ++index) {
|
||||
flags[index] = true;
|
||||
}
|
||||
for (int index = VertexBinding0; index <= VertexBinding31; ++index) {
|
||||
flags[index] = true;
|
||||
}
|
||||
return flags;
|
||||
}
|
||||
|
||||
@@ -79,6 +86,11 @@ void SetupDirtyStencilProperties(Tables& tables) {
|
||||
table[OFF(stencil_back_func_mask)] = StencilProperties;
|
||||
}
|
||||
|
||||
void SetupDirtyLineWidth(Tables& tables) {
|
||||
tables[0][OFF(line_width_smooth)] = LineWidth;
|
||||
tables[0][OFF(line_width_aliased)] = LineWidth;
|
||||
}
|
||||
|
||||
void SetupDirtyCullMode(Tables& tables) {
|
||||
auto& table = tables[0];
|
||||
table[OFF(cull_face)] = CullMode;
|
||||
@@ -134,19 +146,6 @@ void SetupDirtyBlending(Tables& tables) {
|
||||
FillBlock(tables[0], OFF(independent_blend), NUM(independent_blend), Blending);
|
||||
}
|
||||
|
||||
void SetupDirtyInstanceDivisors(Tables& tables) {
|
||||
static constexpr size_t divisor_offset = 3;
|
||||
for (size_t index = 0; index < Regs::NumVertexArrays; ++index) {
|
||||
tables[0][OFF(instanced_arrays) + index] = InstanceDivisors;
|
||||
tables[0][OFF(vertex_array) + index * NUM(vertex_array[0]) + divisor_offset] =
|
||||
InstanceDivisors;
|
||||
}
|
||||
}
|
||||
|
||||
void SetupDirtyVertexAttributes(Tables& tables) {
|
||||
FillBlock(tables[0], OFF(vertex_attrib_format), NUM(vertex_attrib_format), VertexAttributes);
|
||||
}
|
||||
|
||||
void SetupDirtyViewportSwizzles(Tables& tables) {
|
||||
static constexpr size_t swizzle_offset = 6;
|
||||
for (size_t index = 0; index < Regs::NumViewports; ++index) {
|
||||
@@ -154,11 +153,31 @@ void SetupDirtyViewportSwizzles(Tables& tables) {
|
||||
ViewportSwizzles;
|
||||
}
|
||||
}
|
||||
|
||||
void SetupDirtyVertexAttributes(Tables& tables) {
|
||||
for (size_t i = 0; i < Regs::NumVertexAttributes; ++i) {
|
||||
const size_t offset = OFF(vertex_attrib_format) + i * NUM(vertex_attrib_format[0]);
|
||||
FillBlock(tables[0], offset, NUM(vertex_attrib_format[0]), VertexAttribute0 + i);
|
||||
}
|
||||
FillBlock(tables[1], OFF(vertex_attrib_format), Regs::NumVertexAttributes, VertexInput);
|
||||
}
|
||||
|
||||
void SetupDirtyVertexBindings(Tables& tables) {
|
||||
// Do NOT include stride here, it's implicit in VertexBuffer
|
||||
static constexpr size_t divisor_offset = 3;
|
||||
for (size_t i = 0; i < Regs::NumVertexArrays; ++i) {
|
||||
const u8 flag = static_cast<u8>(VertexBinding0 + i);
|
||||
tables[0][OFF(instanced_arrays) + i] = VertexInput;
|
||||
tables[1][OFF(instanced_arrays) + i] = flag;
|
||||
tables[0][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = VertexInput;
|
||||
tables[1][OFF(vertex_array) + i * NUM(vertex_array[0]) + divisor_offset] = flag;
|
||||
}
|
||||
}
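// Illustration of the two tables above (example register chosen for clarity): a write to
// regs.instanced_arrays[2] raises the coarse VertexInput flag through tables[0] and the
// per-binding VertexBinding2 flag through tables[1], so UpdateVertexInput() is triggered
// by the single aggregate flag but can skip bindings whose registers did not change.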
|
||||
} // Anonymous namespace
|
||||
|
||||
StateTracker::StateTracker(Tegra::GPU& gpu)
|
||||
: flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} {
|
||||
auto& tables = gpu.Maxwell3D().dirty.tables;
|
||||
auto& tables{gpu.Maxwell3D().dirty.tables};
|
||||
SetupDirtyFlags(tables);
|
||||
SetupDirtyViewports(tables);
|
||||
SetupDirtyScissors(tables);
|
||||
@@ -166,6 +185,7 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
|
||||
SetupDirtyBlendConstants(tables);
|
||||
SetupDirtyDepthBounds(tables);
|
||||
SetupDirtyStencilProperties(tables);
|
||||
SetupDirtyLineWidth(tables);
|
||||
SetupDirtyCullMode(tables);
|
||||
SetupDirtyDepthBoundsEnable(tables);
|
||||
SetupDirtyDepthTestEnable(tables);
|
||||
@@ -175,9 +195,9 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
|
||||
SetupDirtyStencilOp(tables);
|
||||
SetupDirtyStencilTestEnable(tables);
|
||||
SetupDirtyBlending(tables);
|
||||
SetupDirtyInstanceDivisors(tables);
|
||||
SetupDirtyVertexAttributes(tables);
|
||||
SetupDirtyViewportSwizzles(tables);
|
||||
SetupDirtyVertexAttributes(tables);
|
||||
SetupDirtyVertexBindings(tables);
|
||||
}
|
||||
|
||||
} // namespace Vulkan
|
||||
|
@@ -19,12 +19,19 @@ namespace Dirty {
|
||||
enum : u8 {
|
||||
First = VideoCommon::Dirty::LastCommonEntry,
|
||||
|
||||
VertexInput,
|
||||
VertexAttribute0,
|
||||
VertexAttribute31 = VertexAttribute0 + 31,
|
||||
VertexBinding0,
|
||||
VertexBinding31 = VertexBinding0 + 31,
|
||||
|
||||
Viewports,
|
||||
Scissors,
|
||||
DepthBias,
|
||||
BlendConstants,
|
||||
DepthBounds,
|
||||
StencilProperties,
|
||||
LineWidth,
|
||||
|
||||
CullMode,
|
||||
DepthBoundsEnable,
|
||||
@@ -36,11 +43,9 @@ enum : u8 {
|
||||
StencilTestEnable,
|
||||
|
||||
Blending,
|
||||
InstanceDivisors,
|
||||
VertexAttributes,
|
||||
ViewportSwizzles,
|
||||
|
||||
Last
|
||||
Last,
|
||||
};
|
||||
static_assert(Last <= std::numeric_limits<u8>::max());
|
||||
|
||||
@@ -89,6 +94,10 @@ public:
|
||||
return Exchange(Dirty::StencilProperties, false);
|
||||
}
|
||||
|
||||
bool TouchLineWidth() const {
|
||||
return Exchange(Dirty::LineWidth, false);
|
||||
}
|
||||
|
||||
bool TouchCullMode() {
|
||||
return Exchange(Dirty::CullMode, false);
|
||||
}
|
||||
|
@@ -65,6 +65,9 @@ VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const Device& device_, VKSchedul
|
||||
VKSwapchain::~VKSwapchain() = default;
|
||||
|
||||
void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
|
||||
is_outdated = false;
|
||||
is_suboptimal = false;
|
||||
|
||||
const auto physical_device = device.GetPhysical();
|
||||
const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(surface)};
|
||||
if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) {
|
||||
@@ -82,21 +85,31 @@ void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
|
||||
resource_ticks.resize(image_count);
|
||||
}
|
||||
|
||||
bool VKSwapchain::AcquireNextImage() {
|
||||
const VkResult result =
|
||||
device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
|
||||
*present_semaphores[frame_index], {}, &image_index);
|
||||
|
||||
void VKSwapchain::AcquireNextImage() {
|
||||
const VkResult result = device.GetLogical().AcquireNextImageKHR(
|
||||
*swapchain, std::numeric_limits<u64>::max(), *present_semaphores[frame_index],
|
||||
VK_NULL_HANDLE, &image_index);
|
||||
switch (result) {
|
||||
case VK_SUCCESS:
|
||||
break;
|
||||
case VK_SUBOPTIMAL_KHR:
|
||||
is_suboptimal = true;
|
||||
break;
|
||||
case VK_ERROR_OUT_OF_DATE_KHR:
|
||||
is_outdated = true;
|
||||
break;
|
||||
default:
|
||||
LOG_ERROR(Render_Vulkan, "vkAcquireNextImageKHR returned {}", vk::ToString(result));
|
||||
break;
|
||||
}
|
||||
scheduler.Wait(resource_ticks[image_index]);
|
||||
return result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR;
|
||||
resource_ticks[image_index] = scheduler.CurrentTick();
|
||||
}
|
||||
|
||||
bool VKSwapchain::Present(VkSemaphore render_semaphore) {
|
||||
void VKSwapchain::Present(VkSemaphore render_semaphore) {
|
||||
const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
|
||||
const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
|
||||
const auto present_queue{device.GetPresentQueue()};
|
||||
bool recreated = false;
|
||||
|
||||
const VkPresentInfoKHR present_info{
|
||||
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
|
||||
.pNext = nullptr,
|
||||
@@ -107,7 +120,6 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore) {
|
||||
.pImageIndices = &image_index,
|
||||
.pResults = nullptr,
|
||||
};
|
||||
|
||||
switch (const VkResult result = present_queue.Present(present_info)) {
|
||||
case VK_SUCCESS:
|
||||
break;
|
||||
@@ -115,24 +127,16 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore) {
|
||||
LOG_DEBUG(Render_Vulkan, "Suboptimal swapchain");
|
||||
break;
|
||||
case VK_ERROR_OUT_OF_DATE_KHR:
|
||||
if (current_width > 0 && current_height > 0) {
|
||||
Create(current_width, current_height, current_srgb);
|
||||
recreated = true;
|
||||
}
|
||||
is_outdated = true;
|
||||
break;
|
||||
default:
|
||||
LOG_CRITICAL(Render_Vulkan, "Failed to present with error {}", vk::ToString(result));
|
||||
break;
|
||||
}
|
||||
|
||||
resource_ticks[image_index] = scheduler.CurrentTick();
|
||||
frame_index = (frame_index + 1) % static_cast<u32>(image_count);
|
||||
return recreated;
|
||||
}
|
||||
|
||||
bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const {
|
||||
// TODO(Rodrigo): Handle framebuffer pixel format changes
|
||||
return framebuffer.width != current_width || framebuffer.height != current_height;
|
||||
++frame_index;
|
||||
if (frame_index >= image_count) {
|
||||
frame_index = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width,
|
||||
@@ -148,7 +152,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
|
||||
if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
|
||||
requested_image_count = capabilities.maxImageCount;
|
||||
}
|
||||
|
||||
VkSwapchainCreateInfoKHR swapchain_ci{
|
||||
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
|
||||
.pNext = nullptr,
|
||||
@@ -169,7 +172,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
|
||||
.clipped = VK_FALSE,
|
||||
.oldSwapchain = nullptr,
|
||||
};
|
||||
|
||||
const u32 graphics_family{device.GetGraphicsFamily()};
|
||||
const u32 present_family{device.GetPresentFamily()};
|
||||
const std::array<u32, 2> queue_indices{graphics_family, present_family};
|
||||
@@ -178,7 +180,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
|
||||
swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
|
||||
swapchain_ci.pQueueFamilyIndices = queue_indices.data();
|
||||
}
|
||||
|
||||
// Request the size again to reduce the possibility of a TOCTOU race condition.
|
||||
const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface);
|
||||
swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height);
|
||||
@@ -186,8 +187,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
|
||||
swapchain = device.GetLogical().CreateSwapchainKHR(swapchain_ci);
|
||||
|
||||
extent = swapchain_ci.imageExtent;
|
||||
current_width = extent.width;
|
||||
current_height = extent.height;
|
||||
current_srgb = srgb;
|
||||
|
||||
images = swapchain.GetImages();
|
||||
@@ -197,8 +196,8 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
|
||||
|
||||
void VKSwapchain::CreateSemaphores() {
|
||||
present_semaphores.resize(image_count);
|
||||
std::generate(present_semaphores.begin(), present_semaphores.end(),
|
||||
[this] { return device.GetLogical().CreateSemaphore(); });
|
||||
std::ranges::generate(present_semaphores,
|
||||
[this] { return device.GetLogical().CreateSemaphore(); });
|
||||
}
|
||||
|
||||
void VKSwapchain::CreateImageViews() {
|
||||
|
@@ -28,14 +28,25 @@ public:
|
||||
void Create(u32 width, u32 height, bool srgb);
|
||||
|
||||
/// Acquires the next image in the swapchain, waits as needed.
|
||||
bool AcquireNextImage();
|
||||
void AcquireNextImage();
|
||||
|
||||
/// Presents the rendered image to the swapchain. Returns true when the swapchains had to be
|
||||
/// recreated. Takes responsibility for the ownership of the fence.
|
||||
bool Present(VkSemaphore render_semaphore);
|
||||
/// Presents the rendered image to the swapchain.
|
||||
void Present(VkSemaphore render_semaphore);
|
||||
|
||||
/// Returns true when the framebuffer layout has changed.
|
||||
bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;
|
||||
/// Returns true when the color space has changed.
|
||||
bool HasColorSpaceChanged(bool is_srgb) const {
|
||||
return current_srgb != is_srgb;
|
||||
}
|
||||
|
||||
/// Returns true when the swapchain is outdated.
|
||||
bool IsOutDated() const {
|
||||
return is_outdated;
|
||||
}
|
||||
|
||||
/// Returns true when the swapchain is suboptimal.
|
||||
bool IsSubOptimal() const {
|
||||
return is_suboptimal;
|
||||
}
|
||||
|
||||
VkExtent2D GetSize() const {
|
||||
return extent;
|
||||
@@ -61,10 +72,6 @@ public:
|
||||
return image_format;
|
||||
}
|
||||
|
||||
bool GetSrgbState() const {
|
||||
return current_srgb;
|
||||
}
|
||||
|
||||
private:
|
||||
void CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
|
||||
bool srgb);
|
||||
@@ -92,9 +99,9 @@ private:
|
||||
VkFormat image_format{};
|
||||
VkExtent2D extent{};
|
||||
|
||||
u32 current_width{};
|
||||
u32 current_height{};
|
||||
bool current_srgb{};
|
||||
bool is_outdated{};
|
||||
bool is_suboptimal{};
|
||||
};
|
||||
|
||||
} // namespace Vulkan
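A sketch of the call pattern the reworked interface expects from the renderer; the variable names (width, height, is_srgb, render_semaphore) are assumptions for illustration:
swapchain.AcquireNextImage();                        // now returns void and only sets flags
if (swapchain.IsOutDated() || swapchain.HasColorSpaceChanged(is_srgb)) {
    swapchain.Create(width, height, is_srgb);        // recreation is the caller's job now
    swapchain.AcquireNextImage();
}
// ... record and submit rendering that targets the acquired image ...
swapchain.Present(render_semaphore);                 // no longer reports recreation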
|
||||
|
@@ -15,6 +15,7 @@
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
@@ -34,19 +35,6 @@ using VideoCommon::SubresourceRange;
using VideoCore::Surface::IsPixelFormatASTC;

namespace {

constexpr std::array ATTACHMENT_REFERENCES{
    VkAttachmentReference{0, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{1, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{2, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{3, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{4, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{5, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{6, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{7, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{8, VK_IMAGE_LAYOUT_GENERAL},
};

constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
    if (color == std::array<float, 4>{0, 0, 0, 0}) {
        return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
@@ -174,25 +162,6 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
    return device.GetLogical().CreateImage(MakeImageCreateInfo(device, info));
}

[[nodiscard]] vk::Buffer MakeBuffer(const Device& device, const ImageInfo& info) {
    if (info.type != ImageType::Buffer) {
        return vk::Buffer{};
    }
    const size_t bytes_per_block = VideoCore::Surface::BytesPerBlock(info.format);
    return device.GetLogical().CreateBuffer(VkBufferCreateInfo{
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .size = info.size.width * bytes_per_block,
        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
                 VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
                 VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .queueFamilyIndexCount = 0,
        .pQueueFamilyIndices = nullptr,
    });
}

[[nodiscard]] VkImageAspectFlags ImageAspectMask(PixelFormat format) {
    switch (VideoCore::Surface::GetFormatType(format)) {
    case VideoCore::Surface::SurfaceType::ColorTexture:
@@ -226,23 +195,6 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
    }
}

[[nodiscard]] VkAttachmentDescription AttachmentDescription(const Device& device,
                                                             const ImageView* image_view) {
    using MaxwellToVK::SurfaceFormat;
    const PixelFormat pixel_format = image_view->format;
    return VkAttachmentDescription{
        .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
        .format = SurfaceFormat(device, FormatType::Optimal, true, pixel_format).format,
        .samples = image_view->Samples(),
        .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
        .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
        .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
        .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
        .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
        .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
    };
}

[[nodiscard]] VkComponentSwizzle ComponentSwizzle(SwizzleSource swizzle) {
    switch (swizzle) {
    case SwizzleSource::Zero:
@@ -263,6 +215,30 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
    return VK_COMPONENT_SWIZZLE_ZERO;
}

[[nodiscard]] VkImageViewType ImageViewType(Shader::TextureType type) {
    switch (type) {
    case Shader::TextureType::Color1D:
        return VK_IMAGE_VIEW_TYPE_1D;
    case Shader::TextureType::Color2D:
        return VK_IMAGE_VIEW_TYPE_2D;
    case Shader::TextureType::ColorCube:
        return VK_IMAGE_VIEW_TYPE_CUBE;
    case Shader::TextureType::Color3D:
        return VK_IMAGE_VIEW_TYPE_3D;
    case Shader::TextureType::ColorArray1D:
        return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
    case Shader::TextureType::ColorArray2D:
        return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
    case Shader::TextureType::ColorArrayCube:
        return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
    case Shader::TextureType::Buffer:
        UNREACHABLE_MSG("Texture buffers can't be image views");
        return VK_IMAGE_VIEW_TYPE_1D;
    }
    UNREACHABLE_MSG("Invalid image view type={}", type);
    return VK_IMAGE_VIEW_TYPE_2D;
}

[[nodiscard]] VkImageViewType ImageViewType(VideoCommon::ImageViewType type) {
    switch (type) {
    case VideoCommon::ImageViewType::e1D:
@@ -280,7 +256,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
    case VideoCommon::ImageViewType::CubeArray:
        return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
    case VideoCommon::ImageViewType::Rect:
        LOG_WARNING(Render_Vulkan, "Unnormalized image view type not supported");
        UNIMPLEMENTED_MSG("Rect image view");
        return VK_IMAGE_VIEW_TYPE_2D;
    case VideoCommon::ImageViewType::Buffer:
        UNREACHABLE_MSG("Texture buffers can't be image views");
@@ -327,7 +303,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
    };
}

[[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
[[maybe_unused]] [[nodiscard]] std::vector<VkBufferCopy> TransformBufferCopies(
    std::span<const VideoCommon::BufferCopy> copies, size_t buffer_offset) {
    std::vector<VkBufferCopy> result(copies.size());
    std::ranges::transform(
@@ -587,6 +563,28 @@ struct RangedBarrierRange {
    }
};

[[nodiscard]] VkFormat Format(Shader::ImageFormat format) {
    switch (format) {
    case Shader::ImageFormat::Typeless:
        break;
    case Shader::ImageFormat::R8_SINT:
        return VK_FORMAT_R8_SINT;
    case Shader::ImageFormat::R8_UINT:
        return VK_FORMAT_R8_UINT;
    case Shader::ImageFormat::R16_UINT:
        return VK_FORMAT_R16_UINT;
    case Shader::ImageFormat::R16_SINT:
        return VK_FORMAT_R16_SINT;
    case Shader::ImageFormat::R32_UINT:
        return VK_FORMAT_R32_UINT;
    case Shader::ImageFormat::R32G32_UINT:
        return VK_FORMAT_R32G32_UINT;
    case Shader::ImageFormat::R32G32B32A32_UINT:
        return VK_FORMAT_R32G32B32A32_UINT;
    }
    UNREACHABLE_MSG("Invalid image format={}", format);
    return VK_FORMAT_R32_UINT;
}
} // Anonymous namespace

void TextureCacheRuntime::Finish() {
@@ -622,7 +620,7 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst
            return;
        }
    }
    ASSERT(src.ImageFormat() == dst.ImageFormat());
    ASSERT(src.format == dst.format);
    ASSERT(!(is_dst_msaa && !is_src_msaa));
    ASSERT(operation == Fermi2D::Operation::SrcCopy);

@@ -825,13 +823,9 @@ u64 TextureCacheRuntime::GetDeviceLocalMemory() const {
Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_addr_,
             VAddr cpu_addr_)
    : VideoCommon::ImageBase(info_, gpu_addr_, cpu_addr_), scheduler{&runtime.scheduler},
      image(MakeImage(runtime.device, info)), buffer(MakeBuffer(runtime.device, info)),
      image(MakeImage(runtime.device, info)),
      commit(runtime.memory_allocator.Commit(image, MemoryUsage::DeviceLocal)),
      aspect_mask(ImageAspectMask(info.format)) {
    if (image) {
        commit = runtime.memory_allocator.Commit(image, MemoryUsage::DeviceLocal);
    } else {
        commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
    }
    if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) {
        if (Settings::values.accelerate_astc.GetValue()) {
            flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
@@ -840,11 +834,7 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_
        }
    }
    if (runtime.device.HasDebuggingToolAttached()) {
        if (image) {
            image.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
        } else {
            buffer.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
        }
        image.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
    }
    static constexpr VkImageViewUsageCreateInfo storage_image_view_usage_create_info{
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
@@ -896,19 +886,6 @@ void Image::UploadMemory(const StagingBufferRef& map, std::span<const BufferImag
    });
}

void Image::UploadMemory(const StagingBufferRef& map,
                         std::span<const VideoCommon::BufferCopy> copies) {
    // TODO: Move this to another API
    scheduler->RequestOutsideRenderPassOperationContext();
    std::vector vk_copies = TransformBufferCopies(copies, map.offset);
    const VkBuffer src_buffer = map.buffer;
    const VkBuffer dst_buffer = *buffer;
    scheduler->Record([src_buffer, dst_buffer, vk_copies](vk::CommandBuffer cmdbuf) {
        // TODO: Barriers
        cmdbuf.CopyBuffer(src_buffer, dst_buffer, vk_copies);
    });
}

void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferImageCopy> copies) {
    std::vector vk_copies = TransformBufferImageCopies(copies, map.offset, aspect_mask);
    scheduler->Record([buffer = map.buffer, image = *image, aspect_mask = aspect_mask,
@@ -966,8 +943,9 @@ void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferIm
ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewInfo& info,
                     ImageId image_id_, Image& image)
    : VideoCommon::ImageViewBase{info, image.info, image_id_}, device{&runtime.device},
      image_handle{image.Handle()}, image_format{image.info.format}, samples{ConvertSampleCount(
                                        image.info.num_samples)} {
      image_handle{image.Handle()}, samples{ConvertSampleCount(image.info.num_samples)} {
    using Shader::TextureType;

    const VkImageAspectFlags aspect_mask = ImageViewAspectMask(info);
    std::array<SwizzleSource, 4> swizzle{
        SwizzleSource::R,
@@ -1005,57 +983,54 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
        },
        .subresourceRange = MakeSubresourceRange(aspect_mask, info.range),
    };
    const auto create = [&](VideoCommon::ImageViewType view_type, std::optional<u32> num_layers) {
    const auto create = [&](TextureType tex_type, std::optional<u32> num_layers) {
        VkImageViewCreateInfo ci{create_info};
        ci.viewType = ImageViewType(view_type);
        ci.viewType = ImageViewType(tex_type);
        if (num_layers) {
            ci.subresourceRange.layerCount = *num_layers;
        }
        vk::ImageView handle = device->GetLogical().CreateImageView(ci);
        if (device->HasDebuggingToolAttached()) {
            handle.SetObjectNameEXT(VideoCommon::Name(*this, view_type).c_str());
            handle.SetObjectNameEXT(VideoCommon::Name(*this).c_str());
        }
        image_views[static_cast<size_t>(view_type)] = std::move(handle);
        image_views[static_cast<size_t>(tex_type)] = std::move(handle);
    };
    switch (info.type) {
    case VideoCommon::ImageViewType::e1D:
    case VideoCommon::ImageViewType::e1DArray:
        create(VideoCommon::ImageViewType::e1D, 1);
        create(VideoCommon::ImageViewType::e1DArray, std::nullopt);
        render_target = Handle(VideoCommon::ImageViewType::e1DArray);
        create(TextureType::Color1D, 1);
        create(TextureType::ColorArray1D, std::nullopt);
        render_target = Handle(TextureType::ColorArray1D);
        break;
    case VideoCommon::ImageViewType::e2D:
    case VideoCommon::ImageViewType::e2DArray:
        create(VideoCommon::ImageViewType::e2D, 1);
        create(VideoCommon::ImageViewType::e2DArray, std::nullopt);
        render_target = Handle(VideoCommon::ImageViewType::e2DArray);
        create(TextureType::Color2D, 1);
        create(TextureType::ColorArray2D, std::nullopt);
        render_target = Handle(Shader::TextureType::ColorArray2D);
        break;
    case VideoCommon::ImageViewType::e3D:
        create(VideoCommon::ImageViewType::e3D, std::nullopt);
        render_target = Handle(VideoCommon::ImageViewType::e3D);
        create(TextureType::Color3D, std::nullopt);
        render_target = Handle(Shader::TextureType::Color3D);
        break;
    case VideoCommon::ImageViewType::Cube:
    case VideoCommon::ImageViewType::CubeArray:
        create(VideoCommon::ImageViewType::Cube, 6);
        create(VideoCommon::ImageViewType::CubeArray, std::nullopt);
        create(TextureType::ColorCube, 6);
        create(TextureType::ColorArrayCube, std::nullopt);
        break;
    case VideoCommon::ImageViewType::Rect:
        UNIMPLEMENTED();
        break;
    case VideoCommon::ImageViewType::Buffer:
        buffer_view = device->GetLogical().CreateBufferView(VkBufferViewCreateInfo{
            .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
            .buffer = image.Buffer(),
            .format = format_info.format,
            .offset = 0, // TODO: Redesign buffer cache to support this
            .range = image.guest_size_bytes,
        });
        UNREACHABLE();
        break;
    }
}

ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo& info,
                     const VideoCommon::ImageViewInfo& view_info, GPUVAddr gpu_addr_)
    : VideoCommon::ImageViewBase{info, view_info}, gpu_addr{gpu_addr_},
      buffer_size{VideoCommon::CalculateGuestSizeInBytes(info)} {}

ImageView::ImageView(TextureCacheRuntime&, const VideoCommon::NullImageParams& params)
    : VideoCommon::ImageViewBase{params} {}

@@ -1063,7 +1038,8 @@ VkImageView ImageView::DepthView() {
    if (depth_view) {
        return *depth_view;
    }
    depth_view = MakeDepthStencilView(VK_IMAGE_ASPECT_DEPTH_BIT);
    const auto& info = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format);
    depth_view = MakeView(info.format, VK_IMAGE_ASPECT_DEPTH_BIT);
    return *depth_view;
}

@@ -1071,18 +1047,38 @@ VkImageView ImageView::StencilView() {
    if (stencil_view) {
        return *stencil_view;
    }
    stencil_view = MakeDepthStencilView(VK_IMAGE_ASPECT_STENCIL_BIT);
    const auto& info = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format);
    stencil_view = MakeView(info.format, VK_IMAGE_ASPECT_STENCIL_BIT);
    return *stencil_view;
}

vk::ImageView ImageView::MakeDepthStencilView(VkImageAspectFlags aspect_mask) {
VkImageView ImageView::StorageView(Shader::TextureType texture_type,
                                   Shader::ImageFormat image_format) {
    if (image_format == Shader::ImageFormat::Typeless) {
        return Handle(texture_type);
    }
    const bool is_signed{image_format == Shader::ImageFormat::R8_SINT ||
                         image_format == Shader::ImageFormat::R16_SINT};
    if (!storage_views) {
        storage_views = std::make_unique<StorageViews>();
    }
    auto& views{is_signed ? storage_views->signeds : storage_views->unsigneds};
    auto& view{views[static_cast<size_t>(texture_type)]};
    if (view) {
        return *view;
    }
    view = MakeView(Format(image_format), VK_IMAGE_ASPECT_COLOR_BIT);
    return *view;
}

vk::ImageView ImageView::MakeView(VkFormat vk_format, VkImageAspectFlags aspect_mask) {
    return device->GetLogical().CreateImageView({
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .image = image_handle,
        .viewType = ImageViewType(type),
        .format = MaxwellToVK::SurfaceFormat(*device, FormatType::Optimal, true, format).format,
        .format = vk_format,
        .components{
            .r = VK_COMPONENT_SWIZZLE_IDENTITY,
            .g = VK_COMPONENT_SWIZZLE_IDENTITY,
@@ -1146,7 +1142,6 @@ Sampler::Sampler(TextureCacheRuntime& runtime, const Tegra::Texture::TSCEntry& t

Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM_RT> color_buffers,
                         ImageView* depth_buffer, const VideoCommon::RenderTargets& key) {
    std::vector<VkAttachmentDescription> descriptions;
    std::vector<VkImageView> attachments;
    RenderPassKey renderpass_key{};
    s32 num_layers = 1;
@@ -1157,7 +1152,6 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
            renderpass_key.color_formats[index] = PixelFormat::Invalid;
            continue;
        }
        descriptions.push_back(AttachmentDescription(runtime.device, color_buffer));
        attachments.push_back(color_buffer->RenderTarget());
        renderpass_key.color_formats[index] = color_buffer->format;
        num_layers = std::max(num_layers, color_buffer->range.extent.layers);
@@ -1167,10 +1161,7 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
        ++num_images;
    }
    const size_t num_colors = attachments.size();
    const VkAttachmentReference* depth_attachment =
        depth_buffer ? &ATTACHMENT_REFERENCES[num_colors] : nullptr;
    if (depth_buffer) {
        descriptions.push_back(AttachmentDescription(runtime.device, depth_buffer));
        attachments.push_back(depth_buffer->RenderTarget());
        renderpass_key.depth_format = depth_buffer->format;
        num_layers = std::max(num_layers, depth_buffer->range.extent.layers);
@@ -1183,40 +1174,14 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
    }
    renderpass_key.samples = samples;

    const auto& device = runtime.device.GetLogical();
    const auto [cache_pair, is_new] = runtime.renderpass_cache.try_emplace(renderpass_key);
    if (is_new) {
        const VkSubpassDescription subpass{
            .flags = 0,
            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
            .inputAttachmentCount = 0,
            .pInputAttachments = nullptr,
            .colorAttachmentCount = static_cast<u32>(num_colors),
            .pColorAttachments = num_colors != 0 ? ATTACHMENT_REFERENCES.data() : nullptr,
            .pResolveAttachments = nullptr,
            .pDepthStencilAttachment = depth_attachment,
            .preserveAttachmentCount = 0,
            .pPreserveAttachments = nullptr,
        };
        cache_pair->second = device.CreateRenderPass(VkRenderPassCreateInfo{
            .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
            .attachmentCount = static_cast<u32>(descriptions.size()),
            .pAttachments = descriptions.data(),
            .subpassCount = 1,
            .pSubpasses = &subpass,
            .dependencyCount = 0,
            .pDependencies = nullptr,
        });
    }
    renderpass = *cache_pair->second;
    renderpass = runtime.render_pass_cache.Get(renderpass_key);

    render_area = VkExtent2D{
        .width = key.size.width,
        .height = key.size.height,
    };
    num_color_buffers = static_cast<u32>(num_colors);
    framebuffer = device.CreateFramebuffer(VkFramebufferCreateInfo{
    framebuffer = runtime.device.GetLogical().CreateFramebuffer({
        .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
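As an aside, the ImageView::StorageView logic above lazily creates one view per texture type and signedness and caches it. A self-contained sketch of that caching pattern, using simplified stand-in types rather than the renderer's own:

    #include <array>
    #include <memory>
    #include <optional>

    // Stand-ins for Shader::TextureType and vk::ImageView; illustration only.
    enum class TexType { Color1D, Color2D, Color3D, Count };

    struct LazyViews {
        // One slot per texture type, split by signedness, filled on first use.
        std::array<std::optional<int>, static_cast<size_t>(TexType::Count)> signeds;
        std::array<std::optional<int>, static_cast<size_t>(TexType::Count)> unsigneds;
    };

    int GetStorageView(std::unique_ptr<LazyViews>& views, TexType type, bool is_signed) {
        if (!views) {
            views = std::make_unique<LazyViews>(); // Allocate the cache lazily.
        }
        auto& slot = (is_signed ? views->signeds : views->unsigneds)[static_cast<size_t>(type)];
        if (!slot) {
            slot = 1; // Stand-in for creating the reinterpreted-format image view.
        }
        return *slot;
    }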
@@ -7,6 +7,7 @@
#include <compare>
#include <span>

#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/texture_cache/texture_cache.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"
@@ -26,35 +27,10 @@ class Device;
class Image;
class ImageView;
class Framebuffer;
class RenderPassCache;
class StagingBufferPool;
class VKScheduler;

struct RenderPassKey {
    constexpr auto operator<=>(const RenderPassKey&) const noexcept = default;

    std::array<PixelFormat, NUM_RT> color_formats;
    PixelFormat depth_format;
    VkSampleCountFlagBits samples;
};

} // namespace Vulkan

namespace std {
template <>
struct hash<Vulkan::RenderPassKey> {
    [[nodiscard]] constexpr size_t operator()(const Vulkan::RenderPassKey& key) const noexcept {
        size_t value = static_cast<size_t>(key.depth_format) << 48;
        value ^= static_cast<size_t>(key.samples) << 52;
        for (size_t i = 0; i < key.color_formats.size(); ++i) {
            value ^= static_cast<size_t>(key.color_formats[i]) << (i * 6);
        }
        return value;
    }
};
} // namespace std

namespace Vulkan {

struct TextureCacheRuntime {
    const Device& device;
    VKScheduler& scheduler;
@@ -62,13 +38,13 @@ struct TextureCacheRuntime {
    StagingBufferPool& staging_buffer_pool;
    BlitImageHelper& blit_image_helper;
    ASTCDecoderPass& astc_decoder_pass;
    std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache{};
    RenderPassCache& render_pass_cache;

    void Finish();

    [[nodiscard]] StagingBufferRef UploadStagingBuffer(size_t size);
    StagingBufferRef UploadStagingBuffer(size_t size);

    [[nodiscard]] StagingBufferRef DownloadStagingBuffer(size_t size);
    StagingBufferRef DownloadStagingBuffer(size_t size);

    void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src,
                   const Region2D& dst_region, const Region2D& src_region,
@@ -79,7 +55,7 @@ struct TextureCacheRuntime {

    void ConvertImage(Framebuffer* dst, ImageView& dst_view, ImageView& src_view);

    [[nodiscard]] bool CanAccelerateImageUpload(Image&) const noexcept {
    bool CanAccelerateImageUpload(Image&) const noexcept {
        return false;
    }

@@ -117,8 +93,6 @@ public:
    void UploadMemory(const StagingBufferRef& map,
                      std::span<const VideoCommon::BufferImageCopy> copies);

    void UploadMemory(const StagingBufferRef& map, std::span<const VideoCommon::BufferCopy> copies);

    void DownloadMemory(const StagingBufferRef& map,
                        std::span<const VideoCommon::BufferImageCopy> copies);

@@ -126,10 +100,6 @@ public:
        return *image;
    }

    [[nodiscard]] VkBuffer Buffer() const noexcept {
        return *buffer;
    }

    [[nodiscard]] VkImageAspectFlags AspectMask() const noexcept {
        return aspect_mask;
    }
@@ -146,7 +116,6 @@ public:
private:
    VKScheduler* scheduler;
    vk::Image image;
    vk::Buffer buffer;
    MemoryCommit commit;
    vk::ImageView image_view;
    std::vector<vk::ImageView> storage_image_views;
@@ -157,18 +126,19 @@ private:
class ImageView : public VideoCommon::ImageViewBase {
public:
    explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageViewInfo&, ImageId, Image&);
    explicit ImageView(TextureCacheRuntime&, const VideoCommon::ImageInfo&,
                       const VideoCommon::ImageViewInfo&, GPUVAddr);
    explicit ImageView(TextureCacheRuntime&, const VideoCommon::NullImageParams&);

    [[nodiscard]] VkImageView DepthView();

    [[nodiscard]] VkImageView StencilView();

    [[nodiscard]] VkImageView Handle(VideoCommon::ImageViewType query_type) const noexcept {
        return *image_views[static_cast<size_t>(query_type)];
    }
    [[nodiscard]] VkImageView StorageView(Shader::TextureType texture_type,
                                          Shader::ImageFormat image_format);

    [[nodiscard]] VkBufferView BufferView() const noexcept {
        return *buffer_view;
    [[nodiscard]] VkImageView Handle(Shader::TextureType texture_type) const noexcept {
        return *image_views[static_cast<size_t>(texture_type)];
    }

    [[nodiscard]] VkImage ImageHandle() const noexcept {
@@ -179,26 +149,36 @@ public:
        return render_target;
    }

    [[nodiscard]] PixelFormat ImageFormat() const noexcept {
        return image_format;
    }

    [[nodiscard]] VkSampleCountFlagBits Samples() const noexcept {
        return samples;
    }

    [[nodiscard]] GPUVAddr GpuAddr() const noexcept {
        return gpu_addr;
    }

    [[nodiscard]] u32 BufferSize() const noexcept {
        return buffer_size;
    }

private:
    [[nodiscard]] vk::ImageView MakeDepthStencilView(VkImageAspectFlags aspect_mask);
    struct StorageViews {
        std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> signeds;
        std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> unsigneds;
    };

    [[nodiscard]] vk::ImageView MakeView(VkFormat vk_format, VkImageAspectFlags aspect_mask);

    const Device* device = nullptr;
    std::array<vk::ImageView, VideoCommon::NUM_IMAGE_VIEW_TYPES> image_views;
    std::array<vk::ImageView, Shader::NUM_TEXTURE_TYPES> image_views;
    std::unique_ptr<StorageViews> storage_views;
    vk::ImageView depth_view;
    vk::ImageView stencil_view;
    vk::BufferView buffer_view;
    VkImage image_handle = VK_NULL_HANDLE;
    VkImageView render_target = VK_NULL_HANDLE;
    PixelFormat image_format = PixelFormat::Invalid;
    VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_1_BIT;
    GPUVAddr gpu_addr = 0;
    u32 buffer_size = 0;
};

class ImageAlloc : public VideoCommon::ImageAllocBase {};
@@ -15,7 +15,9 @@
namespace Vulkan {

VKUpdateDescriptorQueue::VKUpdateDescriptorQueue(const Device& device_, VKScheduler& scheduler_)
    : device{device_}, scheduler{scheduler_} {}
    : device{device_}, scheduler{scheduler_} {
    payload_cursor = payload.data();
}

VKUpdateDescriptorQueue::~VKUpdateDescriptorQueue() = default;

@@ -36,13 +38,4 @@ void VKUpdateDescriptorQueue::Acquire() {
    upload_start = payload_cursor;
}

void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template,
                                   VkDescriptorSet set) {
    const void* const data = upload_start;
    const vk::Device* const logical = &device.GetLogical();
    scheduler.Record([data, logical, set, update_template](vk::CommandBuffer) {
        logical->UpdateDescriptorSet(set, update_template, data);
    });
}

} // namespace Vulkan
@@ -39,7 +39,9 @@ public:

    void Acquire();

    void Send(VkDescriptorUpdateTemplateKHR update_template, VkDescriptorSet set);
    const DescriptorUpdateEntry* UpdateData() const noexcept {
        return upload_start;
    }

    void AddSampledImage(VkImageView image_view, VkSampler sampler) {
        *(payload_cursor++) = VkDescriptorImageInfo{