From 210a322d7457f4fa4905d2cbafa69b3100951f2e Mon Sep 17 00:00:00 2001 From: pineappleEA Date: Wed, 6 Jan 2021 04:42:38 +0100 Subject: [PATCH] early-access version 1291 --- README.md | 2 +- src/core/CMakeLists.txt | 2 + src/core/crypto/key_manager.cpp | 11 +- src/core/file_sys/nca_patch.cpp | 2 +- src/core/file_sys/registered_cache.cpp | 3 +- .../hle/kernel/memory/address_space_info.cpp | 2 + .../hle/service/sockets/sockets_translate.cpp | 1 + src/video_core/CMakeLists.txt | 4 +- .../renderer_vulkan/renderer_vulkan.cpp | 10 +- .../renderer_vulkan/renderer_vulkan.h | 4 +- .../renderer_vulkan/vk_blit_screen.cpp | 22 +- .../renderer_vulkan/vk_blit_screen.h | 10 +- .../renderer_vulkan/vk_buffer_cache.cpp | 40 ++- .../renderer_vulkan/vk_buffer_cache.h | 22 +- .../renderer_vulkan/vk_compute_pass.cpp | 32 +-- .../renderer_vulkan/vk_compute_pass.h | 15 +- .../renderer_vulkan/vk_rasterizer.cpp | 14 +- .../renderer_vulkan/vk_rasterizer.h | 10 +- .../vk_staging_buffer_pool.cpp | 124 ++++---- .../renderer_vulkan/vk_staging_buffer_pool.h | 62 ++-- .../renderer_vulkan/vk_texture_cache.cpp | 20 +- .../renderer_vulkan/vk_texture_cache.h | 22 +- .../vulkan_common/vulkan_memory_allocator.cpp | 268 ++++++++++++++++++ .../vulkan_common/vulkan_memory_allocator.h | 118 ++++++++ 24 files changed, 621 insertions(+), 199 deletions(-) create mode 100755 src/video_core/vulkan_common/vulkan_memory_allocator.cpp create mode 100755 src/video_core/vulkan_common/vulkan_memory_allocator.h diff --git a/README.md b/README.md index 2572e410e..757752d9a 100755 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ yuzu emulator early access ============= -This is the source code for early-access 1290. +This is the source code for early-access 1291. ## Legal Notice diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index fbca16015..7016e1a67 100755 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -634,6 +634,8 @@ if (MSVC) /we4267 # 'context' : truncation from 'type1' to 'type2' /we4305 + # 'function' : not all control paths return a value + /we4715 ) else() target_compile_options(core PRIVATE diff --git a/src/core/crypto/key_manager.cpp b/src/core/crypto/key_manager.cpp index da15f764a..cebe2ce37 100755 --- a/src/core/crypto/key_manager.cpp +++ b/src/core/crypto/key_manager.cpp @@ -143,6 +143,7 @@ u64 GetSignatureTypeDataSize(SignatureType type) { return 0x3C; } UNREACHABLE(); + return 0; } u64 GetSignatureTypePaddingSize(SignatureType type) { @@ -157,6 +158,7 @@ u64 GetSignatureTypePaddingSize(SignatureType type) { return 0x40; } UNREACHABLE(); + return 0; } SignatureType Ticket::GetSignatureType() const { @@ -169,8 +171,7 @@ SignatureType Ticket::GetSignatureType() const { if (const auto* ticket = std::get_if(&data)) { return ticket->sig_type; } - - UNREACHABLE(); + throw std::bad_variant_access{}; } TicketData& Ticket::GetData() { @@ -183,8 +184,7 @@ TicketData& Ticket::GetData() { if (auto* ticket = std::get_if(&data)) { return ticket->data; } - - UNREACHABLE(); + throw std::bad_variant_access{}; } const TicketData& Ticket::GetData() const { @@ -197,8 +197,7 @@ const TicketData& Ticket::GetData() const { if (const auto* ticket = std::get_if(&data)) { return ticket->data; } - - UNREACHABLE(); + throw std::bad_variant_access{}; } u64 Ticket::GetSize() const { diff --git a/src/core/file_sys/nca_patch.cpp b/src/core/file_sys/nca_patch.cpp index adcf0732f..a65ec6798 100755 --- a/src/core/file_sys/nca_patch.cpp +++ b/src/core/file_sys/nca_patch.cpp @@ -51,8 +51,8 @@ std::pair SearchBucketEntry(u64 offset, 
const BlockTyp low = mid + 1; } } - UNREACHABLE_MSG("Offset could not be found in BKTR block."); + return {0, 0}; } } // Anonymous namespace diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp index da01002d5..431302f55 100755 --- a/src/core/file_sys/registered_cache.cpp +++ b/src/core/file_sys/registered_cache.cpp @@ -105,7 +105,8 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) { // TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal. return ContentRecordType::HtmlDocument; default: - UNREACHABLE_MSG("Invalid NCAContentType={:02X}", static_cast(type)); + UNREACHABLE_MSG("Invalid NCAContentType={:02X}", type); + return ContentRecordType{}; } } diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp index e4288cab4..6cf43ba24 100755 --- a/src/core/hle/kernel/memory/address_space_info.cpp +++ b/src/core/hle/kernel/memory/address_space_info.cpp @@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) { return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address; } UNREACHABLE(); + return 0; } std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) { @@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size; } UNREACHABLE(); + return 0; } } // namespace Kernel::Memory diff --git a/src/core/hle/service/sockets/sockets_translate.cpp b/src/core/hle/service/sockets/sockets_translate.cpp index c822d21b8..ca61d72ca 100755 --- a/src/core/hle/service/sockets/sockets_translate.cpp +++ b/src/core/hle/service/sockets/sockets_translate.cpp @@ -64,6 +64,7 @@ Network::Type Translate(Type type) { return Network::Type::DGRAM; default: UNIMPLEMENTED_MSG("Unimplemented type={}", type); + return Network::Type{}; } } diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index c3d0f4c31..e59d6d0ad 100755 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -136,8 +136,6 @@ add_library(video_core STATIC renderer_vulkan/vk_graphics_pipeline.h renderer_vulkan/vk_master_semaphore.cpp renderer_vulkan/vk_master_semaphore.h - renderer_vulkan/vk_memory_manager.cpp - renderer_vulkan/vk_memory_manager.h renderer_vulkan/vk_pipeline_cache.cpp renderer_vulkan/vk_pipeline_cache.h renderer_vulkan/vk_query_cache.cpp @@ -260,6 +258,8 @@ add_library(video_core STATIC vulkan_common/vulkan_instance.h vulkan_common/vulkan_library.cpp vulkan_common/vulkan_library.h + vulkan_common/vulkan_memory_allocator.cpp + vulkan_common/vulkan_memory_allocator.h vulkan_common/vulkan_surface.cpp vulkan_common/vulkan_surface.h vulkan_common/vulkan_wrapper.cpp diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index d7437e185..61796e33a 100755 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -23,7 +23,6 @@ #include "video_core/renderer_vulkan/renderer_vulkan.h" #include "video_core/renderer_vulkan/vk_blit_screen.h" #include "video_core/renderer_vulkan/vk_master_semaphore.h" -#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_rasterizer.h" #include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_state_tracker.h" @@ -32,6 +31,7 @@ #include "video_core/vulkan_common/vulkan_device.h" #include 
"video_core/vulkan_common/vulkan_instance.h" #include "video_core/vulkan_common/vulkan_library.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_surface.h" #include "video_core/vulkan_common/vulkan_wrapper.h" @@ -137,7 +137,7 @@ bool RendererVulkan::Init() try { InitializeDevice(); Report(); - memory_manager = std::make_unique(*device); + memory_allocator = std::make_unique(*device); state_tracker = std::make_unique(gpu); @@ -149,11 +149,11 @@ bool RendererVulkan::Init() try { rasterizer = std::make_unique(render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, *device, - *memory_manager, *state_tracker, *scheduler); + *memory_allocator, *state_tracker, *scheduler); blit_screen = std::make_unique(cpu_memory, render_window, *rasterizer, *device, - *memory_manager, *swapchain, *scheduler, screen_info); + *memory_allocator, *swapchain, *scheduler, screen_info); return true; } catch (const vk::Exception& exception) { @@ -172,7 +172,7 @@ void RendererVulkan::ShutDown() { blit_screen.reset(); scheduler.reset(); swapchain.reset(); - memory_manager.reset(); + memory_allocator.reset(); device.reset(); } diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h index 5575ffc54..daf55b9b4 100755 --- a/src/video_core/renderer_vulkan/renderer_vulkan.h +++ b/src/video_core/renderer_vulkan/renderer_vulkan.h @@ -29,8 +29,8 @@ namespace Vulkan { class Device; class StateTracker; +class MemoryAllocator; class VKBlitScreen; -class VKMemoryManager; class VKSwapchain; class VKScheduler; @@ -75,7 +75,7 @@ private: vk::DebugUtilsMessenger debug_callback; std::unique_ptr device; - std::unique_ptr memory_manager; + std::unique_ptr memory_allocator; std::unique_ptr state_tracker; std::unique_ptr scheduler; std::unique_ptr swapchain; diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index 5e184eb42..3e3b895e0 100755 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp @@ -22,13 +22,13 @@ #include "video_core/renderer_vulkan/renderer_vulkan.h" #include "video_core/renderer_vulkan/vk_blit_screen.h" #include "video_core/renderer_vulkan/vk_master_semaphore.h" -#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_shader_util.h" #include "video_core/renderer_vulkan/vk_swapchain.h" #include "video_core/surface.h" #include "video_core/textures/decoders.h" #include "video_core/vulkan_common/vulkan_device.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" namespace Vulkan { @@ -115,10 +115,10 @@ struct VKBlitScreen::BufferData { VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_, Core::Frontend::EmuWindow& render_window_, VideoCore::RasterizerInterface& rasterizer_, const Device& device_, - VKMemoryManager& memory_manager_, VKSwapchain& swapchain_, + MemoryAllocator& memory_allocator_, VKSwapchain& swapchain_, VKScheduler& scheduler_, const VKScreenInfo& screen_info_) : cpu_memory{cpu_memory_}, render_window{render_window_}, rasterizer{rasterizer_}, - device{device_}, memory_manager{memory_manager_}, swapchain{swapchain_}, + device{device_}, memory_allocator{memory_allocator_}, swapchain{swapchain_}, scheduler{scheduler_}, image_count{swapchain.GetImageCount()}, screen_info{screen_info_} { 
resource_ticks.resize(image_count); @@ -150,8 +150,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool SetUniformData(data, framebuffer); SetVertexData(data, framebuffer); - auto map = buffer_commit->Map(); - std::memcpy(map.Address(), &data, sizeof(data)); + const std::span map = buffer_commit.Map(); + std::memcpy(map.data(), &data, sizeof(data)); if (!use_accelerated) { const u64 image_offset = GetRawImageOffset(framebuffer, image_index); @@ -165,8 +165,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool constexpr u32 block_height_log2 = 4; const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer); Tegra::Texture::UnswizzleTexture( - std::span(map.Address() + image_offset, size_bytes), std::span(host_ptr, size_bytes), - bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, 0); + map.subspan(image_offset, size_bytes), std::span(host_ptr, size_bytes), bytes_per_pixel, + framebuffer.width, framebuffer.height, 1, block_height_log2, 0); const VkBufferImageCopy copy{ .bufferOffset = image_offset, @@ -224,8 +224,6 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier); }); } - map.Release(); - scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index], descriptor_set = descriptor_sets[image_index], buffer = *buffer, size = swapchain.GetSize(), pipeline = *pipeline, @@ -642,7 +640,7 @@ void VKBlitScreen::ReleaseRawImages() { raw_images.clear(); raw_buffer_commits.clear(); buffer.reset(); - buffer_commit.reset(); + buffer_commit = MemoryCommit{}; } void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) { @@ -659,7 +657,7 @@ void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuff }; buffer = device.GetLogical().CreateBuffer(ci); - buffer_commit = memory_manager.Commit(buffer, true); + buffer_commit = memory_allocator.Commit(buffer, MemoryUsage::Upload); } void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) { @@ -690,7 +688,7 @@ void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) .pQueueFamilyIndices = nullptr, .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, }); - raw_buffer_commits[i] = memory_manager.Commit(raw_images[i], false); + raw_buffer_commits[i] = memory_allocator.Commit(raw_images[i], MemoryUsage::DeviceLocal); raw_image_views[i] = device.GetLogical().CreateImageView(VkImageViewCreateInfo{ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, .pNext = nullptr, diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h index 69ed61770..b52576957 100755 --- a/src/video_core/renderer_vulkan/vk_blit_screen.h +++ b/src/video_core/renderer_vulkan/vk_blit_screen.h @@ -6,7 +6,7 @@ #include -#include "video_core/renderer_vulkan/vk_memory_manager.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" namespace Core { @@ -43,7 +43,7 @@ public: explicit VKBlitScreen(Core::Memory::Memory& cpu_memory, Core::Frontend::EmuWindow& render_window, VideoCore::RasterizerInterface& rasterizer, const Device& device, - VKMemoryManager& memory_manager, VKSwapchain& swapchain, + MemoryAllocator& memory_allocator, VKSwapchain& swapchain, VKScheduler& scheduler, const VKScreenInfo& screen_info); ~VKBlitScreen(); @@ -86,7 +86,7 @@ private: Core::Frontend::EmuWindow& render_window; 
VideoCore::RasterizerInterface& rasterizer; const Device& device; - VKMemoryManager& memory_manager; + MemoryAllocator& memory_allocator; VKSwapchain& swapchain; VKScheduler& scheduler; const std::size_t image_count; @@ -104,14 +104,14 @@ private: vk::Sampler sampler; vk::Buffer buffer; - VKMemoryCommit buffer_commit; + MemoryCommit buffer_commit; std::vector resource_ticks; std::vector semaphores; std::vector raw_images; std::vector raw_image_views; - std::vector raw_buffer_commits; + std::vector raw_buffer_commits; u32 raw_width = 0; u32 raw_height = 0; }; diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index 4d517c547..e1714cea9 100755 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp @@ -40,11 +40,11 @@ std::unique_ptr CreateStreamBuffer(const Device& device, VKSched } // Anonymous namespace -Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKScheduler& scheduler_, - VKStagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_) +Buffer::Buffer(const Device& device_, MemoryAllocator& memory_allocator, VKScheduler& scheduler_, + StagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_) : BufferBlock{cpu_addr_, size_}, device{device_}, scheduler{scheduler_}, staging_pool{ staging_pool_} { - const VkBufferCreateInfo ci{ + buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, .pNext = nullptr, .flags = 0, @@ -53,22 +53,20 @@ Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKSchedul .sharingMode = VK_SHARING_MODE_EXCLUSIVE, .queueFamilyIndexCount = 0, .pQueueFamilyIndices = nullptr, - }; - - buffer.handle = device.GetLogical().CreateBuffer(ci); - buffer.commit = memory_manager.Commit(buffer.handle, false); + }); + commit = memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal); } Buffer::~Buffer() = default; void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) { - const auto& staging = staging_pool.GetUnusedBuffer(data_size, true); - std::memcpy(staging.commit->Map(data_size), data, data_size); + const auto& staging = staging_pool.Request(data_size, MemoryUsage::Upload); + std::memcpy(staging.mapped_span.data(), data, data_size); scheduler.RequestOutsideRenderPassOperationContext(); const VkBuffer handle = Handle(); - scheduler.Record([staging = *staging.handle, handle, offset, data_size, + scheduler.Record([staging = staging.buffer, handle, offset, data_size, &device = device](vk::CommandBuffer cmdbuf) { const VkBufferMemoryBarrier read_barrier{ .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, @@ -104,12 +102,12 @@ void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) { } void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) { - const auto& staging = staging_pool.GetUnusedBuffer(data_size, true); + auto staging = staging_pool.Request(data_size, MemoryUsage::Download); scheduler.RequestOutsideRenderPassOperationContext(); const VkBuffer handle = Handle(); scheduler.Record( - [staging = *staging.handle, handle, offset, data_size](vk::CommandBuffer cmdbuf) { + [staging = staging.buffer, handle, offset, data_size](vk::CommandBuffer cmdbuf) { const VkBufferMemoryBarrier barrier{ .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, .pNext = nullptr, @@ -130,7 +128,7 @@ void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) { }); scheduler.Finish(); - 
std::memcpy(data, staging.commit->Map(data_size), data_size); + std::memcpy(data, staging.mapped_span.data(), data_size); } void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset, @@ -168,29 +166,29 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer_, Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, - const Device& device_, VKMemoryManager& memory_manager_, + const Device& device_, MemoryAllocator& memory_allocator_, VKScheduler& scheduler_, VKStreamBuffer& stream_buffer_, - VKStagingBufferPool& staging_pool_) + StagingBufferPool& staging_pool_) : VideoCommon::BufferCache{rasterizer_, gpu_memory_, cpu_memory_, stream_buffer_}, - device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{ - staging_pool_} {} + device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_}, + staging_pool{staging_pool_} {} VKBufferCache::~VKBufferCache() = default; std::shared_ptr VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) { - return std::make_shared(device, memory_manager, scheduler, staging_pool, cpu_addr, + return std::make_shared(device, memory_allocator, scheduler, staging_pool, cpu_addr, size); } VKBufferCache::BufferInfo VKBufferCache::GetEmptyBuffer(std::size_t size) { size = std::max(size, std::size_t(4)); - const auto& empty = staging_pool.GetUnusedBuffer(size, false); + const auto& empty = staging_pool.Request(size, MemoryUsage::DeviceLocal); scheduler.RequestOutsideRenderPassOperationContext(); - scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) { + scheduler.Record([size, buffer = empty.buffer](vk::CommandBuffer cmdbuf) { cmdbuf.FillBuffer(buffer, 0, size, 0); }); - return {*empty.handle, 0, 0}; + return {empty.buffer, 0, 0}; } } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h index 1c39aed34..41d577510 100755 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.h +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h @@ -8,21 +8,20 @@ #include "common/common_types.h" #include "video_core/buffer_cache/buffer_cache.h" -#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" #include "video_core/renderer_vulkan/vk_stream_buffer.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" namespace Vulkan { class Device; -class VKMemoryManager; class VKScheduler; class Buffer final : public VideoCommon::BufferBlock { public: - explicit Buffer(const Device& device, VKMemoryManager& memory_manager, VKScheduler& scheduler, - VKStagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_); + explicit Buffer(const Device& device, MemoryAllocator& memory_allocator, VKScheduler& scheduler, + StagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_); ~Buffer(); void Upload(std::size_t offset, std::size_t data_size, const u8* data); @@ -33,7 +32,7 @@ public: std::size_t copy_size); VkBuffer Handle() const { - return *buffer.handle; + return *buffer; } u64 Address() const { @@ -43,18 +42,19 @@ public: private: const Device& device; VKScheduler& scheduler; - VKStagingBufferPool& staging_pool; + StagingBufferPool& staging_pool; - VKBuffer buffer; + vk::Buffer buffer; + MemoryCommit commit; }; class VKBufferCache final : public VideoCommon::BufferCache 
{ public: explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, - const Device& device, VKMemoryManager& memory_manager, + const Device& device, MemoryAllocator& memory_allocator, VKScheduler& scheduler, VKStreamBuffer& stream_buffer, - VKStagingBufferPool& staging_pool); + StagingBufferPool& staging_pool); ~VKBufferCache(); BufferInfo GetEmptyBuffer(std::size_t size) override; @@ -64,9 +64,9 @@ protected: private: const Device& device; - VKMemoryManager& memory_manager; + MemoryAllocator& memory_allocator; VKScheduler& scheduler; - VKStagingBufferPool& staging_pool; + StagingBufferPool& staging_pool; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp index 02a6d54b7..5eb6a54be 100755 --- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp @@ -164,7 +164,7 @@ VkDescriptorSet VKComputePass::CommitDescriptorSet( QuadArrayPass::QuadArrayPass(const Device& device_, VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_, - VKStagingBufferPool& staging_buffer_pool_, + StagingBufferPool& staging_buffer_pool_, VKUpdateDescriptorQueue& update_descriptor_queue_) : VKComputePass(device_, descriptor_pool_, BuildQuadArrayPassDescriptorSetLayoutBinding(), BuildQuadArrayPassDescriptorUpdateTemplateEntry(), @@ -177,18 +177,18 @@ QuadArrayPass::~QuadArrayPass() = default; std::pair QuadArrayPass::Assemble(u32 num_vertices, u32 first) { const u32 num_triangle_vertices = (num_vertices / 4) * 6; const std::size_t staging_size = num_triangle_vertices * sizeof(u32); - auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); + const auto staging_ref = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal); update_descriptor_queue.Acquire(); - update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); + update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size); const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); scheduler.RequestOutsideRenderPassOperationContext(); ASSERT(num_vertices % 4 == 0); const u32 num_quads = num_vertices / 4; - scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, num_quads, - first, set](vk::CommandBuffer cmdbuf) { + scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, + num_quads, first, set](vk::CommandBuffer cmdbuf) { constexpr u32 dispatch_size = 1024; cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {}); @@ -208,11 +208,11 @@ std::pair QuadArrayPass::Assemble(u32 num_vertices, u32 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {}); }); - return {*buffer.handle, 0}; + return {staging_ref.buffer, 0}; } Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_, - VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool_, + VKDescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_, VKUpdateDescriptorQueue& update_descriptor_queue_) : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(), BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV), @@ -224,15 +224,15 @@ Uint8Pass::~Uint8Pass() = default; std::pair Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset) { 
const u32 staging_size = static_cast(num_vertices * sizeof(u16)); - auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); + const auto staging_ref = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal); update_descriptor_queue.Acquire(); update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices); - update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); + update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size); const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); scheduler.RequestOutsideRenderPassOperationContext(); - scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, + scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set, num_vertices](vk::CommandBuffer cmdbuf) { constexpr u32 dispatch_size = 1024; cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); @@ -252,12 +252,12 @@ std::pair Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buff cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); }); - return {*buffer.handle, 0}; + return {staging_ref.buffer, 0}; } QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_, - VKStagingBufferPool& staging_buffer_pool_, + StagingBufferPool& staging_buffer_pool_, VKUpdateDescriptorQueue& update_descriptor_queue_) : VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(), BuildInputOutputDescriptorUpdateTemplate(), @@ -286,15 +286,15 @@ std::pair QuadIndexedPass::Assemble( const u32 num_tri_vertices = (num_vertices / 4) * 6; const std::size_t staging_size = num_tri_vertices * sizeof(u32); - auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); + const auto staging_ref = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal); update_descriptor_queue.Acquire(); update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size); - update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); + update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size); const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); scheduler.RequestOutsideRenderPassOperationContext(); - scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, + scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set, num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) { static constexpr u32 dispatch_size = 1024; const std::array push_constants = {base_vertex, index_shift}; @@ -317,7 +317,7 @@ std::pair QuadIndexedPass::Assemble( cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); }); - return {*buffer.handle, 0}; + return {staging_ref.buffer, 0}; } } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h index 7ddb09afb..f5c6f5f17 100755 --- a/src/video_core/renderer_vulkan/vk_compute_pass.h +++ b/src/video_core/renderer_vulkan/vk_compute_pass.h @@ -16,8 +16,8 @@ namespace Vulkan { class Device; +class StagingBufferPool; class VKScheduler; -class VKStagingBufferPool; class VKUpdateDescriptorQueue; class VKComputePass { @@ -45,7 +45,7 @@ class QuadArrayPass final : public VKComputePass { public: explicit QuadArrayPass(const Device& device_, VKScheduler& scheduler_, 
VKDescriptorPool& descriptor_pool_, - VKStagingBufferPool& staging_buffer_pool_, + StagingBufferPool& staging_buffer_pool_, VKUpdateDescriptorQueue& update_descriptor_queue_); ~QuadArrayPass(); @@ -53,15 +53,14 @@ public: private: VKScheduler& scheduler; - VKStagingBufferPool& staging_buffer_pool; + StagingBufferPool& staging_buffer_pool; VKUpdateDescriptorQueue& update_descriptor_queue; }; class Uint8Pass final : public VKComputePass { public: explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_, - VKDescriptorPool& descriptor_pool_, - VKStagingBufferPool& staging_buffer_pool_, + VKDescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_, VKUpdateDescriptorQueue& update_descriptor_queue_); ~Uint8Pass(); @@ -69,7 +68,7 @@ public: private: VKScheduler& scheduler; - VKStagingBufferPool& staging_buffer_pool; + StagingBufferPool& staging_buffer_pool; VKUpdateDescriptorQueue& update_descriptor_queue; }; @@ -77,7 +76,7 @@ class QuadIndexedPass final : public VKComputePass { public: explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_, - VKStagingBufferPool& staging_buffer_pool_, + StagingBufferPool& staging_buffer_pool_, VKUpdateDescriptorQueue& update_descriptor_queue_); ~QuadIndexedPass(); @@ -87,7 +86,7 @@ public: private: VKScheduler& scheduler; - VKStagingBufferPool& staging_buffer_pool; + StagingBufferPool& staging_buffer_pool; VKUpdateDescriptorQueue& update_descriptor_queue; }; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 586b7ad27..7d609db97 100755 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -409,24 +409,24 @@ void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const { RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, VKScreenInfo& screen_info_, - const Device& device_, VKMemoryManager& memory_manager_, + const Device& device_, MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, VKScheduler& scheduler_) : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, - screen_info{screen_info_}, device{device_}, memory_manager{memory_manager_}, + screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_}, stream_buffer(device, scheduler), - staging_pool(device, memory_manager, scheduler), descriptor_pool(device, scheduler), + staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), update_descriptor_queue(device, scheduler), blit_image(device, scheduler, state_tracker, descriptor_pool), quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), - texture_cache_runtime{device, scheduler, memory_manager, staging_pool, blit_image}, + texture_cache_runtime{device, scheduler, memory_allocator, staging_pool, blit_image}, texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler, descriptor_pool, update_descriptor_queue), - 
buffer_cache(*this, gpu_memory, cpu_memory_, device, memory_manager, scheduler, stream_buffer, - staging_pool), + buffer_cache(*this, gpu_memory, cpu_memory_, device, memory_allocator, scheduler, + stream_buffer, staging_pool), query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device, scheduler), @@ -1463,7 +1463,7 @@ VkBuffer RasterizerVulkan::DefaultBuffer() { .queueFamilyIndexCount = 0, .pQueueFamilyIndices = nullptr, }); - default_buffer_commit = memory_manager.Commit(default_buffer, false); + default_buffer_commit = memory_allocator.Commit(default_buffer, MemoryUsage::DeviceLocal); scheduler.RequestOutsideRenderPassOperationContext(); scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) { diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index e00fe3c40..8fbc25fd4 100755 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -21,7 +21,6 @@ #include "video_core/renderer_vulkan/vk_compute_pass.h" #include "video_core/renderer_vulkan/vk_descriptor_pool.h" #include "video_core/renderer_vulkan/vk_fence_manager.h" -#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_pipeline_cache.h" #include "video_core/renderer_vulkan/vk_query_cache.h" #include "video_core/renderer_vulkan/vk_scheduler.h" @@ -30,6 +29,7 @@ #include "video_core/renderer_vulkan/vk_texture_cache.h" #include "video_core/renderer_vulkan/vk_update_descriptor.h" #include "video_core/shader/async_shaders.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" namespace Core { @@ -56,7 +56,7 @@ public: explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, VKScreenInfo& screen_info_, const Device& device_, - VKMemoryManager& memory_manager_, StateTracker& state_tracker_, + MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, VKScheduler& scheduler_); ~RasterizerVulkan() override; @@ -215,12 +215,12 @@ private: VKScreenInfo& screen_info; const Device& device; - VKMemoryManager& memory_manager; + MemoryAllocator& memory_allocator; StateTracker& state_tracker; VKScheduler& scheduler; VKStreamBuffer stream_buffer; - VKStagingBufferPool staging_pool; + StagingBufferPool staging_pool; VKDescriptorPool descriptor_pool; VKUpdateDescriptorQueue update_descriptor_queue; BlitImageHelper blit_image; @@ -236,7 +236,7 @@ private: VKFenceManager fence_manager; vk::Buffer default_buffer; - VKMemoryCommit default_buffer_commit; + MemoryCommit default_buffer_commit; vk::Event wfi_event; VideoCommon::Shader::AsyncShaders async_shaders; diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp index 1e0b8b922..97fd41cc1 100755 --- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp @@ -3,10 +3,12 @@ // Refer to the license.txt file included. 
#include -#include #include #include +#include + +#include "common/assert.h" #include "common/bit_util.h" #include "common/common_types.h" #include "video_core/renderer_vulkan/vk_scheduler.h" @@ -16,45 +18,51 @@ namespace Vulkan { -VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr buffer_) - : buffer{std::move(buffer_)} {} +StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_, + VKScheduler& scheduler_) + : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} {} -VKStagingBufferPool::VKStagingBufferPool(const Device& device_, VKMemoryManager& memory_manager_, - VKScheduler& scheduler_) - : device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_} {} +StagingBufferPool::~StagingBufferPool() = default; -VKStagingBufferPool::~VKStagingBufferPool() = default; - -VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visible) { - if (const auto buffer = TryGetReservedBuffer(size, host_visible)) { - return *buffer; +StagingBufferRef StagingBufferPool::Request(size_t size, MemoryUsage usage) { + if (const std::optional ref = TryGetReservedBuffer(size, usage)) { + return *ref; } - return CreateStagingBuffer(size, host_visible); + return CreateStagingBuffer(size, usage); } -void VKStagingBufferPool::TickFrame() { - current_delete_level = (current_delete_level + 1) % NumLevels; +void StagingBufferPool::TickFrame() { + current_delete_level = (current_delete_level + 1) % NUM_LEVELS; - ReleaseCache(true); - ReleaseCache(false); + ReleaseCache(MemoryUsage::DeviceLocal); + ReleaseCache(MemoryUsage::Upload); + ReleaseCache(MemoryUsage::Download); } -VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) { - for (StagingBuffer& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) { - if (!scheduler.IsFree(entry.tick)) { - continue; +std::optional StagingBufferPool::TryGetReservedBuffer(size_t size, + MemoryUsage usage) { + StagingBuffers& cache_level = GetCache(usage)[Common::Log2Ceil64(size)]; + + const auto is_free = [this](const StagingBuffer& entry) { + return scheduler.IsFree(entry.tick); + }; + auto& entries = cache_level.entries; + const auto hint_it = entries.begin() + cache_level.iterate_index; + auto it = std::find_if(entries.begin() + cache_level.iterate_index, entries.end(), is_free); + if (it == entries.end()) { + it = std::find_if(entries.begin(), hint_it, is_free); + if (it == hint_it) { + return std::nullopt; } - entry.tick = scheduler.CurrentTick(); - return &*entry.buffer; } - return nullptr; + cache_level.iterate_index = std::distance(entries.begin(), it) + 1; + it->tick = scheduler.CurrentTick(); + return it->Ref(); } -VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) { +StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, MemoryUsage usage) { const u32 log2 = Common::Log2Ceil64(size); - - auto buffer = std::make_unique(); - buffer->handle = device.GetLogical().CreateBuffer({ + vk::Buffer buffer = device.GetLogical().CreateBuffer({ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, .pNext = nullptr, .flags = 0, @@ -66,49 +74,63 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v .queueFamilyIndexCount = 0, .pQueueFamilyIndices = nullptr, }); - buffer->commit = memory_manager.Commit(buffer->handle, host_visible); + if (device.HasDebuggingToolAttached()) { + ++buffer_index; + buffer.SetObjectNameEXT(fmt::format("Staging Buffer {}", 
buffer_index).c_str()); + } + MemoryCommit commit = memory_allocator.Commit(buffer, usage); + const std::span mapped_span = IsHostVisible(usage) ? commit.Map() : std::span{}; - std::vector& entries = GetCache(host_visible)[log2].entries; - StagingBuffer& entry = entries.emplace_back(std::move(buffer)); - entry.tick = scheduler.CurrentTick(); - return *entry.buffer; + StagingBuffer& entry = GetCache(usage)[log2].entries.emplace_back(StagingBuffer{ + .buffer = std::move(buffer), + .commit = std::move(commit), + .mapped_span = mapped_span, + .tick = scheduler.CurrentTick(), + }); + return entry.Ref(); } -VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) { - return host_visible ? host_staging_buffers : device_staging_buffers; -} - -void VKStagingBufferPool::ReleaseCache(bool host_visible) { - auto& cache = GetCache(host_visible); - const u64 size = ReleaseLevel(cache, current_delete_level); - if (size == 0) { - return; +StagingBufferPool::StagingBuffersCache& StagingBufferPool::GetCache(MemoryUsage usage) { + switch (usage) { + case MemoryUsage::DeviceLocal: + return device_local_cache; + case MemoryUsage::Upload: + return upload_cache; + case MemoryUsage::Download: + return download_cache; + default: + UNREACHABLE_MSG("Invalid memory usage={}", usage); + return upload_cache; } } -u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) { - static constexpr std::size_t deletions_per_tick = 16; +void StagingBufferPool::ReleaseCache(MemoryUsage usage) { + ReleaseLevel(GetCache(usage), current_delete_level); +} +void StagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, size_t log2) { + constexpr size_t deletions_per_tick = 16; auto& staging = cache[log2]; auto& entries = staging.entries; - const std::size_t old_size = entries.size(); + const size_t old_size = entries.size(); const auto is_deleteable = [this](const StagingBuffer& entry) { return scheduler.IsFree(entry.tick); }; - const std::size_t begin_offset = staging.delete_index; - const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size); - const auto begin = std::begin(entries) + begin_offset; - const auto end = std::begin(entries) + end_offset; + const size_t begin_offset = staging.delete_index; + const size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size); + const auto begin = entries.begin() + begin_offset; + const auto end = entries.begin() + end_offset; entries.erase(std::remove_if(begin, end, is_deleteable), end); - const std::size_t new_size = entries.size(); + const size_t new_size = entries.size(); staging.delete_index += deletions_per_tick; if (staging.delete_index >= new_size) { staging.delete_index = 0; } - - return (1ULL << log2) * (old_size - new_size); + if (staging.iterate_index > new_size) { + staging.iterate_index = 0; + } } } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h index 90dadcbbe..882ae2beb 100755 --- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h @@ -9,7 +9,7 @@ #include "common/common_types.h" -#include "video_core/renderer_vulkan/vk_memory_manager.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" namespace Vulkan { @@ -17,55 +17,65 @@ namespace Vulkan { class Device; class VKScheduler; -struct VKBuffer final { - vk::Buffer handle; - VKMemoryCommit commit; 
+struct StagingBufferRef { + VkBuffer buffer; + std::span mapped_span; }; -class VKStagingBufferPool final { +class StagingBufferPool { public: - explicit VKStagingBufferPool(const Device& device, VKMemoryManager& memory_manager, - VKScheduler& scheduler); - ~VKStagingBufferPool(); + explicit StagingBufferPool(const Device& device, MemoryAllocator& memory_allocator, + VKScheduler& scheduler); + ~StagingBufferPool(); - VKBuffer& GetUnusedBuffer(std::size_t size, bool host_visible); + StagingBufferRef Request(size_t size, MemoryUsage usage); void TickFrame(); private: - struct StagingBuffer final { - explicit StagingBuffer(std::unique_ptr buffer); - - std::unique_ptr buffer; + struct StagingBuffer { + vk::Buffer buffer; + MemoryCommit commit; + std::span mapped_span; u64 tick = 0; + + StagingBufferRef Ref() const noexcept { + return StagingBufferRef{ + .buffer = *buffer, + .mapped_span = mapped_span, + }; + } }; - struct StagingBuffers final { + struct StagingBuffers { std::vector entries; - std::size_t delete_index = 0; + size_t delete_index = 0; + size_t iterate_index = 0; }; - static constexpr std::size_t NumLevels = sizeof(std::size_t) * CHAR_BIT; - using StagingBuffersCache = std::array; + static constexpr size_t NUM_LEVELS = sizeof(size_t) * CHAR_BIT; + using StagingBuffersCache = std::array; - VKBuffer* TryGetReservedBuffer(std::size_t size, bool host_visible); + std::optional TryGetReservedBuffer(size_t size, MemoryUsage usage); - VKBuffer& CreateStagingBuffer(std::size_t size, bool host_visible); + StagingBufferRef CreateStagingBuffer(size_t size, MemoryUsage usage); - StagingBuffersCache& GetCache(bool host_visible); + StagingBuffersCache& GetCache(MemoryUsage usage); - void ReleaseCache(bool host_visible); + void ReleaseCache(MemoryUsage usage); - u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2); + void ReleaseLevel(StagingBuffersCache& cache, size_t log2); const Device& device; - VKMemoryManager& memory_manager; + MemoryAllocator& memory_allocator; VKScheduler& scheduler; - StagingBuffersCache host_staging_buffers; - StagingBuffersCache device_staging_buffers; + StagingBuffersCache device_local_cache; + StagingBuffersCache upload_cache; + StagingBuffersCache download_cache; - std::size_t current_delete_level = 0; + size_t current_delete_level = 0; + u64 buffer_index = 0; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index bd11de012..24e5c82eb 100755 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp @@ -10,12 +10,12 @@ #include "video_core/engines/fermi_2d.h" #include "video_core/renderer_vulkan/blit_image.h" #include "video_core/renderer_vulkan/maxwell_to_vk.h" -#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/renderer_vulkan/vk_rasterizer.h" #include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" #include "video_core/renderer_vulkan/vk_texture_cache.h" #include "video_core/vulkan_common/vulkan_device.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" namespace Vulkan { @@ -554,10 +554,18 @@ void TextureCacheRuntime::Finish() { } ImageBufferMap TextureCacheRuntime::MapUploadBuffer(size_t size) { - const auto& buffer = staging_buffer_pool.GetUnusedBuffer(size, true); + const auto staging_ref = staging_buffer_pool.Request(size, MemoryUsage::Upload); 
return ImageBufferMap{ - .handle = *buffer.handle, - .map = buffer.commit->Map(size), + .handle = staging_ref.buffer, + .span = staging_ref.mapped_span, + }; +} + +ImageBufferMap TextureCacheRuntime::MapDownloadBuffer(size_t size) { + const auto staging_ref = staging_buffer_pool.Request(size, MemoryUsage::Download); + return ImageBufferMap{ + .handle = staging_ref.buffer, + .span = staging_ref.mapped_span, }; } @@ -788,9 +796,9 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_ image(MakeImage(runtime.device, info)), buffer(MakeBuffer(runtime.device, info)), aspect_mask(ImageAspectMask(info.format)) { if (image) { - commit = runtime.memory_manager.Commit(image, false); + commit = runtime.memory_allocator.Commit(image, MemoryUsage::DeviceLocal); } else { - commit = runtime.memory_manager.Commit(buffer, false); + commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal); } if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) { flags |= VideoCommon::ImageFlagBits::Converted; diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h index 92a7aad8b..a55d405d1 100755 --- a/src/video_core/renderer_vulkan/vk_texture_cache.h +++ b/src/video_core/renderer_vulkan/vk_texture_cache.h @@ -7,8 +7,8 @@ #include #include -#include "video_core/renderer_vulkan/vk_memory_manager.h" #include "video_core/texture_cache/texture_cache.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" namespace Vulkan { @@ -19,14 +19,13 @@ using VideoCommon::Offset2D; using VideoCommon::RenderTargets; using VideoCore::Surface::PixelFormat; -class VKScheduler; -class VKStagingBufferPool; - class BlitImageHelper; class Device; class Image; class ImageView; class Framebuffer; +class StagingBufferPool; +class VKScheduler; struct RenderPassKey { constexpr auto operator<=>(const RenderPassKey&) const noexcept = default; @@ -60,18 +59,18 @@ struct ImageBufferMap { } [[nodiscard]] std::span Span() const noexcept { - return map.Span(); + return span; } VkBuffer handle; - MemoryMap map; + std::span span; }; struct TextureCacheRuntime { const Device& device; VKScheduler& scheduler; - VKMemoryManager& memory_manager; - VKStagingBufferPool& staging_buffer_pool; + MemoryAllocator& memory_allocator; + StagingBufferPool& staging_buffer_pool; BlitImageHelper& blit_image_helper; std::unordered_map renderpass_cache; @@ -79,10 +78,7 @@ struct TextureCacheRuntime { [[nodiscard]] ImageBufferMap MapUploadBuffer(size_t size); - [[nodiscard]] ImageBufferMap MapDownloadBuffer(size_t size) { - // TODO: Have a special function for this - return MapUploadBuffer(size); - } + [[nodiscard]] ImageBufferMap MapDownloadBuffer(size_t size); void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src, const std::array& dst_region, @@ -141,7 +137,7 @@ private: VKScheduler* scheduler; vk::Image image; vk::Buffer buffer; - VKMemoryCommit commit; + MemoryCommit commit; VkImageAspectFlags aspect_mask = 0; bool initialized = false; }; diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp new file mode 100755 index 000000000..ecdefbb66 --- /dev/null +++ b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp @@ -0,0 +1,268 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include +#include +#include +#include + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_types.h" +#include "common/logging/log.h" +#include "video_core/vulkan_common/vulkan_device.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" +#include "video_core/vulkan_common/vulkan_wrapper.h" + +namespace Vulkan { +namespace { +struct Range { + u64 begin; + u64 end; + + [[nodiscard]] bool Contains(u64 iterator, u64 size) const noexcept { + return iterator < end && begin < iterator + size; + } +}; + +[[nodiscard]] u64 AllocationChunkSize(u64 required_size) { + static constexpr std::array sizes{ + 0x1000ULL << 10, 0x1400ULL << 10, 0x1800ULL << 10, 0x1c00ULL << 10, 0x2000ULL << 10, + 0x3200ULL << 10, 0x4000ULL << 10, 0x6000ULL << 10, 0x8000ULL << 10, 0xA000ULL << 10, + 0x10000ULL << 10, 0x18000ULL << 10, 0x20000ULL << 10, + }; + static_assert(std::is_sorted(sizes.begin(), sizes.end())); + + const auto it = std::ranges::lower_bound(sizes, required_size); + return it != sizes.end() ? *it : Common::AlignUp(required_size, 4ULL << 20); +} + +[[nodiscard]] VkMemoryPropertyFlags MemoryUsagePropertyFlags(MemoryUsage usage) { + switch (usage) { + case MemoryUsage::DeviceLocal: + return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + case MemoryUsage::Upload: + return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + case MemoryUsage::Download: + return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | + VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + } + UNREACHABLE_MSG("Invalid memory usage={}", usage); + return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; +} +} // Anonymous namespace + +class MemoryAllocation { +public: + explicit MemoryAllocation(const Device& device_, vk::DeviceMemory memory_, + VkMemoryPropertyFlags properties, u64 allocation_size_, u32 type) + : device{device_}, memory{std::move(memory_)}, allocation_size{allocation_size_}, + property_flags{properties}, shifted_memory_type{1U << type} {} + + [[nodiscard]] std::optional Commit(VkDeviceSize size, VkDeviceSize alignment) { + const std::optional alloc = FindFreeRegion(size, alignment); + if (!alloc) { + // Signal out of memory, it'll try to do more allocations. + return std::nullopt; + } + const Range range{ + .begin = *alloc, + .end = *alloc + size, + }; + commits.insert(std::ranges::upper_bound(commits, *alloc, {}, &Range::begin), range); + return std::make_optional(this, *memory, *alloc, *alloc + size); + } + + void Free(u64 begin) { + const auto it = std::ranges::find(commits, begin, &Range::begin); + ASSERT_MSG(it != commits.end(), "Invalid commit"); + commits.erase(it); + } + + [[nodiscard]] std::span Map() { + if (memory_mapped_span.empty()) { + u8* const raw_pointer = memory.Map(0, allocation_size); + memory_mapped_span = std::span(raw_pointer, allocation_size); + } + return memory_mapped_span; + } + + /// Returns whether this allocation is compatible with the arguments. 
+ [[nodiscard]] bool IsCompatible(VkMemoryPropertyFlags flags, u32 type_mask) const { + return (flags & property_flags) && (type_mask & shifted_memory_type) != 0; + } + +private: + [[nodiscard]] static constexpr u32 ShiftType(u32 type) { + return 1U << type; + } + + [[nodiscard]] std::optional FindFreeRegion(u64 size, u64 alignment) noexcept { + ASSERT(std::has_single_bit(alignment)); + const u64 alignment_log2 = std::countr_zero(alignment); + std::optional candidate; + u64 iterator = 0; + auto commit = commits.begin(); + while (iterator + size <= allocation_size) { + candidate = candidate.value_or(iterator); + if (commit == commits.end()) { + break; + } + if (commit->Contains(*candidate, size)) { + candidate = std::nullopt; + } + iterator = Common::AlignBits(commit->end, alignment_log2); + ++commit; + } + return candidate; + } + + const Device& device; ///< Vulkan device. + const vk::DeviceMemory memory; ///< Vulkan memory allocation handler. + const u64 allocation_size; ///< Size of this allocation. + const VkMemoryPropertyFlags property_flags; ///< Vulkan memory property flags. + const u32 shifted_memory_type; ///< Shifted Vulkan memory type. + std::vector commits; ///< All commit ranges done from this allocation. + std::span memory_mapped_span; ///< Memory mapped span. Empty if not queried before. +}; + +MemoryCommit::MemoryCommit(MemoryAllocation* allocation_, VkDeviceMemory memory_, u64 begin_, + u64 end_) noexcept + : allocation{allocation_}, memory{memory_}, begin{begin_}, end{end_} {} + +MemoryCommit::~MemoryCommit() { + Release(); +} + +MemoryCommit& MemoryCommit::operator=(MemoryCommit&& rhs) noexcept { + Release(); + allocation = std::exchange(rhs.allocation, nullptr); + memory = rhs.memory; + begin = rhs.begin; + end = rhs.end; + span = std::exchange(rhs.span, std::span{}); + return *this; +} + +MemoryCommit::MemoryCommit(MemoryCommit&& rhs) noexcept + : allocation{std::exchange(rhs.allocation, nullptr)}, memory{rhs.memory}, begin{rhs.begin}, + end{rhs.end}, span{std::exchange(rhs.span, std::span{})} {} + +std::span MemoryCommit::Map() { + if (span.empty()) { + span = allocation->Map().subspan(begin, end - begin); + } + return span; +} + +void MemoryCommit::Release() { + if (allocation) { + allocation->Free(begin); + } +} + +MemoryAllocator::MemoryAllocator(const Device& device_) + : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {} + +MemoryAllocator::~MemoryAllocator() = default; + +MemoryCommit MemoryAllocator::Commit(const VkMemoryRequirements& requirements, MemoryUsage usage) { + // Find the fastest memory flags we can afford with the current requirements + const VkMemoryPropertyFlags flags = MemoryPropertyFlags(requirements.memoryTypeBits, usage); + if (std::optional commit = TryCommit(requirements, flags)) { + return std::move(*commit); + } + // Commit has failed, allocate more memory. + // TODO(Rodrigo): Handle out of memory situations in some way like flushing to guest memory. + AllocMemory(flags, requirements.memoryTypeBits, AllocationChunkSize(requirements.size)); + + // Commit again, this time it won't fail since there's a fresh allocation above. + // If it does, there's a bug. 
+ return TryCommit(requirements, flags).value(); +} + +MemoryCommit MemoryAllocator::Commit(const vk::Buffer& buffer, MemoryUsage usage) { + auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), usage); + buffer.BindMemory(commit.Memory(), commit.Offset()); + return commit; +} + +MemoryCommit MemoryAllocator::Commit(const vk::Image& image, MemoryUsage usage) { + auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), usage); + image.BindMemory(commit.Memory(), commit.Offset()); + return commit; +} + +void MemoryAllocator::AllocMemory(VkMemoryPropertyFlags flags, u32 type_mask, u64 size) { + const u32 type = FindType(flags, type_mask).value(); + vk::DeviceMemory memory = device.GetLogical().AllocateMemory({ + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .pNext = nullptr, + .allocationSize = size, + .memoryTypeIndex = type, + }); + allocations.push_back( + std::make_unique(device, std::move(memory), flags, size, type)); +} + +std::optional MemoryAllocator::TryCommit(const VkMemoryRequirements& requirements, + VkMemoryPropertyFlags flags) { + for (auto& allocation : allocations) { + if (!allocation->IsCompatible(flags, requirements.memoryTypeBits)) { + continue; + } + if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) { + return commit; + } + } + return std::nullopt; +} + +VkMemoryPropertyFlags MemoryAllocator::MemoryPropertyFlags(u32 type_mask, MemoryUsage usage) const { + return MemoryPropertyFlags(type_mask, MemoryUsagePropertyFlags(usage)); +} + +VkMemoryPropertyFlags MemoryAllocator::MemoryPropertyFlags(u32 type_mask, + VkMemoryPropertyFlags flags) const { + if (FindType(flags, type_mask)) { + // Found a memory type with those requirements + return flags; + } + if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) { + // Remove host cached bit in case it's not supported + return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_HOST_CACHED_BIT); + } + if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) { + // Remove device local, if it's not supported by the requested resource + return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + } + UNREACHABLE_MSG("No compatible memory types found"); + return 0; +} + +std::optional MemoryAllocator::FindType(VkMemoryPropertyFlags flags, u32 type_mask) const { + for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) { + const VkMemoryPropertyFlags type_flags = properties.memoryTypes[type_index].propertyFlags; + if ((type_mask & (1U << type_index)) && (type_flags & flags)) { + // The type matches in type and in the wanted properties. + return type_index; + } + } + // Failed to find index + return std::nullopt; +} + +bool IsHostVisible(MemoryUsage usage) noexcept { + switch (usage) { + case MemoryUsage::DeviceLocal: + return false; + case MemoryUsage::Upload: + case MemoryUsage::Download: + return true; + } + UNREACHABLE_MSG("Invalid memory usage={}", usage); + return false; +} + +} // namespace Vulkan diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.h b/src/video_core/vulkan_common/vulkan_memory_allocator.h new file mode 100755 index 000000000..53b3b275a --- /dev/null +++ b/src/video_core/vulkan_common/vulkan_memory_allocator.h @@ -0,0 +1,118 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#pragma once + +#include +#include +#include +#include +#include "common/common_types.h" +#include "video_core/vulkan_common/vulkan_wrapper.h" + +namespace Vulkan { + +class Device; +class MemoryMap; +class MemoryAllocation; + +/// Hints and requirements for the backing memory type of a commit +enum class MemoryUsage { + DeviceLocal, ///< Hints device local usages, fastest memory type to read and write from the GPU + Upload, ///< Requires a host visible memory type optimized for CPU to GPU uploads + Download, ///< Requires a host visible memory type optimized for GPU to CPU readbacks +}; + +/// Ownership handle of a memory commitment. +/// Points to a subregion of a memory allocation. +class MemoryCommit { +public: + explicit MemoryCommit() noexcept = default; + explicit MemoryCommit(MemoryAllocation* allocation_, VkDeviceMemory memory_, u64 begin_, + u64 end_) noexcept; + ~MemoryCommit(); + + MemoryCommit& operator=(MemoryCommit&&) noexcept; + MemoryCommit(MemoryCommit&&) noexcept; + + MemoryCommit& operator=(const MemoryCommit&) = delete; + MemoryCommit(const MemoryCommit&) = delete; + + /// Returns a host visible memory map. + /// It will map the backing allocation if it hasn't been mapped before. + std::span Map(); + + /// Returns the Vulkan memory handler. + VkDeviceMemory Memory() const { + return memory; + } + + /// Returns the start position of the commit relative to the allocation. + VkDeviceSize Offset() const { + return static_cast(begin); + } + +private: + void Release(); + + MemoryAllocation* allocation{}; ///< Pointer to the large memory allocation. + VkDeviceMemory memory{}; ///< Vulkan device memory handler. + u64 begin{}; ///< Beginning offset in bytes to where the commit exists. + u64 end{}; ///< Offset in bytes where the commit ends. + std::span span; ///< Host visible memory span. Empty if not queried before. +}; + +/// Memory allocator container. +/// Allocates and releases memory allocations on demand. +class MemoryAllocator { +public: + explicit MemoryAllocator(const Device& device_); + ~MemoryAllocator(); + + MemoryAllocator& operator=(const MemoryAllocator&) = delete; + MemoryAllocator(const MemoryAllocator&) = delete; + + /** + * Commits a memory with the specified requeriments. + * + * @param requirements Requirements returned from a Vulkan call. + * @param host_visible Signals the allocator that it *must* use host visible and coherent + * memory. When passing false, it will try to allocate device local memory. + * + * @returns A memory commit. + */ + MemoryCommit Commit(const VkMemoryRequirements& requirements, MemoryUsage usage); + + /// Commits memory required by the buffer and binds it. + MemoryCommit Commit(const vk::Buffer& buffer, MemoryUsage usage); + + /// Commits memory required by the image and binds it. + MemoryCommit Commit(const vk::Image& image, MemoryUsage usage); + +private: + /// Allocates a chunk of memory. + void AllocMemory(VkMemoryPropertyFlags flags, u32 type_mask, u64 size); + + /// Tries to allocate a memory commit. + std::optional TryCommit(const VkMemoryRequirements& requirements, + VkMemoryPropertyFlags flags); + + /// Returns the fastest compatible memory property flags from a wanted usage. + VkMemoryPropertyFlags MemoryPropertyFlags(u32 type_mask, MemoryUsage usage) const; + + /// Returns the fastest compatible memory property flags from the wanted flags. 
+ VkMemoryPropertyFlags MemoryPropertyFlags(u32 type_mask, VkMemoryPropertyFlags flags) const; + + /// Returns index to the fastest memory type compatible with the passed requirements. + std::optional FindType(VkMemoryPropertyFlags flags, u32 type_mask) const; + + const Device& device; ///< Device handle. + const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties. + std::vector> allocations; ///< Current allocations. +}; + +/// Returns true when a memory usage is guaranteed to be host visible. +bool IsHostVisible(MemoryUsage usage) noexcept; + +} // namespace Vulkan
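
Usage note (illustrative only, not part of the patch): the diff above replaces VKMemoryManager/VKMemoryCommit with MemoryAllocator/MemoryCommit and reworks the staging pool around MemoryUsage hints. The sketch below shows how the new API added in vulkan_memory_allocator.h and vk_staging_buffer_pool.h fits together, assuming a constructed Device, VKScheduler, and vk::Buffer from the existing backend; in the real code the allocator, commits, and staging pool are long-lived members of RendererVulkan and RasterizerVulkan rather than locals.

#include <cstring>
#include <span>

#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
#include "video_core/vulkan_common/vulkan_memory_allocator.h"

namespace Vulkan {

void UploadSketch(const Device& device, VKScheduler& scheduler, vk::Buffer& buffer,
                  std::span<const u8> data) {
    // One allocator per device; it carves MemoryCommit subregions out of large
    // VkDeviceMemory chunks (see AllocationChunkSize in the new .cpp above).
    MemoryAllocator memory_allocator(device);

    // Commit(buffer, usage) queries the buffer's memory requirements, picks a
    // compatible memory type for the usage hint, and binds the memory.
    MemoryCommit commit = memory_allocator.Commit(buffer, MemoryUsage::Upload);

    // Upload/Download usages are host visible, so Map() yields a CPU-writable span.
    if (IsHostVisible(MemoryUsage::Upload)) {
        std::memcpy(commit.Map().data(), data.data(), data.size());
    }

    // Transient copies go through the renamed staging pool; Request() returns a
    // StagingBufferRef whose mapped_span is already host mapped for Upload usage.
    StagingBufferPool staging_pool(device, memory_allocator, scheduler);
    const StagingBufferRef staging = staging_pool.Request(data.size(), MemoryUsage::Upload);
    std::memcpy(staging.mapped_span.data(), data.data(), data.size());
}

} // namespace Vulkan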