From 0e4972699b9398790b2c3ff60031c998044897fb Mon Sep 17 00:00:00 2001
From: pineappleEA
Date: Sat, 1 Jan 2022 04:01:57 +0100
Subject: [PATCH] early-access version 2361

---
 README.md                                    |   2 +-
 src/video_core/memory_manager.cpp            | 100 ++++++++-----------
 src/video_core/memory_manager.h              |   5 +
 src/video_core/texture_cache/texture_cache.h |   4 +-
 src/video_core/texture_cache/util.cpp        |  10 +-
 5 files changed, 52 insertions(+), 69 deletions(-)

diff --git a/README.md b/README.md
index 6b6985a19..13d1faee8 100755
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 2360.
+This is the source code for early-access 2361.

 ## Legal Notice

diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index dce00e829..4ff3fa268 100755
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -73,12 +73,12 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
     }
     const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);

-    for (const auto& map : submapped_ranges) {
+    for (const auto& [map_addr, map_size] : submapped_ranges) {
         // Flush and invalidate through the GPU interface, to be asynchronous if possible.
-        const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map.first);
+        const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
         ASSERT(cpu_addr);

-        rasterizer->UnmapMemory(*cpu_addr, map.second);
+        rasterizer->UnmapMemory(*cpu_addr, map_size);
     }

     UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
@@ -265,7 +265,8 @@ size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept {
     return it->second - (gpu_addr - it->first);
 }

-void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
+void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
+                                  bool is_safe) const {
     std::size_t remaining_size{size};
     std::size_t page_index{gpu_src_addr >> page_bits};
     std::size_t page_offset{gpu_src_addr & page_mask};
@@ -273,35 +274,15 @@ void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::siz
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
-        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
-            const auto src_addr{*page_addr + page_offset};
-
-            // Flush must happen on the rasterizer interface, such that memory is always synchronous
-            // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
-            rasterizer->FlushRegion(src_addr, copy_amount);
-            system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
-        }
-
-        page_index++;
-        page_offset = 0;
-        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
-        remaining_size -= copy_amount;
-    }
-}
-
-void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
-                                    const std::size_t size) const {
-    std::size_t remaining_size{size};
-    std::size_t page_index{gpu_src_addr >> page_bits};
-    std::size_t page_offset{gpu_src_addr & page_mask};
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount{
-            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
-        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
+        if (page_addr && *page_addr != 0) {
             const auto src_addr{*page_addr + page_offset};
+            if (is_safe) {
+                // Flush must happen on the rasterizer interface, such that memory is always
+                // synchronous when it is read (even when in asynchronous GPU mode).
+                // Fixes Dead Cells title menu.
+                rasterizer->FlushRegion(src_addr, copy_amount);
+            }
             system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
         } else {
             std::memset(dest_buffer, 0, copy_amount);
@@ -314,7 +295,17 @@
     }
 }

-void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
+void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
+    ReadBlockImpl(gpu_src_addr, dest_buffer, size, true);
+}
+
+void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
+                                    const std::size_t size) const {
+    ReadBlockImpl(gpu_src_addr, dest_buffer, size, false);
+}
+
+void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
+                                   bool is_safe) {
     std::size_t remaining_size{size};
     std::size_t page_index{gpu_dest_addr >> page_bits};
     std::size_t page_offset{gpu_dest_addr & page_mask};
@@ -322,13 +313,15 @@ void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, s
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
-        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
+        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
+        if (page_addr && *page_addr != 0) {
             const auto dest_addr{*page_addr + page_offset};

-            // Invalidate must happen on the rasterizer interface, such that memory is always
-            // synchronous when it is written (even when in asynchronous GPU mode).
-            rasterizer->InvalidateRegion(dest_addr, copy_amount);
+            if (is_safe) {
+                // Invalidate must happen on the rasterizer interface, such that memory is always
+                // synchronous when it is written (even when in asynchronous GPU mode).
+                rasterizer->InvalidateRegion(dest_addr, copy_amount);
+            }
             system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
         }

@@ -339,26 +332,13 @@
     }
 }

+void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
+    WriteBlockImpl(gpu_dest_addr, src_buffer, size, true);
+}
+
 void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
                                      std::size_t size) {
-    std::size_t remaining_size{size};
-    std::size_t page_index{gpu_dest_addr >> page_bits};
-    std::size_t page_offset{gpu_dest_addr & page_mask};
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount{
-            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
-        if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) {
-            const auto dest_addr{*page_addr + page_offset};
-            system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
-        }
-
-        page_index++;
-        page_offset = 0;
-        src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
-        remaining_size -= copy_amount;
-    }
+    WriteBlockImpl(gpu_dest_addr, src_buffer, size, false);
 }

 void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const {
@@ -435,15 +415,15 @@ std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
     size_t page_offset{gpu_addr & page_mask};
     std::optional<std::pair<GPUVAddr, size_t>> last_segment{};
     std::optional<VAddr> old_page_addr{};
-    const auto extend_size = [this, &last_segment, &page_index](std::size_t bytes) {
+    const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) {
         if (!last_segment) {
-            GPUVAddr new_base_addr = page_index << page_bits;
+            const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset;
             last_segment = {new_base_addr, bytes};
         } else {
             last_segment->second += bytes;
         }
     };
-    const auto split = [this, &last_segment, &result] {
+    const auto split = [&last_segment, &result] {
         if (last_segment) {
             result.push_back(*last_segment);
             last_segment = std::nullopt;
@@ -452,7 +432,7 @@ std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
     while (remaining_size > 0) {
         const size_t num_bytes{std::min(page_size - page_offset, remaining_size)};
         const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (!page_addr) {
+        if (!page_addr || *page_addr == 0) {
             split();
         } else if (old_page_addr) {
             if (*old_page_addr + page_size != *page_addr) {
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 99d13e7f6..38d8d9d74 100755
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -155,6 +155,11 @@ private:

     void FlushRegion(GPUVAddr gpu_addr, size_t size) const;

+    void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
+                       bool is_safe) const;
+    void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
+                        bool is_safe);
+
     [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) {
         return (gpu_addr >> page_bits) & page_table_mask;
     }
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index b494152b8..198bb0cfb 100755
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1376,9 +1376,7 @@ void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) {
     using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type;
     static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>;
     const auto segments = gpu_memory.GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
-    for (auto& segment : segments) {
-        const auto gpu_addr = segment.first;
-        const auto size = segment.second;
+    for (const auto& [gpu_addr, size] : segments) {
         std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
         ASSERT(cpu_addr);
         if constexpr (RETURNS_BOOL) {
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 7bd31b211..d8e19cb2f 100755
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -364,14 +364,14 @@ template

 [[nodiscard]] std::optional<SubresourceExtent> ResolveOverlapRightAddress2D(
     const ImageInfo& new_info, GPUVAddr gpu_addr, const ImageBase& overlap, bool strict_size) {
-    const u32 layer_stride = new_info.layer_stride;
-    const s32 new_size = layer_stride * new_info.resources.layers;
-    const s32 diff = static_cast<s32>(overlap.gpu_addr - gpu_addr);
+    const u64 layer_stride = new_info.layer_stride;
+    const u64 new_size = layer_stride * new_info.resources.layers;
+    const u64 diff = overlap.gpu_addr - gpu_addr;
     if (diff > new_size) {
         return std::nullopt;
     }
-    const s32 base_layer = diff / layer_stride;
-    const s32 mip_offset = diff % layer_stride;
+    const s32 base_layer = static_cast<s32>(diff / layer_stride);
+    const s32 mip_offset = static_cast<s32>(diff % layer_stride);
     const std::array offsets = CalculateMipLevelOffsets(new_info);
     const auto end = offsets.begin() + new_info.resources.levels;
     const auto it = std::find(offsets.begin(), end, static_cast<u32>(mip_offset));
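
Editor's note (appended, not part of the patch): the memory_manager.cpp changes above fold four near-identical page-walking copy loops into a single ReadBlockImpl/WriteBlockImpl pair dispatched on an is_safe flag, zero-fill reads from unmapped pages, and make GetSubmappedRange report sub-page offsets. The sketch below is a minimal, self-contained illustration of the is_safe dispatch pattern only; PagedMemory, Translate, the FlushRegion stand-in, and the tiny 256-byte demo pages are invented for the example and are not yuzu's real interfaces.

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <optional>

// PagedMemory, Translate, and the 256-byte pages are invented for this demo;
// they are not yuzu's real interfaces.
class PagedMemory {
public:
    static constexpr std::size_t page_bits = 8;
    static constexpr std::size_t page_size = std::size_t{1} << page_bits;
    static constexpr std::size_t page_mask = page_size - 1;

    // Safe read: synchronize each mapped page before copying.
    void ReadBlock(std::size_t src, void* dest, std::size_t size) {
        ReadBlockImpl(src, dest, size, /*is_safe=*/true);
    }
    // Unsafe read: caller guarantees coherency, so skip synchronization.
    void ReadBlockUnsafe(std::size_t src, void* dest, std::size_t size) {
        ReadBlockImpl(src, dest, size, /*is_safe=*/false);
    }

private:
    // One page-walking loop serves both entry points; is_safe only controls
    // the per-page synchronization call, mirroring the patch above.
    void ReadBlockImpl(std::size_t src, void* dest, std::size_t size, bool is_safe) {
        std::size_t remaining = size;
        std::size_t page_index = src >> page_bits;
        std::size_t page_offset = src & page_mask;
        while (remaining > 0) {
            const std::size_t copy = std::min(page_size - page_offset, remaining);
            if (const auto page = Translate(page_index << page_bits)) {
                const std::size_t addr = *page + page_offset;
                if (is_safe) {
                    FlushRegion(addr, copy); // stand-in for rasterizer->FlushRegion
                }
                std::memcpy(dest, backing.data() + addr, copy);
            } else {
                std::memset(dest, 0, copy); // unmapped pages read as zeroes
            }
            ++page_index;
            page_offset = 0;
            dest = static_cast<unsigned char*>(dest) + copy;
            remaining -= copy;
        }
    }

    // Identity mapping for the demo; page 0 is deliberately unmapped.
    std::optional<std::size_t> Translate(std::size_t addr) const {
        if (addr < page_size || addr >= backing.size()) {
            return std::nullopt;
        }
        return addr;
    }

    void FlushRegion(std::size_t addr, std::size_t size) const {
        std::cout << "flush " << size << " bytes at offset " << addr << '\n';
    }

    std::array<unsigned char, 4 * page_size> backing{};
};

int main() {
    PagedMemory mem;
    unsigned char buf[3 * PagedMemory::page_size]{};
    mem.ReadBlock(0x40, buf, sizeof(buf));       // one flush per mapped page
    mem.ReadBlockUnsafe(0x40, buf, sizeof(buf)); // same copy, no flushes
}

The point of the pattern: both public entry points share one hot loop and only the per-page synchronization call is conditional, so the safe and unsafe variants cannot drift apart as the duplicated loops in the removed code could.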