From c141471dce8071282272474b4b7d7ba937732b16 Mon Sep 17 00:00:00 2001 From: pineappleEA Date: Sun, 21 Aug 2022 22:11:13 +0200 Subject: [PATCH] early-access version 2914 --- CMakeLists.txt | 4 +- README.md | 2 +- externals/CMakeLists.txt | 2 +- externals/find-modules/FindOpus.cmake | 19 +++++ externals/find-modules/Findlz4.cmake | 59 +++----------- externals/find-modules/Findzstd.cmake | 60 +++----------- externals/opus/CMakeLists.txt | 2 +- src/common/microprofile.h | 9 --- src/core/CMakeLists.txt | 2 +- src/core/arm/dynarmic/arm_dynarmic_32.cpp | 4 +- src/core/file_sys/ips_layer.cpp | 2 +- src/core/file_sys/patch_manager.cpp | 4 +- src/core/hle/service/nvdrv/core/nvmap.cpp | 6 +- .../service/nvdrv/devices/nvhost_as_gpu.cpp | 26 +++--- .../hle/service/nvdrv/devices/nvhost_as_gpu.h | 4 +- src/core/hle/service/nvdrv/devices/nvmap.cpp | 9 ++- src/core/loader/kip.cpp | 2 +- src/core/loader/nro.cpp | 2 +- src/core/loader/nso.cpp | 2 +- src/core/memory.cpp | 81 ++++++++++--------- src/core/memory.h | 6 +- src/tests/video_core/buffer_base.cpp | 7 +- src/video_core/buffer_cache/buffer_base.h | 2 +- src/video_core/buffer_cache/buffer_cache.h | 51 ++++++------ src/video_core/memory_manager.cpp | 8 +- src/video_core/query_cache.h | 12 +-- src/video_core/rasterizer_accelerated.cpp | 17 ++-- src/video_core/shader_cache.cpp | 12 +-- src/video_core/shader_cache.h | 4 +- src/video_core/texture_cache/texture_cache.h | 12 +-- .../texture_cache/texture_cache_base.h | 10 +-- 31 files changed, 193 insertions(+), 249 deletions(-) create mode 100755 externals/find-modules/FindOpus.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index eed57cc69..2ab0ea589 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,6 +40,8 @@ option(YUZU_TESTS "Compile tests" ON) option(YUZU_USE_BUNDLED_VCPKG "Use vcpkg for yuzu dependencies" "${MSVC}") +option(YUZU_CHECK_SUBMODULES "Check if submodules are present" ON) + if (YUZU_USE_BUNDLED_VCPKG) if (YUZU_TESTS) list(APPEND VCPKG_MANIFEST_FEATURES "yuzu-tests") @@ -81,7 +83,7 @@ function(check_submodules_present) endforeach() endfunction() -if(EXISTS ${PROJECT_SOURCE_DIR}/.gitmodules) +if(EXISTS ${PROJECT_SOURCE_DIR}/.gitmodules AND YUZU_CHECK_SUBMODULES) check_submodules_present() endif() configure_file(${PROJECT_SOURCE_DIR}/dist/compatibility_list/compatibility_list.qrc diff --git a/README.md b/README.md index a19302f7c..5028659f2 100755 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ yuzu emulator early access ============= -This is the source code for early-access 2913. +This is the source code for early-access 2914. 
## Legal Notice diff --git a/externals/CMakeLists.txt b/externals/CMakeLists.txt index 6d04ace1d..eea70fc27 100755 --- a/externals/CMakeLists.txt +++ b/externals/CMakeLists.txt @@ -128,7 +128,7 @@ endif() if (YUZU_USE_BUNDLED_OPUS) add_subdirectory(opus EXCLUDE_FROM_ALL) else() - find_package(opus 1.3 REQUIRED) + find_package(Opus 1.3 REQUIRED) endif() # FFMpeg diff --git a/externals/find-modules/FindOpus.cmake b/externals/find-modules/FindOpus.cmake new file mode 100755 index 000000000..b68a6046b --- /dev/null +++ b/externals/find-modules/FindOpus.cmake @@ -0,0 +1,19 @@ +# SPDX-FileCopyrightText: 2022 yuzu Emulator Project +# SPDX-License-Identifier: GPL-2.0-or-later + +find_package(PkgConfig) + +if (PKG_CONFIG_FOUND) + pkg_search_module(opus IMPORTED_TARGET GLOBAL opus) + if (opus_FOUND) + add_library(Opus::opus ALIAS PkgConfig::opus) + endif() +endif() + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Opus + REQUIRED_VARS + opus_LINK_LIBRARIES + opus_FOUND + VERSION_VAR opus_VERSION +) diff --git a/externals/find-modules/Findlz4.cmake b/externals/find-modules/Findlz4.cmake index 56dcca8f6..13ca5de66 100755 --- a/externals/find-modules/Findlz4.cmake +++ b/externals/find-modules/Findlz4.cmake @@ -1,56 +1,19 @@ -# SPDX-FileCopyrightText: 2020 yuzu Emulator Project +# SPDX-FileCopyrightText: 2022 yuzu Emulator Project # SPDX-License-Identifier: GPL-2.0-or-later -find_package(PkgConfig QUIET) -pkg_check_modules(PC_lz4 QUIET lz4) +find_package(PkgConfig) -find_path(lz4_INCLUDE_DIR - NAMES lz4.h - PATHS ${PC_lz4_INCLUDE_DIRS} -) -find_library(lz4_LIBRARY - NAMES lz4 - PATHS ${PC_lz4_LIBRARY_DIRS} -) - -if(lz4_INCLUDE_DIR) - file(STRINGS "${lz4_INCLUDE_DIR}/lz4.h" _lz4_version_lines - REGEX "#define[ \t]+LZ4_VERSION_(MAJOR|MINOR|RELEASE)") - string(REGEX REPLACE ".*LZ4_VERSION_MAJOR *\([0-9]*\).*" "\\1" _lz4_version_major "${_lz4_version_lines}") - string(REGEX REPLACE ".*LZ4_VERSION_MINOR *\([0-9]*\).*" "\\1" _lz4_version_minor "${_lz4_version_lines}") - string(REGEX REPLACE ".*LZ4_VERSION_RELEASE *\([0-9]*\).*" "\\1" _lz4_version_release "${_lz4_version_lines}") - set(lz4_VERSION "${_lz4_version_major}.${_lz4_version_minor}.${_lz4_version_release}") - unset(_lz4_version_major) - unset(_lz4_version_minor) - unset(_lz4_version_release) - unset(_lz4_version_lines) +if (PKG_CONFIG_FOUND) + pkg_search_module(liblz4 IMPORTED_TARGET GLOBAL liblz4) + if (liblz4_FOUND) + add_library(lz4::lz4 ALIAS PkgConfig::liblz4) + endif() endif() include(FindPackageHandleStandardArgs) find_package_handle_standard_args(lz4 - FOUND_VAR lz4_FOUND - REQUIRED_VARS - lz4_LIBRARY - lz4_INCLUDE_DIR - VERSION_VAR lz4_VERSION -) - -if(lz4_FOUND) - set(lz4_LIBRARIES ${lz4_LIBRARY}) - set(lz4_INCLUDE_DIRS ${lz4_INCLUDE_DIR}) - set(lz4_DEFINITIONS ${PC_lz4_CFLAGS_OTHER}) -endif() - -if(lz4_FOUND AND NOT TARGET lz4::lz4) - add_library(lz4::lz4 UNKNOWN IMPORTED) - set_target_properties(lz4::lz4 PROPERTIES - IMPORTED_LOCATION "${lz4_LIBRARY}" - INTERFACE_COMPILE_OPTIONS "${PC_lz4_CFLAGS_OTHER}" - INTERFACE_INCLUDE_DIRECTORIES "${lz4_INCLUDE_DIR}" - ) -endif() - -mark_as_advanced( - lz4_INCLUDE_DIR - lz4_LIBRARY + REQUIRED_VARS + liblz4_LINK_LIBRARIES + liblz4_FOUND + VERSION_VAR liblz4_VERSION ) diff --git a/externals/find-modules/Findzstd.cmake b/externals/find-modules/Findzstd.cmake index f0c56f499..f4031eb70 100755 --- a/externals/find-modules/Findzstd.cmake +++ b/externals/find-modules/Findzstd.cmake @@ -1,57 +1,19 @@ -# SPDX-FileCopyrightText: 2020 yuzu Emulator Project +# SPDX-FileCopyrightText: 
2022 yuzu Emulator Project # SPDX-License-Identifier: GPL-2.0-or-later -find_package(PkgConfig QUIET) -pkg_check_modules(PC_zstd QUIET libzstd) +find_package(PkgConfig) -find_path(zstd_INCLUDE_DIR - NAMES zstd.h - PATHS ${PC_zstd_INCLUDE_DIRS} -) -find_library(zstd_LIBRARY - NAMES zstd - PATHS ${PC_zstd_LIBRARY_DIRS} -) - -if(zstd_INCLUDE_DIR) - file(STRINGS "${zstd_INCLUDE_DIR}/zstd.h" _zstd_version_lines - REGEX "#define[ \t]+ZSTD_VERSION_(MAJOR|MINOR|RELEASE)") - string(REGEX REPLACE ".*ZSTD_VERSION_MAJOR *\([0-9]*\).*" "\\1" _zstd_version_major "${_zstd_version_lines}") - string(REGEX REPLACE ".*ZSTD_VERSION_MINOR *\([0-9]*\).*" "\\1" _zstd_version_minor "${_zstd_version_lines}") - string(REGEX REPLACE ".*ZSTD_VERSION_RELEASE *\([0-9]*\).*" "\\1" _zstd_version_release "${_zstd_version_lines}") - set(zstd_VERSION "${_zstd_version_major}.${_zstd_version_minor}.${_zstd_version_release}") - unset(_zstd_version_major) - unset(_zstd_version_minor) - unset(_zstd_version_release) - unset(_zstd_version_lines) +if (PKG_CONFIG_FOUND) + pkg_search_module(libzstd IMPORTED_TARGET GLOBAL libzstd) + if (libzstd_FOUND) + add_library(zstd::zstd ALIAS PkgConfig::libzstd) + endif() endif() include(FindPackageHandleStandardArgs) find_package_handle_standard_args(zstd - FOUND_VAR zstd_FOUND - REQUIRED_VARS - zstd_LIBRARY - zstd_INCLUDE_DIR - zstd_VERSION - VERSION_VAR zstd_VERSION -) - -if(zstd_FOUND) - set(zstd_LIBRARIES ${zstd_LIBRARY}) - set(zstd_INCLUDE_DIRS ${zstd_INCLUDE_DIR}) - set(zstd_DEFINITIONS ${PC_zstd_CFLAGS_OTHER}) -endif() - -if(zstd_FOUND AND NOT TARGET zstd::zstd) - add_library(zstd::zstd UNKNOWN IMPORTED) - set_target_properties(zstd::zstd PROPERTIES - IMPORTED_LOCATION "${zstd_LIBRARY}" - INTERFACE_COMPILE_OPTIONS "${PC_zstd_CFLAGS_OTHER}" - INTERFACE_INCLUDE_DIRECTORIES "${zstd_INCLUDE_DIR}" - ) -endif() - -mark_as_advanced( - zstd_INCLUDE_DIR - zstd_LIBRARY + REQUIRED_VARS + libzstd_LINK_LIBRARIES + libzstd_FOUND + VERSION_VAR libzstd_VERSION ) diff --git a/externals/opus/CMakeLists.txt b/externals/opus/CMakeLists.txt index a92ffbd69..410ff7c08 100755 --- a/externals/opus/CMakeLists.txt +++ b/externals/opus/CMakeLists.txt @@ -256,4 +256,4 @@ PRIVATE opus/src ) -add_library(Opus::Opus ALIAS opus) +add_library(Opus::opus ALIAS opus) diff --git a/src/common/microprofile.h b/src/common/microprofile.h index 91d14d5e1..56ef0a2dc 100755 --- a/src/common/microprofile.h +++ b/src/common/microprofile.h @@ -22,12 +22,3 @@ typedef void* HANDLE; #include #define MP_RGB(r, g, b) ((r) << 16 | (g) << 8 | (b) << 0) - -// On OS X, some Mach header included by MicroProfile defines these as macros, conflicting with -// identifiers we use. 
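The #undef workaround being removed in this hunk is obsolete because the rest of the series renames yuzu's own constants to YUZU_PAGESIZE / YUZU_PAGEMASK / YUZU_PAGEBITS, which cannot collide with system macros. A minimal standalone sketch of the collision, not taken from the patch (the #define stands in for whatever macro the Mach/system header leaks):

    #include <cstddef>

    #define PAGE_SIZE 4096 // stand-in for the macro leaked by a system header

    namespace Core::Memory {
    // constexpr std::size_t PAGE_SIZE = 4096; // would expand to "4096 = 4096"
    constexpr std::size_t YUZU_PAGEBITS = 12;  // prefixed names cannot collide
    constexpr std::size_t YUZU_PAGESIZE = std::size_t{1} << YUZU_PAGEBITS;
    constexpr std::size_t YUZU_PAGEMASK = YUZU_PAGESIZE - 1;
    } // namespace Core::Memory

    int main() {
        return Core::Memory::YUZU_PAGESIZE == PAGE_SIZE ? 0 : 1;
    }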
-#ifdef PAGE_SIZE -#undef PAGE_SIZE -#endif -#ifdef PAGE_MASK -#undef PAGE_MASK -#endif diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 40c2e9f44..704b707cf 100755 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -788,7 +788,7 @@ endif() create_target_directory_groups(core) target_link_libraries(core PUBLIC common PRIVATE audio_core network video_core) -target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt::fmt nlohmann_json::nlohmann_json mbedtls Opus::Opus) +target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt::fmt nlohmann_json::nlohmann_json mbedtls Opus::opus) if (MINGW) target_link_libraries(core PRIVATE ${MSWSOCK_LIBRARY}) endif() diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp index 3b8b43994..d1e70f19d 100755 --- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp @@ -190,8 +190,8 @@ std::shared_ptr ARM_Dynarmic_32::MakeJit(Common::PageTable* config.callbacks = cb.get(); config.coprocessors[15] = cp15; config.define_unpredictable_behaviour = true; - static constexpr std::size_t PAGE_BITS = 12; - static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS); + static constexpr std::size_t YUZU_PAGEBITS = 12; + static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - YUZU_PAGEBITS); if (page_table) { config.page_table = reinterpret_cast*>( page_table->pointers.data()); diff --git a/src/core/file_sys/ips_layer.cpp b/src/core/file_sys/ips_layer.cpp index c1a484497..5aab428bb 100755 --- a/src/core/file_sys/ips_layer.cpp +++ b/src/core/file_sys/ips_layer.cpp @@ -217,7 +217,7 @@ void IPSwitchCompiler::Parse() { break; } else if (StartsWith(line, "@nsobid-")) { // NSO Build ID Specifier - const auto raw_build_id = fmt::format("{:0>64}", line.substr(8)); + const auto raw_build_id = fmt::format("{:0<64}", line.substr(8)); nso_build_id = Common::HexStringToArray<0x20>(raw_build_id); } else if (StartsWith(line, "#")) { // Mandatory Comment diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp index 41348ab26..4c80e13a9 100755 --- a/src/core/file_sys/patch_manager.cpp +++ b/src/core/file_sys/patch_manager.cpp @@ -191,7 +191,7 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const { std::vector PatchManager::CollectPatches(const std::vector& patch_dirs, const std::string& build_id) const { const auto& disabled = Settings::values.disabled_addons[title_id]; - const auto nso_build_id = fmt::format("{:0>64}", build_id); + const auto nso_build_id = fmt::format("{:0<64}", build_id); std::vector out; out.reserve(patch_dirs.size()); @@ -206,7 +206,7 @@ std::vector PatchManager::CollectPatches(const std::vectorGetName(); const auto this_build_id = - fmt::format("{:0>64}", name.substr(0, name.find('.'))); + fmt::format("{:0<64}", name.substr(0, name.find('.'))); if (nso_build_id == this_build_id) out.push_back(file); } else if (file->GetExtension() == "pchtxt") { diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp index 942c2ac38..e63ec7717 100755 --- a/src/core/hle/service/nvdrv/core/nvmap.cpp +++ b/src/core/hle/service/nvdrv/core/nvmap.cpp @@ -9,7 +9,7 @@ #include "core/memory.h" #include "video_core/host1x/host1x.h" -using Core::Memory::PAGE_SIZE; +using Core::Memory::YUZU_PAGESIZE; namespace Service::Nvidia::NvCore { NvMap::Handle::Handle(u64 size_, Id id_) @@ -27,7 +27,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) 
flags = pFlags; kind = pKind; - align = pAlign < PAGE_SIZE ? PAGE_SIZE : pAlign; + align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign; // This flag is only applicable for handles with an address passed if (pAddress) { @@ -37,7 +37,7 @@ NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) "Mapping nvmap handles without a CPU side address is unimplemented!"); } - size = Common::AlignUp(size, PAGE_SIZE); + size = Common::AlignUp(size, YUZU_PAGESIZE); aligned_size = Common::AlignUp(size, align); address = pAddress; allocated = true; diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp index 034c39156..192503ffc 100755 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp @@ -153,7 +153,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector& input, std::vector< return NvResult::BadValue; } - if (params.page_size != VM::PAGE_SIZE && params.page_size != vm.big_page_size) { + if (params.page_size != VM::YUZU_PAGESIZE && params.page_size != vm.big_page_size) { return NvResult::BadValue; } @@ -163,11 +163,11 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector& input, std::vector< return NvResult::NotImplemented; } - const u32 page_size_bits{params.page_size == VM::PAGE_SIZE ? VM::PAGE_SIZE_BITS - : vm.big_page_size_bits}; + const u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS + : vm.big_page_size_bits}; - auto& allocator{params.page_size == VM::PAGE_SIZE ? *vm.small_page_allocator - : *vm.big_page_allocator}; + auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator + : *vm.big_page_allocator}; if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) { allocator.AllocateFixed(static_cast(params.offset >> page_size_bits), params.pages); @@ -190,7 +190,7 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector& input, std::vector< .mappings{}, .page_size = params.page_size, .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None, - .big_pages = params.page_size != VM::PAGE_SIZE, + .big_pages = params.page_size != VM::YUZU_PAGESIZE, }; std::memcpy(output.data(), ¶ms, output.size()); @@ -248,10 +248,10 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector& input, std::vector& gmmu->Unmap(params.offset, allocation.size); } - auto& allocator{params.page_size == VM::PAGE_SIZE ? *vm.small_page_allocator - : *vm.big_page_allocator}; - u32 page_size_bits{params.page_size == VM::PAGE_SIZE ? VM::PAGE_SIZE_BITS - : vm.big_page_size_bits}; + auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator + : *vm.big_page_allocator}; + u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? 
VM::PAGE_SIZE_BITS + : vm.big_page_size_bits}; allocator.Free(static_cast(params.offset >> page_size_bits), static_cast(allocation.size >> page_size_bits)); @@ -369,7 +369,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vectoralign, vm.big_page_size)) return true; - else if (Common::IsAligned(handle->align, VM::PAGE_SIZE)) + else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) return false; else { ASSERT(false); @@ -396,7 +396,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector& input, std::vector(allocator.Allocate( @@ -473,7 +473,7 @@ void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) { params.regions = std::array{ VaRegion{ .offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS, - .page_size = VM::PAGE_SIZE, + .page_size = VM::YUZU_PAGESIZE, ._pad0_{}, .pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(), }, diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h index bb1493510..86fe71c75 100755 --- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h +++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h @@ -188,8 +188,8 @@ private: std::mutex mutex; //!< Locks all AS operations struct VM { - static constexpr u32 PAGE_SIZE{0x1000}; - static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(PAGE_SIZE)}; + static constexpr u32 YUZU_PAGESIZE{0x1000}; + static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(YUZU_PAGESIZE)}; static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000}; static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000}; diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 2a58a7b8c..ddf273b5e 100755 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp @@ -16,7 +16,7 @@ #include "core/hle/service/nvdrv/devices/nvmap.h" #include "core/memory.h" -using Core::Memory::PAGE_SIZE; +using Core::Memory::YUZU_PAGESIZE; namespace Service::Nvidia::Devices { @@ -75,7 +75,8 @@ NvResult nvmap::IocCreate(const std::vector& input, std::vector& output) LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size); std::shared_ptr handle_description{}; - auto result = file.CreateHandle(Common::AlignUp(params.size, PAGE_SIZE), handle_description); + auto result = + file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description); if (result != NvResult::Success) { LOG_CRITICAL(Service_NVDRV, "Failed to create Object"); return result; @@ -104,8 +105,8 @@ NvResult nvmap::IocAlloc(const std::vector& input, std::vector& output) } // Force page size alignment at a minimum - if (params.align < PAGE_SIZE) { - params.align = PAGE_SIZE; + if (params.align < YUZU_PAGESIZE) { + params.align = YUZU_PAGESIZE; } auto handle_description{file.GetHandle(params.handle)}; diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp index 9af46a0f7..d8a1bf82a 100755 --- a/src/core/loader/kip.cpp +++ b/src/core/loader/kip.cpp @@ -14,7 +14,7 @@ namespace Loader { namespace { constexpr u32 PageAlignSize(u32 size) { - return static_cast((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK); + return static_cast((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK); } } // Anonymous namespace diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp index 1b0bb0876..73d04d7ee 100755 --- a/src/core/loader/nro.cpp +++ b/src/core/loader/nro.cpp @@ -125,7 +125,7 @@ FileType AppLoader_NRO::IdentifyType(const 
FileSys::VirtualFile& nro_file) { } static constexpr u32 PageAlignSize(u32 size) { - return static_cast((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK); + return static_cast((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK); } static bool LoadNroImpl(Kernel::KProcess& process, const std::vector& data) { diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp index 8dd956fc6..4c3b3c655 100755 --- a/src/core/loader/nso.cpp +++ b/src/core/loader/nso.cpp @@ -45,7 +45,7 @@ std::vector DecompressSegment(const std::vector& compressed_data, } constexpr u32 PageAlignSize(u32 size) { - return static_cast((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK); + return static_cast((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK); } } // Anonymous namespace diff --git a/src/core/memory.cpp b/src/core/memory.cpp index df41aa3f1..2ac792566 100755 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -36,10 +36,11 @@ struct Memory::Impl { } void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); + ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); + ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base); ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); + MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target, + Common::PageType::Memory); if (Settings::IsFastmemEnabled()) { system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size); @@ -47,9 +48,10 @@ struct Memory::Impl { } void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped); + ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); + ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base); + MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0, + Common::PageType::Unmapped); if (Settings::IsFastmemEnabled()) { system.DeviceMemory().buffer.Unmap(base, size); @@ -57,7 +59,7 @@ struct Memory::Impl { } [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const { - const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]}; + const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]}; if (!paddr) { return {}; @@ -67,7 +69,7 @@ struct Memory::Impl { } [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const { - const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]}; + const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]}; if (paddr == 0) { return {}; @@ -176,13 +178,14 @@ struct Memory::Impl { auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) { const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; - std::size_t page_index = addr >> PAGE_BITS; - std::size_t page_offset = addr & PAGE_MASK; + std::size_t page_index = addr >> YUZU_PAGEBITS; + std::size_t page_offset = addr & YUZU_PAGEMASK; while (remaining_size) { const 
std::size_t copy_amount = - std::min(static_cast(PAGE_SIZE) - page_offset, remaining_size); - const auto current_vaddr = static_cast((page_index << PAGE_BITS) + page_offset); + std::min(static_cast(YUZU_PAGESIZE) - page_offset, remaining_size); + const auto current_vaddr = + static_cast((page_index << YUZU_PAGEBITS) + page_offset); const auto [pointer, type] = page_table.pointers[page_index].PointerType(); switch (type) { @@ -192,7 +195,7 @@ struct Memory::Impl { } case Common::PageType::Memory: { DEBUG_ASSERT(pointer); - u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS); + u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS); on_memory(copy_amount, mem_ptr); break; } @@ -339,10 +342,10 @@ struct Memory::Impl { // Iterate over a contiguous CPU address space, marking/unmarking the region. // The region is at a granularity of CPU pages. - const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; - for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { + const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1; + for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) { const Common::PageType page_type{ - current_page_table->pointers[vaddr >> PAGE_BITS].Type()}; + current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()}; if (debug) { // Switch page type to debug if now debug switch (page_type) { @@ -354,7 +357,7 @@ struct Memory::Impl { // Page is already marked. break; case Common::PageType::Memory: - current_page_table->pointers[vaddr >> PAGE_BITS].Store( + current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( nullptr, Common::PageType::DebugMemory); break; default: @@ -371,9 +374,9 @@ struct Memory::Impl { // Don't mess with already non-debug or rasterizer memory. break; case Common::PageType::DebugMemory: { - u8* const pointer{GetPointerFromDebugMemory(vaddr & ~PAGE_MASK)}; - current_page_table->pointers[vaddr >> PAGE_BITS].Store( - pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory); + u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)}; + current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( + pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); break; } default: @@ -398,10 +401,10 @@ struct Memory::Impl { // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size // is different). This assumes the specified GPU address region is contiguous as well. - const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; - for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { + const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1; + for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) { const Common::PageType page_type{ - current_page_table->pointers[vaddr >> PAGE_BITS].Type()}; + current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()}; if (cached) { // Switch page type to cached if now cached switch (page_type) { @@ -411,7 +414,7 @@ struct Memory::Impl { break; case Common::PageType::DebugMemory: case Common::PageType::Memory: - current_page_table->pointers[vaddr >> PAGE_BITS].Store( + current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( nullptr, Common::PageType::RasterizerCachedMemory); break; case Common::PageType::RasterizerCachedMemory: @@ -434,16 +437,16 @@ struct Memory::Impl { // that this area is already unmarked as cached. 
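The memory.cpp hunks in this region only respell PAGE_BITS / PAGE_MASK / PAGE_SIZE as YUZU_PAGEBITS / YUZU_PAGEMASK / YUZU_PAGESIZE; the page-walk logic itself is unchanged. For reference, a self-contained sketch of that pattern (the WalkPages name and signature are hypothetical, simplified from the Impl helpers above):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t YUZU_PAGEBITS = 12;
    constexpr std::uint64_t YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
    constexpr std::uint64_t YUZU_PAGEMASK = YUZU_PAGESIZE - 1;

    // Visit [addr, addr + size) one page-bounded chunk at a time.
    template <typename OnChunk>
    void WalkPages(std::uint64_t addr, std::size_t size, OnChunk&& on_chunk) {
        std::size_t remaining = size;
        std::size_t page_index = addr >> YUZU_PAGEBITS;
        std::size_t page_offset = addr & YUZU_PAGEMASK;
        while (remaining > 0) {
            // Never cross a page boundary within a single chunk.
            const std::size_t copy_amount = std::min(
                static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining);
            on_chunk(page_index, page_offset, copy_amount);
            ++page_index;
            page_offset = 0;
            remaining -= copy_amount;
        }
    }

For example, WalkPages(0x1FFF, 3, f) yields a 1-byte chunk at the end of page 1 followed by a 2-byte chunk at the start of page 2, mirroring how the read/write block helpers clamp copy_amount.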
break; case Common::PageType::RasterizerCachedMemory: { - u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)}; + u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)}; if (pointer == nullptr) { // It's possible that this function has been called while updating the // pagetable after unmapping a VMA. In that case the underlying VMA will no // longer exist, and we should just leave the pagetable entry blank. - current_page_table->pointers[vaddr >> PAGE_BITS].Store( + current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( nullptr, Common::PageType::Unmapped); } else { - current_page_table->pointers[vaddr >> PAGE_BITS].Store( - pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory); + current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( + pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); } break; } @@ -465,8 +468,8 @@ struct Memory::Impl { */ void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target, Common::PageType type) { - LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE, - (base + size) * PAGE_SIZE); + LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE, + (base + size) * YUZU_PAGESIZE); // During boot, current_page_table might not be set yet, in which case we need not flush if (system.IsPoweredOn()) { @@ -474,7 +477,7 @@ struct Memory::Impl { for (u64 i = 0; i < size; i++) { const auto page = base + i; if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) { - gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE); + gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE); } } } @@ -485,7 +488,7 @@ struct Memory::Impl { if (!target) { ASSERT_MSG(type != Common::PageType::Memory, - "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE); + "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE); while (base != end) { page_table.pointers[base].Store(nullptr, type); @@ -496,14 +499,14 @@ struct Memory::Impl { } else { while (base != end) { page_table.pointers[base].Store( - system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type); - page_table.backing_addr[base] = target - (base << PAGE_BITS); + system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type); + page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS); ASSERT_MSG(page_table.pointers[base].Pointer(), "memory mapping base yield a nullptr within the table"); base += 1; - target += PAGE_SIZE; + target += YUZU_PAGESIZE; } } } @@ -518,7 +521,7 @@ struct Memory::Impl { } // Avoid adding any extra logic to this fast-path block - const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw(); + const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw(); if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { return &pointer[vaddr]; } @@ -662,7 +665,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { bool Memory::IsValidVirtualAddress(const VAddr vaddr) const { const Kernel::KProcess& process = *system.CurrentProcess(); const auto& page_table = process.PageTable().PageTableImpl(); - const size_t page = vaddr >> PAGE_BITS; + const size_t page = vaddr >> YUZU_PAGEBITS; if (page >= page_table.pointers.size()) { return false; } @@ -673,9 +676,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const { bool Memory::IsValidVirtualAddressRange(VAddr 
base, u64 size) const { VAddr end = base + size; - VAddr page = Common::AlignDown(base, PAGE_SIZE); + VAddr page = Common::AlignDown(base, YUZU_PAGESIZE); - for (; page < end; page += PAGE_SIZE) { + for (; page < end; page += YUZU_PAGESIZE) { if (!IsValidVirtualAddress(page)) { return false; } diff --git a/src/core/memory.h b/src/core/memory.h index 17903c006..81eac448b 100755 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -27,9 +27,9 @@ namespace Core::Memory { * Page size used by the ARM architecture. This is the smallest granularity with which memory can * be mapped. */ -constexpr std::size_t PAGE_BITS = 12; -constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS; -constexpr u64 PAGE_MASK = PAGE_SIZE - 1; +constexpr std::size_t YUZU_PAGEBITS = 12; +constexpr u64 YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS; +constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1; /// Virtual user-space memory regions enum : VAddr { diff --git a/src/tests/video_core/buffer_base.cpp b/src/tests/video_core/buffer_base.cpp index a1be8dcf1..71121e42a 100755 --- a/src/tests/video_core/buffer_base.cpp +++ b/src/tests/video_core/buffer_base.cpp @@ -22,8 +22,9 @@ constexpr VAddr c = 0x1328914000; class RasterizerInterface { public: void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { - const u64 page_start{addr >> Core::Memory::PAGE_BITS}; - const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS}; + const u64 page_start{addr >> Core::Memory::YUZU_PAGEBITS}; + const u64 page_end{(addr + size + Core::Memory::YUZU_PAGESIZE - 1) >> + Core::Memory::YUZU_PAGEBITS}; for (u64 page = page_start; page < page_end; ++page) { int& value = page_table[page]; value += delta; @@ -37,7 +38,7 @@ public: } [[nodiscard]] int Count(VAddr addr) const noexcept { - const auto it = page_table.find(addr >> Core::Memory::PAGE_BITS); + const auto it = page_table.find(addr >> Core::Memory::YUZU_PAGEBITS); return it == page_table.end() ? 
0 : it->second; } diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h index 3e20608ca..0b2bc67b1 100755 --- a/src/video_core/buffer_cache/buffer_base.h +++ b/src/video_core/buffer_cache/buffer_base.h @@ -36,7 +36,7 @@ struct NullBufferParams {}; template class BufferBase { static constexpr u64 PAGES_PER_WORD = 64; - static constexpr u64 BYTES_PER_PAGE = Core::Memory::PAGE_SIZE; + static constexpr u64 BYTES_PER_PAGE = Core::Memory::YUZU_PAGESIZE; static constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE; /// Vector tracking modified pages tightly packed with small vector optimization diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 08fe0bda3..8e26b3f95 100755 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -60,8 +60,8 @@ class BufferCache : public VideoCommon::ChannelSetupCaches void ForEachBufferInRange(VAddr cpu_addr, u64 size, Func&& func) { - const u64 page_end = Common::DivCeil(cpu_addr + size, PAGE_SIZE); - for (u64 page = cpu_addr >> PAGE_BITS; page < page_end;) { + const u64 page_end = Common::DivCeil(cpu_addr + size, YUZU_PAGESIZE); + for (u64 page = cpu_addr >> YUZU_PAGEBITS; page < page_end;) { const BufferId buffer_id = page_table[page]; if (!buffer_id) { ++page; @@ -224,7 +224,7 @@ private: func(buffer_id, buffer); const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes(); - page = Common::DivCeil(end_addr, PAGE_SIZE); + page = Common::DivCeil(end_addr, YUZU_PAGESIZE); } } @@ -259,8 +259,8 @@ private: } static bool IsRangeGranular(VAddr cpu_addr, size_t size) { - return (cpu_addr & ~Core::Memory::PAGE_MASK) == - ((cpu_addr + size) & ~Core::Memory::PAGE_MASK); + return (cpu_addr & ~Core::Memory::YUZU_PAGEMASK) == + ((cpu_addr + size) & ~Core::Memory::YUZU_PAGEMASK); } void RunGarbageCollector(); @@ -433,7 +433,7 @@ private: u64 minimum_memory = 0; u64 critical_memory = 0; - std::array> PAGE_BITS)> page_table; + std::array> YUZU_PAGEBITS)> page_table; }; template @@ -929,8 +929,8 @@ void BufferCache
<P>
::PopAsyncFlushes() {} template <class P> bool BufferCache
<P>
::IsRegionGpuModified(VAddr addr, size_t size) { - const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE); - for (u64 page = addr >> PAGE_BITS; page < page_end;) { + const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE); + for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) { const BufferId image_id = page_table[page]; if (!image_id) { ++page; @@ -941,7 +941,7 @@ bool BufferCache
<P>
::IsRegionGpuModified(VAddr addr, size_t size) { return true; } const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes(); - page = Common::DivCeil(end_addr, PAGE_SIZE); + page = Common::DivCeil(end_addr, YUZU_PAGESIZE); } return false; } @@ -949,8 +949,8 @@ bool BufferCache
<P>
::IsRegionGpuModified(VAddr addr, size_t size) { template <class P> bool BufferCache
<P>
::IsRegionRegistered(VAddr addr, size_t size) { const VAddr end_addr = addr + size; - const u64 page_end = Common::DivCeil(end_addr, PAGE_SIZE); - for (u64 page = addr >> PAGE_BITS; page < page_end;) { + const u64 page_end = Common::DivCeil(end_addr, YUZU_PAGESIZE); + for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) { const BufferId buffer_id = page_table[page]; if (!buffer_id) { ++page; @@ -962,15 +962,15 @@ bool BufferCache
<P>
::IsRegionRegistered(VAddr addr, size_t size) { if (buf_start_addr < end_addr && addr < buf_end_addr) { return true; } - page = Common::DivCeil(end_addr, PAGE_SIZE); + page = Common::DivCeil(end_addr, YUZU_PAGESIZE); } return false; } template <class P> bool BufferCache
<P>
::IsRegionCpuModified(VAddr addr, size_t size) { - const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE); - for (u64 page = addr >> PAGE_BITS; page < page_end;) { + const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE); + for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) { const BufferId image_id = page_table[page]; if (!image_id) { ++page; @@ -981,7 +981,7 @@ bool BufferCache
<P>
::IsRegionCpuModified(VAddr addr, size_t size) { return true; } const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes(); - page = Common::DivCeil(end_addr, PAGE_SIZE); + page = Common::DivCeil(end_addr, YUZU_PAGESIZE); } return false; } @@ -1470,7 +1470,7 @@ BufferId BufferCache
<P>
::FindBuffer(VAddr cpu_addr, u32 size) { if (cpu_addr == 0) { return NULL_BUFFER_ID; } - const u64 page = cpu_addr >> PAGE_BITS; + const u64 page = cpu_addr >> YUZU_PAGEBITS; const BufferId buffer_id = page_table[page]; if (!buffer_id) { return CreateBuffer(cpu_addr, size); @@ -1491,8 +1491,9 @@ typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(VAddr cpu VAddr end = cpu_addr + wanted_size; int stream_score = 0; bool has_stream_leap = false; - for (; cpu_addr >> PAGE_BITS < Common::DivCeil(end, PAGE_SIZE); cpu_addr += PAGE_SIZE) { - const BufferId overlap_id = page_table[cpu_addr >> PAGE_BITS]; + for (; cpu_addr >> YUZU_PAGEBITS < Common::DivCeil(end, YUZU_PAGESIZE); + cpu_addr += YUZU_PAGESIZE) { + const BufferId overlap_id = page_table[cpu_addr >> YUZU_PAGEBITS]; if (!overlap_id) { continue; } @@ -1518,11 +1519,11 @@ typename BufferCache
<P>
::OverlapResult BufferCache
<P>
::ResolveOverlaps(VAddr cpu // as a stream buffer. Increase the size to skip constantly recreating buffers. has_stream_leap = true; if (expands_right) { - begin -= PAGE_SIZE * 256; + begin -= YUZU_PAGESIZE * 256; cpu_addr = begin; } if (expands_left) { - end += PAGE_SIZE * 256; + end += YUZU_PAGESIZE * 256; } } } @@ -1598,8 +1599,8 @@ void BufferCache
<P>
::ChangeRegister(BufferId buffer_id) { } const VAddr cpu_addr_begin = buffer.CpuAddr(); const VAddr cpu_addr_end = cpu_addr_begin + size; - const u64 page_begin = cpu_addr_begin / PAGE_SIZE; - const u64 page_end = Common::DivCeil(cpu_addr_end, PAGE_SIZE); + const u64 page_begin = cpu_addr_begin / YUZU_PAGESIZE; + const u64 page_end = Common::DivCeil(cpu_addr_end, YUZU_PAGESIZE); for (u64 page = page_begin; page != page_end; ++page) { if constexpr (insert) { page_table[page] = buffer_id; @@ -1848,7 +1849,7 @@ typename BufferCache
<P>
::Binding BufferCache
<P>
::StorageBufferBinding(GPUVAddr s if (!cpu_addr || size == 0) { return NULL_BINDING; } - const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::PAGE_SIZE); + const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE); const Binding binding{ .cpu_addr = *cpu_addr, .size = is_written ? size : static_cast(cpu_end - *cpu_addr), diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 470a2bdd5..cca401c74 100755 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -570,14 +570,14 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { const std::size_t page{(page_index & big_page_mask) + size}; return page <= big_page_size; } - const std::size_t page{(gpu_addr & Core::Memory::PAGE_MASK) + size}; - return page <= Core::Memory::PAGE_SIZE; + const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; + return page <= Core::Memory::YUZU_PAGESIZE; } if (GetEntry(gpu_addr) != EntryType::Mapped) { return false; } - const std::size_t page{(gpu_addr & Core::Memory::PAGE_MASK) + size}; - return page <= Core::Memory::PAGE_SIZE; + const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; + return page <= Core::Memory::YUZU_PAGESIZE; } bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const { diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 2cc7f4130..b0ebe71b7 100755 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -214,8 +214,8 @@ private: return cache_begin < addr_end && addr_begin < cache_end; }; - const u64 page_end = addr_end >> PAGE_BITS; - for (u64 page = addr_begin >> PAGE_BITS; page <= page_end; ++page) { + const u64 page_end = addr_end >> YUZU_PAGEBITS; + for (u64 page = addr_begin >> YUZU_PAGEBITS; page <= page_end; ++page) { const auto& it = cached_queries.find(page); if (it == std::end(cached_queries)) { continue; @@ -235,14 +235,14 @@ private: /// Registers the passed parameters as cached and returns a pointer to the stored cached query. CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) { rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1); - const u64 page = static_cast(cpu_addr) >> PAGE_BITS; + const u64 page = static_cast(cpu_addr) >> YUZU_PAGEBITS; return &cached_queries[page].emplace_back(static_cast(*this), type, cpu_addr, host_ptr); } /// Tries to a get a cached query. Returns nullptr on failure. 
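For context on the query_cache.h hunks above: cached queries are bucketed by virtual-address page (4096-byte pages, 12 page bits), so invalidating a range only visits the buckets that range can overlap. A simplified, self-contained sketch of that layout (PageBuckets and its members are hypothetical names, not the cache's actual API):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    using VAddr = std::uint64_t;
    constexpr unsigned YUZU_PAGEBITS = 12;

    struct Query {
        VAddr begin;
        VAddr end; // one past the last byte, i.e. a half-open interval
    };

    class PageBuckets {
    public:
        // Store each query in the bucket of its starting page.
        void Register(const Query& q) {
            buckets[q.begin >> YUZU_PAGEBITS].push_back(q);
        }

        // Count queries overlapping [addr, addr + size).
        std::size_t CountInRange(VAddr addr, std::uint64_t size) const {
            const VAddr addr_end = addr + size;
            const std::uint64_t page_end = addr_end >> YUZU_PAGEBITS;
            std::size_t hits = 0;
            for (std::uint64_t page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
                const auto it = buckets.find(page);
                if (it == buckets.end()) {
                    continue;
                }
                for (const Query& q : it->second) {
                    // Half-open interval overlap test, as in the hunk above.
                    if (q.begin < addr_end && addr < q.end) {
                        ++hits;
                    }
                }
            }
            return hits;
        }

    private:
        std::unordered_map<std::uint64_t, std::vector<Query>> buckets;
    };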
CachedQuery* TryGet(VAddr addr) { - const u64 page = static_cast(addr) >> PAGE_BITS; + const u64 page = static_cast(addr) >> YUZU_PAGEBITS; const auto it = cached_queries.find(page); if (it == std::end(cached_queries)) { return nullptr; @@ -260,8 +260,8 @@ private: uncommitted_flushes->push_back(addr); } - static constexpr std::uintptr_t PAGE_SIZE = 4096; - static constexpr unsigned PAGE_BITS = 12; + static constexpr std::uintptr_t YUZU_PAGESIZE = 4096; + static constexpr unsigned YUZU_PAGEBITS = 12; VideoCore::RasterizerInterface& rasterizer; diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp index 87a29e144..4a197d65d 100755 --- a/src/video_core/rasterizer_accelerated.cpp +++ b/src/video_core/rasterizer_accelerated.cpp @@ -24,8 +24,8 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del u64 cache_bytes = 0; std::atomic_thread_fence(std::memory_order_acquire); - const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE); - for (u64 page = addr >> PAGE_BITS; page != page_end; ++page) { + const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE); + for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) { std::atomic_uint16_t& count = cached_pages.at(page >> 2).Count(page); if (delta > 0) { @@ -44,26 +44,27 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del if (uncache_bytes == 0) { uncache_begin = page; } - uncache_bytes += PAGE_SIZE; + uncache_bytes += YUZU_PAGESIZE; } else if (uncache_bytes > 0) { - cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false); + cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, + false); uncache_bytes = 0; } if (count.load(std::memory_order::relaxed) == 1 && delta > 0) { if (cache_bytes == 0) { cache_begin = page; } - cache_bytes += PAGE_SIZE; + cache_bytes += YUZU_PAGESIZE; } else if (cache_bytes > 0) { - cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true); + cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true); cache_bytes = 0; } } if (uncache_bytes > 0) { - cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false); + cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false); } if (cache_bytes > 0) { - cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true); + cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true); } } diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp index 4a77dd40e..f53066579 100755 --- a/src/video_core/shader_cache.cpp +++ b/src/video_core/shader_cache.cpp @@ -120,8 +120,8 @@ void ShaderCache::Register(std::unique_ptr data, VAddr addr, size_t const VAddr addr_end = addr + size; Entry* const entry = NewEntry(addr, addr_end, data.get()); - const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS; - for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) { + const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS; + for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) { invalidation_cache[page].push_back(entry); } @@ -132,8 +132,8 @@ void ShaderCache::Register(std::unique_ptr data, VAddr addr, size_t void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) { const VAddr addr_end = addr + size; - const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS; - for (u64 page = addr >> PAGE_BITS; 
page < page_end; ++page) { + const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS; + for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) { auto it = invalidation_cache.find(page); if (it == invalidation_cache.end()) { continue; @@ -186,8 +186,8 @@ void ShaderCache::InvalidatePageEntries(std::vector& entries, VAddr addr } void ShaderCache::RemoveEntryFromInvalidationCache(const Entry* entry) { - const u64 page_end = (entry->addr_end + PAGE_SIZE - 1) >> PAGE_BITS; - for (u64 page = entry->addr_start >> PAGE_BITS; page < page_end; ++page) { + const u64 page_end = (entry->addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS; + for (u64 page = entry->addr_start >> YUZU_PAGEBITS; page < page_end; ++page) { const auto entries_it = invalidation_cache.find(page); ASSERT(entries_it != invalidation_cache.end()); std::vector& entries = entries_it->second; diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h index a96d69c47..a4391202d 100755 --- a/src/video_core/shader_cache.h +++ b/src/video_core/shader_cache.h @@ -34,8 +34,8 @@ struct ShaderInfo { }; class ShaderCache : public VideoCommon::ChannelSetupCaches { - static constexpr u64 PAGE_BITS = 14; - static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS; + static constexpr u64 YUZU_PAGEBITS = 14; + static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS; static constexpr size_t NUM_PROGRAMS = 6; diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index c85869dd0..eaf4a1c95 100755 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h @@ -600,7 +600,7 @@ void TextureCache
<P>
::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst, template <class P> typename P::ImageView* TextureCache
<P>
::TryFindFramebufferImageView(VAddr cpu_addr) { // TODO: Properly implement this - const auto it = page_table.find(cpu_addr >> PAGE_BITS); + const auto it = page_table.find(cpu_addr >> YUZU_PAGEBITS); if (it == page_table.end()) { return nullptr; } @@ -1506,14 +1506,14 @@ void TextureCache
<P>
::UnregisterImage(ImageId image_id) { selected_page_table) { const auto page_it = selected_page_table.find(page); if (page_it == selected_page_table.end()) { - ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS); + ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS); return; } std::vector& image_ids = page_it->second; const auto vector_it = std::ranges::find(image_ids, image_id); if (vector_it == image_ids.end()) { ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}", - page << PAGE_BITS); + page << YUZU_PAGEBITS); return; } image_ids.erase(vector_it); @@ -1526,14 +1526,14 @@ void TextureCache
<P>
::UnregisterImage(ImageId image_id) { ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) { const auto page_it = page_table.find(page); if (page_it == page_table.end()) { - ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS); + ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS); return; } std::vector& image_map_ids = page_it->second; const auto vector_it = std::ranges::find(image_map_ids, map_id); if (vector_it == image_map_ids.end()) { ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}", - page << PAGE_BITS); + page << YUZU_PAGEBITS); return; } image_map_ids.erase(vector_it); @@ -1554,7 +1554,7 @@ void TextureCache
<P>
::UnregisterImage(ImageId image_id) { ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) { const auto page_it = page_table.find(page); if (page_it == page_table.end()) { - ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS); + ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS); return; } std::vector& image_map_ids = page_it->second; diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h index c6790abc0..2fa8445eb 100755 --- a/src/video_core/texture_cache/texture_cache_base.h +++ b/src/video_core/texture_cache/texture_cache_base.h @@ -82,7 +82,7 @@ public: template class TextureCache : public VideoCommon::ChannelSetupCaches { /// Address shift for caching images into a hash table - static constexpr u64 PAGE_BITS = 20; + static constexpr u64 YUZU_PAGEBITS = 20; /// Enables debugging features to the texture cache static constexpr bool ENABLE_VALIDATION = P::ENABLE_VALIDATION; @@ -210,8 +210,8 @@ private: template static void ForEachCPUPage(VAddr addr, size_t size, Func&& func) { static constexpr bool RETURNS_BOOL = std::is_same_v, bool>; - const u64 page_end = (addr + size - 1) >> PAGE_BITS; - for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) { + const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS; + for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) { if constexpr (RETURNS_BOOL) { if (func(page)) { break; @@ -225,8 +225,8 @@ private: template static void ForEachGPUPage(GPUVAddr addr, size_t size, Func&& func) { static constexpr bool RETURNS_BOOL = std::is_same_v, bool>; - const u64 page_end = (addr + size - 1) >> PAGE_BITS; - for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) { + const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS; + for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) { if constexpr (RETURNS_BOOL) { if (func(page)) { break;
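A note on the fmt specifier change in ips_layer.cpp and patch_manager.cpp earlier in this patch: "{:0>64}" right-aligns the build id and zero-fills on the left, while "{:0<64}" left-aligns it and zero-fills on the right, presumably so a truncated id matches the leading bytes of the full 32-byte NSO build id. A short illustration, not taken from the patch (width 16 instead of 64 for readability):

    #include <fmt/format.h>
    #include <string>

    int main() {
        const std::string id = "ABC123"; // truncated build id, as read from a pchtxt
        fmt::print("{}\n", fmt::format("{:0>16}", id)); // 0000000000ABC123 (old: pad left)
        fmt::print("{}\n", fmt::format("{:0<16}", id)); // ABC1230000000000 (new: pad right)
    }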